text stringlengths 38 1.54M |
|---|
import os
from ruamel.yaml import YAML
from itertools import combinations
import pprint

# YAML loader (pure-Python implementation for consistent behaviour)
yamlDrawings = YAML(pure=True)
# Compact pretty-printer for the count summaries
pp = pprint.PrettyPrinter(compact=True)

# Occurrence counters: every k-combination of the numbers 1..49 -> 0.
# (dict.fromkeys replaces the original generator + fill-loop pairs.)
drawings = []
simplesList = dict.fromkeys(combinations(range(1, 50), 1), 0)
doublesList = dict.fromkeys(combinations(range(1, 50), 2), 0)
triplesList = dict.fromkeys(combinations(range(1, 50), 3), 0)
quadrupList = dict.fromkeys(combinations(range(1, 50), 4), 0)
quintupList = dict.fromkeys(combinations(range(1, 50), 5), 0)
sixtuplList = dict.fromkeys(combinations(range(1, 50), 6), 0)

pp.pprint("Simples : {}".format(len(simplesList)))
pp.pprint("Doubles : {}".format(len(doublesList)))
pp.pprint("Triples : {}".format(len(triplesList)))
pp.pprint("Quadrup : {}".format(len(quadrupList)))
pp.pprint("Quintup : {}".format(len(quintupList)))
pp.pprint("Sixtupl : {}".format(len(sixtuplList)))

print("Current Directory : ", os.getcwd())
rootDir = os.getcwd()
for dirName, subdirList, fileList in os.walk(rootDir):
    # Skip anything inside the .git tree (paths are Windows-style here).
    if '.git' not in dirName.split("\\"):
        for fname in fileList:
            if 'yaml' in fname.split("."):
                # Bug fix: the file handle is now closed via a context manager.
                with open(dirName + "\\" + fname) as drawingFile:
                    drawingDictionary = yamlDrawings.load(drawingFile.read())
                # Six drawn numbers no1..no6 of one lottery drawing.
                drawingList = [int(drawingDictionary["no%d" % k]) for k in range(1, 7)]
                drawings.append(drawingList)
                # Sort in place so combination tuples line up with the
                # ascending keys built above. (Bug fix: the original assigned
                # list.sort()'s None return to an unused variable.)
                drawingList.sort()
                # Tally every 1..6-number combination of this drawing.
                counters = (simplesList, doublesList, triplesList,
                            quadrupList, quintupList, sixtuplList)
                for k, counter in enumerate(counters, start=1):
                    for combo in combinations(drawingList, k):
                        counter[combo] += 1

# Most frequent 5-number combinations, most common first.
for w in sorted(quintupList, key=quintupList.get, reverse=True):
    print(w, quintupList[w])
|
from flask import Flask, request, flash, url_for, redirect, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship
#import config

# Application and database configuration.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///datos.sqlite3'
# SECRET_KEY is required for flash() message sessions.
# NOTE(review): "random string" is a placeholder — replace for production.
app.config['SECRET_KEY'] = "random string"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#app.config.from_object(config)
db = SQLAlchemy(app)
class User(db.Model):
    # Registered user; deleting a user cascades to their posts.
    id = db.Column(db.Integer, primary_key=True)
    nombre = db.Column(db.String(80), nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # NOTE(review): password is stored in plain text — should be hashed.
    password = db.Column(db.String(120), nullable=False)
    # One-to-many: user.post yields this user's posts (lazy query).
    post = db.relationship('Post', backref='user', cascade="all, delete-orphan" , lazy='dynamic')
    def __init__(self, nombre, email, password):
        self.nombre = nombre
        self.email = email
        self.password = password
class Post(db.Model):
    # A text post belonging to one User (via user_id FK).
    id = db.Column(db.Integer, primary_key=True)
    contenido = db.Column(db.Text)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    def __init__(self, contenido, user_id):
        self.contenido = contenido
        self.user_id = user_id
@app.route('/')
def inicio():
    """Render the landing page."""
    return render_template("inicio.html")

@app.errorhandler(404)
def page_not_found(error):
    """Custom 404 error page."""
    return render_template("error.html",error="Página no encontrada..."), 404

@app.route('/listar_usuarios')
def listar_usuarios():
    """List every registered user."""
    return render_template('listar_usuarios.html', users = User.query.all() )
@app.route('/new', methods = ['GET', 'POST'])
def new():
    """Register a new user.

    GET renders the form; POST validates that all three fields are present
    and creates the user. The same template is rendered in every case.
    """
    if request.method == 'POST':
        if not request.form['nombre'] or not request.form['email'] or not request.form['password']:
            flash('Please enter all the fields', 'error')
        else:
            user = User(request.form['nombre'], request.form['email'], request.form['password'])
            db.session.add(user)
            db.session.commit()
            # Typo fix in user-facing message: "existosamente" -> "exitosamente".
            flash('El usuario se registró exitosamente')
    return render_template('nuevo_usuario.html')
@app.route('/nuevo_post', methods = ['GET', 'POST'])
def nuevo_post():
    """Locate a user by email and open the post-entry form for them.

    GET renders the email form; POST looks the user up and either shows
    the post-entry page or an error page with HTTP 404.
    """
    if request.method == 'POST':
        if not request.form['email']:
            flash('Por favor ingrese el correo', 'error')
        else:
            user_actual= User.query.filter_by(email=request.form['email']).first()
            if user_actual is None:
                # Bug fix: the original returned status code 1, which is not
                # a valid HTTP status; 404 matches "user not found".
                return render_template('error.html', error="Usuario no encontrado..."), 404
            else:
                return render_template('ingresar_post.html', user= user_actual)
    return render_template('nuevo_post.html')
@app.route('/ingresar_post', methods = ['GET', 'POST'])
def ingresar_post():
    """Save a new post for the user id carried in the form.

    Empty content yields an error page with HTTP 400 (the original returned
    status code 2, which is not a valid HTTP status).
    """
    if request.method == 'POST':
        if not request.form['contenido']:
            # Bug fix: invalid status code 2 -> 400 (bad request).
            return render_template('error.html', error="contenido no ingresado..."), 400
        else:
            post= Post(request.form['contenido'], request.form['userId'])
            db.session.add(post)
            db.session.commit()
            return render_template('inicio.html')
    return render_template('inicio.html')
@app.route('/listar_post')
def listar_post():
    """List every post from every user."""
    return render_template('listar_post.html', posts = Post.query.all() )

@app.route('/listar_post_user', methods = ['GET', 'POST'])
def listar_post_user():
    """Pick a user from a drop-down (POST) and show that user's posts;
    GET (or an empty selection) re-renders the user list."""
    if request.method == 'POST':
        if not request.form['usuarios']:
            return render_template('listar_post_user.html', users = User.query.all(), user_seleccionado = None )
        else:
            return render_template('listar_post_user.html', users= None, user_seleccionado = User.query.get(request.form['usuarios']))
    else:
        return render_template('listar_post_user.html', users = User.query.all(), user_seleccionado = None )
@app.route("/post_user/<int:user_id>")
def post_user(user_id):
    """Show all posts belonging to one user (404 if the user is missing)."""
    user = User.query.get_or_404(user_id)
    # NOTE(review): debug print left in; consider removing or using logging.
    print(user.nombre)
    return render_template('post_user.html', user_post=user.post)

if __name__ == '__main__':
    # Create tables on startup; debug server is for development only.
    db.create_all()
    app.run(debug = True)
# To be included in pyth.py
class PythParseError(Exception):
    """Raised when the parser encounters an unimplemented character."""

    def __init__(self, active_char, rest_code):
        # Remember the offending character and the unparsed remainder.
        self.active_char = active_char
        self.rest_code = rest_code

    def __str__(self):
        position = len(self.rest_code) + 1
        return "%s is not implemented, %d from the end." % (self.active_char, position)
class UnsafeInputError(Exception):
    """Raised when input is rejected as unsafe to evaluate."""

    def __init__(self, active_char, rest_code):
        # Remember the offending character and the unparsed remainder.
        self.active_char = active_char
        self.rest_code = rest_code

    def __str__(self):
        position = len(self.rest_code) + 1
        return "%s is unsafe, %d from the end." % (self.active_char, position)
def str_parse_next(active_token):
    """Normalize a string token: keep escaped backslashes/quotes, drop
    backslash-newline continuations, encode literal newline/CR/NUL as
    escape sequences, and close an unterminated string with a quote.

    Asserts that the result contains exactly two double quotes.
    """
    pieces = []
    i = 0
    n = len(active_token)
    while i < n:
        ch = active_token[i]
        if ch == '\\':
            if i + 1 == n:
                # Lone trailing backslash: emit it escaped and stop.
                pieces.append('\\\\')
                break
            nxt = active_token[i + 1]
            if nxt in ('\\', '"'):
                # Already-escaped backslash or quote: copy the pair verbatim.
                pieces.append(active_token[i:i + 2])
                i += 2
                continue
            if nxt == '\n':
                # Backslash-newline is a line continuation: drop both.
                i += 2
                continue
        if ch == '\n':
            pieces.append('\\n')
        elif ch == '\r':
            pieces.append('\\r')
        elif ch == '\0':
            pieces.append('\\000')
        else:
            pieces.append(ch)
        i += 1
    if pieces.count('"') == 1:
        # Unterminated string literal: close it.
        pieces.append('"')
    assert pieces.count('"') == 2
    return ''.join(pieces)
|
#-*- coding: UTF-8 -*-
class TrackableObject:
    """A tracked target: its ID, centroid history, and counted flag."""

    def __init__(self, objectID, centroid):
        # Unique identifier assigned by the tracker.
        self.objectID = objectID
        # Centroid positions observed over the object's lifetime,
        # seeded with the first observation.
        self.centroids = [centroid]
        # Whether this object has already been tallied by the counter.
        self.counted = False
## Calculate feature importance, but focus on "meta-features" which are categorized by
## rules from different perspectives: orders, directions, powers.
## for "simple methods"
from util_relaimpo import *
from util_ca import *
from util import loadNpy
def main(x_name, y_name, method, divided_by = "", feature_names = []):
    """Run one bootstrap feature-importance analysis for (x_name, y_name).

    Loads X/Y arrays, groups X columns into meta-features via dvdX(...,
    divided_by), bootstraps `method` over the groups, and prints the
    results plus t-test/ANOVA summaries.

    NOTE(review): `feature_names=[]` is a mutable default argument;
    harmless here since it is never mutated, but worth replacing with None.
    """
    # INFO
    print("Dataset", x_name.split('_')[0])
    print("Method", str(method).split(' ')[1])
    # load data
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    # make dataframe (pd presumably comes in via the util star-imports — confirm)
    if feature_names: xdf = pd.DataFrame(data=X, columns=feature_names)
    else: xdf = pd.DataFrame(data=X)
    # divide X into meta-feature groups
    x_list, feature_names = dvdX(xdf, divided_by=divided_by)
    print("bootstrapping ...")
    coef_boot = bootstrapping(x_list, Y, method)
    printBootResult(coef_boot, list(feature_names), list(feature_names))
    pt = tTestTopTwo(coef_boot)
    pa = anovaBoot(coef_boot)
    print(returnTable([['t-test'], [str(pt)], ['ANOVA'], [str(pa)]]))
if __name__ == '__main__':
    # Sweep every dataset / target / method / grouping combination.
    x_prefix = ["HM", "AF", "NFL", "PAC", "MMA", "NHTSA", "NASCAR"]
    y_suffix = ["MPS95", "MPSCC95", "CSDM"]
    x_main = "{}_X_ang_vel.npy"
    y_main = "{}_{}.npy"
    divided_list = ["order", "direction", "power"]
    # first and structcoef ("simple methods")
    methods = [first, structcoef]
    for ys in y_suffix:
        for xp in x_prefix:
            for method in methods:
                for divide in divided_list:
                    x_name = x_main.format(xp)
                    y_name = y_main.format(xp, ys)
                    # Bug fix: the original passed `feature_names`, which is
                    # never defined at module scope (NameError at runtime);
                    # rely on main()'s default instead.
                    main(x_name, y_name, method, divide)
|
# Locations of the database and csv files
data_root = '/fill/this/in'  # root directory of the raw data store
airport_list_loc = '/fill/this/in/too/airports.csv/airports.csv'  # path to the airports CSV
|
#%%
"""
Analysis of a 6-pulse DQC signal with multiple dipolar pathways
-------------------------------------------------------------------------
Fit an experimental 6-pulse DQC signal with a model with a non-parametric
distribution and a homogeneous background, using Tikhonov regularization.
The model assumes three dipolar pathways (#1, #2, and #3) to be contributing
to the data.
"""
import numpy as np
import deerlab as dl
import matplotlib.pyplot as plt

violet = '#4550e6'

# Load experimental data
file = '../data/experimental_dqc_1.DTA'
t,Vexp = dl.deerload(file)

# Experimental parameters (inter-pulse delays)
tau2 = 2.0 # μs
tau1 = 1.8 # μs
tau3 = 0.2 # μs

# Pre-processing
Vexp = dl.correctphase(Vexp)
t = t-t[0]  # zero the time axis

# Remove data outside of the detectable range
Vexp = Vexp[t<=2*tau2-4*tau3]
t = t[t<=2*tau2-4*tau3]

# Mask out artificial spike due to spectrometer issue
mask = (t<0.05) | (t>0.15)
Vexp = Vexp/max(Vexp[mask])  # normalize to the masked maximum

# Construct the model
r = np.arange(2.5,4,0.01) # nm  (distance axis of the distribution)
experiment = dl.ex_dqc(tau1,tau2,tau3,pathways=[1,2,3])
Vmodel = dl.dipolarmodel(t,r,experiment=experiment)
# The amplitudes of the second and third pathways must be equal
Vmodel = dl.link(Vmodel,lam23=['lam2','lam3'])

# Fit the model to the data (spike region excluded via mask)
results = dl.fit(Vmodel,Vexp,mask=mask)
# Display a summary of the results
print(results)

#%%
# Plot the results
plt.figure(figsize=[8,5])

# Plot the full detectable range (extended time axis for the model)
tfull = np.arange(-2*tau1,2*tau2-4*tau3,0.008)
Vmodelext = dl.dipolarmodel(tfull,r,experiment=experiment)
Vmodelext = dl.link(Vmodelext,lam23=['lam2','lam3'])

# Extract results
Pfit = results.P
Pci = results.PUncert.ci(95)  # 95% confidence band of P(r)
# lam2 and lam3 were linked above, so lam23 serves for both pathways
lams = [results.lam1, results.lam23, results.lam23]
reftimes = [results.reftime1, results.reftime2, results.reftime3]
colors= [violet,'tab:orange','tab:red']

# Plot the data and fit
plt.subplot(221)
plt.plot(t,Vexp,'.',color='grey',label='Data')
plt.plot(tfull,results.evaluate(Vmodelext),color=violet,label='Model fit')
plt.legend(frameon=False,loc='best')
plt.xlabel('Time $t$ (μs)')
plt.ylabel('$V(t)$ (arb.u.)')

# Plot the individual pathway contributions
plt.subplot(223)
# Unmodulated contribution times the product of pathway backgrounds
Vinter = results.P_scale*(1-np.sum(lams))*np.prod([dl.bg_hom3d(tfull-reftime,results.conc,lam) for lam,reftime in zip(lams,reftimes)],axis=0)
for n,(lam,reftime,color) in enumerate(zip(lams,reftimes,colors)):
    Vpath = (1-np.sum(lams) + lam*dl.dipolarkernel(tfull-reftime,r)@Pfit)*Vinter
    plt.plot(tfull,Vpath,label=f'Pathway #{n+1}',color=color)
plt.legend(frameon=False,loc='best')
plt.xlabel('Time $t$ (μs)')
plt.ylabel('$V(t)$ (arb.u.)')

# Plot the distance distribution
plt.subplot(122)
plt.plot(r,Pfit,color=violet,label='Fit')
plt.fill_between(r,*Pci.T,color=violet,alpha=0.4,label='95% CI')
plt.legend(frameon=False,loc='best')
plt.xlabel('Distance r (nm)')
plt.ylabel('P(r) (nm$^{-1}$)')
plt.autoscale(enable=True, axis='both', tight=True)
plt.tight_layout()
plt.show()
# %%
|
# all data going to /glusterfs/users/metaknowledge/rawdata
import codecs # used for creating output file
import re # used for parsing timeout errors and resumptionTokens
import time # to be used for sleeping
import urllib2 # used for fetching data
import zlib # used for checking compression levels
nDataBytes, nRawBytes, nRecoveries, maxRecoveries = 0, 0, 0, 3
def getFile(fetchBase, command, verbose=1, sleepTime=0):
    """Fetch one OAI-PMH page from fetchBase+command (Python 2).

    Retries on HTTP 503 honouring the Retry-After header, and on other
    HTTP errors up to maxRecoveries times. Returns the (possibly
    decompressed) response body, or None on failure. Updates the global
    byte counters nRawBytes/nDataBytes.
    """
    global nRecoveries, nDataBytes, nRawBytes
    # sleep if server commands function to timeout
    if sleepTime:
        time.sleep(sleepTime)
    remoteAddr = fetchBase + command
    # verbose option used primarily when getting HTTP error from server
    if verbose:
        print "\r", "getFile '%s'" % remoteAddr[-90:]
    try:
        remoteData = urllib2.urlopen(remoteAddr).read()
    # checking for and handling HTTP error
    except urllib2.HTTPError, exValue:
        if exValue.code == 503:
            # parse data to check how long the server wants us to wait
            retryWait = int(exValue.hdrs.get("Retry-After", "-1"))
            if retryWait < 0:
                return None
            print 'Waiting %d seconds' % retryWait
            # Retry quietly (verbose=0) after the requested wait.
            return getFile(fetchBase, command, 0, retryWait)
        print exValue
        # if server keeps continually failing, we stop trying
        # otherwise, keep trying with new timeout and verbosity set
        if nRecoveries < maxRecoveries:
            nRecoveries += 1
            return getFile(fetchBase, command, 1, 60)
        return
    nRawBytes += len(remoteData)
    # check for data compression and decompress if present
    # NOTE(review): the bare except silently treats any decompression
    # failure as "payload was not compressed".
    try:
        remoteData = zlib.decompressobj().decompress(remoteData)
    except:
        pass
    nDataBytes += len(remoteData)
    # check and catch any OAI protocol errors embedded in the response
    oaiError = re.search('<error *code=\"([^"]*)">(.*)</error>', remoteData)
    if oaiError:
        print "OAIERROR: code=%s '%s'" % (oaiError.group(1), oaiError.group(2))
    else:
        return remoteData
if __name__ == "__main__":
    # url base for arXiv data location
    fetchBase = 'http://export.arxiv.org/oai2/request?verb=ListRecords'
    # our URL option -- will be replaced by resumption token in next iteration
    fetchCommand = '&metadataPrefix=arXiv'
    # name for our output file
    outputFileName = 'metadata.xml'
    print "Writing records to %s from archive %s" % (outputFileName, fetchBase)
    # creating file for data output (UTF-8 StreamWriter wrapped around the file)
    outputFile = codecs.lookup('utf-8')[-1](file(outputFileName, 'wb'))
    # getting our initial data
    data = getFile(fetchBase, fetchCommand)
    # will loop while we're still getting data from the server
    while data:
        outputFile.write(data)
        outputFile.write('\n')
        # parse resumption token from output
        resToken = re.search('<resumptionToken[^>]*>(.*)</resumptionToken>', data)
        # if we don't have the resumption token, we assume that we're done
        if not resToken:
            break
        # call OAI API using resumption token
        data = getFile(fetchBase, "&resumptionToken=%s" % resToken.group(1))
    # report totals
    # NOTE(review): outputFile is never explicitly closed; acceptable for a
    # short-lived script but an explicit close() would be safer.
    print "\nRead %d bytes (%.2f compression)" % (nDataBytes, float(nDataBytes) / nRawBytes)
|
from tkinter import *
# Simple Tk window displaying the bladder-cancer N-category description.
root=Tk()
root.geometry("400x170+100+100")
# wraplength makes the long text wrap at 400 px.
label_1=Label(wraplength=400,text='The N category describes spread only to the lymph nodes near the bladder (in the true pelvis) '
'and those along the blood vessel called the common iliac artery. These lymph nodes are called regional'
' lymph nodes. Any other lymph nodes are considered distant lymph nodes. Spread to distant nodes'
' is considered metastasis (described in the M category). Surgery is usually needed to find cancer '
'spread to lymph nodes, since it is not often seen on imaging tests.'
'NX: Regional lymph nodes cannot be assessed due to lack of information.'
'N0: There is no regional lymph node spread.'
'N1: The cancer has spread to a single lymph node in the true pelvis.'
'N2: The cancer has spread to 2 or more lymph nodes in the true pelvis.'
'N3: The cancer has spread to lymph nodes along the common iliac artery.',background='bisque',foreground='red')
label_1.pack()
root.mainloop()
|
from django import forms
from .models import JoinEvent
from django.views.generic import DetailView
class JoinEventForm(forms.ModelForm):
    """Sign-up form for an event; event_id is fixed by the view and hidden."""
    class Meta:
        model = JoinEvent
        fields = ('event_id', 'f_name', 'l_name', 'email', 'mobile')
        labels = {
            'event_id': 'Event Name',
            'f_name': 'First Name',
            'l_name': 'Last Name',
            'email': 'Email Address',
            'mobile': 'Phone Number',
        }
        # Bug fix: the original dict listed 'event_id' twice; the first
        # entry (a readonly TextInput) was silently discarded by the
        # duplicate key. Keep the HiddenInput, which is what took effect.
        widgets = {
            'event_id': forms.HiddenInput(),
        }
|
"""
Matplotlib styles
=================
_thumb: .8, .8
_example_title: Matplotlib styles for ArviZ
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import arviz as az
x = np.linspace(0, 1, 100)
# Beta(2, 5) density used as the demo curve in every panel.
dist = stats.beta(2, 5).pdf(x)
# Styles to showcase: a bare name applies one style sheet; a list layers
# a colour variant on top of a base style.
style_list = [
    "default",
    ["default", "arviz-colors"],
    "arviz-darkgrid",
    "arviz-whitegrid",
    "arviz-white",
    "arviz-grayscale",
    ["arviz-white", "arviz-redish"],
    ["arviz-white", "arviz-bluish"],
    ["arviz-white", "arviz-orangish"],
    ["arviz-white", "arviz-brownish"],
    ["arviz-white", "arviz-purplish"],
    ["arviz-white", "arviz-cyanish"],
    ["arviz-white", "arviz-greenish"],
    ["arviz-white", "arviz-royish"],
    ["arviz-white", "arviz-viridish"],
    ["arviz-white", "arviz-plasmish"],
]
fig = plt.figure(figsize=(12, 25))
for idx, style in enumerate(style_list):
    # after_reset=True starts from rc defaults so styles don't leak
    # between subplots.
    with az.style.context(style, after_reset=True):
        ax = fig.add_subplot(9, 2, idx + 1, label=idx)
        # Ten vertically offset copies of the curve, one per colour-cycle entry.
        for i in range(10):
            ax.plot(x, dist - i, f"C{i}", label=f"C{i}")
        ax.set_title(style)
        ax.set_xlabel("x")
        ax.set_ylabel("f(x)", rotation=0, labelpad=15)
plt.tight_layout()
plt.show()
|
#written by Bernard Crnković
from datetime import time
from datetime import date
from datetime import datetime
from aes_implementation import AESCipher
from colors import Colors
import password
import sys
class JournalViewer():
    """Interactive read-only console for browsing journal entries.

    `notebook` is a list of tab-separated entry lines (header row first),
    `entry_types` maps type-command names to stored type strings, and
    `jw` is the JournalWriter that owns the underlying `logs` list.
    """
    def __init__(self,notebook,entry_types,jw):
        self.reference_to_writer = jw
        # Split each raw line into an (id, date, time, type, content)
        # tuple and drop the header row.
        self.notebook = [tuple(x.split('\t')) for x in notebook][1:]
        self.entry_types = entry_types
        #IN DEVELOPMENT.
        # Command-name -> handler dispatch table for the console.
        self.operations = {"TIME":self.time_filter,
                           "DATE":self.date_filter,
                           "TYPE":self.type_filter,
                           "REVERSE":self.reverse,
                           "SHOW":self.show,
                           "DELETE":self.delete_entry,
                           "DUMP":self.dump}
        #info
        #self.splash()
        self.console()
    def console(self):
        """Read and dispatch one comma-separated command line, then recurse.

        NOTE(review): the console re-invokes itself recursively, so a very
        long session could eventually hit the recursion limit.
        """
        #console input: commands separated by ",", arguments by spaces
        try:
            ipt = [list(filter(lambda a: a!="",x.split(" "))) for x in input(Colors.RED+"[READING_MODE] > \033[39m").split(",")]
            if not ipt[0]:
                print(Colors.YELLOW+"Unknown command.")
                self.console()
            elif ipt[0][0] == "X":
                # Exit: handled by the KeyboardInterrupt handler below.
                raise KeyboardInterrupt
            elif ipt[0][0] == "W":
                # Return to writing mode.
                return
            elif ipt[0][0] == "R":
                self.console()
            elif ipt[0][0] == "H":
                self.splash()
            elif ipt[0][0] not in self.operations:
                print(Colors.YELLOW+"Unknown command.")
                self.console()
            else:
                # Apply each recognized command (with its arguments) in order.
                for i in ipt:
                    if i[0] in self.operations:
                        z = i.pop(0)
                        cmd = i
                        self.operations[z](*cmd)
                self.console()
        except KeyboardInterrupt:
            # Persist the notebook before exiting the program.
            self.reference_to_writer.write_notebook()
            print()
            sys.exit(0)
    def splash(self):
        """Print the help banner listing the available commands."""
        print(Colors.BLUE+"Type 'W' to return to '[WRITING_MODE]'.")
        print("Type 'X' to exit.")
        print("Available commands are:")
        for key in self.operations:
            print("\t"+key)
    def type_filter(self,*args):
        """Keep only entries whose type field matches the requested type."""
        if args[0] in self.entry_types:
            self.notebook = list(filter(lambda x: True if x[3] == self.entry_types[args[0]] else False, self.notebook))
        else:
            print(Colors.YELLOW+"Unknown type filter command.")
    def gettime(self,d):
        """Parse "HH:MM[:SS]" into a datetime.time."""
        return time(*[int(t) for t in d.split(":")])
    def time_filter(self,*args):
        """Filter entries by time.

        Two args: inclusive time range. One arg containing ":": exact time
        match; without ":": match the hour only.
        """
        if len(args) == 2:
            b = self.gettime(args[0])
            e = self.gettime(args[1])
            self.notebook = list(filter(lambda x: True if self.gettime(x[2]) >= b and self.gettime(x[2]) <= e else False, self.notebook))
        elif len(args) == 1:
            if ":" in args[0]:
                self.notebook = list(filter(lambda x: True if self.gettime(x[2]) == self.gettime(args[0]) else False, self.notebook))
            else:
                self.notebook = list(filter(lambda x: True if self.gettime(x[2]).hour == self.gettime(args[0]).hour else False, self.notebook))
        else:
            print(Colors.YELLOW+"Unknown time filter command.")
    def getdate(self,d):
        """Parse "DD.MM.YYYY." (note the trailing dot) into a datetime.date."""
        return date(*[int(t) for t in d.split(".")[:-1][::-1]])
    def date_filter(self,*args):
        """Filter entries by date: two args = inclusive range, one = exact."""
        if len(args) == 2:
            b = self.getdate(args[0])
            e = self.getdate(args[1])
            self.notebook = list(filter(lambda x: True if self.getdate(x[1]) >= b and self.getdate(x[1]) <= e else False, self.notebook))
        elif len(args) == 1:
            self.notebook = list(filter(lambda x: True if self.getdate(x[1]) == self.getdate(args[0]) else False, self.notebook))
        else:
            print(Colors.YELLOW+"Unknown date filter command.")
    def reverse(self):
        """Reverse the display order of the current view."""
        self.notebook=self.notebook[::-1]
    def show(self):
        """Print every field of every entry in the current view."""
        for i in self.notebook:
            for j in i:
                print("\033[35m"+j)
            print()
    def delete_entry(self,*args):
        """Delete entries from the writer's log.

        One arg: delete the entry with that exact ID.
        Two args: delete every entry whose ID lies in [args[0], args[1]].
        """
        if len(args) == 1:
            for i in range(len(self.reference_to_writer.logs)):
                if self.reference_to_writer.logs[i].split("\t")[0] == args[0]:
                    self.reference_to_writer.logs.pop(i)
                    break #CHECK IF IT WORKS
            print(Colors.BLUE+"Deleted entries will be gone forever upon re-entering this mode!")
        elif len(args) == 2:
            to_remove = []
            for i in range(1,len(self.reference_to_writer.logs)):
                current_id = int(self.reference_to_writer.logs[i].split("\t")[0])
                if current_id >= int(args[0]) and current_id <= int(args[1]):
                    to_remove.append(i) #CHECK IF IT WORKS
            # NOTE(review): assumes the matching indices are contiguous —
            # the slice delete would remove non-matching rows otherwise.
            self.reference_to_writer.logs[to_remove[0]:to_remove[-1]+1]=[]
            print(Colors.BLUE+"Deleted entries will be gone forever upon re-entering this mode!")
    def dump(self,*args):
        """Encrypt the current view and write it to a file.

        The file name defaults to the current timestamp unless given as
        the first argument.
        """
        f = None
        if not args:
            f = open(datetime.now().strftime("%d.%m.%Y.-%H:%M:%S"),"wb")
        else:
            f = open(args[0],"wb")
        writestring = "<ID>\t<DATE>\t<TIME>\t<TYPE>\t<CONTENT>\n"
        for i in self.notebook:
            for j in i:
                writestring+=j+"\t"
            writestring+="\n"
        f.write(AESCipher(password.create_password()).encrypt(writestring))
        f.close()
|
# Scan inputfile for lines containing the search string, echo matches with
# their line numbers, and copy each matching line to outputfile.
inputfile = 'taras.txt'
outputfile = 'taras1.txt'
password_tolooffor = "len"  # substring to search for

# Bug fix: both files are now closed via context managers (the original
# relied on manual close() calls).
with open(inputfile, mode='r', encoding='utf_8') as myfile1, \
     open(outputfile, mode="w", encoding='utf_8') as myfile2:
    # enumerate(..., 1) numbers lines starting at 1.
    for num, line in enumerate(myfile1, 1):
        if password_tolooffor in line:
            # Typo fixes in the output strings: "namber" -> "number",
            # "found world len" -> "found word len".
            print("line number " + str(num) + ":" + line.strip())
            myfile2.write("found word len" + line)
|
#!/usr/bin/env python3
#count number of alignments
import sys
# Read the SAM stream from the file named as the first CLI argument,
# falling back to stdin when no argument is given.
if len(sys.argv)>1:
    f = open(sys.argv[1])
else:
    f = sys.stdin
chromosome = []
for line in f:
    # filter lines that begin with @ (SAM header lines)
    if line.startswith("@"):
        continue
    #ref each column
    fields =line.split("\t")
    # Column 3 (RNAME) is "*" for unmapped reads — skip those.
    if fields[2] == "*":
        continue
    chromosome.append(fields[2])
    # Stop after collecting the first 10 mapped reference names.
    if len(chromosome) >= 10:
        break
print(chromosome)
|
import socket
import struct

# Raw layer-2 socket bound to eth0, filtering for IPv4 (EtherType 0x0800).
# Requires Linux and root privileges.
rawsocket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(0x0800))
rawsocket.bind(("eth0", socket.htons(0x0800)))
# Ethernet header: 6-byte destination MAC, 6-byte source MAC, 2-byte
# EtherType (0x0800 = IPv4), packed in network byte order.
packet = struct.pack("!6s6s2s", b'\xaa\xaa\xaa\xaa\xaa\xaa', b'\xaa\xaa\xaa\xaa\xaa\xaa', b'\x08\x00')
# Bug fixes: `rawSocket.send` referenced an undefined name (the socket is
# `rawsocket`), and on Python 3 the header and payload must be bytes,
# not str.
rawsocket.send(packet + b"Hello There")
|
from tkinter import *
from PIL import Image
root = Tk()
def Enlarge(Sol_Img, Img):
    """Open both image paths and show each scaled 9x.

    Both images are scaled by 9 times the FIRST image's width.
    NOTE(review): the call site passes (Img, Sol_Img) — the argument order
    is swapped relative to this signature; harmless since both images are
    treated the same way, but worth confirming.
    """
    im = Image.open(Sol_Img)
    x = im.width
    im = im.resize(size=(9 * x, 9 * x))
    im.show()
    im2 = Image.open(Img)
    im2 = im2.resize(size=(9 * x, 9 * x))
    im2.show()
def Display_Results(Img, Sol_Img):
    """Show the original and solved maze images side by side on a canvas,
    with Exit and Enlarge buttons."""
    # Canvas size is derived from the solution image's dimensions.
    TempImg= Image.open(Sol_Img)
    w,h = TempImg.size
    i = Image.open(Img)
    s_i= Image.open(Sol_Img)
    width = (w*2)+60
    height = h+80
    root.title("SOLVED MAZE DISPLAY")
    canvas = Canvas(root, width=width, height=height)
    canvas.pack()
    canvas.configure(bg="red")
    # Column headings centred above each image.
    tempWidth=(w/2)+20
    canvas.create_text(tempWidth,20,text="Orignal Maze:", font='Forte 12 bold')
    canvas.create_text((w+tempWidth+20),20,text="Solved Maze:", font='Forte 12 bold')
    img=PhotoImage(file=Img)
    img2 = PhotoImage(file=Sol_Img)
    canvas.create_image(20,40, anchor=NW, image=img)
    temp=w+40
    canvas.create_image(temp,40, anchor=NW, image=img2)
    btn = Button(root,width=5, text="Exit", command=root.destroy, font = 'Impact 10', bg="black", fg="red")
    btn.place(x=width/2+20, y=height-28)
    btn = Button(root,width=5, text="Enlarge", command=lambda: Enlarge(Img,Sol_Img), font = 'Impact 10', bg="black", fg="red")
    btn.place(x=width/2-60, y=height-28)

root.mainloop()
"""
Script para determinar el tiempo que tarda una suma con decorador.
"""
import time
def timer_track(function):
    """Decorator that prints the call arguments and elapsed wall-clock time.

    Bug fix: the original wrapper discarded the wrapped function's return
    value; it is now captured and returned to the caller.
    """
    def wrapper(*args, **kwargs):
        print(f"*args - {args} / **kwargs - {kwargs}")
        start = time.time()
        print(f"start --> {start}")
        result = function(*args, **kwargs)
        print(f"{ time.time() - start }")
        return result
    return wrapper
@timer_track
def example():
    """Increment a counter a billion times to give the timer work to measure."""
    # NOTE(review): `sum` shadows the builtin of the same name.
    sum = 0
    for _ in range(1_000_000_000):
        sum += 1

example()
|
import numpy as np
import math
import random_message
import sys
def weights_versions_test(log_max_weight = 9,
                          log_max_N = 3):
    '''
    Test RandomMessage with different weights and version counts.

    For each (max weight, N) pair, draws random integer weights, samples
    messages, and checks that the empirical frequency of each version
    matches the normalized prior weight to within eps = 10**-log_max_weight.
    '''
    eps = 10**(-log_max_weight)  # tolerance criterion
    # Try different weights and versions (log-spaced grids)
    test_weights = [int(n) for n in 10**np.linspace(0, log_max_weight, 10)]
    test_Ns = [int(n) for n in 10**np.linspace(0, log_max_N, 10)]
    # Result of each test case
    ret_T = []
    for tw in test_weights:
        for N in test_Ns:
            # Init random weights
            w = np.random.randint(0, tw, N)
            # Bug fix: RandomMessage lives in the random_message module,
            # which is imported as a module (not star-imported), so the
            # bare name raised NameError.
            samples = random_message.RandomMessage(w)
            # A counting dict with default value of 0
            # for all message versions in setN
            count_setN_randomized = dict.fromkeys(samples.setN, 0)
            # Go through the sampled messages and count each version
            for message in samples.shuffle:
                if message in samples.setN:
                    count_setN_randomized[message] += 1
            # Normalize our weights so that \sum^N_{i=1} w_i = 1.
            # (np.array so the element-wise subtraction below is valid even
            # when samples.norm_w is a plain list.)
            norm_w_sample = np.array([v / samples.M for v in count_setN_randomized.values()])
            # Mean difference between posterior and prior weights
            t = np.sum(norm_w_sample - samples.norm_w) / samples.N
            ret_T.append(t)
            if not math.isnan(float(t)):
                # If our criterion is violated, stop and report the weights
                assert eps >= abs(t), "weight= %s" % w
    print("%s test cases passed" % len(ret_T))
if __name__ == "__main__":
    # CLI flags after the script name select the test size.
    action = sys.argv[1:]
    # The program will get killed if weight >= 10^7
    if "-full" in action:
        weights_versions_test()
    # The default test only runs with max weight 10^6 and 100 versions
    else:
        weights_versions_test(6,2)
|
## Personal income tax calculator
# Read the gross monthly salary as an int.
income = int(input('请输入工资:'))
# After-tax salary
salary = 0
# Taxable amount (gross minus the standard deduction)
shouldPay = 0
# Tax owed
tax = 0
def calculator(num):
    """Return the after-tax salary for gross income `num`, formatted to 2 dp.

    Applies the monthly individual-income-tax brackets after the 5000
    standard deduction, using quick-deduction constants per bracket.
    """
    # Taxable amount after the 5000 standard deduction.
    shouldPay = num - 5000
    # Nothing taxable at or below the deduction threshold.
    if shouldPay <= 0:
        tax = 0
    elif 0 < shouldPay <= 3000:
        tax = shouldPay * 0.03
    elif 3000 < shouldPay <= 12000:
        tax = shouldPay * 0.10 - 210
    elif 12000 < shouldPay <= 25000:
        tax = shouldPay * 0.20 - 1410
    elif 25000 < shouldPay <= 35000:
        tax = shouldPay * 0.25 - 2660
    elif 35000 < shouldPay <= 55000:
        tax = shouldPay * 0.30 - 4410
    # Bug fix: the upper bound was written as 8000, which made this
    # bracket unreachable; the 35% bracket runs up to 80000.
    elif 55000 < shouldPay <= 80000:
        tax = shouldPay * 0.35 - 7160
    else:
        tax = shouldPay * 0.4 - 15160
    # Bug fix: the original subtracted from the global `income` instead of
    # the parameter `num`.
    salary = num - tax
    return '{:.2f}'.format(salary)
# Compute and print the after-tax income for the salary entered above.
print('你的税后收入是:{}'.format(calculator(income)))
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2019 CNRS
# Author: Joseph Mirabel
import hppfcl, numpy as np
from gepetto import Color
def applyConfiguration(gui, name, tf):
    # Convert an hppfcl transform to gepetto-viewer's flat
    # [x, y, z, qx, qy, qz, qw] configuration and apply it to the node.
    gui.applyConfiguration(name, tf.getTranslation().tolist() + tf.getQuatRotation().coeffs().tolist())
def displayShape(gui, name, geom, color = (.9, .9, .9, 1.)):
    """Add the hppfcl geometry `geom` to the viewer under node `name`.

    Returns the gui call's result for primitive shapes, True for convex
    shapes, and False for unsupported geometry types (with a warning).
    """
    if isinstance(geom, hppfcl.Capsule):
        return gui.addCapsule(name, geom.radius, 2. * geom.halfLength, color)
    elif isinstance(geom, hppfcl.Cylinder):
        return gui.addCylinder(name, geom.radius, 2. * geom.halfLength, color)
    elif isinstance(geom, hppfcl.Box):
        w, h, d = (2. * geom.halfSide).tolist()
        return gui.addBox(name, w, h, d, color)
    elif isinstance(geom, hppfcl.Sphere):
        return gui.addSphere(name, geom.radius, color)
    elif isinstance(geom, hppfcl.Cone):
        return gui.addCone(name, geom.radius, 2. * geom.halfLength, color)
    elif isinstance(geom, hppfcl.Convex):
        # Triangulated convex: render as one curve in TRIANGLES mode.
        pts = [ geom.points(geom.polygons(f)[i]).tolist() for f in range(geom.num_polygons) for i in range(3) ]
        gui.addCurve(name, pts, color)
        gui.setCurveMode(name, "TRIANGLES")
        gui.setLightingMode(name, "ON")
        gui.setBoolProperty(name, "BackfaceDrawing", True)
        return True
    elif isinstance(geom, hppfcl.ConvexBase):
        # Convex without triangle data: fall back to a point cloud.
        pts = [ geom.points(i).tolist() for i in range(geom.num_points) ]
        gui.addCurve(name, pts, color)
        gui.setCurveMode(name, "POINTS")
        gui.setLightingMode(name, "OFF")
        return True
    else:
        import warnings  # local import: `warnings` was used but never imported
        # Bug fix: the original referenced the undefined name
        # `geometry_object.name`; use the `name` argument instead.
        msg = "Unsupported geometry type for %s (%s)" % (name, type(geom))
        warnings.warn(msg, category=UserWarning, stacklevel=2)
        return False
def displayDistanceResult(gui, group_name, res, closest_points = True, normal = True):
    """Visualize an hppfcl distance result: the two nearest points as
    red/blue spheres and (optionally) the normal as a green arrow."""
    gui.createGroup(group_name)
    # Sphere radius / arrow thickness for the markers.
    r = 0.01
    if closest_points:
        p = [ group_name+"/p1", group_name+"/p2" ]
        gui.addSphere(p[0], r, Color.red)
        gui.addSphere(p[1], r, Color.blue)
        # Identity quaternion — spheres need no orientation.
        qid = [0,0,0,1]
        gui.applyConfigurations(p, [ res.getNearestPoint1().tolist() + qid, res.getNearestPoint2().tolist() + qid, ])
    if normal:
        n = group_name+"/normal"
        gui.addArrow(n, r, 0.1, Color.green)
        # Orient the arrow (whose model axis is +x) along the result normal,
        # anchored at the first nearest point.
        gui.applyConfiguration(n,
            res.getNearestPoint1().tolist() + hppfcl.Quaternion.FromTwoVectors(np.array([1,0,0]), res.normal).coeffs().tolist())
    gui.refresh()
def displayCollisionResult(gui, group_name, res, color=Color.green):
    """Visualize an hppfcl collision result: one arrow per contact, sized by
    penetration depth; the group is hidden when there is no collision."""
    if res.isCollision():
        # Reuse the group if it already exists; otherwise create it.
        if gui.nodeExists(group_name):
            gui.setVisibility(group_name, "ON")
        else:
            gui.createGroup(group_name)
        for i in range(res.numContacts()):
            contact = res.getContact(i)
            n = group_name+"/contact"+str(i)
            depth = contact.penetration_depth
            if gui.nodeExists(n):
                # Update the existing arrow's size and colour in place.
                gui.setFloatProperty(n, 'Size', depth)
                gui.setFloatProperty(n, 'Radius', 0.1*depth)
                gui.setColor(n, color)
            else:
                gui.addArrow(n, depth*0.1, depth, color)
            N = contact.normal
            P = contact.pos
            # Centre the arrow on the contact, oriented along the normal
            # (the arrow's model axis is +x).
            gui.applyConfiguration(n, (P-depth*N/2).tolist() +
                hppfcl.Quaternion.FromTwoVectors(np.array([1,0,0]), N).coeffs().tolist())
        gui.refresh()
    elif gui.nodeExists(group_name):
        gui.setVisibility(group_name, "OFF")
|
from pyCABcython import pyCABcython
import random
# Query the classification btree with five random lookups
# (src IP, dst IP, src port, dst port); only the last response is printed.
ca = pyCABcython("../metadata/ruleset/acl_8000", 40)
for i in range(5):
    resp = ca.query_btree(random.randint(0, 429496729),
                          random.randint(0, 429496729),
                          random.randint(0, 65535),
                          random.randint(0, 65535))
# Print the flat response list, intending to group items in parentheses.
# NOTE(review): `cnt` only ever reaches 1 (it is incremented solely when
# cnt == 0), so the `cnt == 8` closing branch is unreachable and `s` is
# rebuilt per item — this grouping logic looks unfinished; confirm intent.
cnt = 0
for item in resp:
    s = ""
    if cnt == 0:
        s += "("
        cnt = cnt+1
    if cnt == 8:
        s += str(item)
        s += "); "
        cnt = 0
    else:
        s += str(item)
        s += ", "
    print s
|
class Solution(object):
    def strobogrammaticInRange(self, low, high):
        """Count strobogrammatic numbers n with low <= n <= high.

        :type low: str
        :type high: str
        :rtype: int
        """
        self.ans = 0
        # Each digit and the digit it must mirror to when rotated 180°.
        pair = {"0": "0", "1": "1", "8": "8", "6": "9", "9": "6"}
        # Build candidates of every length between len(low) and len(high).
        for length in range(len(low), len(high) + 1):
            self.permute(0, length - 1, low, high, [None] * length, pair)
        return self.ans

    def permute(self, start, end, low, high, carry, pair):
        """Fill `carry` from both ends inward with mirrored digit pairs;
        count the candidate once the pointers cross, if it is in range."""
        if start > end:
            candidate = "".join(carry)
            # Range checks only matter at the boundary lengths: shorter
            # candidates are automatically above low, longer ones below high.
            above_low = not (len(candidate) == len(low) and int(candidate) < int(low))
            below_high = not (len(candidate) == len(high) and int(candidate) > int(high))
            if above_low and below_high:
                self.ans += 1
            return
        for digit, twin in pair.items():
            if start == end and digit in ("6", "9"):
                # A lone middle digit must mirror to itself.
                continue
            if start == 0 and end != 0 and digit == "0":
                # Multi-digit numbers cannot start with zero.
                continue
            carry[start], carry[end] = digit, twin
            self.permute(start + 1, end - 1, low, high, carry, pair)
|
def soma(numero1, numero2):
    """Return the sum of the two numbers."""
    return numero1 + numero2
def subtração(numero1, numero2):
    """Return numero1 minus numero2.

    Bug fix: the original was an unfinished stub that always returned 0
    (sub_total was initialized but never computed).
    """
    return numero1 - numero2
# divisao
# multiplicacao
# potência
def eh_primo(numero):
    """Return True if `numero` is prime, False otherwise.

    Numbers below 2 (including 0, 1 and negatives) are not prime, matching
    the original's results. Divisors are only tested up to sqrt(numero),
    replacing the original O(n) scan with O(sqrt(n)) — same answers, less
    work.
    """
    if numero < 2:
        return False
    divisor = 2
    while divisor * divisor <= numero:
        if numero % divisor == 0:
            # Found a proper divisor: not prime.
            return False
        divisor += 1
    return True
import numpy as np
import pandas as pd
import gensim
from tsne import bh_sne
from optparse import OptionParser
if __name__ == '__main__':
    # Usage: script <word2vec_binary_model> <output_csv>
    parser = OptionParser()
    (options, args) = parser.parse_args()
    assert len(args) == 2
    in_file = args[0]
    out_file = args[1]
    # NOTE(review): load_word2vec_format was moved to
    # gensim.models.KeyedVectors in gensim >= 1.0 — this call only works on
    # older gensim versions; confirm the pinned dependency.
    model = gensim.models.word2vec.Word2Vec.load_word2vec_format(in_file, binary=True)
    # One embedding row per vocabulary word, in vocab iteration order.
    vectors = np.zeros((len(model.vocab), model.vector_size))
    words = model.vocab.keys()
    for i, key in enumerate(words):
        vectors[i,:] = model[key]
    # Project the embeddings to 2-D with Barnes-Hut t-SNE.
    vis_data = bh_sne(vectors)
    out = pd.DataFrame({'x': vis_data[:,0], 'y': vis_data[:,1], 'word': words})
    out.to_csv(out_file, index=False)
|
# Generated by Django 3.0 on 2020-09-25 05:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Repair_reason model and links
    repair_rawdata to it via a nullable FK (SET_NULL preserves raw data when
    a reason is deleted)."""

    dependencies = [
        ('machines', '0052_equipment_in_complex'),
    ]
    operations = [
        migrations.CreateModel(
            name='Repair_reason',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Наименование')),
                ('description', models.TextField(verbose_name='Описание')),
            ],
        ),
        migrations.AddField(
            model_name='repair_rawdata',
            name='repair_reason',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='machines.Repair_reason', verbose_name='Причина поломки'),
        ),
    ]
|
import os
print(os.path.join('hello','bin','span'))
myFiles = ['accounts.txt','details.csv','invite.docx']
for filename in myFiles:
    # Bug fix: this call was missing its closing parenthesis and was
    # followed by a stray ';' line — both were syntax errors.
    print(os.path.join('c:\\projects\\kl',filename))
from models import Client, FavoriteList, Wishlist
class ClientDAO:
    """Data-access object for rows of the ``client`` table."""

    def __init__(self, db):
        self.__db = db

    def save(self, client):
        """Insert ``client`` when it has no id yet, otherwise update it.

        Returns the client, with ``id`` filled in after an insert.
        """
        cursor = self.__db.connection.cursor()
        if not client.id:
            cursor.execute('INSERT INTO client (name, email) VALUES (%s, %s)', (client.name, client.email))
            client.id = cursor.lastrowid
        else:
            cursor.execute('UPDATE client SET name=%s, email=%s WHERE id=%s LIMIT 1', (client.name, client.email, client.id))
        self.__db.connection.commit()
        return client

    def get(self, id):
        """Fetch one client by primary key; None when absent."""
        cursor = self.__db.connection.cursor()
        cursor.execute('SELECT id, name, email FROM client WHERE id=%s LIMIT 1', (id,))
        row = cursor.fetchone()
        return None if row is None else Client(row[1], row[2], row[0])

    def get_by_email(self, email):
        """Fetch one client by email address; None when absent."""
        cursor = self.__db.connection.cursor()
        cursor.execute('SELECT id, name, email FROM client WHERE email=%s LIMIT 1', (email,))
        row = cursor.fetchone()
        return None if row is None else Client(row[1], row[2], row[0])

    def delete(self, id):
        """Remove the client with the given id, if it exists."""
        cursor = self.__db.connection.cursor()
        cursor.execute('DELETE FROM client WHERE id=%s LIMIT 1', (id, ))
        self.__db.connection.commit()
class FavoriteListDAO:
    """Data-access object for rows of the ``favorite_list`` table."""

    def __init__(self, db):
        self.__db = db

    def save(self, client, favorite_list):
        """Create the favorite list for ``client`` and record its new id."""
        cursor = self.__db.connection.cursor()
        cursor.execute('INSERT INTO favorite_list (client_id) VALUES (%s)', (client.id,))
        favorite_list.id = cursor.lastrowid
        self.__db.connection.commit()

    def get(self, client_id):
        """Return the client's FavoriteList, or None when none exists."""
        cursor = self.__db.connection.cursor()
        cursor.execute('SELECT id, client_id FROM favorite_list WHERE client_id=%s LIMIT 1', (client_id, ))
        row = cursor.fetchone()
        return None if row is None else FavoriteList(row[1], row[0])
class WishlistDAO:
    """Data-access object for rows of the ``wishlist`` table."""

    def __init__(self, db):
        self.__db = db

    def save(self, wishlist):
        """Insert the wishlist entry and record its new id."""
        cursor = self.__db.connection.cursor()
        cursor.execute('INSERT INTO wishlist (favorite_list_id, product_id) VALUES (%s, %s)', (wishlist.favorite_list_id,wishlist.product_id))
        wishlist.id = cursor.lastrowid
        self.__db.connection.commit()

    def list(self, client_id):
        """Return every Wishlist attached to the client's favorite list."""
        cursor = self.__db.connection.cursor()
        cursor.execute('SELECT w.id, w.favorite_list_id, w.product_id FROM wishlist w JOIN favorite_list f ON f.id = w.favorite_list_id WHERE f.client_id=%s', (client_id,))
        return parse_wishlist(cursor.fetchall())

    def exists(self, client_id, product_id):
        """True when the client already has ``product_id`` on a wishlist."""
        cursor = self.__db.connection.cursor()
        cursor.execute(
            'SELECT count(*) AS size FROM wishlist w JOIN favorite_list f ON f.id = w.favorite_list_id WHERE f.client_id=%s AND w.product_id=%s',
            (client_id,product_id))
        row = cursor.fetchone()
        return bool(row and row[0])
def parse_wishlist(fetch):
    """Map raw (id, favorite_list_id, product_id) rows to Wishlist objects."""
    return [Wishlist(row[1], row[2], row[0]) for row in fetch]
|
from lexer.definitions.tokens import DecrementCell
from lexer.definitions.tokens import IncrementCell
from lexer.tokenizer import Tokenizer
from lexer.definitions.keywords import Keywords
from .hackersdelight_matcher import HackersDelightMatcher
class EditCellMatcher:
    """Matches the E_CELL keyword and produces the corresponding cell edit."""

    @staticmethod
    def match(tokenizer: Tokenizer, baseword_order: HackersDelightMatcher.WordOrder, pointer):
        """Try to consume an E_CELL token from ``tokenizer``.

        Returns IncrementCell(pointer) for HD word order,
        DecrementCell(pointer) for DH word order, and False when there is
        no match; on failure the tokenizer position is restored.

        Bug fix: previously, a matched token combined with an unrecognized
        word order fell through without rolling back the snapshot and
        implicitly returned None; every non-match path now rolls back and
        returns False.
        """
        tokenizer.take_snapshot()
        if not tokenizer.reached_end() and tokenizer.consume() == Keywords.E_CELL:
            if baseword_order == HackersDelightMatcher.WordOrder.HD:
                return IncrementCell(pointer)
            if baseword_order == HackersDelightMatcher.WordOrder.DH:
                return DecrementCell(pointer)
        tokenizer.rollback_snapshot()
        return False
|
from django.conf import settings
from provisioner.models import Subscription
from provisioner.resolve import Resolve
from restclients.uwnetid.subscription import modify_subscription_status
from restclients.models.uwnetid import Subscription as NWSSubscription
class Monitor(Resolve):
    """Reconciles Subscription rows with their NWS subscription status.

    Both methods claim a batch of rows by setting ``in_process``, walk the
    batch, and on any failure release the whole batch and log the error.
    """

    def confirm_activation(self):
        """Move ACTIVATING subscriptions that now have licensing to ACTIVE."""
        limit = settings.O365_LIMITS['monitor']['default']
        # Claim at most `limit` unclaimed rows in the ACTIVATING state.
        subscriptions = Subscription.objects.filter(
            state=Subscription.STATE_ACTIVATING,
            in_process__isnull=True).values_list('pk', flat=True)[:limit]
        self.log.debug('confirm activation: %s of %s in process' % (len(subscriptions), limit))
        Subscription.objects.filter(pk__in=list(subscriptions)).update(in_process=True)
        try:
            for sub_pk in subscriptions:
                sub = Subscription.objects.get(pk=sub_pk)
                if self.has_subscription_licensing(sub):
                    modify_subscription_status(
                        sub.net_id, sub.subscription,
                        NWSSubscription.STATUS_ACTIVE)
                    self.log.info(
                        'Subscription %s for %s set %s' % (
                            sub.subscription, sub.net_id,
                            NWSSubscription.STATUS_ACTIVE))
                    sub.state = Subscription.STATE_ACTIVE
                    # Clearing in_process releases the row for future runs.
                    sub.in_process = None
                    sub.save()
        except Exception as ex:
            # Release the whole claimed batch so a later run can retry it.
            Subscription.objects.filter(pk__in=list(subscriptions)).update(in_process=None)
            self.log.error('Monitor activate bailing: %s' % (ex))

    def confirm_deactivation(self):
        """Move DELETING subscriptions that lost licensing to DELETED."""
        limit = settings.O365_LIMITS['monitor']['default']
        # Claim at most `limit` unclaimed rows in the DELETING state.
        subscriptions = Subscription.objects.filter(
            state=Subscription.STATE_DELETING,
            in_process__isnull=True).values_list('pk', flat=True)[:limit]
        self.log.debug('confirm deactivation: %s of %s in process' % (len(subscriptions), limit))
        Subscription.objects.filter(pk__in=list(subscriptions)).update(in_process=True)
        try:
            for sub_pk in subscriptions:
                sub = Subscription.objects.get(pk=sub_pk)
                # Mirror of confirm_activation, gated on the *absence* of licensing.
                if not self.has_subscription_licensing(sub):
                    modify_subscription_status(
                        sub.net_id, sub.subscription,
                        NWSSubscription.STATUS_INACTIVE)
                    self.log.info(
                        'Subscription %s for %s set %s' % (
                            sub.subscription, sub.net_id,
                            NWSSubscription.STATUS_INACTIVE))
                    sub.state = Subscription.STATE_DELETED
                    sub.in_process = None
                    sub.save()
        except Exception as ex:
            # Release the whole claimed batch so a later run can retry it.
            Subscription.objects.filter(pk__in=list(subscriptions)).update(in_process=None)
            self.log.error('Monitor bailing: %s' % (ex))
        return
|
import requests
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# browser = webdriver.PhantomJS('phantomjs')
# Scrape Play-Store app pages listed in input.txt and write name,
# description and permissions of each app to output.txt.
chrome_options = Options()
chrome_options.add_argument("--headless")
# NOTE(review): the chrome_options= keyword is deprecated in recent Selenium
# (replaced by options=) — confirm the pinned Selenium version.
browser = webdriver.Chrome(chrome_options=chrome_options)
import html2text
h = html2text.HTML2Text()
h.ignore_links = True
f= open("output.txt","w+")
with open("input.txt") as q:
    for line in q:
        # NOTE(review): `line` keeps its trailing newline; stripping it here
        # (line.strip()) would be safer than relying on the HTTP client to
        # tolerate surrounding whitespace in the URL.
        url = line;
        r=requests.get(url)
        soup=BeautifulSoup(r.content,"html5lib")
        temp = soup.findAll("h1",attrs={"class":"AHFaub"}) # Extracting the name of the App
        temp1 = temp[0].findAll("span")
        title = temp1[0].text
        f.write("Name of App :-%s\n" % title)
        description = soup.findAll("meta",attrs={"itemprop":"description"}) # Extracting App description
        temp = description[0]["content"]
        f.write("Description of App :-\n%s\n" % temp)
        # The permissions dialog is rendered by JavaScript, so a headless
        # browser is driven to expand it before re-parsing the page.
        browser.get(url)
        browser.find_element_by_link_text('View details').click()
        time.sleep(2)
        html = browser.page_source
        soup = BeautifulSoup(html,"html5lib")
        permissions = soup.findAll("div",attrs={"class":"fnLizd"}) # Extracting App permissions
        f.write("Permissions of App :-\n%s\n\n" % h.handle(str(permissions)))
f.close()
|
"""Various tests to ensure the functionality of my solution to this kata."""
import pytest
from jaden import to_jaden_case
# (input, expected) pairs: each quote and its Jaden-cased form, used by
# test_basic below.
quotes = [
    [
        "most trees are blue",
        "Most Trees Are Blue"
    ],
    [
        "How can mirrors be real if our eyes aren't real",
        "How Can Mirrors Be Real If Our Eyes Aren't Real"
    ],
    [
        "All the rules in this world were made by someone no smarter than you. So make your own.",
        "All The Rules In This World Were Made By Someone No Smarter Than You. So Make Your Own."
    ],
    [
        "School is the tool to brainwash the youth.",
        "School Is The Tool To Brainwash The Youth."
    ],
    [
        "If newborn babies could speak they would be the most intelligent beings on planet Earth.",
        "If Newborn Babies Could Speak They Would Be The Most Intelligent Beings On Planet Earth."
    ],
    [
        "If everybody in the world dropped out of school we would have a much more intelligent society.",
        "If Everybody In The World Dropped Out Of School We Would Have A Much More Intelligent Society."
    ],
    [
        "Trees are never sad look at them every once in awhile they're quite beautiful.",
        "Trees Are Never Sad Look At Them Every Once In Awhile They're Quite Beautiful."
    ],
    [
        "When I die. then you will realize",
        "When I Die. Then You Will Realize"
    ],
    [
        "I should just stop tweeting, the human conciousness must raise before I speak my juvenile philosophy.",
        "I Should Just Stop Tweeting, The Human Conciousness Must Raise Before I Speak My Juvenile Philosophy."
    ],
    [
        "Jonah Hill is a genius",
        "Jonah Hill Is A Genius"
    ],
    [
        "Dying is mainstream",
        "Dying Is Mainstream"
    ],
    [
        "If there is bread winners, there is bread losers. But you can't toast what isn't real.",
        "If There Is Bread Winners, There Is Bread Losers. But You Can't Toast What Isn't Real."
    ],
    [
        "You Can Discover Everything You Need to Know About Everything by Looking at your Hands",
        "You Can Discover Everything You Need To Know About Everything By Looking At Your Hands"
    ],
    [
        "Fixed habits to respond to authority takes 12 years. Have fun",
        "Fixed Habits To Respond To Authority Takes 12 Years. Have Fun"
    ],
    [
        "When you Live your Whole life In a Prison freedom Can be So dull.",
        "When You Live Your Whole Life In A Prison Freedom Can Be So Dull."
    ],
    [
        "Young Jaden: Here's the deal for every time out you give me, you'll give me 15 dollars for therapy when I get older.",
        "Young Jaden: Here's The Deal For Every Time Out You Give Me, You'll Give Me 15 Dollars For Therapy When I Get Older."
    ],
    [
        "The moment that truth is organized it becomes a lie.",
        "The Moment That Truth Is Organized It Becomes A Lie."
    ],
    [
        "Three men, six options, don't choose.",
        "Three Men, Six Options, Don't Choose."
    ],
    [
        "Water in the eyes and alcohol in the eyes are pretty much the same I know This from first Hand Experience.",
        "Water In The Eyes And Alcohol In The Eyes Are Pretty Much The Same I Know This From First Hand Experience."
    ],
    [
        "Pay attention to the numbers in your life they are vastly important.",
        "Pay Attention To The Numbers In Your Life They Are Vastly Important."
    ],
    [
        "We need to stop teaching the Youth about the Past and encourage them to change the Future.",
        "We Need To Stop Teaching The Youth About The Past And Encourage Them To Change The Future."
    ],
    [
        "If a book store never runs out of a certain book, dose that mean that nobody reads it, or everybody reads it",
        "If A Book Store Never Runs Out Of A Certain Book, Dose That Mean That Nobody Reads It, Or Everybody Reads It"
    ],
    [
        "People tell me to smile I tell them the lack of emotion in my face doesn't mean I'm unhappy",
        "People Tell Me To Smile I Tell Them The Lack Of Emotion In My Face Doesn't Mean I'm Unhappy"
    ],
    [
        "I watch Twilight every night",
        "I Watch Twilight Every Night"
    ]
]
def test_basic():
    """Unit tests provided by codewars."""
    for original, expected in quotes:
        assert to_jaden_case(original) == expected
def test_output_exists():
    """Test that an output exists."""
    result = to_jaden_case("test")
    assert result is not None
def test_output_type():
    """Test that the output is exactly a str (not a subclass)."""
    result = to_jaden_case("test")
    assert type(result) is str
def test_inputs_are_needed():
    """Test that calling without arguments raises TypeError."""
    with pytest.raises(TypeError):
        to_jaden_case()
def test_only_nums_are_valid_inputs():
    """Test that only strings are valid inputs."""
    bad_inputs = [["boop", "boink"], 12, 99.99, {"one": 2, "three:": 4}]
    for bad_input in bad_inputs:
        with pytest.raises(AttributeError):
            # Bug fix: the loop previously passed the whole ``bad_inputs``
            # list on every iteration (and the loop variable shadowed the
            # builtin ``input``); each bad value is now tested individually.
            to_jaden_case(bad_input)
|
from turtle import Turtle
class Score(Turtle):
    """Scoreboard turtle that displays the current left/right score."""

    def __init__(self):
        super().__init__()
        self.pu()
        self.color('white')
        self.goto(0,200)
        self.pd()
        self.hideturtle()
        self.left_score = 0
        self.right_score = 0
        self._render()

    def _render(self):
        # Draw the "left | right" score line at the current position.
        self.write(f'{self.left_score} | {self.right_score}')

    def update_score(self, side):
        """Add a point to ``side`` ('left' or 'right') and redraw the board."""
        if side == 'left':
            self.left_score += 1
        if side == 'right':
            self.right_score += 1
        self.clear()
        self._render()
|
from __future__ import division
def isPrime(n):
    """Return True when n is prime.

    Bug fix: values below 2 (0, 1, negatives) previously fell through the
    empty loop and were reported prime; they now return False. ``range``
    replaces the Python-2-only ``xrange`` and behaves identically here
    under both interpreters.
    """
    if n < 2:
        return False
    for i in range(2,int(n**.5)+1):
        if (n % i) == 0:
            return False
    return True
# Cache: C[n] = sum of the distinct prime divisors of n.
C = {1: 0}
def c(n):
    """Return the sum of the distinct prime factors of n (memoised in C)."""
    if n not in C:
        C[n] = 0
        # range() replaces the Python-2-only xrange() so the helper also
        # runs on Python 3; behavior is unchanged.
        for i in range(2, n+1):
            if n % i == 0:
                if isPrime(i):
                    C[n] += i
    return C[n]
# Cache: B[n] values of the recurrence below.
B = {1: c(1)}
def b(n):
    """Recurrence b(n) = (c(n) + sum_{k=1..n-1} c(k)*b(n-k)) / n, memoised in B.

    range() replaces the Python-2-only xrange() for Python 3 compatibility;
    1/n is true division under the module's ``from __future__ import division``.
    """
    if n not in B:
        B[n] = (1/n)*(c(n) + sum([c(k)*b(n-k) for k in range(1, n)]))
    return B[n]
# Find the smallest n with b(n) >= 5000, printing progress every 100 steps.
# NOTE: Python 2 print-statement syntax, matching the xrange use above.
n = 2
while b(n) < 5000:
    if n % 100 == 0:
        print n, B[n]
    n += 1
print n, B[n]
from rest_framework import serializers
from .models import User, CreatedEvent
class UserSerializer(serializers.ModelSerializer):
    """Full serializer for User, exposing every model field.

    NOTE(review): with ``fields = '__all__'`` the explicitly declared
    ``password`` field is included in serialized output — confirm it is
    intended, or mark it write_only.
    """
    full_name = serializers.CharField()
    profile_picture = serializers.ImageField()
    username = serializers.CharField()
    # birth_date = serializers.DateField()
    gender = serializers.CharField()
    password = serializers.CharField()
    # contact = serializers.CharField()
    email = serializers.EmailField()
    class Meta:
        model = User
        fields = '__all__'
class UserMinFoSerializer(serializers.ModelSerializer):
    """Minimal User serializer: only username and full name."""
    username = serializers.CharField()
    full_name = serializers.CharField()
    class Meta:
        model = User
        fields = ('username', 'full_name')
class CreatedEventSerializer(serializers.ModelSerializer):
    """Serializer for CreatedEvent exposing name, date/time and address."""
    # user_id = serializers.EmailField()
    event_name = serializers.CharField()
    event_date_time = serializers.DateTimeField()
    address = serializers.CharField()
    class Meta:
        model = CreatedEvent
        # fields = '__all__'
        fields = ('event_name', 'event_date_time', 'address')
|
import requests
import sys
from bs4 import BeautifulSoup
from .logger import Logger
from .cpubenchmark import cpubenchmark_scraper_single, cpubenchmark_scraper_mega
logger = Logger("harddrivebenchmark_sraper")
class single_hdd:
    """Record for one row of the harddrivebenchmark.net HDD list page."""

    def __init__(
        self,
        id=None,
        hdd_name=None,
        hdd_size=None,
        hdd_mark=None,
        hdd_rank=None,
        hdd_value=None,
        hdd_price=None
    ):
        # _url is derived from id by the id setter below.
        self._url = None
        self.id = id
        self._hdd_name = hdd_name
        self._hdd_size = hdd_size
        self._hdd_mark = hdd_mark
        self._hdd_rank = hdd_rank
        self._hdd_value = hdd_value
        self._hdd_price = hdd_price
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        # NOTE(review): stripping a "gpu" prefix looks copy-pasted from a
        # GPU-benchmark scraper — confirm HDD ids ever carry that prefix.
        if type(value) == str:
            value = value.replace("gpu", "")
        self._id = value
        self._url = "https://www.harddrivebenchmark.net/hdd.php?id=%s" % str(value)
    def __repr__(self):
        return str(self.__dict__())
    def __dict__(self):
        # NOTE(review): defining a *method* named __dict__ shadows normal
        # instance-dict access (e.g. vars(obj) returns this bound method).
        # It works for __repr__ above but is fragile.
        return {
            "id": self.id,
            "url": self._url,
            "hdd_name": self._hdd_name,
            "hdd_size": self._hdd_size,
            "hdd_mark": self._hdd_mark,
            "hdd_rank": self._hdd_rank,
            "hdd_value": self._hdd_value,
            "hdd_price": self._hdd_price
        }
class mega_hdd:
    """Record for one row of the harddrivebenchmark.net mega-page table."""

    def __init__(
        self,
        id=None,
        hdd_name=None,
        hdd_size=None,
        hdd_mark=None,
        hdd_samples=None,
        hdd_rank=None,
        hdd_price=None,
        hdd_value=None,
        test_date=None,
        hdd_type=None
    ):
        # _url is derived from id by the id setter below.
        self._url = None
        self.id = id
        self._hdd_name = hdd_name
        self._hdd_size = hdd_size
        self._hdd_mark = hdd_mark
        self._hdd_samples = hdd_samples
        self._hdd_rank = hdd_rank
        self._hdd_price = hdd_price
        self._hdd_value = hdd_value
        self._test_date = test_date
        self._hdd_type = hdd_type
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        # NOTE(review): stripping a "gpu" prefix looks copy-pasted from a
        # GPU-benchmark scraper — confirm HDD ids ever carry that prefix.
        if type(value) == str:
            value = value.replace("gpu", "")
        self._id = value
        self._url = "https://www.harddrivebenchmark.net/hdd.php?&id=%s" % str(value)
    def __repr__(self):
        return str(self.__dict__())
    def __dict__(self):
        # NOTE(review): a *method* named __dict__ shadows normal
        # instance-dict access (vars(obj) returns this bound method).
        return {
            "id": self.id,
            "url": self._url,
            "hdd_name": self._hdd_name,
            "hdd_size": self._hdd_size,
            "hdd_mark": self._hdd_mark,
            "hdd_samples": self._hdd_samples,
            "hdd_rank": self._hdd_rank,
            "hdd_price": self._hdd_price,
            "hdd_value": self._hdd_value,
            "test_date": self._test_date,
            "hdd_type": self._hdd_type
        }
class harddrivebenchmark_scraper_single(cpubenchmark_scraper_single):
    """Scraper for the flat HDD list page; reuses the CPU-benchmark scraper
    machinery and overrides only row extraction."""

    def __init__(self, url="https://www.harddrivebenchmark.net/hdd_list.php"):
        super().__init__(url=url)

    def refresh_hdds(self, raw_rows=None, headers=None):
        """Convert the cached table rows into single_hdd records.

        Raises Exception when a row has fewer than the 6 columns read below.
        """
        if raw_rows is None:
            raw_rows = self._raw_rows
        if headers is None:
            headers = self._headers
        _hdds = []
        for idx, row in enumerate(raw_rows):
            _cols = row.find_all("td")
            # Bug fix: 6 columns (indices 0..5) are read below, but the guard
            # previously only required 5, so a 5-column row crashed with an
            # IndexError instead of this explicit error.
            if len(_cols) < 6:
                logger.warning("Not enough columns for HDD: %i" % idx)
                raise Exception("Not enough columns in table for extraction")
            _id = _cols[0].find('a').attrs['href'].split('id=')[-1]
            if _id is None:
                # Bug fix: the original message contained a bare '%s' with no
                # argument; the row index is now reported.
                logger.warning(
                    "row %i: unable to find HDD id, the url returned will most likely be wrong." % idx
                )
            _hdds.append(
                single_hdd(
                    id=_id,
                    hdd_name=_cols[0].text,
                    hdd_size=_cols[1].text,
                    hdd_mark=_cols[2].text,
                    hdd_rank=_cols[3].text,
                    hdd_value=_cols[4].text,
                    hdd_price=_cols[5].text
                )
            )
        return _hdds

    def refresh_all(self):
        """Re-fetch the page and rebuild every cached artefact, in order."""
        self._source = self.refresh_request()
        self._soup = self.refresh_soup()
        self._table = self.refresh_table()
        self._headers = self.refresh_headers()
        self._raw_rows = self.refresh_raw_rows()
        self._hdds = self.refresh_hdds()
class harddrivebenchmark_scraper_mega(cpubenchmark_scraper_mega):
    """Scraper for the HDD mega-page; reuses the CPU-benchmark scraper
    machinery and overrides only row extraction."""

    def __init__(self, url="https://www.harddrivebenchmark.net/hdd-mega-page.html"):
        super().__init__(url=url)

    def refresh_hdds(self, raw_rows=None, headers=None):
        """Convert the cached table rows into mega_hdd records.

        Raises Exception when a row has fewer than the 9 columns read below.
        """
        if raw_rows is None:
            raw_rows = self._raw_rows
        if headers is None:
            headers = self._headers
        _hdds = []
        for idx, row in enumerate(raw_rows):
            # NOTE(review): row[0] suggests the base class stores each raw
            # row as a sequence — confirm against cpubenchmark_scraper_mega.
            _cols = row[0].find_all("td")
            if len(_cols) < 9:
                # Bug fix: messages said "GPU" — copy-paste from the GPU
                # scraper; this class extracts HDDs.
                logger.warning("Not enough columns for HDD: %i" % idx)
                raise Exception("Not enough columns in table for extraction")
            _id = _cols[0].find('a').attrs['href'].split('id=')[-1]
            if _id is None:
                # Bug fix: the original message contained a bare '%s' with no
                # argument; the row index is now reported.
                logger.warning(
                    "row %i: unable to find HDD id, the url returned will most likely be wrong." % idx
                )
            _hdds.append(mega_hdd(
                id=_id,
                hdd_name=_cols[0].text,
                hdd_size=_cols[1].text,
                hdd_mark=_cols[2].text,
                hdd_samples=_cols[3].text,
                hdd_rank=_cols[4].text,
                hdd_price=_cols[5].text,
                hdd_value=_cols[6].text,
                test_date=_cols[7].text,
                hdd_type=_cols[8].text
            ))
        return _hdds

    def refresh_all(self):
        """Re-fetch the page and rebuild every cached artefact, in order."""
        self._source = self.refresh_request()
        self._soup = self.refresh_soup()
        self._table = self.refresh_table()
        self._headers = self.refresh_headers()
        self._raw_rows = self.refresh_raw_rows()
        self._hdds = self.refresh_hdds()
|
from terminal import timed_input
def obtener_direccion():
    """Read one key press (1-second timeout) and map it to a snake direction.

    Returns 'arriba', 'abajo', 'izquierda' or 'derecha' for w/s/a/d
    respectively, or an empty string when nothing (or an unmapped key)
    was pressed.
    """
    mapa_teclas = {
        "w": "arriba",
        "s": "abajo",
        "a": "izquierda",
        "d": "derecha",
    }
    entrada_usuario = timed_input(1)
    if entrada_usuario == "":
        return ""
    # Only the first pressed character matters; unmapped keys yield "".
    return mapa_teclas.get(entrada_usuario[0], "")
def reiniciar_juego():
    """Ask the user whether to play again.

    Returns True for "si", False for "no" (case-insensitive), and None
    (implicitly, as before) for any other answer.

    Bug fix: the original only compared against "si"/"SI" and "no"/"NO"
    exactly, so mixed-case answers such as "Si" or "nO" fell through.
    """
    usuario = str(input("Desea volver a jugar?[SI/NO]: "))
    respuesta = usuario.lower()
    if respuesta == "si":
        return True
    elif respuesta == "no":
        return False
|
import cv2 as cv
import numpy as np
from numpy.fft import *
# import cv2.aruco as aruco
def wiener_filter(img, kernel, K):
    """Deconvolve a 2-D image with a Wiener filter.

    img    : 2-D ndarray (grayscale image).
    kernel : blur kernel; normalised to unit sum internally.
    K      : noise-to-signal constant of the Wiener formula.

    Returns the magnitude of the restored image, same shape as ``img``.

    Bug fix: ``kernel /= np.sum(kernel)`` normalised the caller's array
    in place; the kernel argument is no longer mutated.
    """
    kernel = kernel / np.sum(kernel)
    restored = fft2(np.copy(img))
    kernel_f = fft2(kernel, s = img.shape)
    # Wiener transfer function: conj(H) / (|H|^2 + K).
    kernel_f = np.conj(kernel_f) / (np.abs(kernel_f) ** 2 + K)
    restored = restored * kernel_f
    return np.abs(ifft2(restored))
# Read the video and advance to the frame at the start of second 1.
vid=cv.VideoCapture("../Videos/aruco_bot.mp4")
fps = vid.get(cv.CAP_PROP_FPS)
# 6th sec = 5*fps + 1
itrs = 1 * int(fps) + 1
frame = None
for i in range(itrs):
    _, frame = vid.read()
# Crop to the region of interest before filtering.
crop_img = frame[0:725, 0:1282]
# Vertical motion-blur kernel: two adjacent columns (indices 9 and 10 for
# size 20) filled with -1, everything else zero.
kernel_size=20;
kernel_v = np.zeros((kernel_size, kernel_size))
kernel_v[:,int((kernel_size - 2)/2)] =(-1)* np.ones(kernel_size)
kernel_v[:,int((kernel_size)/2)] = (-1)*np.ones(kernel_size)
print(kernel_v)
# Deconvolve the grayscale crop with the Wiener filter and show the result.
cv.imshow("kernl",wiener_filter(cv.cvtColor(crop_img,cv.COLOR_BGR2GRAY),kernel_v,150))
cv.waitKey()
#cv.imwrite('tempDump.jpg',)
from rest_framework.test import APITestCase
from .factories import UserFactory
class UserViewSetTestCase(APITestCase):
    """Endpoint tests for the users API.

    The login endpoint doubles as registration: the first POST with an
    unknown username creates the user (201); later POSTs log in (200).
    The call order inside each test therefore matters.
    """

    def test_login(self):
        username = 'abcd'
        password = 'FS2322rfR@'
        login_api = '/api/users/login/'
        # create user
        response = self.client.post(
            login_api,
            {'username': username, 'password': password},
        )
        self.assertEqual(response.status_code, 201)
        self.assertEqual(
            set(response.json().keys()),
            {'user', 'token'}
        )
        # login with invalid password
        response = self.client.post(
            login_api,
            {'username': username, 'password': '123'},
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {'error': "Invalid username or password"}
        )
        # login with correct password
        response = self.client.post(
            login_api,
            {'username': username, 'password': password},
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            set(response.json().keys()),
            {'user', 'token'}
        )

    def test_me(self):
        """/me requires authentication and returns the caller's identity."""
        me_api = '/api/users/me/'
        # Unauthenticated requests are rejected.
        response = self.client.get(me_api)
        self.assertEqual(response.status_code, 401)
        user = UserFactory()
        self.client.force_authenticate(user=user)
        response = self.client.get(me_api)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json(),
            {'username': user.username, 'id': user.id}
        )
|
""" -*- coding: utf-8 -*-
@author: omerkocadayi
https://github.com/omerkocadayi
https://www.linkedin.com/in/omerkocadayi/ """
import cv2
import numpy as np
img1 = cv2.imread("bitwise1.png")
img2 = cv2.imread("bitwise2.png")
cv2.imshow("Original Image 1", img1)
cv2.imshow("Original Image 2", img2)
bit_and = cv2.bitwise_and(img1,img2)
bit_or = cv2.bitwise_or(img1,img2)
# Bug fix: the second positional argument of cv2.bitwise_not is the *dst*
# (output) array, so the original call overwrote img2 with NOT(img1)
# before the XOR below was computed.
bit_not = cv2.bitwise_not(img1)
bit_xor = cv2.bitwise_xor(img1,img2)
#cv2.imshow("Bitwise AND", bit_and) # 1&1=1 , everything else 0
#cv2.imshow("Bitwise OR", bit_or) # 0|0=0 , everything else 1
#cv2.imshow("Bitwise NOT", bit_not) # inverts every bit of img1
#cv2.imshow("Bitwise XOR", bit_xor) # 0^0=0 , 1^1=0 , everything else 1
cv2.waitKey(0)
cv2.destroyAllWindows()
''' This contains the packing and unpacking the padded sequences for rnn.
we want to run a LSTM on a batch of 3 character sequences ['long_str', 'tiny', 'medium']
step 1 : construct vocabulary
step 2 : convert the sequences into numerical form
step 3 : define model
step 4 : prepare data, by padding with 0 (<pad> token), making the batch equal lengths
step 5 : sort the data in the batch in descending order by their original lengths
step 6 : apply the embedding layer for the batch
step 7 : call the pack_padded_sequences fn, with embeddings and lengths of original sentences
step 8 : call the forward method of the lstm model
step 9 : call the unpack_padded_sequences (pad_packed_sequence) method if required
'''
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch import LongTensor
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, pad_sequence
data = ['long_str', 'tiny', 'medium']
# step 1 : construct vocabulary
vocab = ['<pad>'] + sorted(set([char for seq in data for char in seq]))
# vocab = ['<pad>', '_', 'd', 'e', 'g', 'i', 'l', 'm', 'n', 'o', 'r', 's', 't', 'u', 'y']
# step 2 : convert the sequences into numerical form
vectorized_data = [[vocab.index(tok) for tok in seq] for seq in data]
# vectorized_data = [[6, 9, 8, 4, 1, 11, 12, 10], [12, 5, 8, 14], [7, 3, 2, 5, 13, 7]]
# step 3 : define model
# input for embedding layer is lengths of inputs
# output for embedding layer is embedding shape of inputs
embedding_layer = nn.Embedding(len(vocab), 4)
# input_size is the embedding output size
# hidden_size is the hidden size of lstm
lstm = nn.LSTM(input_size=4, hidden_size=5, batch_first=True)
# step 4 : prepare data, by padding with 0 (<pad> token), making the batch equal lengths
seq_lengths = LongTensor([len(seq) for seq in vectorized_data])
# NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper in
# modern PyTorch — the plain tensor would behave the same; confirm the
# targeted PyTorch version.
sequence_tensor = Variable(torch.zeros(len(vectorized_data), seq_lengths.max(), dtype=torch.long))
for idx, (seq, seq_len) in enumerate(zip(vectorized_data, seq_lengths)):
    sequence_tensor[idx, :seq_len] = LongTensor(seq)
# sequence_tensor = ([[ 6,  9,  8,  4,  1, 11, 12, 10],
#                    [12,  5,  8, 14,  0,  0,  0,  0],
#                    [ 7,  3,  2,  5, 13,  7,  0,  0]])
# step 5 : sort the data in the batch in descending order by their original lengths
# (pack_padded_sequence historically required a length-sorted batch)
# seq_lengths = [8, 4, 6]
seq_lengths, perm_idx = seq_lengths.sort(0, descending=True)
# seq_lengths = [8, 6, 4]
# perm_idx = [0, 2, 1]
sequence_tensor = sequence_tensor[perm_idx]
# sequence_tensor = ([[ 6,  9,  8,  4,  1, 11, 12, 10],
#                    [ 7,  3,  2,  5, 13,  7,  0,  0],
#                    [12,  5,  8, 14,  0,  0,  0,  0]])
# step 6 : apply the embedding layer for the batch
# sequence_tensor shape => [batch_size, max_seq_len] => [3, 8]
embed = embedding_layer(sequence_tensor)
# embed shape is => [batch_size, max_seq_len, embedding_shape] => [3, 8, 4]
# step 7 : call the pack_padded_sequences fn, with embeddings and lengths of original sentences
packed_input = pack_padded_sequence(embed, seq_lengths, batch_first=True)
# packed_input is a namedtuple with 2 attributes: data, batch_sizes
# data.shape => [all_sequences_len_sum, embedding_size] => [18, 4]
# batch_sizes => [size_of_each_batch_input] => [3, 3, 3, 3, 2, 2, 1, 1]
# visualization :
# l  o  n  g  _  s  t  r   # (long_str)
# m  e  d  i  u  m         # (medium)
# t  i  n  y               # (tiny)
# 3  3  3  3  2  2  1  1   (sum = 18 [all_sequences_len_sum])
# step 8 : call the forward method of the lstm model
packed_output, (ht, ct) = lstm(packed_input)
# packed_output is a namedtuple with 2 attributes: data, batch_sizes
# data.shape => [all_sequences_len_sum, hidden_shape] => [18, 5]
# batch_sizes => [size_of_each_batch_input] => [3, 3, 3, 3, 2, 2, 1, 1]
# ht => [num_layers * num_directions, batch_size, hidden_size] => [1, 3, 5]
# ct => [num_layers * num_directions, batch_size, hidden_size] => [1, 3, 5]
# step 9 : call the unpack_padded_sequences (pad_packed_sequence) method if required
output, input_sizes = pad_packed_sequence(packed_output, batch_first=True)
# output shape => [batch_size, max_seq_len, hidden_dim] (if batch_first is true) => [3, 8, 5]
# input_sizes => [length_of_each_sequence] => [8, 6, 4]
# Summary of Shape Transformations #
# -------------------------------- #
# (batch_size X max_seq_len X embedding_dim) --> Sort by seqlen ---> (batch_size X max_seq_len X embedding_dim)
# (batch_size X max_seq_len X embedding_dim) ---> Pack ---> (batch_sum_seq_len X embedding_dim)
# (batch_sum_seq_len X embedding_dim) ---> LSTM ---> (batch_sum_seq_len X hidden_dim)
# (batch_sum_seq_len X hidden_dim) ---> UnPack ---> (batch_size X max_seq_len X hidden_dim)
# Write a Python program that prints the string s with the character curr_char replaced by the character new_char.
#
# curr_char and new_char are variables that contain strings with a single character.
#
# You may assume that new_char will not be an empty string.
#
# The match must be case-sensitive (do not replace lowercase letters if curr_char is uppercase).
#
# If no match is found, print the initial string
# Read a text plus the character pair to swap, then print the result.
# str.replace is case-sensitive and returns the original string unchanged
# when no match is found, which satisfies the exercise requirements above.
text = input("Enter a text: ")
old_char = input("Enter Old Character :")
new_char = input("Enter New Character :")
result = text.replace(old_char, new_char)
print(result)
|
from src.load_data import load_df_from_dbs
from src.nn import conv_model, evaluate, evaluate_as_classifier
from src.settings import MAX_SEQUENCE_LENGTH, character_to_index, CHARACTER_DICT, max_mic_buffer, MAX_MIC
from sklearn.model_selection import train_test_split
import numpy as np
import random
from Bio import SeqIO
import argparse
import sys
def get_bacterium_df(bacterium, df):
    """Filter ``df`` to rows whose bacterium column contains ``bacterium``
    and average duplicate (sequence, bacterium) measurements.

    Rows with missing averaged values are dropped.
    """
    matching = df.loc[df.bacterium.str.contains(bacterium)]
    averaged = matching.groupby(['sequence', 'bacterium']).mean()
    return averaged.reset_index().dropna()
def sequence_to_vector(sequence):
    """One-hot encode an amino-acid sequence.

    Returns a (MAX_SEQUENCE_LENGTH, len(character_to_index) + 1) matrix;
    the sequence is truncated to MAX_SEQUENCE_LENGTH and positions past
    its end stay all-zero.
    """
    vector = np.zeros([MAX_SEQUENCE_LENGTH, len(character_to_index) + 1])
    for position, character in enumerate(sequence[:MAX_SEQUENCE_LENGTH]):
        vector[position, character_to_index[character]] = 1
    return vector
def generate_random_sequence(min_length=5, max_length=MAX_SEQUENCE_LENGTH, fixed_length=None):
    """Return a random peptide as a list of residue characters.

    The length is ``fixed_length`` when given (and truthy); otherwise it
    is drawn uniformly from [min_length, max_length).
    """
    length = fixed_length if fixed_length else random.randrange(min_length, max_length)
    return [random.choice(list(CHARACTER_DICT)) for _ in range(length)]
def add_random_negative_examples(vectors, labels, negatives_ratio):
    """Append randomly generated negative peptides to the data set.

    ``negatives_ratio`` scales the number of negatives relative to the
    existing (positive) examples; every negative is labelled with the
    ceiling value MAX_MIC. With a ratio of 0 the inputs are returned
    unchanged.
    """
    if negatives_ratio == 0:
        return vectors, labels
    n_negatives = int(negatives_ratio * len(vectors))
    random_vectors = np.array(
        [sequence_to_vector(generate_random_sequence()) for _ in range(n_negatives)]
    )
    random_labels = np.full(n_negatives, MAX_MIC)
    return np.concatenate((vectors, random_vectors)), np.concatenate((labels, random_labels))
def load_uniprot_negatives(count):
    """Load up to ``count`` negative example fragments from the Uniprot
    fasta file.

    Sequences containing 'C' are skipped (as before), and a random window
    of MAX_SEQUENCE_LENGTH residues is cut from each remaining sequence.

    Bug fix: sequences shorter than MAX_SEQUENCE_LENGTH previously made
    random.randint raise ValueError (negative upper bound); they are now
    skipped as well.
    """
    uniprot_file = 'data/Fasta_files/Uniprot_negatives.txt'
    fasta = SeqIO.parse(uniprot_file, 'fasta')
    fasta_sequences = [str(f.seq) for f in fasta]
    negatives = []
    for seq in fasta_sequences:
        if 'C' in seq or len(seq) < MAX_SEQUENCE_LENGTH:
            continue
        start = random.randint(0, len(seq) - MAX_SEQUENCE_LENGTH)
        negatives.append(seq[start:(start + MAX_SEQUENCE_LENGTH)])
        if len(negatives) >= count:
            break
    return negatives
def uniprot_precision(model):
    """Estimate precision on known-negative Uniprot fragments: the share of
    1000 sampled negatives the model does NOT predict as active."""
    negatives = load_uniprot_negatives(1000)  # sample 1000 negative fragments per call
    vectors = []
    for seq in negatives:
        try:
            vectors.append(sequence_to_vector(seq))  # one-hot encode
        except KeyError:
            continue  # fragment contains a residue outside the vocabulary
    preds = model.predict(np.array(vectors))  # model predictions for encodable fragments
    # false_positives = len([p for p in preds if p < MAX_MIC - max_mic_buffer])
    # NOTE(review): the hard-coded 3.5 replaced the commented
    # MAX_MIC - max_mic_buffer expression above — confirm they agree.
    false_positives = len([p for p in preds if p < 3.5])
    # NOTE(review): divides by len(negatives) although predictions exist only
    # for the len(vectors) encodable fragments — the two can differ.
    return 1 - false_positives / len(negatives)
def train_model(bacterium, negatives_ratio=1, epochs=100):
    """
    Train the convolutional MIC model for one bacterium and report metrics.

    Bacterium can be E. coli, P. aeruginosa, etc.
    When with_negatives is False, classification error will be 0
    and error on correctly classified/active only/all will be equal
    because all peptides in the dataset are active
    """
    DATA_PATH = 'data/'
    df = load_df_from_dbs(DATA_PATH)
    bacterium_df = get_bacterium_df(bacterium, df)  # rows filtered to the requested bacterium
    print("Found %s sequences for %s" % (len(bacterium_df), bacterium))
    bacterium_df['vector'] = bacterium_df.sequence.apply(sequence_to_vector)  # new column with the one-hot encoding
    x = np.array(list(bacterium_df.vector.values))
    y = bacterium_df.value.values  # MIC labels
    x, y = add_random_negative_examples(x, y, negatives_ratio)  # mix in random negative examples
    train_x, test_x, train_y, test_y = train_test_split(x, y,test_size=0.2)
    model = conv_model()
    model.fit(train_x, train_y, epochs=epochs)  # sklearn-style fit API
    print("Avg. MIC error (correctly classified, active only, all)")
    print(evaluate(model, test_x, test_y))
    print('True positives, true negatives, false positives, false negatives')
    true_positives, true_negatives, false_positives, false_negatives = evaluate_as_classifier(
        model, test_x, test_y
    )
    print(true_positives, true_negatives, false_positives, false_negatives)
    print("Accuracy:",
        (true_positives + true_negatives) / (true_positives + true_negatives + false_positives + false_negatives)
    )
    print("Precision on Uniprot:")
    print(uniprot_precision(model))
    return model
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--bacterium', type=str, default='E. coli', help='Name of bacterium, in single quotes')
    # selects which bacterium's measurements to train on
    parser.add_argument('--negatives', type=float, default=2, help='Ratio of negatives to positives')
    # controls how many random negatives are mixed in
    parser.add_argument('--epochs', type=int, default=40)
    args = parser.parse_args()
    train_model(bacterium=args.bacterium, negatives_ratio=args.negatives, epochs=args.epochs)
|
from . import engine, panel
# Sub-modules exposing register()/unregister() hooks, in load order.
modules = [
    engine,
    panel,
]


def register():
    """Register every sub-module, in declaration order."""
    for module in modules:
        module.register()


def unregister():
    """Unregister sub-modules in reverse order, mirroring register()."""
    for module in reversed(modules):
        module.unregister()
|
#!/usr/bin/env python
"""Print the cosine of an angle given in degrees as the single CLI argument."""
import math
import sys

if len(sys.argv) != 2:
    # Fail with a usage message instead of an IndexError traceback.
    sys.exit("Usage: cos.py ANGLE_IN_DEGREES")
try:
    angle = float(sys.argv[1])
except ValueError:
    # Report bad input instead of a ValueError traceback.
    sys.exit("ANGLE_IN_DEGREES must be a number, got %r" % sys.argv[1])
print(math.cos(math.radians(angle)))
|
import networkx as nx

# Distance lookup keyed 'from:to', loaded from the data file.
distances = {}
with open('kilonetnew.dat', 'r') as f:
    lines = f.readlines()
    for l in lines:
        # Each row: from,to,first,second,code; only 'second' is used as the distance.
        fr,to,first,second,code = l.split(',')
        distances[fr + ':' + to] = int(second)
# Rail network graph, populated by expand() below.
G=nx.Graph()
def expand(lijn):
    """Add every consecutive station pair of the line to the graph, weighted by its distance."""
    for a, b in zip(lijn, lijn[1:]):
        G.add_edge(a, b, weight=distances[a + ':' + b])
def cost(steps):
    """Return the total distance along the consecutive station pairs in *steps*."""
    return sum(distances[a + ':' + b] for a, b in zip(steps, steps[1:]))
# Station sequences of the individual lines.
lijn1 = ['odz','hglo','hgl','hglg','ddn','go','lc','zp']
expand(lijn1)
lijn2 = ['es', 'esk', 'hgl', 'bn', 'amri', 'aml', 'wdn', 'nvd', 'rat', 'hno', 'zl']
expand(lijn2)
lijn3 = ['kpn', 'zlsh', 'zl']
expand(lijn3)
lijn4 = ['aml', 'vz', 'da', 'vhp', 'mrb', 'hdb']
expand(lijn4)
lijn5 = ['zl', 'dl', 'omn', 'mrb', 'hdb', 'gbg', 'co', 'dln', 'na', 'emnz', 'emn']
expand(lijn5)
# All unique stations across the lines (despite the name, these are single stations).
pairs = set(lijn1 + lijn2 + lijn3 + lijn4 + lijn5)
print('startplaceref,endplaceref,distance,operatorref,fareref')
# Emit a fare row for every ordered station pair (both directions),
# priced by shortest-path distance through the network.
for x in pairs:
    for y in pairs:
        if x != y:
            steps = nx.dijkstra_path(G, x, y, weight='weight')
            print('NL:S:%s,NL:S:%s,%d,%s,%s' % (x, y, cost(steps), 'IFF:BN', 'IFF:BN'))
|
"""
https://leetcode.com/problems/arithmetic-slices/
"""
def sum_arithmetic(n):
    """Return the sum of all integers 1..n (the n-th triangular number).

    Uses exact integer arithmetic; the previous float formula
    int((n/2.0)*(1+n)) loses precision once n*(n+1)/2 exceeds 2**53.

    :param n: Upper bound of the sum.
    :return: n * (n + 1) // 2 as an int.
    """
    return n * (n + 1) // 2
def numberOfArithmeticSlices(arr):
    """Count arithmetic slices: contiguous runs of >= 3 elements with a
    constant difference.

    The previous implementation counted equal absolute differences across
    the WHOLE array, which (a) merged non-adjacent runs that happened to
    share a difference (e.g. [1,3,5,10,12,14] was counted as one run of
    four) and (b) treated alternating sequences like [1,3,1,3] as
    arithmetic because of the abs(). This version counts only contiguous
    runs with a constant signed difference.

    :param arr: Sequence of numbers.
    :return: Number of arithmetic slices in *arr*.
    """
    result = 0
    run = 0  # number of arithmetic slices ending at the current index
    for i in range(2, len(arr)):
        if arr[i] - arr[i - 1] == arr[i - 1] - arr[i - 2]:
            # Extending a run of length L adds L-2 new slices; 'run'
            # tracks exactly that increment.
            run += 1
            result += run
        else:
            run = 0
    return result
"""
Sample work:
A = [1, 3, 5, 7, 9]
1,3,5
3,5,7
5,7,9
1,3,5,7
3,5,7,9
1,3,5,7,9
len = 5, k = 6
A = [7, 7, 7, 7]
7,7,7
7,7,7
7,7,7,7
len =4, k = 3
A = [3, -1, -5, -9]
3,-1,-5
-1,-5,-9
A = [1, 2, 3, 4]
[1,2,3,4,5,6]
123
234
345
456
1234
2345
3456
12345
23456
123456
len = 6, k = 10
4+3+2+1
5/2 * 5 * 1
(n/2)(n+1) => 4/2*5
"""
|
import os
import re
import urllib.request
import urllib
from collections import deque
from html.parser import HTMLParser
savePath = ''
imagevis = set()
cnt = 0
def getImage(addr):
    """Download the image at *addr* and save it under savePath.

    The file name is taken from the last URL path component; empty names
    and already-seen names are skipped. Best-effort: network or file
    errors are reported (or ignored) and the image is skipped rather
    than aborting the crawl.
    """
    global cnt
    global imagevis
    try:
        data = urllib.request.urlopen(addr, timeout=10).read()
    except Exception:
        # Best-effort crawl: report the failing URL and move on.
        print(addr)
        print('sorry--------------------')
        return
    splitPath = addr.strip().split('/')
    fName = splitPath[-1]
    if fName == '' or fName in imagevis:
        return
    cnt = cnt + 1
    imagevis |= {fName}
    print('saving %s' % fName)
    try:
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle and crashed on a failed write).
        with open(savePath + fName, 'wb') as fp:
            print(savePath + fName)
            fp.write(data)
    except Exception:
        return
class MyHTMLParser(HTMLParser):
    """Collects anchor hrefs, image srcs and text nodes from a page."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []     # href values of <a> tags
        self.imglinks = []  # src values of <img> tags
        self.urlText = []   # text nodes (bare newlines excluded)

    def handle_data(self, data):
        if data == '\n':
            return
        self.urlText.append(data)

    def handle_starttag(self, tag, attrs):
        # Record hrefs of anchors and srcs of images, in document order.
        for name, value in attrs:
            if tag == "a" and name == "href":
                self.links.append(value)
            elif tag == 'img' and name == 'src':
                self.imglinks.append(value)


print('ddddd')  # leftover debug marker
# Breadth-first crawl of the /xiaoshuo/ section, downloading images per page.
url = 'http://www.941ni.com'
pattern = re.compile('http://www.941ni.com/xiaoshuo/')  # NOTE(review): '.' is unescaped and matches any char
queue = deque()
vis = set()  # URLs already enqueued
queue.append(url)
vis |={url}
while queue:
    url1 = queue.popleft()
    print(url1)
    try:
        data = urllib.request.urlopen(url1,timeout=10).read()
        data = data.decode('utf-8')
    except:
        # Best effort: skip pages that fail to download or decode.
        print ('sorry,--'+url1)
        continue
    yk = MyHTMLParser()
    yk.feed(data)
    # Enqueue newly discovered in-section links.
    for link in yk.links:
        flink = link
        print(flink)
        if ('http' not in link):
            flink = url+flink  # make relative links absolute
        if(flink in vis or not pattern.match(flink)):
            continue
        queue.append(flink)
        vis |={flink}
    # '\i' is not an escape sequence, so this is cwd + '\image\' (Windows-style path).
    savePath =os.getcwd()+'\image\\'
    if not os.path.exists(savePath):
        os.mkdir(savePath)
    for text in yk.urlText:
        print (text)
        continue  # no-op: the loop only prints the text nodes
    for image in yk.imglinks:
        if ('http' in image):
            getImage(image)
        else :
            getImage(url+image)
|
#!/usr/bin/python3
"""
nqueens backtracking program to print the coordinates of n queens
on an nxn grid such that they are all in non-attacking positions
"""
from sys import argv
if __name__ == "__main__":
    # a[x] = [x, y]: queen of column x sits in row y (None while unset).
    a = []
    if len(argv) != 2:
        print("Usage: nqueens N")
        exit(1)
    if argv[1].isdigit() is False:
        print("N must be a number")
        exit(1)
    n = int(argv[1])
    if n < 4:
        print("N must be at least 4")
        exit(1)
    # initialize the answer list
    for i in range(n):
        a.append([i, None])

    def already_exists(y):
        """check that a queen does not already exist in that y value"""
        for x in range(n):
            if y == a[x][1]:
                return True
        return False

    def reject(x, y):
        """determines whether or not to reject the solution

        NOTE(review): despite the name, this returns True when (x, y) is a
        VALID placement (no shared row, no shared diagonal).
        """
        if (already_exists(y)):
            return False
        i = 0
        while(i < x):
            # same diagonal when row distance equals column distance
            if abs(a[i][1] - y) == abs(i - x):
                return False
            i += 1
        return True

    def clear_a(x):
        """clears the answers from the point of failure on"""
        for i in range(x, n):
            a[i][1] = None

    def nqueens(x):
        """recursive backtracking function to find the solution"""
        for y in range(n):
            clear_a(x)
            if reject(x, y):
                a[x][1] = y
                if (x == n - 1):  # accepts the solution
                    print(a)
                else:
                    nqueens(x + 1)  # moves on to next x value to continue

    # start the recursive process at x = 0
    nqueens(0)
|
from django import forms
class ExecutarFerramentaForm(forms.Form):
    """Form for choosing a tool configuration to execute.

    The select choices are supplied by the caller at construction time.
    """

    def __init__(self, configuracaoferramenta_choices, *args, **kwargs):
        super(ExecutarFerramentaForm, self).__init__(*args, **kwargs)
        # Populate the select with the caller-provided (value, label) pairs.
        self.fields['configuracaoferramenta_escolhida'].choices = configuracaoferramenta_choices

    configuracaoferramenta_escolhida = forms.ChoiceField(label='Escolha uma configuração', label_suffix=': ', \
        required=True, choices=(), widget=forms.Select(attrs={'style':'width: 350px;'}))
|
# Generated by Django 3.2.5 on 2021-08-19 12:53
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: adds 'egitmen' and 'urunl' models, drops 'urunler'."""

    dependencies = [
        ('kategori', '0003_auto_20210819_1453'),
    ]

    operations = [
        migrations.CreateModel(
            name='egitmen',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Egitmen_ismi', models.CharField(max_length=50)),
                ('egitmen_bilgisi', models.CharField(max_length=255)),
                ('Egitmen_Resmi', models.ImageField(upload_to='images/')),
                ('create_at', models.DateTimeField(auto_now_add=True)),
                ('update_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='urunl',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isim', models.CharField(max_length=150)),
                ('keywords', models.CharField(max_length=255)),
                ('aciklama', models.TextField(max_length=255)),
                ('image', models.ImageField(upload_to='images/')),
                ('fiyat', models.DecimalField(decimal_places=2, default=0, max_digits=12)),
                ('kontejan', models.IntegerField(default=0)),
                ('minimum_kontejan', models.IntegerField(default=3)),
                ('detay', ckeditor_uploader.fields.RichTextUploadingField()),
                ('slug', models.SlugField(unique=True)),
                ('Durum', models.CharField(choices=[('True', 'Evet'), ('False', 'Hayır')], max_length=10)),
                ('create_at', models.DateTimeField(auto_now_add=True)),
                ('update_at', models.DateTimeField(auto_now=True)),
                ('egitim_tarihi', models.DateTimeField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kategori.kategoriler')),
                ('egitmen', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kategori.egitmen')),
            ],
        ),
        migrations.DeleteModel(
            name='urunler',
        ),
    ]
|
import logging
from django.test import TestCase
from askup.models import Qset, Question
from askup.utils.general import (
get_checked_user_organization_by_id,
get_first_user_organization,
get_user_organizations_for_filter,
)
from askup.utils.views import select_user_organization
log = logging.getLogger(__name__)
class TestAdminPanelGroupQuestionsDeleting(TestCase):
    """
    Testing the admin panel group question deleting.
    """

    fixtures = ['groups', 'mockup_data']

    def test_group_deleting(self):
        """
        Test a group question deleting.

        Used in the admin panel when performing a group delete action.
        Verifies that the qset's cached questions_count follows the bulk delete.
        """
        qset = Qset.objects.get(id=4)
        questions_count_before = qset.questions_count  # 3 questions in this qset initially
        self.assertEqual(questions_count_before, 3)
        questions = Question.objects.filter(qset_id=qset.id)
        questions.delete()
        qset.refresh_from_db()
        questions_count_after = qset.questions_count  # 0 questions so far, after the deletion
        self.assertEqual(questions_count_after, 0)
class TestUserProfileOrganization(TestCase):
    """
    Testing the user profile organization functions.
    """

    fixtures = ['groups', 'mockup_data']

    def test_get_first_user_organization(self):
        """
        Test the getting of the first user organization.
        """
        organization = get_first_user_organization(3, None)  # User is "student01", viewer is "admin"
        self.assertIsNotNone(organization)  # He has an organizations assigned
        self.assertEqual(organization.id, 1)  # "Organization 1" is the first one in his list

    def test_get_checked_user_organization_by_id(self):
        """
        Test the getting of the user organization by id.
        """
        organization = get_checked_user_organization_by_id(3, 1, None)  # "student01" and his organization (id=1)
        self.assertIsNotNone(organization)  # He has this organization assigned
        self.assertEqual(organization.id, 1)  # Got the "Organization 1" (id=1) as his organization.
        organization = get_checked_user_organization_by_id(3, 3, 1)  # "student01" and another's organization
        self.assertIsNone(organization)  # He has no this organization assigned

    def test_get_user_organizations_for_filter(self):
        """
        Test the organizations returned for the filter drop-down.
        """
        organizations = get_user_organizations_for_filter(3, None)  # User is "student01", viewer is "admin"
        self.assertEqual(len(organizations), 1)  # He has one organization to show in the filter
        self.assertTrue('id' in organizations[0])
        self.assertTrue('name' in organizations[0])
        self.assertEqual(organizations[0]['id'], 1)
        self.assertEqual(organizations[0]['name'], 'Organization 1')
        organizations = get_user_organizations_for_filter(4, None)  # User is "student02_no_orgs", viewer is "admin"
        self.assertEqual(len(organizations), 0)  # He has no organizations to show in the filter
        organizations = get_user_organizations_for_filter(5, None)  # User is "student03", viewer is "admin"
        self.assertEqual(len(organizations), 2)  # He has two organizations to show in the filter
        self.assertEqual(organizations[0]['id'], 1)
        self.assertEqual(organizations[0]['name'], 'Organization 1')
        self.assertEqual(organizations[1]['id'], 2)
        self.assertEqual(organizations[1]['name'], 'Organization 2')

    def test_select_user_organization(self):
        """
        Test the organization selection by the user and organization id.
        """
        organization = select_user_organization(3, 1, None)  # 3 - student01, has 1 - "Organization 1"
        self.assertIsNotNone(organization)
        self.assertEqual(organization.id, 1)
        organization = select_user_organization(3, 3, None)  # 3 - student01, has no 3 - "Organization 3"
        self.assertIsNone(organization)
        organization = select_user_organization(4, 1, None)  # 4 - student02_no_orgs, has no organizations
        self.assertIsNone(organization)
|
"""Functions for common command line formatting and procedures."""
import os
import re
import textwrap
from blessings import Terminal
# Formatting
_term = Terminal()
#: Constant for string prepended to input prompts
PROMPT_PREFIX = '> '
#: Constant for string used for terminal indentation
INDENT = ' ' * 3
#: Color/formatting functions for different types of output
COLORS = {
None: str,
'error': _term.bold_red,
'warning': _term.yellow,
'success': _term.green,
'info': _term.cyan,
'prompt': _term.magenta,
'title': _term.blue,
'emphasize': _term.bold,
}
# Printing
def print_exception(e):
    """Format and print the string representation of an exception

    Uses the 'error' color from :const:`COLORS`.

    :param e: The exception to print
    """
    print(COLORS['error'](str(e)))


def print_warning(text):
    """Format and print a warning message

    Uses the 'warning' color from :const:`COLORS`.

    :param text: Warning message to print
    """
    print(COLORS['warning'](text))


def print_info(text):
    """Format and print info message

    Uses the 'info' color from :const:`COLORS`.

    :param text: Info message to print
    """
    print(COLORS['info'](text))
def print_shortened(text, placeholder='...', indent='', fmt=None):
    """Print a string, shorten if it's longer than the current terminal width

    Essentially a wrapper around :meth:`textwrap.shorten` (and optionally
    :meth:`textwrap.indent`) that truncates based on the terminal width

    If the printed string should be formatted, it is recommended to set the
    ``fmt`` parameter instead of passing in a formatted string. If a formatted
    string is truncated, then the colors aren't reset, causing subsequent
    terminal output to be formatted as well

    :param text: Text to print
    :param placeholder: (Default = '...') Placeholder to use when truncating
        the string
    :param indent: (Optional) If set, indent using this string
    :param fmt: (Optional) A key in :const:`COLORS` to use for formatting when
        printing the text
    """
    # Leave room for the indent prefix when computing the usable width.
    available = _term.width - len(indent)
    colorize = COLORS.get(fmt, str)
    shortened = textwrap.shorten(text, width=available, placeholder=placeholder)
    print(textwrap.indent(colorize(shortened), indent))
# User Input
class ValidationError(Exception):
    """Exception raised if input validation fails

    Raised by the validate_* functions and caught by :func:`prompt`.
    """
    pass
def print_validation_change(message_format, original, changed):
    """Inform the user of changes to their input during validation.

    Used to keep output format consistent

    :param message_format: A format string with 2 positional fields, one for
        the original value and one for the altered value. These fields should
        be surrounded with double quotes for better readability.

        example:
            '"{0}" changed to "{1}" for compatibility'
    :param original: The original user input
    :param changed: The input after being altered
    """
    message = message_format.format(original, changed)
    print_info(message)
def validate_nonempty(text):
    """Input validation function. Raises ValidationError if text is empty

    :param text: Text to validate
    :return: Validated text
    """
    if text:
        return text
    raise ValidationError('Please enter some text.')
def validate_choice(choices, shorthand_choices=None, error_msg=None):
    """Returns a validation function for input with specific choice options

    :param choices: A list of **lowercase** strings the user can choose from
    :param shorthand_choices: (Optional) A dictionary mapping short hand
        answers to options in ``choices``. If user answers prompt with one of
        the keys in ``shorthand_choices``, the validation function will treat
        their answer as ``shorthand_choices[answer]``.

        The following example values would allow 'y' and 'n' to be accepted as
        'yes' and 'no', respectively:

        .. code-block:: python

            choices = ['yes', 'no']
            shorthand_choices = {
                'y': 'yes',
                'n': 'no',
            }
            validate_yes_no = validate_choice(choices, shorthand_choices)
            # Both of the following return 'yes'
            result0 = validate_yes_no('yes')
            result1 = validate_yes_no('y')

    :param error_msg: (Optional) Custom validation error message. By default,
        validation errors will have the message:
        ``'Please select a valid choice: [<choices>]'``
        where ``<choices>`` is a comma separated representation of the values
        in ``choices``.
    :return: A validation function that accepts a string and returns the
        corresponding item from ``choices`` if the string is valid
    """
    # Default to None instead of a mutable {} literal (shared across calls);
    # behavior is unchanged because the mapping is only ever read.
    if shorthand_choices is None:
        shorthand_choices = {}
    if error_msg is None:
        error_msg = 'Please select a valid choice: [{}]'.format(', '.join(choices))

    def val(answer):
        answer = answer.lower().strip()
        if answer in shorthand_choices:
            answer = shorthand_choices[answer]
        if answer not in choices:
            raise ValidationError(error_msg)
        return answer
    return val
def validate_yn(answer):
    """Validate y/n prompts

    :param answer: User response to y/n prompt. If a boolean value is passed
        (e.g. if a prompt received parsed_input=True), it is treated as a y/n
        answer and considered valid input
    :return: True if user answered yes, False if user answered no
    """
    # Booleans pass straight through (e.g. parsed_input=True)
    if isinstance(answer, bool):
        return answer
    normalized = answer.lower().strip()
    if normalized in ('y', 'yes'):
        return True
    if normalized in ('n', 'no'):
        return False
    raise ValidationError('Please enter "y" or "n".')
def _validate_python_identifier(identifier):
"""Removes and replaces characters and returns a valid python identifier
Python identifiers include letters, numbers, and underscores and cannot
begin with a number
:param identifier: The desired identifier string
:return: Modified identifier with invalid characters removed or replaced
"""
# Trim outer whitespace and replace inner whitespace and hyphens with underscore
validated_identifier = re.sub(r'\s+|-+', '_', identifier.strip())
# Remove non-alphanumeric or _ characters
validated_identifier = re.sub(r'[^\w\s]', '', validated_identifier)
# Remove leading characters until we hit a letter or underscore
validated_identifier = re.sub(r'^[^a-zA-Z_]+', '', validated_identifier)
if not validated_identifier:
raise ValidationError('Please enter a valid python identifier.')
return validated_identifier
def validate_package_name(package_name):
    """Removes and replaces characters to ensure a string is a valid python package name

    :param package_name: The desired package name
    :return: Modified package_name with whitespaces and hyphens replaced with
        underscores and all invalid characters removed
    :raises ValidationError: If no valid identifier can be derived
    """
    try:
        validated_package_name = _validate_python_identifier(package_name)
    except ValidationError as e:
        # Chain the cause explicitly (the original bound 'e' but never used it).
        raise ValidationError('Please enter a valid package name.') from e
    # Alert user of any changes made in validation
    if package_name != validated_package_name:
        print_validation_change(
            '"{0}" was changed to "{1}" in order to be a valid python package',
            package_name, validated_package_name
        )
    return validated_package_name
def validate_module_name(module_name):
    """Removes and replaces characters to ensure a string is a valid python
    module file name

    :param module_name: The desired module name. If the name ends in .py, the
        extension will be removed
    :return: Modified module_name with whitespaces and hyphens replaced with
        underscores and all invalid characters removed
    :raises ValidationError: If no valid identifier can be derived
    """
    # Strip .py extension if present
    module_name, ext = os.path.splitext(module_name.strip())
    try:
        validated_module_name = _validate_python_identifier(module_name)
    except ValidationError as e:
        # Chain the cause explicitly (the original bound 'e' but never used it).
        raise ValidationError('Please enter a valid module name.') from e
    # Alert the user of any changes made in validation
    if module_name != validated_module_name:
        print_validation_change(
            '"{0}" was changed to "{1}" in order to be a valid python module file',
            module_name, validated_module_name
        )
    return validated_module_name
def validate_module_filename(module_filename, suppress_ext_change=True):
    """Removes and replaces characters to ensure a string is a valid python
    module file name

    Essentially a wrapper around :func:`validate_module_name` that makes sure a
    .py extension is added to the end if needed

    :param module_filename: The desired module file name. If the .py extension
        is excluded, it will be appended after validation
    :param suppress_ext_change: (Default: True) If False, print message when
        appending .py extension to file name. Suppressed by default, as the
        user shouldn't typically be required to append .py themselves
    :return: Modified module_filename with whitespaces and hyphens replaced with
        underscores, all invalid characters removed, and a '.py' extension
        appended (if necessary)
    """
    # Strip .py extension if present, validate the stem, then re-append .py.
    base_name, ext = os.path.splitext(module_filename.strip())
    validated_filename = validate_module_name(base_name) + '.py'
    if ext != '.py' and not suppress_ext_change:
        print_info('Added .py extension for filename')
    return validated_filename
def validate_class_name(class_name):
    """Removes and replaces characters to ensure a string is a valid python
    class name

    :param class_name: The desired class name
    :return: Modified class_name with invalid characters removed/replaced
    :raises ValidationError: If no valid identifier can be derived
    """
    # TODO: Validate differently than packages? (e.g. 'class name' -> 'ClassName'?)
    try:
        validated_class_name = _validate_python_identifier(class_name)
    except ValidationError as e:
        # Chain the cause explicitly (the original bound 'e' but never used it).
        raise ValidationError('Please enter a valid class name.') from e
    # Alert the user of any changes made in validation
    if class_name != validated_class_name:
        print_validation_change(
            '"{0}" was changed to "{1}" in order to be a valid python class name',
            class_name, validated_class_name
        )
    # Print warning if first letter isn't capital
    # (python is forgiving about class names but convention says it should be camel case)
    if validated_class_name[0] != validated_class_name[0].upper():
        print_warning('Warning: Class name should start with a capital letter')
    return validated_class_name
def prompt(text, *description, default=None, validate=validate_nonempty,
           parsed_input=None, trailing_newline=True):
    """Prompt the user for input and validate it

    :param text: Text to display in prompt
    :param description: (Optional) Positional arguments after text will be printed once before user is prompted for
        input. Each argument will be printed on a new line
    :param default: (Optional) default value
    :param validate: (Default = validate_nonempty) Validation function for input
    :param parsed_input: (Default = None) If ``parsed_input`` is set to
        something other than ``None``, parser will attempt to validate it. If
        validation is successful, the input prompt will be skipped and the
        validated value of ``parsed_input`` will be returned. This allows for
        input to be passed through command line arguments, but still prompt the
        user in the event that it can't be validated
    :param trailing_newline: (Default = True) Print a blank line after receiving user
        input and successfully validating
    :return: Validated input
    """
    # Attempt to bypass prompt if parsed_input is not None
    if parsed_input is not None:
        try:
            val = validate(parsed_input)
        except ValidationError as e:
            # Fall through to the interactive prompt on failure.
            print_exception(e)
        else:
            # If no errors were raised, return validated input
            return val
    # Input prompt
    if description:
        print(*description, sep='\n')
    prompt_text = '{} [{}]: '.format(text, default) if default is not None else text + ': '
    prompt_text = COLORS['prompt'](PROMPT_PREFIX + prompt_text)
    # Re-prompt until the validator accepts the answer.
    while True:
        val = input(prompt_text).strip()
        if default is not None and not val:
            # Blank answer falls back to the default value.
            val = default
        try:
            val = validate(val)
        except ValidationError as e:
            print_exception(e)
            continue
        break
    if trailing_newline:
        print('')
    return val
|
import quick2wire.i2c as i2c
import time
#bus = smbus.SMBus(1)
# This is the address we setup in the Arduino Program
address = 0x04
gpio_register = 0x09
def writeNumber(value):
    """Write *value* into the Arduino's GPIO register over I2C.

    Always returns -1 (historical; the caller ignores the result).
    """
    with i2c.I2CMaster() as bus:
        bus.transaction(
            i2c.writing_bytes(address, gpio_register, value))
    return -1


def readNumber():
    """Read one byte back from the Arduino over I2C and return it as an int."""
    with i2c.I2CMaster() as bus:
        read_results = bus.transaction(
            i2c.reading(address, 1))
    # transaction() returns a list of byte strings; take the first byte.
    number = read_results[0][0]
    return number
# Interactive loop: send a digit to the Arduino and echo what it reads back.
while True:
    var = int(input("Enter 1 - 9: "))
    if not var:
        # 0 is not a valid digit to send; ask again.
        continue
    writeNumber(var)
    print("RPI: Hi Arduino, I sent you ", var)
    # sleep one second
    time.sleep(1)
    number = readNumber()
    print("Arduino: Hey RPI, I received a digit ", number)
    # Bug fix: a bare 'print' is a Python 2 leftover and is a no-op
    # expression in Python 3; call it to emit the intended blank line.
    print()
|
import ImageChops, Image, ImageMath, ImageFilter
import numpy as np
from utils import *
class SOBEL(ImageFilter.Filter):
    """Sobel edge-detection filter for 8-bit grayscale ("L") images.

    Applies the horizontal and vertical 3x3 Sobel kernels and sums the
    absolute responses. Python 2 only (uses apply()).
    """
    name = "sobel filter"

    def filter(self, image):
        if image.mode != "L":
            raise ValueError("image mode must be L")
        # Kernel specs: (size, scale, offset, coefficients).
        dx = (3, 3), 1, 0, [-1, 0, 1, -2, 0, 2, -1, 0, 1]
        dy = (3, 3), 1, 0, [1, 2, 1, 0, 0, 0, -1, -2, -1]
        # NOTE(review): apply(image.filter, dx) passes the tuple elements as
        # positional args -- presumably relying on an old PIL internal filter
        # API; verify against the PIL version in use.
        imx = Image.Image()._new(apply(image.filter, dx))
        imy = Image.Image()._new(apply(image.filter, dy))
        # Gradient magnitude approximated as |Gx| + |Gy|.
        imx = imx.point(lambda i: abs(i), None)
        imy = imy.point(lambda i: abs(i), None)
        im = ImageChops.add(imx, imy)
        return im.im
def nonmax_supression(image, size=3):
    """Attenuate pixels that are not the maximum of their size x size window.

    :param image: Grayscale PIL image.
    :param size: Side length of the max-filter window.
    :return: New PIL image with non-maximum pixels suppressed.
    """
    maxim = image.filter(ImageFilter.MaxFilter(size))
    maxim_array = np.array(maxim, dtype=np.uint32)
    image_array = np.array(image, dtype=np.uint32)
    try:
        # +1 on both sides avoids a zero denominator; a pixel survives
        # (scaled by ~1) only where it equals the local maximum.
        div = (image_array+1) / (maxim_array+1) * image_array
    except:
        # NOTE(review): with the +1 denominator this division cannot raise
        # ZeroDivisionError, so this branch looks unreachable.
        raise ZeroDivisionError("modulo by zero")
    return Image.fromarray( np.uint8(div) )
def canny_edge_detection(image, nonmaxsize=3):
    """Partial Canny edge detector: Gaussian smooth, Sobel gradient,
    direction quantization, and non-max suppression.

    NOTE(review): the quantized direction array `theta` is computed but
    never used by the final suppression step. Python 2 only (xrange).

    :param image: Grayscale PIL image.
    :param nonmaxsize: Window size passed to nonmax_supression.
    :return: Edge image after non-max suppression.
    """
    #gaussian smooth
    ga = np.array([2, 4, 5, 4, 2,
                   4, 9, 12, 9, 4,
                   5, 12, 15, 12, 5,
                   4, 9, 12, 9, 4,
                   2, 4, 5, 4, 2])
    ga = 1.0 / 159 * ga  # normalize the 5x5 Gaussian kernel
    dga = (5, 5), ga, 1, 0
    image = image.filter(ImageFilter.Kernel(*dga))
    #sobel edge
    dx = (3, 3), [-1, 0, 1, -2, 0, 2, -1, 0, 1], 1, 0
    dy = (3, 3), [1, 2, 1, 0, 0, 0, -1, -2, -1], 1, 0
    imx = image.filter(ImageFilter.Kernel(*dx))
    imy = image.filter(ImageFilter.Kernel(*dy))
    imx = imx.point(lambda i: abs(i))
    imy = imy.point(lambda i: abs(i))
    im = ImageChops.add(imx, imy, 2)
    sizex, sizey = im.size
    mx = imx.load()
    my = imy.load()
    #edge direction, quantized to 0/45/90/135 degrees
    theta = np.zeros((sizex, sizey))
    for i in xrange(sizex):
        for j in xrange(sizey):
            if mx[i, j] == 0:
                # Vertical gradient only (avoid division by zero).
                if my[i, j] == 0:
                    v = 0
                else:
                    v = 90.0
            else:
                v = np.degrees( np.arctan( my[i, j] / mx[i, j]) )
            if 22.5 >= v >= 0 or 180 >= v >=157.5:
                v = 0.0
            elif 67.5 >= v >= 22.5:
                v = 45.0
            elif 112.5 >= v >= 67.5:
                v = 90.0
            else:
                v = 135.0
            theta[i, j] = v
    #nonmax supression
    out = nonmax_supression(im, nonmaxsize)
    return out
if __name__== "__main__":
    # Demo: run the edge detector on the classic Lena test image.
    x = Image.open("lena512.bmp")
    x = x.convert("L")  # grayscale is required by the pipeline
    canny = canny_edge_detection(x, 3)
    canny.save("lena_canny.png")
|
"""
@ Author : hong-il
@ Date : 2021-07-24
@ File name : close.py
@ File path :
@ Description :
"""
from datetime import datetime
import pandas_datareader as pa
from financeDB import financeDB
# Initialize the DB connection helper class
fb = financeDB()
# Connect to the DB
conn = fb.get_connection()
start = datetime(2021, 7, 22)
end = datetime(2021, 7, 23)
# LG Electronics
df = pa.DataReader("066570.KS", "yahoo", start, end)
# DataFrame -> dict conversion (date -> adjusted close)
dd = df["Adj Close"].to_dict()
# Iterate over the dict keys (dates) and values (prices)
for k, v in dd.items():
    # Upsert one row; the date key is reformatted to YYYYMMDD.
    sql = (f"""
        INSERT INTO stock_close (
            ID
            , DT
            , ADJ_CLOSE
            , REG_DTM
            , REG_ID
        ) VALUES (
            '066570.KS'
            , {str(k).replace('-', '')[0:8]}
            , {v}
            , NOW()
            , 'init'
        ) ON DUPLICATE KEY UPDATE
            ADJ_CLOSE = {v}
            , REG_DTM = NOW()
            , REG_ID = 'modi'
    """)
    # Execute the SQL
    with conn.cursor() as cur:
        cur.execute(sql)
    # Commit to the DB
    conn.commit()
|
from gurobipy import *
import os

# Python 2 script. Pick the second entry in the data directory
# (listdir order is platform-dependent).
data = os.listdir('../data/')[1]
print "Data: %s" % data
# Edge list: u[i]-v[i] are the endpoints of edge i.
u, v = [], []
with open('../data/' + data, 'r') as fInput:
    for line in fInput.readlines():
        x, y = map(int, line.split())
        u.append(x)
        v.append(y)
model = Model('MaxIndependentSet')
# One binary variable per vertex: 1 iff the vertex is in the set.
selected = model.addVars(range(max(u + v) + 1), vtype = GRB.BINARY, name = "selected")
# At most one endpoint of each edge may be selected.
model.addConstrs((selected[u[i]] + selected[v[i]] <= 1 for i in range(len(u))), name = 'independence')
# NOTE(review): redundant (binary vars are already <= 1) and the range
# misses the last vertex index; harmless either way.
model.addConstrs((selected[i] <= 1 for i in range(max(u + v))), name = 'limit1')
# Maximize the number of selected vertices.
model.setObjective(sum(selected[i] for i in range(max(u + v) + 1)), GRB.MAXIMIZE)
model.optimize()
print "Max Independent Set Size: %d" % int(model.objVal)
|
# -*- coding: utf-8 -*-
import requests
import datetime,time,random,re
import json
import pandas as pd
from bs4 import BeautifulSoup
from queue import Queue #线程
from threading import Thread #线程
from pyquery import PyQuery as pq
from lxml import etree
import urllib.request
import urllib.parse
import string
# Pandas display options so debug prints show full frames.
pd.set_option('display.width',3000)
pd.set_option('display.max_columns',None)
pd.set_option('display.max_rows',None)
pd.set_option('display.max_colwidth',2000)
# A User-Agent is picked at random per run to look less like a bot.
user_agent_list = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36 Edge/18.17763",
    "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"
]
headers = {"User-Agent": random.choice(user_agent_list),
           }
# Province-level name suffixes and the four municipalities, used for classification.
p = ['省', '行政区', '自治区']
s = ['上海市', '北京市', '天津市', '重庆市']
class get_citys(object):
    """Scrapes the MCA administrative-division site and builds a merged
    province/city/district table exported to Excel."""

    def __init__(self,**args):
        # Province-level suffixes and the four municipalities (duplicates
        # of the module-level p/s; the static methods use the globals).
        self.p = ['省','行政区','自治区']
        self.s = ['上海市' , '北京市' ,'天津市' , '重庆市']
        self.data = None            # raw HTML of the query page
        self.provinces_list = None  # parsed province records
        self.area_list = None       # parsed area records
        self.area_list_df = None    # area records as a DataFrame
        self.area_df = None         # place-name table read from the HTML

    def get_map(self,datatype):
        '''Fetch the page from the web when datatype='url'; read the cached local html when datatype='localfile'.'''
        if datatype=='url':
            url2='http://xzqh.mca.gov.cn/defaultQuery?shengji=-1&diji=-1&xianji=-1'
            response = requests.get(url2,timeout=3,headers=headers) #
            self.data = response.text
            with open('city.html', 'w') as f:  # cache the response to avoid re-requesting while testing
                f.write(self.data)
        elif datatype=='localfile':
            with open('city.html','r') as f:
                self.data = f.read()

    # Obtain the flat location list of all provinces/cities/districts (no hierarchy).
    def get_detail(self,**args):
        # Columns: ['地 名', '驻地', '人口(万人)', '面积(平方千米)', '行政区划代码', '区号', '邮编']
        # Read every table in the html; index [2] is the place-name table.
        self.area_df = pd.read_html(self.data)[2]
        print(self.area_df)
        xpath_address = etree.HTML(self.data)
        # Province info is embedded as JSON in the last <script> tag.
        provinces = xpath_address.xpath("//script[last()]/text()")
        provinces_re = re.findall('json = ' + '(.+?);', str(provinces))[0]  # regex out the province list; [0] is a string
        self.provinces_list = eval(provinces_re)
        print(type(self.provinces_list),self.provinces_list)
        # Area info is embedded in hidden form inputs.
        areas=xpath_address.xpath("//input[@type='hidden']/@value")
        self.area_list = eval(areas[3])  # convert the parsed text into a list
        self.area_list_df = pd.DataFrame(self.area_list)
        print(type(self.area_list),self.area_list)

    @staticmethod
    def get_city(area,*args):
        '''Derive the city name and its level from an area entry.'''
        # NOTE(review): reads the module-level 's' list, not self.s.
        # area=self.area_df['地 名'].strip().replace("+","")
        if any(k in area for k in s):
            city=area
            type='Municipality'
        elif '+' in area:
            city = area
            type='shiji'
        else:
            city=None
            type='quxianji'
        return city ,type

    @staticmethod
    def get_province(pron,*args):
        '''Derive the province name and its level from a province entry.'''
        if any(k in pron for k in p):
            province=pron
            type='shengji'
        elif any(k in pron for k in s):
            province=pron
            type=""
        else:
            province=None
            type=""
        return province,type

    def run(self):
        '''Main driver: fetch, parse, classify, merge and export the tables.'''
        self.get_map('url')  # fetch from the web ('url' mode)
        self.get_detail()
        # Add the owning-city column plus the entry type.
        self.area_df['cityof'], self.area_df['type'] =zip(*self.area_df['地 名'].apply(self.get_city))
        # self.area_df['cityof'] =self.area_df.apply(self.get_city,axis=1)
        self.area_df['cityof'] = self.area_df['cityof'].fillna(method='ffill')
        self.area_df['地 名'] = self.area_df['地 名'].apply(lambda x: x.strip().replace("+", ""))
        print(self.area_df.head(50))
        # Add the owning province/region/municipality column and its type.
        print('hhh:',self.area_list_df['cName'].apply(self.get_province))
        self.area_list_df['province'], self.area_list_df['type'] = zip(*self.area_list_df['cName'].apply(self.get_province))
        self.area_list_df['province'] = self.area_list_df['province'].fillna(method='ffill')
        # print(area_list_df.head(50))
        # Merge the province and city tables into the final result.
        self.area_all = pd.merge(self.area_list_df, self.area_df, how='left', left_on='cName', right_on='地 名')
        self.area_all['type_y'] = self.area_all['type_y'].fillna('shengji')
        print(self.area_all.head(1000))
        self.area_all.to_excel('area_all.xlsx', encoding='utf-8')
        return self.area_all
if __name__ == '__main__':
    # Run the full scrape and keep the merged table.
    citys=get_citys()
    area_all=citys.run()
# Spare reference data; currently unused
province = '''省、自治区、直辖市
北京市(京)
天津市(津)
河北省(冀)
山西省(晋)
内蒙古自治区(内蒙古)
辽宁省(辽)
吉林省(吉)
黑龙江省(黑)
上海市(沪)
江苏省(苏)
浙江省(浙)
安徽省(皖)
福建省(闽)
江西省(赣)
山东省(鲁)
河南省(豫)
湖北省(鄂)
湖南省(湘)
广东省(粤)
广西壮族自治区(桂)
海南省(琼)
重庆市(渝)
四川省(川、蜀)
贵州省(黔、贵)
云南省(滇、云)
西藏自治区(藏)
陕西省(陕、秦)
甘肃省(甘、陇)
青海省(青)
宁夏回族自治区(宁)
新疆维吾尔自治区(新)
香港特别行政区(港)
澳门特别行政区(澳)
台湾省(台)''' |
#!/usr/bin/python
# -*- coding: utf-8; mode: python -*-
import gflags, os, traceback
FLAGS = gflags.FLAGS
def normalize_sub(fname):
    """Return the submission owner: the next-to-last component of *fname*'s path."""
    parts = fname.split('/')
    return parts[-2]
def report_correct(f, fname):
    """Write a CORRECTO entry for submission *fname* to report file *f*."""
    f.write('CORRECTO ' + normalize_sub(fname) + '\n\n')


def report_wrong(f, fname, expected, actual):
    """Write an ERROR entry showing expected vs. actual output."""
    f.write('ERROR ' + normalize_sub(fname) + '\n')
    f.write('  Se esperaba: ' + expected + '\n')
    f.write('  Se obtuvo:   ' + actual + '\n\n')


def report_exception(f, fname, expected):
    """Write an ERROR entry for a submission that raised, including the traceback."""
    f.write('ERROR ' + normalize_sub(fname) + '\n')
    f.write('  Se esperaba: ' + expected + '\n')
    f.write('  Ocurrió una excepción\n')
    traceback.print_exc(file=f)
    f.write('\n')
def report_open(fname):
    """Open (creating directories as needed) the report file mirroring *fname*.

    Maps the submission path from FLAGS.local into FLAGS.reportdir.
    """
    reportpath = fname.replace(FLAGS.local, FLAGS.reportdir, 1)
    try:
        os.makedirs(os.path.dirname(reportpath))
    except:
        # Directory may already exist; ignore (pre-exist_ok idiom).
        pass
    # NOTE(review): opened in binary mode but the report_* helpers write
    # str -- fine on Python 2 (gflags era); would fail on Python 3. Verify.
    return open(reportpath, 'wb')
|
# -*- coding: utf-8 -*-
from model.person import Person
def test_add_person(app, db, json_persons, check_ui):
    """Creating a person grows the DB list by one and keeps DB (and,
    optionally, UI) lists consistent with the expected contents."""
    person = json_persons
    before = db.get_person_list()
    app.person.create(person)
    after = db.get_person_list()
    assert len(before) + 1 == len(after)
    expected = before + [person]
    assert sorted(expected, key=Person.id_or_max) == sorted(after, key=Person.id_or_max)
    if check_ui:
        ui_list = app.person.get_person_list()
        assert sorted(after, key=Person.id_or_max) == sorted(ui_list, key=Person.id_or_max)
|
import bz2
# Credentials stored as bz2-compressed bytes; decompress + decode to recover.
username = b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084'
password = b'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08'
decoded_username = bz2.decompress(username).decode()  # "huge"
decoded_password = bz2.decompress(password).decode()  # "file"
print(decoded_username)
print(decoded_password)
import sys
import time
import datetime
import RPi.GPIO as GPIO
import requests
BUTTON_A = 6
BUTTON_B = 9
g_button_a = False
g_button_b = False
URL='http://localhost:5000/api/shower_toggle'
def main():
    """Poll two GPIO buttons and drive a small three-state machine.

    States: 0 = waiting for button A, 1 = active (counts ~1s of 0.1s ticks),
    2 = cool-down before accepting the next press. Returns 0 after Ctrl-C.
    """
    global g_button_a
    global g_button_b
    status = 0
    resume = 0
    loop = 0
    try:
        GPIO.setmode(GPIO.BCM)
        # Each entry pairs a BCM pin with its edge-detect callback.
        buttons = [
            [BUTTON_A, button_a_pressed],
            [BUTTON_B, button_b_pressed],
        ]
        for button in buttons:
            GPIO.setup(button[0], GPIO.IN, pull_up_down=GPIO.PUD_UP)
            GPIO.add_event_detect(button[0], GPIO.FALLING, bouncetime=200)
            GPIO.add_event_callback(button[0], button[1])
        while True:
            time.sleep(0.1)
            if status == 0:
                if g_button_a:
                    # Button A fired: trigger the shower endpoint once.
                    status = 1
                    print_message("button 2 pressed")
                    toggle_shower(2)
            elif status == 1:
                loop += 1
                if 10 < loop:
                    status = 2
                    loop = 0
                    print_message("exiting process")
                elif loop % 20 == 0:
                    # NOTE(review): unreachable — loop is always in 1..10 in this
                    # branch, so loop % 20 == 0 never holds. Confirm intent.
                    print_message("executing")
            elif status == 2:
                resume += 1
                if 10 <= resume:  # 1sec up
                    status = 0
                    resume = 0
                    print_message("1 sec elapsed. transitioning to waiting status")
                    g_button_a = False
                    g_button_b = False
    except KeyboardInterrupt:
        print("KeyboardInterrupt")
        print("stopping...")
        GPIO.cleanup()
    return 0
def toggle_shower(pin):
    """GET the shower-toggle endpoint for *pin*, printing the response body;
    any failure is reported instead of raised."""
    try:
        response = requests.get(f"{URL}/{pin}")
        print(response.text)
    except Exception as exc:
        print("error: {}".format(exc))
def button_a_pressed(gpio_no):
    """GPIO edge callback: latch that button A fired (gpio_no is unused)."""
    global g_button_a
    g_button_a = True
def button_b_pressed(gpio_no):
    """GPIO edge callback: latch that button B fired (gpio_no is unused)."""
    global g_button_b
    g_button_b = True
def print_message(message):
    """Print *message* prefixed with a microsecond-resolution timestamp."""
    stamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
    print("{}: {}".format(stamp, message))
if __name__ == "__main__":
sys.exit(main())
|
import sys
from shapely.geometry import LineString
# Position relative to the start, in blocks, plus the current heading.
north = 0
east = 0
direction = 'N'
# Heading transition table: current heading -> turn letter ('L'/'R') -> new heading.
changedir = {'N': {'L': 'W', 'R':'E'}, 'E': {'L':'N','R':'S'}, 'S': {'L':'E','R':'W'}, 'W':{'L':'S','R':'N'}}
# All path segments walked so far, as ((x0, y0), (x1, y1)) pairs.
segments = []
def intersection(s0, s1):
    """Return the (x, y) crossing point of two line segments, or None
    when they do not intersect."""
    crossing = LineString(s0).intersection(LineString(s1))
    return (crossing.x, crossing.y) if crossing else None
# Read the comma-separated instruction list (e.g. "R2, L3") from stdin.
# NOTE: this file is Python 2 (print statements below).
instr = sys.stdin.read()
for instruction in instr.split(','):
    instruction = instruction.strip()
    turn = instruction[0]
    blocks = int(instruction[1:])
    direction = changedir[direction][turn]
    startlocation = (east, north)
    if direction == 'N':
        north += blocks
    elif direction == 'S':
        north -= blocks
    elif direction == 'E':
        east += blocks
    elif direction == 'W':
        east -= blocks
    endlocation = (east,north)
    sn = (startlocation, endlocation)
    # Test the new segment against all earlier segments except the most
    # recent one, which always shares an endpoint with it.
    for s in segments[:-1]:
        i = intersection(s, sn)
        if i:
            # First self-crossing: print its taxicab distance and exit 0.
            print abs(i[0])+abs(i[1])
            sys.exit(0)
    segments.append(sn)
# No crossing found: report the final position's distance, exit 1.
print abs(north)+abs(east)
sys.exit(1)
|
# -*- coding: utf-8 -*-
import arrow
import copy
import logging
import time
import requests
from . import exceptions, utils
from .utils import Sign
class APIMixin(object):
    """Mixin providing signed HTTP request helpers for the Duobei API.

    Subclasses must implement get_api_prefix(). (Python 2 module: note
    the dict.iteritems() usage.)
    """

    def __init__(self, partner_id, api_key):
        self.partner_id = partner_id
        self.api_key = api_key
        self.sign = Sign(api_key)

    def get_api_prefix(self):
        """Return the base URL for the concrete API; must be overridden."""
        raise NotImplementedError()

    def get_url(self, path):
        """Join *path* onto the subclass-provided API prefix."""
        return '{}{}'.format(self.get_api_prefix(), path)

    def process_params(self, params):
        """Drop None/empty-string values, then attach the request signature."""
        cleaned = {}
        for key, val in params.iteritems():
            if val is None or val == '':
                continue
            cleaned[key] = val
        cleaned['sign'] = self.sign.sign(cleaned)
        return cleaned

    def get_request_url(self, url, params=None):
        """Return *url* with signed query parameters; raises
        DuobeiSDKInvalidParamException for non-dict params."""
        params = params or {}
        if not isinstance(params, dict):
            raise exceptions.DuobeiSDKInvalidParamException()
        return utils.format_url(url, self.process_params(params))

    def request(self, url, params, method='get', timeout=3, response_format_type='json'):
        """Perform a signed GET/POST.

        Returns parsed JSON (raising DuobeiSDKServerException when the
        response reports failure) or the raw body for non-json formats.
        """
        if not isinstance(params, dict):
            raise exceptions.DuobeiSDKInvalidParamException()
        signed = self.process_params(params)
        if method == 'get':
            response = requests.get(url, params=signed, timeout=timeout)
        elif method == 'post':
            response = requests.post(url, data=signed, timeout=timeout)
        else:
            raise NotImplementedError()
        if response_format_type != 'json':
            return response.content
        data = response.json()
        if not data['success']:
            logging.error('[DuobeiSDK request error]: %s', data)
            raise exceptions.DuobeiSDKServerException(str(data))
        return data

    def get_now_timestamp(self):
        """Current time in milliseconds since the epoch."""
        return int(time.time() * 1000)
|
import csv
##########################
# Append each legislator's birth year (parsed from the YYYY-MM-DD birthday
# column) as an integer; rows with a missing/invalid birthday get 0.
with open("C:\\dev\\python\\legislators.csv", 'r') as f:
    # `with` guarantees the file handle is closed (it previously leaked).
    legislators = list(csv.reader(f))
for item in legislators:
    try:
        birth_year = int(item[2].split('-')[0])
    except (IndexError, ValueError):
        # Short row or non-numeric year field — fall back to 0.
        birth_year = 0
    item.append(birth_year)
print(legislators)
from flask import Flask, request, redirect, render_template, session, flash
from flask_sqlalchemy import SQLAlchemy
#from hashutils import make_pw_hash, check_pw_hash
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): DB credentials and secret key are hard-coded below; move
# them to environment/config before deploying.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:build-a-blog@localhost:8889/build-a-blog'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
app.secret_key = 'y337kGcys&zP3B'
class Blog(db.Model):
    # Persistent blog post: auto-increment id, short title, long body text.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    body = db.Column(db.String(10000))

    def __init__(self, title, body):
        """Create a post with the given title and body text."""
        self.title = title
        self.body = body
@app.route('/blog', methods=['POST', 'GET'])
def blog():
    """Render the post list, or a single post when ?id= is given on a GET."""
    blogs = Blog.query.all()
    if request.method == 'GET' and request.args.get('id'):
        post_id = request.args.get('id')
        post = Blog.query.filter_by(id=post_id).first()
        return render_template('post.html', title="Blog Post", blog=post)
    # POST, or GET without an id: show the full listing.
    return render_template('blog.html', title="My Blog Posts",
                           blogs=blogs)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
    """Show the new-post form; on POST validate both fields, create the
    post when valid, otherwise re-render the form with error messages."""
    if request.method == 'POST':
        blog_title = request.form['blog_title']
        body = request.form['body']
        title_ok = empty_post(blog_title)
        body_ok = empty_post(body)
        if title_ok and body_ok:
            new_post = Blog(blog_title, body)
            db.session.add(new_post)
            db.session.commit()
            saved = Blog.query.filter_by(title=blog_title, body=body).first()
            return render_template('post.html', blog=saved)
        # Collect an error message for each blank field, then re-render.
        errors = {}
        if not title_ok:
            errors['title_error'] = "Title cannot be blank"
        if not body_ok:
            errors['body_error'] = "Blog Body cannot be blank"
        return render_template('newpost.html', blog_title=blog_title,
                               body=body, **errors)
    return render_template('newpost.html', title="New Post Page", blog_title="", body="")
def empty_post(content):
    """Return True when *content* is non-empty (truthy), else False.

    (Despite the name, True means the post HAS content.)
    """
    return bool(content)
if __name__ == '__main__':
    # Start the Flask development server (DEBUG is enabled in the config above).
    app.run()
from typing import Union, Dict, Type
from intcode.interfaces.base_op import BaseOp
from intcode.ops.addition import AdditionOp
from intcode.ops.equals import EqualsOp
from intcode.ops.halt import HaltOp
from intcode.ops.jump_if_false import JumpIfFalseOp
from intcode.ops.jump_if_true import JumpIfTrueOp
from intcode.ops.less_than import LessThanOp
from intcode.ops.multiplication import MultiplicationOp
from intcode.ops.print_value import PrintValueOp
from intcode.ops.read_input import ReadInputOp
from intcode.ops.shift_relative_base import ShiftRelativeBaseOp
class OpFactory:
    """Maps integer opcodes to their BaseOp implementation classes."""

    # Registry of supported opcodes; 99 halts the machine.
    __OPCODES : Dict[int, Type[BaseOp]] = {
        1: AdditionOp,
        2: MultiplicationOp,
        3: ReadInputOp,
        4: PrintValueOp,
        5: JumpIfTrueOp,
        6: JumpIfFalseOp,
        7: LessThanOp,
        8: EqualsOp,
        9: ShiftRelativeBaseOp,
        99: HaltOp
    }

    @classmethod
    def get_op(cls, op_code: Union[int, str]) -> Type[BaseOp]:
        """Return the op class for *op_code* (an int or numeric string).

        Raises KeyError for unknown opcodes.
        """
        op_code = int(op_code)
        if op_code not in cls.__OPCODES:
            # Fixed typo in the error message ("could't" -> "couldn't").
            raise KeyError(f"Opcode '{op_code}' couldn't be found")
        return cls.__OPCODES[op_code]
|
class Area:
    """Rectangular grid of integer cell values, indexed by (row, col) pairs."""

    def __init__(self, width, height):
        self.width, self.height = width, height
        self.grid = []

    def add(self, row):
        """Append one row of cell values to the grid."""
        self.grid.append(row)

    def __getitem__(self, item):
        row, col = tuple(item)
        return self.grid[row][col]

    def __setitem__(self, key, value):
        row, col = tuple(key)
        self.grid[row][col] = value

    def __contains__(self, item):
        row, col = tuple(item)
        return 0 <= row < self.width and 0 <= col < self.height

    def get_closest(self, start_loc):
        """Breadth-first search outward from start_loc; return the nearest
        location whose cell value is positive, or None when none is reachable."""
        visited = {start_loc}
        frontier = {Path(start_loc, 0)}

        def positive_hits(paths):
            return [p for p in paths if self[p.last] > 0]

        # Expand one BFS ring at a time until a ring contains a positive cell.
        while frontier and not positive_hits(frontier):
            expanding = frontier.copy()
            frontier.clear()
            for path in expanding:
                for successor in path.next(visited):
                    frontier.add(successor)
        found = positive_hits(frontier)
        if not found:
            return None
        return found[0].last
class Coor:
    """2D grid coordinate supporting +/-, taxicab length, hashing, and
    interop with plain (x, y) tuples."""

    def __init__(self, loc):
        self.x, self.y = tuple(loc)

    def __iter__(self):
        yield self.x
        yield self.y

    def __add__(self, other):
        # `other` may be a Coor or any 2-element iterable.
        ox, oy = tuple(other)
        return Coor((self.x + ox, self.y + oy))

    def __sub__(self, other):
        # Bug fix: this previously returned other - self (operands reversed).
        # The only in-file use wraps the result in len() (absolute values),
        # so existing callers are unaffected by the sign correction.
        ox, oy = tuple(other)
        return Coor((self.x - ox, self.y - oy))

    def __len__(self):
        """Taxicab (L1) norm: |x| + |y|."""
        return abs(self.x) + abs(self.y)

    def __eq__(self, other):
        # Bug fix: compare coordinates, not hash values — hashes can collide
        # and previously could report unequal coordinates as equal.
        return tuple(self) == tuple(other)

    def __hash__(self):
        return hash(tuple(self))

    def __str__(self):
        return f"x: {self.x},y: {self.y}"
class Path:
    """A BFS node: the furthest location reached plus the distance walked."""

    def __init__(self, last, dis):
        self.last = last
        self.dis = dis

    def next(self, visited):
        """Yield Paths to unvisited in-bounds neighbours, marking each as
        visited. Relies on the module-level OFFSET list and `area` grid."""
        for step in OFFSET:
            candidate = self.last + step
            if candidate in area and candidate not in visited:
                visited.add(candidate)
                yield Path(candidate, self.dis + 1)
if __name__ == '__main__':
    # The 8 neighbour offsets (diagonals included).
    OFFSET = []
    for i in range(-1,2):
        for j in range(-1,2):
            if(i,j) != (0,0):
                OFFSET.append(Coor((i,j)))
    # N rows, M columns, Q queries.
    N,M,Q = tuple(map(int,input('').split(' ')))
    area = Area(N,M)
    for _ in range(N):
        row = list(map(int,input('').split(' ')))
        print(row)
        area.add(row)
    queries = []
    for _ in range(Q):
        queries.append(tuple(map(int,input('').split(' '))))
    for type,a,b,K in queries:
        if type == 0:
            # Consume K units from the nearest positive cells around (a, b),
            # accumulating travel distance; `replace` remembers what was taken
            # so the grid can be restored if demand cannot be met.
            replace = {}
            need = K
            distance = 0
            while need > 0:
                best = area.get_closest(Coor((a,b)))
                if best == None:
                    # Not enough supply: undo all withdrawals, report -1.
                    for best,amount in replace.items():
                        area[best] = amount
                    distance = -1
                    break
                if area[best] >= need:
                    area[best] -= need
                    replace[best] = need
                    distance += need*(2*(len(best-(a,b))-1)+1)
                else:
                    need -= area[best]
                    replace[best] = area[best]
                    distance += area[best]*(2*(len(best-(a,b))-1)+1)
                    area[best] = 0
            print(distance)
        elif type == 1:
            # Query type 1: overwrite cell (a, b) with K.
            area[(a,b)] = K
'''
4 5 6
0 0 1 0 2
1 0 0 0 0
0 0 0 1 0
4 0 0 0 1
0 0 0 2
0 2 0 6
0 3 4 3
0 3 3 12
1 0 0 1
0 0 0 2
'''
""" cnvpytor.io
class IO: Reading/writing CNVpytor files (extension .pytor) using h5py library.
"""
from __future__ import absolute_import, print_function, division
from .genome import Genome
from .utils import *
from .version import __version__
import datetime
import logging
import os.path
import io
import numpy as np
import h5py
import re
_logger = logging.getLogger("cnvpytor.io")
FLAG_AUTO = 0x0001
FLAG_SEX = 0x0002
FLAG_MT = 0x0004
FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020
FLAG_USEMASK = 0x0100
FLAG_USEID = 0x0200
FLAG_USEHAP = 0x0400
class Signals(object):
    """Registry of h5py dataset-name templates and helpers to render them.

    Each template is a printf-style mapping pattern filled in by
    signal_name() with chromosome, bin size, flag suffixes and name.
    """

    signals = {
        "RD p": "%(chr)s_rd_p",
        "RD u": "%(chr)s_rd_u",
        "GC/AT": "%(chr)s_gc",
        "mask": "%(chr)s_mask",
        "GC corr": "gc_corr_%(bin_size)d%(flag)s",
        "RD p dist": "dist_rd_p_%(bin_size)d%(flag)s",
        "RD u dist": "dist_rd_u_%(bin_size)d%(flag)s",
        "RD GC dist": "dist_rd_gc_%(bin_size)d%(flag)s",
        "RD stat": "rd_stat_%(bin_size)d%(flag)s",
        "RD": "his_rd_p_%(chr)s_%(bin_size)d%(rd_flag)s",
        "RD unique": "his_rd_u_%(chr)s_%(bin_size)d%(rd_flag)s",
        "RD raw": "his_rd_p_%(chr)s_%(bin_size)d%(rd_flag)s_raw",
        "RD l1": "his_rd_p_%(chr)s_%(bin_size)d%(rd_flag)s_l1",
        "RD l2": "his_rd_p_%(chr)s_%(bin_size)d%(rd_flag)s_l2",
        "RD l3": "his_rd_p_%(chr)s_%(bin_size)d%(rd_flag)s_l3",
        "RD partition": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s",
        "RD call": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_merge",
        "RD mosaic segments": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_segments",
        "RD mosaic call": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_call",
        "RD mosaic segments 2d": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_segments_2d",
        "RD mosaic call 2d": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_call_2d",
        "RD mosaic segments baf": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_segments_baf",
        "RD mosaic call baf": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_call_baf",
        "RD mosaic segments 2d phased": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_segments_2d_phased",
        "RD mosaic call 2d phased": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_call_2d_phased",
        "RD mosaic segments baf phased": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_segments_baf_phased",
        "RD mosaic call baf phased": "his_rd_p_%(chr)s_%(bin_size)d_partition%(rd_flag)s_mosaic_call_baf_phased",
        "RD level": "rd_level_%(bin_size)d%(flag)s",
        # Bug fix: "%(bin_size)" was missing the 'd' conversion character,
        # which raises ValueError ("incomplete format") when rendered.
        "GC": "%(chr)s_gc_%(bin_size)d",
        "SNP pos": "%(chr)s_snp_pos",
        "SNP desc": "%(chr)s_snp_desc",
        "SNP counts": "%(chr)s_snp_counts",
        "SNP qual": "%(chr)s_snp_qual",
        "SNP bin count 0|0": "snp_bafc_00_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin count 0|1": "snp_bafc_01_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin count 1|0": "snp_bafc_10_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin count 1|1": "snp_bafc_11_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin reads 0|0": "snp_readc_00_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin reads 0|1": "snp_readc_01_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin reads 1|0": "snp_readc_10_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP bin reads 1|1": "snp_readc_11_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP baf": "snp_baf_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP maf": "snp_maf_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP likelihood": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP likelihood half": "snp_likelihood_half_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP i1": "snp_i1_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP i2": "snp_i2_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP i3": "snp_i3_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP i4": "snp_i4_bafc_%(chr)s_%(bin_size)d%(snp_flag)s",
        "SNP likelihood partition": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP maf partition": "snp_maf_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP i1 partition": "snp_i1_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP i2 partition": "snp_i2_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP i3 partition": "snp_i3_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP i4 partition": "snp_i4_%(chr)s_%(bin_size)d%(snp_flag)s_partition",
        "SNP likelihood segments": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_segments",
        "SNP likelihood call": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "SNP likelihood segments 2d": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_segments_2d",
        "SNP likelihood call 2d": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_call_2d",
        "SNP likelihood segments baf": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_segments_baf",
        "SNP likelihood call baf": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_call_baf",
        "SNP read counts segments 2d phased": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_segments_2d_phased",
        "SNP read counts call 2d phased": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_call_2d_phased",
        "SNP 2d call flipped bins": "snp_%(chr)s_%(bin_size)d%(snp_flag)s_call_2d_flipped_bins",
        "SNP read counts segments baf phased": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_segments_baf_phased",
        "SNP read counts call baf phased": "snp_likelihood_%(chr)s_%(bin_size)d%(snp_flag)s_call_baf_phased",
        "SNP baf call flipped bins": "snp_%(chr)s_%(bin_size)d%(snp_flag)s_call_baf_flipped_bins",
        "SNP maf call": "snp_maf_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "SNP i1 call": "snp_i1_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "SNP i2 call": "snp_i2_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "SNP i3 call": "snp_i3_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "SNP i4 call": "snp_i4_%(chr)s_%(bin_size)d%(snp_flag)s_call",
        "calls": "calls_%(chr)s_%(bin_size)d%(rd_flag)s",
        "calls rd": "calls_rd_%(chr)s_%(bin_size)d%(rd_flag)s",
        "calls baf": "calls_baf_%(chr)s_%(bin_size)d%(snp_flag)s",
        "calls combined": "calls_2d_%(chr)s_%(bin_size)d%(snp_flag)s%(rd_flag)s",
        "somatic SNP pos": "somatic_%(name)s_%(chr)s_snp_pos",
        "somatic SNP desc": "somatic_%(name)s_%(chr)s_snp_desc",
        "somatic SNP counts": "somatic_%(name)s_%(chr)s_snp_counts",
        "somatic SNP qual": "somatic_%(name)s_%(chr)s_snp_qual",
        "RD chromosomes": "rd_chromosomes",
        "SNP chromosomes": "snp_chromosomes",
        "chromosome lengths": "chr_len",
        "read frg dist": "read_frg_len",
        "reference genome": "reference_genome",
        "use reference": "use_reference"
    }

    def __init__(self):
        pass

    @staticmethod
    def suffix_rd_flag(flags):
        """
        Converts binary flags into suffix used in RD signal names.

        Parameters
        ----------
        flags : int
            Binary flag (FLAG_GC_CORR = 0x0010, FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100).

        Returns
        -------
        s : str
            Suffix string used in RD signal names.
        """
        s = ""
        if flags & FLAG_AT_CORR:
            s += "_AT"
        if flags & FLAG_GC_CORR:
            s += "_GC"
        if flags & FLAG_USEMASK:
            s += "_mask"
        return s

    @staticmethod
    def suffix_snp_flag(flags):
        """
        Converts binary flags into suffix used in SNP signal names.

        Parameters
        ----------
        flags : int
            Binary flag (FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).

        Returns
        -------
        s : str
            Suffix string used in SNP signal names.
        """
        s = ""
        if flags & FLAG_USEMASK:
            s += "_mask"
        if flags & FLAG_USEID:
            s += "_id"
        if flags & FLAG_USEHAP:
            s += "_hap"
        return s

    @staticmethod
    def suffix_flag(flags):
        """
        Converts binary flags into suffix used in distribution signal names.

        Parameters
        ----------
        flags : int
            Binary flag (FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004).

        Returns
        -------
        s : str
            Suffix string used in distribution signal names.
        """
        s = ""
        if flags & FLAG_AUTO:
            s += "_auto"
        if flags & FLAG_SEX:
            s += "_sex"
        if flags & FLAG_MT:
            s += "_mt"
        if flags & FLAG_GC_CORR:
            s += "_GC"
        return s

    def signal_name(self, chr_name, bin_size, signal, flags=0, name=''):
        """
        Returns h5py variable name for a given signal.

        Parameters
        ----------
        chr_name : str or None
            Name of the chromosome or None.
        bin_size : int or None
            Bin size or None.
        signal : str
            Signal name.
        flags : int
            Binary flag
            (FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
            FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
        name : str
            Callset name used by "somatic ..." signals.

        Returns
        -------
        sig_name : str or None
            Signal h5py variable name; None for unknown signals or when a
            required template field (e.g. bin_size) is missing.
        """
        if signal in self.signals:
            try:
                return self.signals[signal] % {"chr": chr_name, "bin_size": bin_size,
                                               "rd_flag": self.suffix_rd_flag(flags),
                                               "snp_flag": self.suffix_snp_flag(flags), "flag": self.suffix_flag(flags),
                                               "name": name}
            except TypeError:
                # e.g. bin_size is None but the template needs %d.
                return None
        else:
            return None
class IO(Signals):
    def __init__(self, filename, ro=False, buffer=False, create=True):
        """
        Opens CNVpytor file for reading/writing.

        Parameters
        ----------
        filename : str
            Name of the file.
        ro : bool
            Opens file in read-only mode. Default: False.
        buffer : bool
            It will copy hdf5 file in RAM buffer before opening. Works with read-only mode. Default: False.
        create : bool
            It will create file when set and when file does not exist. Otherwise, if file does not exist
            it will log an error. Default: True.
        """
        Signals.__init__(self)
        self.filename = filename
        self.file = None
        _logger.debug("Opening h5 file '%s'" % self.filename)
        if ro:
            try:
                if buffer:
                    # Read the whole file into RAM so later reads never touch disk.
                    with open(filename, 'rb') as f:
                        self.bytesio = io.BytesIO(f.read())
                    self.file = h5py.File(self.bytesio, "r")
                else:
                    self.file = h5py.File(filename, "r")
                _logger.debug("File '%s' successfully opened in read-only mode." % self.filename)
            except IOError:
                _logger.error("Unable to open file %s!" % filename)
                # NOTE(review): exits with status 0 on failure, so scripts
                # cannot detect the error from the exit code — confirm intent.
                exit(0)
        elif os.path.exists(filename):
            try:
                self.file = h5py.File(filename, "r+")
                _logger.debug("File '%s' successfully opened." % self.filename)
            except IOError:
                _logger.error("Unable to open file %s!" % filename)
                exit(0)
        elif create:
            try:
                self.file = h5py.File(filename, "w")
                now = datetime.datetime.now()
                # Meta data
                self.add_meta_attribute('Version', __version__)
                self.add_meta_attribute('Date', now.strftime("%Y-%m-%d %H:%M"))
                _logger.debug("File '%s' successfully created." % self.filename)
            except IOError:
                _logger.error("Unable to create file %s!" % filename)
                exit(0)
        else:
            _logger.error("File %s is missing!" % filename)
            exit(0)
def __del__(self):
_logger.debug("Closing h5 file '%s'" % self.filename)
def chromosomes_with_signal(self, bin_size, signal, flags=0, name=''):
"""
Returns list of chromosomes with signal stored in CNVpytor file
Parameters
----------
bin_size : int or None
Bin size or None.
signal : str
Signal name.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
chrs : list of str
List of chromosome names.
"""
# search_string = "^" + self.signal_name("(.[^_]*)", bin_size, signal, flags, name) + "$"
search_string = "^" + self.signal_name("(.*)", bin_size, signal, flags, name) + "$"
chrs = []
for key in self.file.keys():
res = re.findall(search_string, key)
if len(res) > 0:
chrs.append(res[0])
return chrs
def chromosomes_bin_sizes_with_signal(self, signal, flags=0, name=''):
"""
Returns list of chromosome bin_size pairs with signal stored in CNVpytor file
Parameters
----------
bin_size : int or None
Bin size or None.
signal : str
Signal name.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
chrs_bss : list of (str, int)
List of tuples (chromosome name, bin size).
"""
# search_string = "^" + self.signal_name("(.[^_]*)", 17110806, signal, flags, name) + "$"
search_string = "^" + self.signal_name("(.*)", 17110806, signal, flags, name) + "$"
search_string = search_string.replace("17110806", "(.[0-9]*)")
chrs_bss = []
for key in self.file.keys():
res = re.findall(search_string, key)
if len(res) > 0:
chrs_bss.append(res[0])
return chrs_bss
def signal_exists(self, chr_name, bin_size, signal, flags=0, name=''):
"""
Checks does signal exist.
Parameters
----------
chr_name : str or None
Name of the chromosome or None.
bin_size : int or None
Bin size or None.
signal : str
Signal name.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
exists : bool
True if signal exists in CNVpytor file
"""
signame = self.signal_name(chr_name, bin_size, signal, flags, name)
if not signame:
return False
if signal == "SNP likelihood":
signame_half = self.signal_name(chr_name, bin_size, "SNP likelihood half", flags, name)
return (signame in self.file) or (signame_half in self.file)
else:
return signame in self.file
def create_signal(self, chr_name, bin_size, signal, data, flags=0, name=''):
"""
Stores signal data into CNVpytor file and returns data set instance.
Parameters
----------
chr_name : str or None
Name of the chromosome or None.
bin_size : int or None
Bin size or None.
signal : str
Signal name.
data : numpy.ndarray
Array contains data.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
data_set : h5py._hl.dataset.Dataset
Data set instance.
"""
signame = self.signal_name(chr_name, bin_size, signal, flags, name)
if not signame:
return None
if signame in self.file:
del self.file[signame]
ds = self.file.create_dataset(signame, data.shape, dtype=str(data.dtype), compression="gzip",
compression_opts=9, data=data)
self._flush()
return ds
def update_signal(self, chr_name, bin_size, signal, data, flags=0, name=''):
"""
Updates signal data in CNVpytor file and returns data set instance.
Parameters
----------
chr_name : str
Name of the chromosome or None.
bin_size : int
Bin size or None.
signal : str
Signal name.
data : numpy.ndarray
Array contains data.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
data_set : h5py._hl.dataset.Dataset
Data set instance.
"""
signame = self.signal_name(chr_name, bin_size, signal, flags, name)
if not signame:
return None
if not (signame in self.file):
_logger.warning("Signal %s does not exist in file %s!" % (signame, self.filename))
return None
self.file[signame] = data
self._flush()
return self.file[signame]
def get_signal(self, chr_name, bin_size, signal, flags=0, name=''):
"""
Reads signal data from CNVpytor file and returns pointer to data set.
Parameters
----------
chr_name : str or None
Name of the chromosome or None.
bin_size : int or None
Bin size or None.
signal : str
Signal name.
flags : int
Binary flag
(FLAG_AUTO = 0x0001, FLAG_SEX = 0x0002, FLAG_MT = 0x0004, FLAG_GC_CORR = 0x0010
FLAG_AT_CORR = 0x0020, FLAG_USEMASK = 0x0100, FLAG_USEID = 0x0200, FLAG_USEHAP = 0x0400).
Returns
-------
array : numpy.nparray
Array contains data.
"""
signame = self.signal_name(chr_name, bin_size, signal, flags, name)
if not signame:
return None
if signal=="SNP likelihood":
signame_half = self.signal_name(chr_name, bin_size, "SNP likelihood half", flags, name)
if signame_half in self.file:
x=np.array(self.file[signame_half])
return np.concatenate((x,np.flip(x[:,:-1],axis=1)),axis=1)
if not (signame in self.file):
_logger.debug("Signal '%s' does not exist in file '%s'!" % (signame, self.filename))
return []
return np.array(self.file[signame])
def _flush(self):
"""
Flush pyh5 file.
Returns
-------
None
"""
self.file.flush()
def gc_info(self, stdout=False):
"""
Prints information about GC content of CNVpytor file.
Output is tab separated with columns:
1. length (100 bp resolution)
2. GC content
3. GC content percentage
4. AT content
5. AT content percentage
6. N content
7. N content percentage
Parameters
----------
stdout : bool
If true prints output to stdout (default: False)
Returns
-------
ret : dict
Distionary: chromosome -> [l, gc_content, gc_content_percent, at_content,
at_content_percent, n_content, n_content_percent]
"""
ret = {}
gc_chrom = sorted(self.gc_chromosomes(), key=lambda x: (len(x), x))
if len(gc_chrom) > 0:
if stdout:
print("Contig\tLength\tGC\tGC[%]\tAT\tAT[%]\tN\tN[%]")
for c in gc_chrom:
gcat = self.get_signal(c, None, "GC/AT")
gc, at = gc_at_decompress(gcat)
l = len(gc) * 100
tgc = sum(gc)
tac = sum(at)
ret[c] = [l, tgc, 100 * tgc / l, tac, 100 * tac / l, l - tac - tgc, 100 * (l - tac - tgc) / l]
if stdout:
print("%s\t%d\t%d\t%.2f\t%d\t%.2f\t%d\t%.2f" % tuple([c] + ret[c]))
return ret
    def ls(self):
        """
        Prints a human-readable summary of the CNVpytor file: creation
        metadata, chromosomes with RD/SNP signals, reference genome usage,
        available histogram bin sizes and chromosome lengths.

        Returns
        -------
        None
        """
        print()
        print("Filename '%s'" % self.filename)
        print("-----------" + "-" * len(self.filename))
        parameter_list = ['Date', 'Version']
        if parameter_list and self.file.attrs.keys():
            date = self.file.attrs['Date']
            version = self.file.attrs['Version']
            print("File created: {} using CNVpytor ver {}\n".format(date, version))
        print("Chromosomes with RD signal: " + ", ".join(self.rd_chromosomes()))
        print()
        print("Chromosomes with SNP signal: " + ", ".join(self.snp_chromosomes()))
        print()
        if self.signal_exists(None, None, "reference genome") and self.signal_exists(None, None, "use reference"):
            rg_name = np.array(self.get_signal(None, None, "reference genome")).astype("str")[0]
            print("Using reference genome: " + rg_name + " [ ", end='')
            if rg_name in Genome.reference_genomes:
                # rg_use flags: [0] -> GC file in use, [1] -> mask file in use.
                rg_use = self.get_signal(None, None, "use reference")
                if "gc_file" in Genome.reference_genomes[rg_name] and rg_use[0] == 1:
                    print("GC: yes, ", end='')
                else:
                    print("GC: no, ", end='')
                if "mask_file" in Genome.reference_genomes[rg_name] and rg_use[1] == 1:
                    print("mask: yes ]")
                else:
                    print("mask: no ]")
        else:
            print("Reference genome is not set.")
        print()
        # Group (chromosome, bin size) pairs into chromosome -> bin sizes.
        chr_bs = self.chromosomes_bin_sizes_with_signal("RD")
        chrs = {}
        bss = []
        for c, b in chr_bs:
            if c not in chrs:
                chrs[c] = []
            chrs[c].append(int(b))
            if int(b) not in bss:
                bss.append(int(b))
        print("Chromosomes with RD histograms [bin sizes]: " + ", ".join(chrs.keys()) + " " + str(sorted(bss)))
        print()
        chr_bs = self.chromosomes_bin_sizes_with_signal("SNP likelihood", FLAG_USEMASK)
        chrs = {}
        bss = []
        for c, b in chr_bs:
            if c not in chrs:
                chrs[c] = []
            chrs[c].append(int(b))
            if int(b) not in bss:
                bss.append(int(b))
        print("Chromosomes with SNP histograms [bin sizes]: " + ", ".join(chrs.keys()) + " " + str(sorted(bss)))
        print()
        # "chromosome lengths" stores alternating name/length entries.
        chr_len = list(np.array(self.get_signal(None, None, "chromosome lengths")).astype("str"))
        chr_len = dict(zip(chr_len[::2], chr_len[1::2]))
        print("Chromosome lengths: " + str(chr_len))
@staticmethod
def save_root_trees(root_filename):
"""
Save RD and VCF data into root file. Requires ROOT installed.
Parameters
----------
root_filename : sr
Name of the root file.
Returns
-------
None
Not implemented yet!
"""
try:
import ROOT
except ImportError:
logging.warning("ROOT package is not installed - root file not saved!")
else:
# Save RD and SNP data into root file - TODO
_logger.debug(root_filename, ROOT.__version__)
def add_rd(self, chr_name, rd_p, rd_u, chromosome_length=None):
"""
Add RD signal, compress and stores into CNVpytor file and returns data set instances.
Parameters
----------
chr_name : str
Name of the chromosome.
rd_p : numpy.ndarray
Array with RD parity data.
rd_u : numpy.ndarray
Array with RD unique data.
Returns
-------
ds_p : h5py._hl.dataset.Dataset
Data set instance with RD parity signal.
ds_u : h5py._hl.dataset.Dataset
Data set instance with RD unique signal.
"""
_logger.info("Adding RD data for chromosome '%s'." % chr_name)
ord_p, ord_u = self.read_rd(chr_name)
if rd_p.size > ord_p.size:
_logger.warning("Different lengths (%d, %d). Using larger." % (rd_p.size, ord_p.size))
ord_p.resize(rd_p.size, refcheck=False)
ord_u.resize(rd_p.size, refcheck=False)
elif rd_p.size < ord_p.size:
_logger.warning("Different lengths (%d, %d). Using larger." % (rd_p.size, ord_p.size))
rd_p.resize(od_p.size, refcheck=False)
rd_u.resize(od_p.size, refcheck=False)
rd_p += ord_p
rd_u += ord_u
data_type = "uint32" if Genome.is_mt_chrom(chr_name) or (np.max(rd_p) > 65535) else "uint16"
crd_p, crd_u = rd_compress(rd_p, rd_u, data_type)
ds_p = self.create_signal(chr_name, None, "RD p", crd_p)
ds_u = self.create_signal(chr_name, None, "RD u", crd_u)
if not (chr_name in self.rd_chromosomes()):
rd_chroms = self.rd_chromosomes()
rd_chroms.append(chr_name)
self.update_signal(None, None, "RD chromosomes", np.array([np.string_(x) for x in rd_chroms]))
return ds_p, ds_u
    def save_rd(self, chr_name, rd_p, rd_u, chromosome_length=None):
        """
        Compress and store RD data into CNVpytor file and return data set instances.

        Parameters
        ----------
        chr_name : str
            Name of the chromosome.
        rd_p : numpy.ndarray
            Array with RD parity data.
        rd_u : numpy.ndarray
            Array with RD unique data.
        chromosome_length : int or None
            When given, stored via set_chromosome_length().

        Returns
        -------
        ds_p : h5py._hl.dataset.Dataset
            Data set instance with RD parity signal.
        ds_u : h5py._hl.dataset.Dataset
            Data set instance with RD unique signal.
        """
        _logger.info("Saving chromosome RD data for chromosome '%s'." % chr_name)
        # MT coverage (or very deep data) can exceed uint16; widen the dtype.
        data_type = "uint32" if Genome.is_mt_chrom(chr_name) or (np.max(rd_p) > 65535) else "uint16"
        crd_p, crd_u = rd_compress(rd_p, rd_u, data_type)
        # If SNP data already exists under a different name for this
        # chromosome, reuse that name so both signals stay linked.
        snp_name = self.snp_chromosome_name(chr_name)
        if not (snp_name is None):
            if snp_name == chr_name:
                _logger.info("Detecting SNP data in file '%s' for the same chromosome." % self.filename)
            else:
                _logger.info(
                    "Detecting RD data in file '%s' for the same chromosome with different name '%s'. SNP name will be used." % (
                        self.filename, snp_name))
            chr_name = snp_name
        if chromosome_length is not None:
            self.set_chromosome_length(chr_name, chromosome_length)
        ds_p = self.create_signal(chr_name, None, "RD p", crd_p)
        ds_u = self.create_signal(chr_name, None, "RD u", crd_u)
        if not (chr_name in self.rd_chromosomes()):
            rd_chroms = self.rd_chromosomes()
            rd_chroms.append(chr_name)
            self.create_signal(None, None, "RD chromosomes", np.array([np.string_(x) for x in rd_chroms]))
        return ds_p, ds_u
def add_rd_chromosome(self, chr_name):
    """
    Register a chromosome name in the "RD chromosomes" list signal.

    Parameters
    ----------
    chr_name : str
        Name of the chromosome.

    Returns
    -------
    None
    """
    known = self.rd_chromosomes()
    if chr_name in known:
        return
    known.append(chr_name)
    self.create_signal(None, None, "RD chromosomes", np.array([np.string_(x) for x in known]))
    _logger.debug("Chromosome '%s' added to 'RD chromosomes' list" % chr_name)
def save_snp(self, chr_name, pos, ref, alt, nref, nalt, gt, flag, qual, update=False, callset=None,
             chromosome_length=None):
    """
    Compress and stores SNP data into CNVpytor file.

    Parameters
    ----------
    chr_name : str
        Name of the chromosome.
    pos : list of int
        List of SNP positions.
    ref : list of str
        List of SNP reference base (A, T, G, C or .).
    alt : list of str
        List of SNP alternative base (A, T, G, C or .).
    nref : list of int
        Count of reads contains reference SNP.
    nalt : list of int
        Count of reads contains alternative SNP.
    gt : list of int
        List of genotypes (0 - "0/0", 1 - "0/1", 3- "1/1", 4 - "0|0" , 5 - "0|1", 6 - "1|0", 7 - "1|1").
    flag : list of int
        Binary flag: first bit 1 - SNP exists in database, second bit 1 - SNP in P region of strict mask.
    qual : list of int
        SNP quality (scale 0 - 255).
    update : bool
        If True, treat the data as an update of already stored SNP data
        (chromosome-name reconciliation with RD data is skipped).
    callset : str or None
        If given, data is stored under "somatic SNP ..." signals with this name.
    chromosome_length : int or None
        If given and no length is stored yet, saved as the chromosome length.

    Returns
    -------
    None
    """
    if callset is None:
        if update:
            _logger.info("Updating SNP data for chromosome '%s'. Number of variants: %d." % (chr_name, len(pos)))
        else:
            _logger.info("Saving SNP data for chromosome '%s'. Number of variants: %d." % (chr_name, len(pos)))
    else:
        if update:
            _logger.info("Updating somatic '%s' SNV data for chromosome '%s'. Number of variants: %d." % (
                callset, chr_name, len(pos)))
        else:
            _logger.info("Saving somatic '%s' SNV data for chromosome '%s'. Number of variants: %d." % (
                callset, chr_name, len(pos)))
    snp_pos, snp_desc, snp_counts, snp_qual = snp_compress(pos, ref, alt, nref, nalt, gt, flag, qual)
    # If RD data exists under a synonym of this chromosome name, store the
    # SNP signals under the RD name to keep the two aligned.
    rd_name = self.rd_chromosome_name(chr_name)
    if not update and rd_name is not None:
        if rd_name == chr_name:
            _logger.info("Detecting RD data in file '%s' for the same chromosome." % self.filename)
        else:
            _logger.info(
                "Detecting RD data in file '%s' for the same chromosome with different name '%s'. RD name will be used." % (
                    self.filename, rd_name))
        chr_name = rd_name
    if not self.is_chromosome_length_set(chr_name):
        if chromosome_length is not None:
            self.set_chromosome_length(chr_name, chromosome_length)
        else:
            # Fall back to the last SNP position as a lower bound on the length.
            # NOTE(review): assumes pos is non-empty and sorted ascending -- confirm with callers.
            self.set_chromosome_length(chr_name, pos[-1] + 1)
    if callset is None:
        self.create_signal(chr_name, None, "SNP pos", snp_pos)
        self.create_signal(chr_name, None, "SNP desc", snp_desc)
        self.create_signal(chr_name, None, "SNP counts", snp_counts)
        self.create_signal(chr_name, None, "SNP qual", snp_qual)
    else:
        self.create_signal(chr_name, None, "somatic SNP pos", snp_pos, name=callset)
        self.create_signal(chr_name, None, "somatic SNP desc", snp_desc, name=callset)
        self.create_signal(chr_name, None, "somatic SNP counts", snp_counts, name=callset)
        self.create_signal(chr_name, None, "somatic SNP qual", snp_qual, name=callset)
    if not (chr_name in self.snp_chromosomes()):
        snp_chroms = self.snp_chromosomes()
        snp_chroms.append(chr_name)
        self.create_signal(None, None, "SNP chromosomes", np.array([np.string_(x) for x in snp_chroms]))
def read_rd(self, chr_name):
    """
    Read and decompress the RD signals for one chromosome.

    Parameters
    ----------
    chr_name : str
        Name of the chromosome.

    Returns
    -------
    rd_p : numpy.ndarray
        Array with RD parity data.
    rd_u : numpy.ndarray
        Array with RD unique data.
    """
    compressed_p = self.get_signal(chr_name, None, "RD p")
    compressed_u = self.get_signal(chr_name, None, "RD u")
    return rd_decompress(compressed_p, compressed_u)
def read_snp(self, chr_name, callset=None):
    """
    Read and decompress the SNP signals for one chromosome.

    Parameters
    ----------
    chr_name : str
        Name of the chromosome.
    callset : str or None
        If given, read the "somatic SNP ..." signals stored under this name.

    Returns
    -------
    pos : list of int
        List of SNP positions.
    ref : list of str
        List of SNP reference base (A, T, G, C or .).
    alt : list of str
        List of SNP alternative base (A, T, G, C or .).
    nref : list of int
        Count of reads contains reference SNP.
    nalt : list of int
        Count of reads contains alternative SNP.
    gt : list of int
        List of genotypes (0 - "0/0", 1 - "0/1", 3- "1/1", 4 - "0|0" , 5 - "0|1", 6 - "1|0", 7 - "1|1").
    flag : list of int
        Binary flag: first bit 1 - SNP exists in database, second bit 1 - SNP in P region of strict mask.
    qual : list of int
        SNP quality (scale 0 - 255).
    """
    if callset is None:
        raw = [self.get_signal(chr_name, None, sig)
               for sig in ("SNP pos", "SNP desc", "SNP counts", "SNP qual")]
    else:
        raw = [self.get_signal(chr_name, None, sig, name=callset)
               for sig in ("somatic SNP pos", "somatic SNP desc",
                           "somatic SNP counts", "somatic SNP qual")]
    return snp_decompress(*raw)
def rd_chromosomes(self):
    """
    Lists all chromosomes with RD signal stored in CNVpytor file.

    Returns
    -------
    chrs : list of str
        List of chromosome names with RD signal.
    """
    stored = np.array(self.get_signal(None, None, "RD chromosomes"))
    return list(stored.astype("str"))
def gc_chromosomes(self):
    """
    Lists all chromosomes with GC/AT content data stored in CNVpytor file.

    Returns
    -------
    chrs : list of str
        List of chromosome names with GC/AT content data.
    """
    # Presence of the per-chromosome "GC/AT" signal marks the chromosome.
    return self.chromosomes_with_signal(None, "GC/AT")
def snp_chromosomes(self):
    """
    Lists all chromosomes with SNP signal stored in CNVpytor file.

    Returns
    -------
    chrs : list of str
        List of chromosome names with SNP signal.
    """
    stored = np.array(self.get_signal(None, None, "SNP chromosomes"))
    return list(stored.astype("str"))
def mask_chromosomes(self):
    """
    Lists all chromosomes with strict P mask stored in CNVpytor file.

    Returns
    -------
    chrs : list of str
        List of chromosome names with strict P mask.
    """
    # Presence of the per-chromosome "mask" signal marks the chromosome.
    return self.chromosomes_with_signal(None, "mask")
def rd_chromosome_name(self, name):
    """
    Finds name of the chromosome used for RD signal variable name.

    Parameters
    ----------
    name : str
        Chromosome name

    Returns
    -------
    chr : str or None
        Synonym for provided chromosome name used for RD signal.
        If such chromosome does not exist returns None.
    """
    known = self.rd_chromosomes()
    # Try exact, extended and canonical spellings first, in that order.
    for candidate in (name, Genome.extended_chrom_name(name), Genome.canonical_chrom_name(name)):
        if candidate in known:
            return candidate
    # Otherwise match by canonical form against every stored name.
    canonical = Genome.canonical_chrom_name(name)
    for stored in known:
        if canonical == Genome.canonical_chrom_name(stored):
            return stored
    return None
def snp_chromosome_name(self, name):
    """
    Finds name of the chromosome used for SNP signal variable name.

    Parameters
    ----------
    name : str
        Chromosome name

    Returns
    -------
    chr : str or None
        Synonym for provided chromosome name used for SNP signal.
        If such chromosome does not exist returns None.
    """
    known = self.snp_chromosomes()
    # Try exact, extended and canonical spellings first, in that order.
    for candidate in (name, Genome.extended_chrom_name(name), Genome.canonical_chrom_name(name)):
        if candidate in known:
            return candidate
    # Otherwise match by canonical form against every stored name.
    canonical = Genome.canonical_chrom_name(name)
    for stored in known:
        if canonical == Genome.canonical_chrom_name(stored):
            return stored
    return None
def set_chromosome_length(self, chromosome, length):
    """
    Store (or overwrite) the length of one chromosome.

    Parameters
    ----------
    chromosome : str
        Chromosome name
    length: int
        Chromosome length

    Returns
    -------
    None
    """
    # "chromosome lengths" is a flat byte-string array: [name, length, name, length, ...]
    flat = list(np.array(self.get_signal(None, None, "chromosome lengths")).astype("str"))
    lengths = dict(zip(flat[::2], flat[1::2]))
    lengths[chromosome] = str(length)
    serialized = [np.string_(x) for pair in lengths.items() for x in pair]
    self.create_signal(None, None, "chromosome lengths", np.array(serialized))
def get_chromosome_length(self, chromosome):
    """
    Return the stored length of one chromosome.

    Parameters
    ----------
    chromosome : str
        Chromosome name

    Returns
    -------
    len : int or None
        Chromosome length, or None if not stored.
    """
    flat = list(np.array(self.get_signal(None, None, "chromosome lengths")).astype("str"))
    lengths = dict(zip(flat[::2], flat[1::2]))
    value = lengths.get(chromosome)
    return int(value) if value is not None else None
def get_chromosome_lengths(self):
    """
    Returns list of pairs (chromosome name, chromosome length).

    Returns
    -------
    ret : list of (str, int)
        List of pairs: chromosome name and length
    """
    flat = list(np.array(self.get_signal(None, None, "chromosome lengths")).astype("str"))
    return [(name, int(size)) for name, size in zip(flat[::2], flat[1::2])]
def is_chromosome_length_set(self, chromosome):
    """
    Check whether a length is stored for the given chromosome.

    Parameters
    ----------
    chromosome : str
        Chromosome name

    Returns
    -------
    bool
        True if chromosome length is set
    """
    flat = list(np.array(self.get_signal(None, None, "chromosome lengths")).astype("str"))
    # Names occupy the even slots of the flat [name, length, ...] array.
    return chromosome in flat[::2]
def rd_normal_level(self, bin_size, flags=0):
    """
    Returns normal rd level for CN2 and standard deviation.

    Parameters
    ----------
    bin_size : int
        Bin size
    flags : int
        RD flag

    Returns
    -------
    level : float
    std : float
    """
    # Prefer an explicitly stored level; otherwise derive it from RD statistics,
    # trying autosomes first and sex chromosomes second.
    if self.signal_exists(None, bin_size, "RD level", flags):
        return tuple(self.get_signal(None, bin_size, "RD level", flags))
    for region_flag in (FLAG_AUTO, FLAG_SEX):
        if self.signal_exists(None, bin_size, "RD stat", region_flag | flags):
            stat = self.get_signal(None, bin_size, "RD stat", region_flag | flags)
            return stat[4], stat[5]
    return 0, 0
def set_rd_normal_level(self, bin_size, mean, stdev, flags=0):
    """
    Set normal rd level for CN2 and standard deviation.

    Parameters
    ----------
    bin_size : int
        Bin size
    mean : float
        Mean RD in normal region
    stdev : float
        Standard deviation of RD signal
    flags : int
        RD flag
    """
    level = np.array([mean, stdev])
    self.create_signal(None, bin_size, "RD level", level, flags=flags)
def save_calls(self, chr_name, bin_size, signal, calls, flags):
    """
    Save calls.

    Parameters
    ----------
    chr_name : str
        Chromosome name
    bin_size : int
        Bin size
    signal : str
        Signal name
    calls : list of dict
        List of calls
    flags : int
        Flag

    Returns
    -------
    None
    """
    keys = ["type", "start", "end", "size", "cnv", "p_val", "p_val_2", "p_val_3", "p_val_4", "Q0", "pN", "dG"]
    # Same membership test as read_calls, so the two stay in sync.
    if signal in {"calls combined", "calls baf"}:
        keys = ["type", "start", "end", "size", "cnv", "p_val", "lh_del", "lh_loh", "lh_dup", "Q0", "bins", "baf",
                "rd_p_val", "baf_p_val", "segment", "hets", "homs", "pN", "pNS", "pP"]
    data = []
    for call in calls:
        # Each row starts with the key count (so read_calls can recover the
        # schema), followed by the call values and any flattened model tuples.
        item = [len(keys)] + [call[key] for key in keys]
        for model in call.get("models", []):
            item.extend(model)
        data.append(item)
    self.create_signal(chr_name, bin_size, signal, np.array(data, dtype=np.double), flags=flags)
def read_calls(self, chr_name, bin_size, signal, flags):
    """
    Read calls previously stored by save_calls.

    Parameters
    ----------
    chr_name : str
        Chromosome name
    bin_size : int
        Bin size
    signal : str
        Signal name
    flags : int
        Flag

    Returns
    -------
    calls : list of dict
        List of calls
    """
    data = self.get_signal(chr_name, bin_size, signal, flags=flags)
    keys = ["type", "start", "end", "size", "cnv", "p_val", "p_val_2", "p_val_3", "p_val_4", "Q0", "pN", "dG"]
    if signal in {"calls combined", "calls baf"}:
        keys = ["type", "start", "end", "size", "cnv", "p_val", "lh_del", "lh_loh", "lh_dup", "Q0", "bins", "baf",
                "rd_p_val", "baf_p_val", "segment", "hets", "homs", "pN", "pNS", "pP"]
    calls = []
    for item in data:
        # Each row: [key count, key values..., flattened 5-tuples of model data].
        nkeys = int(item[0])
        call = dict(zip(keys, item[1:nkeys + 1]))
        if len(item) > nkeys + 1:
            tail = list(item)[nkeys + 1:]
            call["models"] = [tail[5 * i:5 * (i + 1)] for i in range(len(tail) // 5)]
        calls.append(call)
    return calls
def add_meta_attribute(self, attribute, value):
    """
    Store a meta attribute on the underlying file object.

    Parameters
    ----------
    attribute : str
        Attribute name
    value : str
        Attribute value

    Returns
    -------
    None
    """
    # Values are always stored in their string form.
    self.file.attrs[attribute] = str(value)
def print_meta_attribute(self):
    """
    Print all meta attributes, one "name: value" per line, under a
    filename header followed by a dashed underline.

    Returns
    -------
    None
    """
    header = "Filename '%s'" % self.filename
    print()
    print(header)
    # Underline length matches the header: 11 fixed chars plus the filename.
    print("-" * len(header))
    for attribute, value in self.file.attrs.items():
        print("{}: {}".format(attribute, value))
|
from klampt import *
from klampt.control.robotinterfaceutils import *
from klampt.control.robotinterface import RobotInterfaceBase
from klampt.control.interop import RobotInterfacetoVis,RobotControllerBlockToInterface
from klampt.control.simrobotinterface import *
from klampt.control.blocks import wiggle_controller
from klampt.control.cartesian_drive import *
from klampt.control.utils import TimedLooper
from klampt.math import vectorops,so3
from klampt import vis
import math
import time
import csv
def testCompleter():
    """Exercise RobotInterfaceCompleter on a simulated TX90 robot.

    Sends a scripted sequence of timed motion commands (see ``moves``),
    renders the result with klampt.vis, and logs interface state to
    'controllertest_results.csv'.  Blocks until the window is closed or
    ``endTime`` is reached.
    """
    w = WorldModel()
    w.readFile("../../data/tx90scenario0.xml")
    r = w.robot(0)
    sim = Simulator(w)
    #TODO: CHANGE ME -- pick which low-level interface the completer wraps
    #controller = RobotInterfaceCompleter(KinematicSimControlInterface(r))
    #controller = RobotInterfaceCompleter(SimPositionControlInterface(sim.controller(0),sim))
    #controller = RobotInterfaceCompleter(SimMoveToControlInterface(sim.controller(0),sim))
    #controller = RobotInterfaceCompleter(SimVelocityControlInterface(sim.controller(0),sim))
    controller = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0),sim))
    if not controller.initialize():
        raise RuntimeError("There was some problem initializing controller "+str(controller))
    #start logger
    ifaceLogger = RobotInterfaceLogger(controller,'controllertest_results.csv')
    if controller.numJoints() != r.numDrivers():
        raise RuntimeError("Invalid DOFs")
    if controller.klamptModel() is None:
        raise RuntimeError("Can't get Klampt model")
    # q drops the first config element (fixed base link); q2 is a perturbed target
    q = r.getConfig()[1:]
    q2 = [x for x in q]
    q2[2] -= 1.0
    q2[3] -= 1.0
    controller.setToolCoordinates([0,0,0])
    #TODO: CHANGE ME -- pick one of the scripted move sequences below
    """
    #testing a single movement
    moves = [(1.0,lambda: controller.setPiecewiseLinear([1],[q[:2]+[q[2]+1.0]+q[3:]]))]
    """
    """
    #testing general movements with interruption
    moves = [(0.5,lambda: controller.setVelocity([0]*2+[1.0]+[0]*(len(q)-3),1.0)),
             (1.0,lambda: controller.setPiecewiseLinear([1],[q[:2]+[q[2]+1.0]+q[3:]])),
             (3.0,lambda: controller.setPiecewiseCubic([1],[q],[[0]*len(q)])),
             (3.5,lambda: controller.moveToPosition(q[:2]+[q[2]-1.0]+q[3:])),
             (5.0,lambda: controller.moveToPosition(q,0.1)),
             (5.5,lambda: controller.moveToPosition(q2,1.0)),
             (8.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
             (10.0,lambda: controller.setCartesianVelocity([0,0,0.2],3.0)),
             (11.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0])))
        ]
    """
    #testing interrupted cartesian velocity movements
    moves = [(0.5,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
             (2.0,lambda: controller.setCartesianVelocity([0,0,0.1],5.0)) ,
             #(3.0,lambda: controller.moveToPosition(q,1))
             (3.0,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0])))
        ]
    """
    #testing cartesian velocity movements
    moves = [(0.5,lambda: controller.moveToCartesianPosition((so3.identity(),[0.5,0,1.0]))),
             (2.0,lambda: controller.setCartesianVelocity([0,0,0.1],5.0))
        ]
    """
    visplugin = RobotInterfacetoVis(controller)
    #visplugin.tag = ''
    endTime = 13.0
    lastClock = 0
    dt = 1.0/controller.controlRate()
    vis.add("world",w)
    vis.show()
    looper = TimedLooper(dt)
    while controller.status() == 'ok' and vis.shown() and looper: #no error handling done here...
        vis.lock()
        try:
            with StepContext(controller):
                clock = controller.clock()
                if (clock % 1.0) <= dt:
                    controller.printStatus()
                # fire each move whose trigger time was crossed since the last step
                for (trigger,callback) in moves:
                    if clock > trigger and lastClock <= trigger:
                        print("Calling trigger",trigger)
                        callback()
                lastClock = clock
                visplugin.update()
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Breaking due to exception",e)
            looper.stop()
        if controller.clock() > endTime:
            vis.unlock()
            break
        #log results to disk
        ifaceLogger.step()
        # kinematic sim has no separate physics world to update -- mirror the
        # commanded configuration into the model instead
        if isinstance(controller._base,KinematicSimControlInterface):
            r.setConfig(controller.configToKlampt(controller.commandedPosition()))
        else:
            sim.updateWorld()
        #give visualization some chance to update
        vis.unlock()
    if vis.shown():
        print("STATUS CHANGED TO",controller.status())
        print("FINAL CLOCK",controller.clock())
        controller.printStatus()
    ifaceLogger.stop()
    vis.show(False)
    vis.clear()
    vis.kill()
def testCartesianDrive():
    """Drive the Jaco robot's end effector along a scripted sequence of
    Cartesian velocities with CartesianDriveSolver, plotting solve timing
    and drive progress in klampt.vis.  Blocks until done or window closed.
    """
    w = WorldModel()
    #w.readFile("../../data/tx90scenario0.xml")
    w.readFile("../../data/robots/jaco.rob")
    r = w.robot(0)
    solver = CartesianDriveSolver(r)
    #set a non-singular configuration
    q = r.getConfig()
    q[3] = 0.5
    r.setConfig(q)
    solver.start(q,6)
    vis.add("world",w)
    vis.addPlot("timing")
    vis.addPlot("info")
    vis.show()
    time.sleep(0.1)
    dt = 0.01
    t = 0
    while t < 20 and vis.shown():
        vis.lock()
        # translational velocity schedule
        if t < 2:
            v = [0,0,0.25]
        elif t < 3:
            v = [0,0,-0.1]
        elif t < 3.2:
            v = [0,0,-1]
        elif t < 8:
            v = [0,0,0]
        elif t < 10:
            v = [-1,0,0]
        else:
            v = [1,0,0]
        # angular velocity schedule (renamed from `w`, which shadowed the
        # WorldModel above -- a latent bug if the world were used in the loop)
        if t < 4:
            omega = [0,0,0]
        elif t < 10:
            omega = [0,-0.25,0]
        else:
            omega = None
        t0 = time.time()
        progress, qnext = solver.drive(q,omega,v,dt)
        t1 = time.time()
        vis.addText("debug","Vel %s"%(str(v),))
        vis.logPlot("timing","t",t1-t0)
        vis.logPlot("info","progress",progress)
        vis.logPlot("info","adj",solver.driveSpeedAdjustment)
        r.setConfig(qnext)
        q = qnext
        vis.unlock()
        vis.add("tgt",solver.driveTransforms[0])
        t += dt
        # sleep whatever remains of a ~5ms frame after the solve
        time.sleep(max(0.005-(t1-t0),0))
    vis.show(False)
    vis.clear()
    vis.kill()
def testMultiRobot():
    """Drive two simulated robots through one unified OmniRobotInterface.

    Builds a simulation world with a TX90 and a Jaco, a second world whose
    single robot is the two mounted together (the controller's model), then
    wires both simulated part interfaces into one whole-robot interface
    commanded by a BigWiggleController.  Blocks until the window is closed.
    """
    #Create a world with two robots -- this will be the simulation world
    w = WorldModel()
    w.readFile("../../data/tx90scenario0.xml")
    w.readFile("../../data/robots/jaco.rob")
    r1 = w.robot(0)
    r2 = w.robot(1)
    #Create a world with a unified robot -- this will be the controller's model of the robot
    w2 = w.copy()
    w2.robot(0).mount(-1,w2.robot(1),so3.identity(),[1,0,0.5])
    w2.remove(w2.robot(1))
    whole_robot_model = w2.robot(0)
    # index maps between the unified model and the two physical robots
    robot_1_klampt_indices = list(range(r1.numLinks()))
    robot_2_klampt_indices = list(range(r1.numLinks(),r1.numLinks()+r2.numLinks()))
    robot_1_driver_indices = list(range(r1.numDrivers()))
    robot_2_driver_indices = list(range(r1.numDrivers(),r1.numDrivers()+r2.numDrivers()))
    #update the base transform of robot 2 (same offset used in the mount above)
    T0 = r2.link(0).getParentTransform()
    r2.link(0).setParentTransform(T0[0],vectorops.add(T0[1],[1,0,0.5]))
    r2.setConfig(r2.getConfig())
    #Note: don't pass sim as the second argument to SimXControlInterface; we will need to simulate ourselves
    sim = Simulator(w)
    #sim_controller1 = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0)))
    #sim_controller2 = RobotInterfaceCompleter(SimFullControlInterface(sim.controller(1)))
    sim_controller1 = SimFullControlInterface(sim.controller(0))
    sim_controller2 = SimFullControlInterface(sim.controller(1))
    #whole_robot_controller = MultiRobotInterface()
    #whole_robot_controller.addPart("Robot 1",sim_controller1,whole_robot_model,robot_1_klampt_indices)
    #whole_robot_controller.addPart("Robot 2",sim_controller2,whole_robot_model,robot_2_klampt_indices)
    whole_robot_controller = OmniRobotInterface(whole_robot_model)
    whole_robot_controller.addPhysicalPart("Robot 1",sim_controller1,robot_1_driver_indices)
    whole_robot_controller.addPhysicalPart("Robot 2",sim_controller2,robot_2_driver_indices)
    if not whole_robot_controller.initialize():
        raise RuntimeError("Failed to initialize")
    print("Num total DOFs",whole_robot_controller.numJoints())
    print("Control rate",whole_robot_controller.controlRate())
    print(whole_robot_controller.partInterface("Robot 1").__class__.__name__)
    print(whole_robot_controller.partInterface("Robot 2").__class__.__name__)
    #sim_controller2.addPart("arm",list(range(6)))
    #sim_controller2.addPart("gripper",[6,7,8])
    print("Parts:")
    for k,v in whole_robot_controller.parts().items():
        print(" ",k,":",v)
        if k is not None:
            for k2,v2 in whole_robot_controller.partInterface(k).parts().items():
                print(" ",k2,":",v2)
    visplugin1 = RobotInterfacetoVis(whole_robot_controller.partInterface("Robot 1"),0)
    visplugin1.text_x = 10
    visplugin1.tag = ''
    visplugin2 = RobotInterfacetoVis(whole_robot_controller.partInterface("Robot 2"),1)
    visplugin2.text_x = 200
    visplugin2.tag = 'a'
    vis.add("world",w)
    #vis.add("world",w2)
    #vis.edit(("world",whole_robot_model))
    vis.add("qdes",sim_controller1.configToKlampt(sim_controller1.sensedPosition()),color=[1,0,0,0.5],robot=0)
    vis.add("qdes2",sim_controller2.configToKlampt(sim_controller2.sensedPosition()),color=[1,1,0,0.5],robot=1)
    vis.show()
    dt = 1.0/whole_robot_controller.controlRate()
    controller_block = wiggle_controller.BigWiggleController(whole_robot_model)
    controller_to_interface = RobotControllerBlockToInterface(controller_block,whole_robot_controller)
    state = 0
    looper = TimedLooper(dt)
    while vis.shown() and looper:
        vis.lock()
        try:
            with StepContext(controller_to_interface.robotInterface):
                # whole_robot_controller.beginStep()
                # #send commands here
                # clock = whole_robot_controller.clock()
                # if clock > 0.5 and clock < 2.5:
                #     velocity = [0]*whole_robot_controller.numJoints()
                #     velocity[2] = -0.1
                #     velocity[10] = 0.3
                #     whole_robot_controller.setVelocity(velocity,None)
                # elif clock >= 2.5 and clock < 2.75:
                #     velocity = [0]*whole_robot_controller.numJoints()
                #     whole_robot_controller.setVelocity(velocity)
                # elif clock > 2.75 and clock < 2.80:
                #     tgt = [0]*sim_controller1.numJoints()
                #     tgt[2] = 1.0
                #     whole_robot_controller.partInterface("Robot 1").moveToPosition(tgt)
                # elif clock > 0.1 and clock < 4.0:
                #     #start moving the arm upward
                #     whole_robot_controller.partInterface("Robot 2").partInterface("arm").setCartesianVelocity(([0,0,0],[0,0,0.1]))
                # elif clock > 4.0 and clock < 4.1:
                #     whole_robot_controller.partInterface("Robot 2").partInterface("arm").setCartesianVelocity(([0,0,0],[0,0,0]))
                controller_to_interface.advance()
                visplugin1.update()
                visplugin2.update()
                #whole_robot_controller.endStep()
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Breaking due to exception",e)
            looper.stop()
        #update the simulator
        sim.simulate(dt)
        #update the visualization world
        sim.updateWorld()
        vis.add("qdes",sim_controller1.configToKlampt(sim_controller1.sensedPosition()),color=[1,0,0,0.5],robot=0)
        vis.add("qdes2",sim_controller2.configToKlampt(sim_controller2.sensedPosition()),color=[1,1,0,0.5],robot=1)
        #whole_robot_model.setConfig(r1.getConfig()+r2.getConfig())
        vis.unlock()
    vis.clear()
    vis.kill()
def testThreaded():
    """Run a BigWiggleController against a ThreadedRobotInterface that wraps
    a completed simulation interface, visualizing the result until the
    window is closed.
    """
    world = WorldModel()
    world.readFile("../../data/tx90scenario0.xml")
    robot = world.robot(0)
    sim = Simulator(world)
    iface = ThreadedRobotInterface(
        RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0),sim)))
    if not iface.initialize():
        raise RuntimeError("Unable to initialize")
    assert iface.klamptModel() is not None,"Error retrieving klampt model from threaded interface"
    visplugin = RobotInterfacetoVis(iface,0)
    vis.add("world",world)
    qsns = iface.sensedPosition()
    if qsns is not None:
        robot.setConfig(iface.configToKlampt(qsns))
    vis.show()
    bridge = RobotControllerBlockToInterface(
        wiggle_controller.BigWiggleController(robot), iface)
    looper = TimedLooper(1.0/iface.controlRate())
    while vis.shown() and looper:
        vis.lock()
        try:
            bridge.advance()
            visplugin.update()
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Breaking due to exception",e)
            looper.stop()
        # mirror the sensed configuration into the visualization world
        robot.setConfig(iface.configToKlampt(iface.sensedPosition()))
        vis.unlock()
    iface.close() #stops the background thread
    vis.clear()
    vis.kill()
def testMultiprocessing():
    """Run a BigWiggleController against a MultiprocessingRobotInterface that
    wraps a completed simulation interface, visualizing the result until the
    window is closed.
    """
    world = WorldModel()
    world.readFile("../../data/tx90scenario0.xml")
    robot = world.robot(0)
    sim = Simulator(world)
    iface = MultiprocessingRobotInterface(
        RobotInterfaceCompleter(SimFullControlInterface(sim.controller(0),sim)))
    if not iface.initialize():
        raise RuntimeError("Unable to initialize")
    assert iface.klamptModel() is not None,"Error retrieving klampt model from threaded interface"
    visplugin = RobotInterfacetoVis(iface,0)
    vis.add("world",world)
    qsns = iface.sensedPosition()
    if qsns is not None:
        robot.setConfig(iface.configToKlampt(qsns))
    vis.show()
    bridge = RobotControllerBlockToInterface(
        wiggle_controller.BigWiggleController(robot), iface)
    looper = TimedLooper(1.0/iface.controlRate())
    while vis.shown() and looper:
        vis.lock()
        try:
            bridge.advance()
            visplugin.update()
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Breaking due to exception",e)
            looper.stop()
        # mirror the sensed configuration into the visualization world
        robot.setConfig(iface.configToKlampt(iface.sensedPosition()))
        vis.unlock()
    iface.close() #stops the worker process
    vis.clear()
    vis.kill()
def testFilters():
    """Exercise joint-limit and collision filters on a RobotInterfaceCompleter,
    repeatedly commanding random target configurations so the filters trigger.
    Blocks until the window is closed.
    """
    w = WorldModel()
    w.readFile("../../data/robots/tx90pr2.rob")
    w.readFile("../../data/terrains/plane.env")
    robot = w.robot(0)
    # RobotInfo is never imported at the top of this file, so the original
    # raised NameError here.  NOTE(review): confirm this module path matches
    # the installed Klampt version.
    from klampt.model.robotinfo import RobotInfo
    ri = RobotInfo.load("../../data/robots/tx90pr2.json")
    sim = Simulator(w)
    sim_controller = SimFullControlInterface(sim.controller(0),sim)
    completed_controller = RobotInterfaceCompleter(sim_controller)
    completed_controller.addPart('arm',ri.partDriverIndices('arm'))
    completed_controller.addPart('gripper',ri.partDriverIndices('gripper'))
    robot_interface = completed_controller
    if not robot_interface.initialize():
        raise RuntimeError("Unable to initialize")
    assert robot_interface.klamptModel() is not None,"Error retrieving klampt model from threaded interface"
    #hmm... these need to be done after initialize()?
    completed_controller.setJointLimits(op='stop')
    completed_controller.setCollisionFilter(w,'stop')
    visplugin = RobotInterfacetoVis(robot_interface,0)
    vis.add("world",w)
    qsns = robot_interface.sensedPosition()
    if qsns is not None:
        robot.setConfig(robot_interface.configToKlampt(qsns))
    vis.show()
    dt = 1.0/robot_interface.controlRate()
    looper = TimedLooper(dt)
    iters = 0
    while vis.shown() and looper:
        vis.lock()
        # guard the control step like the other tests in this file do, so an
        # interface error stops the loop instead of killing the vis thread
        try:
            with StepContext(robot_interface):
                if iters % 50 == 0:
                    # command a new random target every 50 control steps
                    robot.randomizeConfig()
                    robot_interface.moveToPosition(robot_interface.configFromKlampt(robot.getConfig()))
                visplugin.update()
        except Exception as e:
            import traceback
            traceback.print_exc()
            print("Breaking due to exception",e)
            looper.stop()
        vis.unlock()
        iters += 1
# Select which demo to run; each opens a visualization window and blocks
# until it is closed.  (A stray '|' artifact after vis.kill() -- a syntax
# error -- has been removed.)
#testCartesianDrive()
#testCompleter()
#testMultiRobot()
#testThreaded()
testMultiprocessing()
#testFilters()
vis.kill()
# %%
# In VS Code: read the program input from a text file and feed it to stdin.
import sys
import os
f=open(r'.\Chapter-1\A_input.txt', 'r', encoding="utf-8")
# Input file is given by a path relative to the workspace root.
# NOTE: files created on Windows 10 may be saved as Shift-JIS; reading with
# encoding="utf-8" is needed here (VS Code displays the file as utf-8, but
# the stored bytes may actually be s-jis).
sys.stdin=f
#
# Input snippets:
# num = int(input())
# num_list = [int(item) for item in input().split()]
# num_list = [input() for _ in range(3)]
##################################
# %%
# Everything below this line can be pasted into the online judge.
N = int(input())                                    # number of elements
num_list = [int(item) for item in input().split()]  # the array to sort
def insertionSort(A, N):
    """Sort list A of length N in place with insertion sort, printing the
    list once before the first insertion and once after every insertion
    step (ALDS1_1_A output format)."""
    print(*A)
    for i in range(1, N):
        key = A[i]
        j = i
        # shift larger elements right until key's slot is found
        while j > 0 and A[j - 1] > key:
            A[j] = A[j - 1]
            j -= 1
        A[j] = key
        print(*A)
# Run the sort on the array read from (redirected) standard input.
# (A stray '|' artifact at the end of this line -- a syntax error -- removed.)
insertionSort(num_list, N)
from expects import expect, be, be_none, be_true, be_a, be_false, equal, raise_error
from expects import be_callable
from spec.helper import description, before, describe, it, context
from spec.helper import TestClass, make_context
from spec.helper import MagicMock, raises
from spec import helper
from functools import partial
from receives.mapping import Mapping
from receives.error import Bug
from receives import patch
# Behavioural spec (mamba/expects DSL) for patch.Patch.
with description(patch.Patch) as self:
    with before.each:
        # Fresh instance, context, mapping and subject-under-test per example.
        self.testclass = TestClass()
        self.context = make_context(self.testclass, "valid")
        self.mapping = Mapping()
        self.subject = patch.Patch(self.mapping, self.context)
    with describe('#__init__'):
        with it('initializes patch object'):
            expect(self.subject._expected_call_count).to(equal(0))
            expect(self.subject._original_call).to(be_none)
            expect(self.subject._expectations).to(equal([]))
            expect(self.subject._call_count).to(equal(0))
            expect(self.subject._mapping).to(be(self.mapping))
            expect(self.subject._ctx).to(be(self.context))
            expect(self.subject._call_handler).to(be_none)
    with describe('#mapping'):
        with it('returns the actual mapping'):
            expect(self.subject.mapping).to(be(self.mapping))
    with describe('#ctx'):
        with it('returns the actual context'):
            expect(self.subject.ctx).to(be(self.context))
    with describe('#_select_call_handler'):
        # The handler chosen depends on what the context targets:
        # property / instance method / plain function / class method.
        with context('property'):
            with it('returns a fake property'):
                subject = patch.Patch(self.mapping, make_context(TestClass, "prop"))
                expect(subject._select_call_handler()).to(be_a(property))
            with it('raises if a property on a instance'):
                subject = patch.Patch(self.mapping, make_context(self.testclass, "prop"))
                with raises(AssertionError):
                    subject._select_call_handler()
        with context('instance'):
            with it('returns the method call for an instance method'):
                expect(self.subject._select_call_handler()).to(be_a(partial))
                expect(self.subject._select_call_handler().func).to(be(patch.patch_handle_instance))
                expect(self.subject._select_call_handler().args).to(equal((self.subject,)))
        with context('function'):
            with it('returns the method call for a normal function'):
                subject = patch.Patch(self.mapping, make_context(helper, 'test_function'))
                expect(subject._select_call_handler()).to(be_a(partial))
                expect(subject._select_call_handler().func).to(be(patch.patch_handle_default))
                expect(subject._select_call_handler().args).to(equal((subject,)))
        with context('class'):
            with it('returns the method call for a class method'):
                subject = patch.Patch(self.mapping, make_context(TestClass, "valid"))
                expect(subject._select_call_handler()).to(be_callable)
                expect(subject._select_call_handler().__name__).to(be('wrap'))
    with describe('#call_handler'):
        # call_handler is lazily created and then memoized.
        with it('creates a new call handler when called first'):
            self.subject._select_call_handler = MagicMock()
            self.subject.call_handler
            self.subject._select_call_handler.assert_called_once_with()
        with it('does return the already created call handler'):
            self.subject.call_handler
            self.subject._select_call_handler = MagicMock()
            self.subject.call_handler
            expect(self.subject._select_call_handler.called).to(be_false)
    with describe('#original_call'):
        with it('returns None if not patched yet'):
            expect(self.subject.original_call).to(be_none)
        with it('returns the original_call'):
            original_call = getattr(self.testclass, "valid")
            self.subject.patch()
            expect(self.subject.original_call).to(equal(original_call))
    with describe('#was_called'):
        with it('returns initially 0'):
            expect(self.subject._call_count).to(equal(0))
            self.subject.was_called()
            expect(self.subject._call_count).to(equal(1))
    with describe('#expect_one_more_call'):
        with it('was called but requires one more call'):
            expect(self.subject._call_count).to(equal(0))
            expect(self.subject._expected_call_count).to(equal(0))
            self.subject.expect_one_more_call()
            expect(self.subject._call_count).to(equal(1))
            expect(self.subject._expected_call_count).to(equal(1))
    with describe('#patch'):
        with it('replaces the to be patched method'):
            orig_call = self.testclass.valid
            self.subject.patch()
            expect(self.testclass.valid).to(be(self.subject.call_handler))
            expect(self.subject._original_call).to(equal(orig_call))
    with describe('#unpatch'):
        with it('unpatches the method if it was set'):
            orig_call = self.testclass.valid
            self.subject.patch()
            expect(self.testclass.valid).to(be(self.subject.call_handler))
            self.subject.unpatch()
            expect(self.testclass.valid).to(equal(orig_call))
    with describe('#finalize'):
        with it('restors the old method and checks for call counts'):
            self.subject.unpatch = MagicMock()
            expect(lambda: self.subject.finalize()).not_to(raise_error(AssertionError))
            self.subject.unpatch.assert_called_once_with()
        with it('raises an assertion exception if call counts do not match'):
            self.subject._call_count = 0
            self.subject._expected_call_count = 1
            expect(lambda: self.subject.finalize()).to(raise_error(AssertionError))
    with describe('#new_expectation'):
        with it('creates a new expectation'):
            ex = self.subject.new_expectation()
            expect(ex._context).to(be(self.context))
            expect(self.subject._expectations).to(equal([ex]))
            expect(self.subject._expected_call_count).to(be(1))
    with describe('#has_expectations'):
        with it('returns false if now expectation was added'):
            expect(self.subject.has_expectations()).to(be_false)
        with it('returns true if expectations were added'):
            self.subject.new_expectation()
            expect(self.subject.has_expectations()).to(be_true)
    with describe('#next_expectation'):
        # Expectations are consumed in FIFO order.
        with context('has expectations'):
            with it('returns the next expectation'):
                ex = self.subject.new_expectation()
                self.subject.new_expectation()
                expect(self.subject.next_expectation()).to(be(ex))
        with context('has no expectation'):
            with it('raises an error'):
                expect(lambda: self.subject.next_expectation()).to(raise_error(Bug))
# Handler methods
# Spec for the generic evaluation routine shared by all call handlers.
with description(patch.patch_evaluate) as self:
    with before.each:
        self.testclass = TestClass()
        self.context = make_context(self.testclass, "valid")
        self.mapping = Mapping()
        self.subject = patch.Patch(self.mapping, self.context)
    with describe('runs the generic evaluation'):
        with context('valid input'):
            with it('runs and validates against the input'):
                self.subject.was_called = MagicMock()
                ex = self.subject.new_expectation()
                ex.with_args('foo').and_return('returnvalue')
                value = patch.patch_evaluate(self.subject, ('foo',), {})
                expect(value).to(equal('returnvalue'))
                expect(self.subject.was_called.called).to(be_true)
            with it('returns None if there is no expectation'):
                self.subject.was_called = MagicMock()
                expect(patch.patch_evaluate(self.subject, (), {})).to(be_none)
                expect(self.subject.was_called.called).to(be_true)
            with it('calles the original call'):
                self.subject.patch()
                self.subject.was_called = MagicMock()
                ex = self.subject.new_expectation()
                ex.with_args('moep').and_call_original()
                value = patch.patch_evaluate(self.subject, ('moep',), {})
                expect(self.subject.was_called.called).to(be_true)
                expect(value).to(equal('moep'))
# Spec for resolving an instance patch to its class-level patch (if any).
with description(patch.patch_find_class_patch) as self:
    with before.each:
        self.testclass = TestClass()
        self.mapping = Mapping()
    with describe('finds the class of an instance'):
        with context('a class patch is set'):
            with it('returns the class patch'):
                class_context = make_context(TestClass, "valid")
                instance_context = make_context(self.testclass, "valid")
                class_patch = self.mapping.create_patch(class_context)
                instance_patch = self.mapping.create_patch(instance_context)
                expect(patch.patch_find_class_patch(instance_patch)).to(be(class_patch))
        with context('no class patch is set'):
            with it('returns None'):
                instance_context = make_context(self.testclass, "valid")
                instance_patch = self.mapping.create_patch(instance_context)
                expect(patch.patch_find_class_patch(instance_patch)).to(be_none)
        with context('a class patch as argument'):
            with it('returns None'):
                class_context = make_context(TestClass, "valid")
                instance_context = make_context(self.testclass, "valid")
                class_patch = self.mapping.create_patch(class_context)
                instance_patch = self.mapping.create_patch(instance_context)
                expect(patch.patch_find_class_patch(class_patch)).to(be_none)
# Specs for the concrete call handlers (instance, class, function, property).
with describe(patch.patch_handle_instance):
    with before.each:
        self.testclass = TestClass()
        self.mapping = Mapping()
        self.instance_context = make_context(self.testclass, "valid")
        self.class_context = make_context(TestClass, "valid")
    with context('without class patch'):
        with it('evaluates the class patch'):
            instance_patch = self.mapping.create_patch(self.instance_context)
            expect(patch.patch_handle_instance(instance_patch)).to(be_none)
            expect(instance_patch._call_count).to(be(1))
    with context('with class patch without expectation'):
        with it('evaluates the class patch'):
            class_patch = self.mapping.create_patch(self.class_context)
            instance_patch = self.mapping.create_patch(self.instance_context)
            expect(patch.patch_handle_instance(instance_patch)).to(be_none)
            expect(instance_patch._call_count).to(be(1))
            expect(class_patch._expected_call_count).to(be(1))
    with context('with class patch with expectation'):
        with it('evaluates the class patch'):
            # The class-level expectation answers the instance-level call;
            # both patches see their counters bumped.
            class_patch = self.mapping.create_patch(self.class_context)
            instance_patch = self.mapping.create_patch(self.instance_context)
            expt = class_patch.new_expectation()
            expt.and_return('foo')
            expect(patch.patch_handle_instance(instance_patch)).to(be('foo'))
            expect(class_patch._call_count).to(be(1))
            expect(class_patch._expected_call_count).to(be(1))
            expect(instance_patch._call_count).to(be(1))
            expect(instance_patch._expected_call_count).to(be(1))
    with context('with class patch and expectation and original call'):
        with it('evaluates the class patch and runs the instance method'):
            class_patch = self.mapping.create_patch(self.class_context)
            class_patch.patch()
            instance_patch = self.mapping.create_patch(self.instance_context)
            instance_patch.patch()
            expt = class_patch.new_expectation()
            expt.and_call_original()
            expect(patch.patch_handle_instance(instance_patch, 'moep')).to(be('moep'))
            expect(class_patch._call_count).to(be(1))
            expect(class_patch._expected_call_count).to(be(1))
            expect(instance_patch._call_count).to(be(1))
            expect(instance_patch._expected_call_count).to(be(1))
with describe(patch.patch_handle_instance_method):
    with before.each:
        self.mapping = Mapping()
        self.class_context = make_context(TestClass, "valid")
    with it('evaluates the class expectation if a instance is created'):
        class_patch = self.mapping.create_patch(self.class_context)
        class_patch.new_expectation().and_return('foo')
        class_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_instance_method(class_patch, testclass, ('moep',), {})).to(be('foo'))
        class_patch.unpatch()
    with it('evaluates and calls the original call'):
        class_patch = self.mapping.create_patch(self.class_context)
        class_patch.new_expectation().and_call_original()
        class_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_instance_method(class_patch, testclass, ('moep',), {})).to(be('moep'))
        class_patch.unpatch()
with describe(patch.patch_handle_default):
    with before.each:
        self.mapping = Mapping()
        self.func_context = make_context(helper, "test_function")
    with it('patches runs a normal evaluation'):
        func_patch = self.mapping.create_patch(self.func_context)
        func_patch.new_expectation().and_return('foo')
        expect(patch.patch_handle_default(func_patch)).to(be('foo'))
with describe(patch.patch_handle_property_set):
    with before.each:
        self.mapping = Mapping()
        self.prop_context = make_context(TestClass, "prop2")
        self.prop_patch = self.mapping.create_patch(self.prop_context)
    with it('evaluates the setter without calling original'):
        # Without and_call_original the backing value stays at its default (42).
        self.prop_patch.new_expectation().with_args(1000)
        self.prop_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_property_set(self.prop_patch, testclass, 1000)).to(be(None))
        expect(testclass.prop2_value).to(be(42))
        self.prop_patch.unpatch()
    with it('evaluates the setter and calls the original'):
        self.prop_patch.new_expectation().with_args(1000).and_call_original()
        self.prop_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_property_set(self.prop_patch, testclass, 1000)).to(be(None))
        expect(testclass.prop2_value).to(be(1000))
        self.prop_patch.unpatch()
with describe(patch.patch_handle_property_get):
    with before.each:
        self.mapping = Mapping()
        self.prop_context = make_context(TestClass, "prop2")
        self.prop_patch = self.mapping.create_patch(self.prop_context)
    with it('evaluates the getter without calling original'):
        self.prop_patch.new_expectation().and_return(1000)
        self.prop_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_property_get(self.prop_patch, testclass)).to(be(1000))
        self.prop_patch.unpatch()
    with it('evaluates the getther with calling original'):
        self.prop_patch.new_expectation().and_call_original()
        self.prop_patch.patch()
        testclass = TestClass()
        expect(patch.patch_handle_property_get(self.prop_patch, testclass)).to(be(42))
        self.prop_patch.unpatch()
with describe(patch.patch_fake_property):
    with before.each:
        self.mapping = Mapping()
    with it('returns a fake property without a setter'):
        # "prop" is read-only, so only fget is populated.
        self.prop_context = make_context(TestClass, "prop")
        self.prop_patch = self.mapping.create_patch(self.prop_context)
        prop = patch.patch_fake_property(self.prop_patch)
        expect(prop).to(be_a(property))
        expect(prop.fset).to(be_none)
        expect(prop.fget).to(be_a(partial))
    with it('returns a fake'):
        self.prop_context = make_context(TestClass, "prop2")
        self.prop_patch = self.mapping.create_patch(self.prop_context)
        prop = patch.patch_fake_property(self.prop_patch)
        expect(prop).to(be_a(property))
        expect(prop.fset).to(be_a(partial))
        expect(prop.fget).to(be_a(partial))
|
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from users import views
# URL routing table for the users API.
# NOTE(review): format_suffix_patterns is imported above but never applied here.
urlpatterns = [
    # Collection endpoint; `name` is used for reverse() lookups.
    url(r'^users/$', views.user_list_api, name='users-list'),
]
import requests
import argparse
# Cloudflare API v4 base URL.
endpoints = 'https://api.cloudflare.com/client/v4/'
# Plain-text services that echo the caller's public IPv4/IPv6 address (CSV reply).
ip_url = {
    'v4': 'http://ip4only.me/api/',
    'v6': 'http://ip6only.me/api/',
}
def getZoneID(dn):
    """Return the Cloudflare zone id owning `dn`.

    The zone is looked up by the last two labels of the name (example.com).
    NOTE(review): multi-label public suffixes (example.co.uk) resolve to the
    wrong zone name. Reads the module-global `headers` set in __main__.
    """
    registrable = '.'.join(dn.split('.')[-2:])
    reply = requests.get(endpoints + 'zones', headers=headers,
                         params={"name": registrable})
    return reply.json()['result'][0]['id']
def getDomainID(dn, zID, type='A'):
    """Return the id of the DNS record `dn` of record `type` in zone `zID`.

    Reads the module-global `headers` set in __main__.
    """
    query = {"name": dn, "type": type}
    records_url = endpoints + 'zones/' + zID + '/dns_records'
    reply = requests.get(records_url, headers=headers, params=query)
    return reply.json()['result'][0]['id']
def get_ip_addr(v):
    """Return this host's public IP address; `v` selects 'v4' or 'v6'.

    The ipXonly.me API answers CSV; the address is the second field.
    Exits the process with a message when the lookup fails (e.g. no IPv6
    connectivity). The original used a bare `except: exit()`, which silently
    swallowed every error — including programming errors like a bad key.
    """
    try:
        r = requests.get(ip_url[v], timeout=10)
        r.raise_for_status()
    except requests.RequestException as err:
        raise SystemExit('failed to determine public IP ({}): {}'.format(v, err))
    return r.text.split(',')[1]
def update_dns(domain, ip_addr_4, ip_addr_6):
    """Push the current public addresses to the domain's A (and AAAA) records.

    An empty `ip_addr_6` skips the AAAA update. Reads the module-global
    `headers`; prints a short status report per record type.
    """
    zID = getZoneID(domain)
    record_id_v4 = getDomainID(domain, zID)
    print('#--------------------#')
    print(' ', domain)
    records_base = endpoints + 'zones/' + zID + '/dns_records/'
    payload = {
        "type": "A",
        "name": domain,
        "content": ip_addr_4
    }
    reply = requests.put(records_base + record_id_v4, headers=headers, json=payload)
    print(' ipv4 :', reply.json()['success'])
    if ip_addr_6 != '':
        record_id_v6 = getDomainID(domain, zID, 'AAAA')
        payload = {
            "type": "AAAA",
            "name": domain,
            "content": ip_addr_6
        }
        reply = requests.put(records_base + record_id_v6, headers=headers, json=payload)
        print(' ipv6 :', reply.json()['success'])
    print('#--------------------#')
if __name__ == '__main__':
    # args parser
    parser = argparse.ArgumentParser(
        prog='cf-ddns',
        usage='cf-ddns [-6] -t <API_TOKEN> -d <DOMAIN>',
    )
    parser.add_argument('-6', action='store_true', help='enable IPv6 mode')
    parser.add_argument('-t', metavar='<API_TOKEN>', required=True, help='your cf api token')
    parser.add_argument('-d', metavar='<DOMAIN>', required=True, help='your cf domain name')
    args = vars(parser.parse_args())
    # Resolve the public addresses up front; IPv6 only when -6 was given.
    kwargs = {
        'domain': args['d'],
        'ip_addr_4': get_ip_addr('v4'),
        'ip_addr_6': get_ip_addr('v6') if args['6'] else '',
    }
    api_token = args['t']
    # `headers` is read as a module-level global by the request helpers above.
    headers = {
        "Authorization": f"Bearer {api_token}",
        "Content-Type": "application/json"
    }
    update_dns(**kwargs)
|
import requests
from bs4 import BeautifulSoup
import math
def webscrape(user_input):
    """Fetch a movie script from IMSDb and split it into a list of scenes.

    `user_input` is a human-typed title; it is converted to IMSDb URL casing
    first. Scenes are split on "<n>." heading markers emitted as <b> tags;
    scripts without such markers are chopped into 100 equal slices.
    """
    user_input = handle_casing(user_input)
    print(user_input)
    url = "https://www.imsdb.com/scripts/" + user_input + ".html"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    script = soup.find('pre')
    # BUG FIX: getText is a method. The original assigned the *bound method*
    # (`script.getText`) and then str()-ed it, so every downstream replace and
    # slice operated on the method's repr instead of the script text.
    allScript = script.getText()
    everyBody = script.findAll("b")  # possible indicators of a scene, or character's cue
    allScript = allScript.replace('<b>', "")
    allScript = allScript.replace("</b>", "")
    allScript = allScript.replace("BACK TO:", "")
    allScript = allScript.replace("CUT TO:", "")
    allScript = allScript.replace("(CONTINUED)", "")
    index = 2
    scenes = []
    # Clean up indicators. TODO: could be eliminated by using exact HTML formatting.
    for i in range(len(everyBody)):
        everyBody[i] = str(everyBody[i])
        everyBody[i] = everyBody[i].replace("<b>", "")
        everyBody[i] = everyBody[i].replace("</b>", "")
        everyBody[i] = everyBody[i].replace("</pre>>", "")
        everyBody[i] = everyBody[i].strip()
        if everyBody[i].isupper() or everyBody[i].find("(") > -1:
            # Drop character names / parentheticals preceding their dialog.
            allScript = allScript.replace(everyBody[i], "")
    allScript = allScript.replace("\n", "")
    allScript = allScript.replace(" ", "")
    i = 0      # index into everyBody
    start = 0  # where the current scene starts in allScript
    end = 0
    # For scripts without indicators: slice into 100 scenes of this length.
    arbit_len = math.floor(len(allScript) / 100)
    # Populate `scenes` from the "<n>." markers when they exist.
    while i < len(everyBody):
        indexStr = str(index) + "."
        if everyBody[i] == indexStr:
            end = allScript.find(indexStr)
            scenes.append(allScript[start:end])
            # NOTE(review): +2 assumes a single-digit scene number; markers
            # like "10." advance by the wrong offset — confirm intent.
            start = end + 2
            index = index + 1
        i = i + 1
    if len(scenes) > 1:
        # Account for the last scene, which has no trailing marker.
        scenes.append(allScript[start:len(allScript)])
    else:
        # No usable indicators: fall back to 100 fixed-size slices.
        start = 0
        end = arbit_len
        for i in range(100):
            scenes.append(allScript[start:end])
            start = end
            end = end + arbit_len if end + arbit_len < len(allScript) else len(allScript) - 1
    return scenes
def handle_casing(user_input):
    """Convert a typed movie title into IMSDb URL casing, joined by dashes.

    Minor words (articles, prepositions, short conjunctions) are lower-cased
    unless they start the title; every other word is capitalized.
    E.g. "the lord of the rings" -> "The-Lord-of-the-Rings".
    """
    minor_words = set(["a", "an", "the", "above", "across", "after", "at", "around", "before", "behind", "below", "beside", "between", "by", "down", "during", "for", "from", 'in', 'inside', 'onto', 'of', 'off', 'on', 'out', 'through', 'to', 'under', 'up', 'with', 'nor', 'but', 'or', 'yet', 'so'])
    cased = []
    for position, word in enumerate(user_input.split(" ")):
        if word.lower() in minor_words and position != 0:
            cased.append(word.casefold())
        else:
            # capitalize() lower-cases the tail, so "THE" -> "The" as well.
            cased.append(word.lower().capitalize())
    return "-".join(cased)
class Solution:
    """Leetcode 287: find the duplicate in nums without modifying the array."""

    def findDuplicate(self, nums):
        """Return the repeated value using Floyd's tortoise-and-hare.

        Treat nums as a linked list where index i points to nums[i]; the
        duplicate value is the entrance of the resulting cycle. O(n) time,
        O(1) space.
        """
        # Phase 1: advance a 1-step and a 2-step pointer until they meet
        # somewhere inside the cycle.
        slow = fast = nums[0]
        while True:
            slow = nums[slow]
            fast = nums[nums[fast]]
            if slow == fast:
                break
        # Phase 2: restart one pointer from the head; moving both one step
        # at a time, they meet exactly at the cycle entrance.
        finder = nums[0]
        while finder != fast:
            finder = nums[finder]
            fast = nums[fast]
        return fast
'''
class Solution:
def findDuplicate(self, nums: List[int]) -> int:
l = len(nums)
if l>1:
a = [0]*(l+1)
for i in range(l):
a[nums[i]] += 1
if a[nums[i]] > 1:
return nums[i] |
import numpy as np  # explicit: `np` was previously only available if re-exported by a wildcard import below
from numpy import *

import wrenchStampingLib as ws
from kinorrt.mechanics.stability_margin import *
from kinorrt.rrt import RRTManipulationStability
# params = (array([[ 0. , 1. , -1. ],
# [ 0. , 1. , 1. ],
# [ 1. , -0. , 0.2],
# [ 1. , -0. , 0.2]]), array([[ 0. , -1. , -0.2097],
# [ 0. , -1. , 0.939 ],
# [-1. , 0. , 0.2 ],
# [-1. , 0. , 0.2 ]]), array([[ 0.2135, 0.7118, -0.6691],
# [-0.2135, 0.7118, -0.7545],
# [ 0.2016, 0.6721, 0.7125],
# [-0.2016, 0.6721, 0.6318]]), array([[-0.6242, -0.7803, -0.0388],
# [ 0.6242, -0.7803, -0.2885],
# [-0.4741, -0.5926, 0.6512],
# [ 0.4741, -0.5926, 0.4616]]), array([[ -0.],
# [-10.],
# [ 0.]]), 6, 0.3, 0.8, 0.15, array([[1., 0., 0., 0., 0., 0.],
# [0., 1., 0., 0., 0., 0.],
# [0., 0., 1., 0., 0., 0.]]), array([[-0.14 ],
# [ 0.7001],
# [ 0.7001]]), array([[2, 2],
# [3, 3],
# [1, 1],
# [1, 0],
# [2, 0],
# [3, 0],
# [0, 1],
# [0, 2],
# [0, 3]], dtype=int32), array([[1, 1],
# [2, 2],
# [3, 3],
# [1, 0],
# [0, 1],
# [3, 0],
# [2, 0],
# [0, 2],
# [0, 3]], dtype=int32), array([[1],
# [0]], dtype=int32), array([[1],
# [1]], dtype=int32), 0)
#
# stability_margin = ws.wrenchSpaceAnalysis_2d(*params)
# Physical parameters for the planar stability-margin analysis.
env_mu = 0.3         # environment (support) friction coefficient
mnp_mu = 0.8         # manipulator contact friction coefficient
object_weight = 10
mnp_fn_max = 100     # manipulator normal-force cap
stability_solver = StabilityMarginSolver()
# Candidate per-contact modes for environment contacts (one mode per row).
h_modes = np.array([CONTACT_MODE.LIFT_OFF, CONTACT_MODE.STICKING,
                    CONTACT_MODE.SLIDING_RIGHT, CONTACT_MODE.SLIDING_LEFT]).reshape(-1, 1)
smsolver = StabilityMarginSolver()
# Active test scenario: object pose x, four environment contacts, one
# manipulator contact, a contact mode assignment, and a target velocity.
x = [0,0,0]
envs = [Contact((-0.5,0.2),(1,0),0),Contact((-0.5,-0.2),(1,0),0), Contact((-0.5,-0.2),(0,1),0),Contact((0.5,-0.2),(0,1),0)]
mnps = [Contact((0.5,0),(-1,0),0)]
mode = [CONTACT_MODE.FOLLOWING,CONTACT_MODE.SLIDING_RIGHT, CONTACT_MODE.LIFT_OFF, CONTACT_MODE.SLIDING_RIGHT, CONTACT_MODE.LIFT_OFF]
v_star = [0,0,1]
'''
x = [0,0,-np.pi/8]
envs = [Contact((-0.2,0),(1,0),0)]
mnps = [Contact((0.2,0),(-1,0),0)]
mode = [CONTACT_MODE.FOLLOWING,CONTACT_MODE.STICKING]
v_star = [0,0,1]
'''
'''
x = [0,0,0]
envs = [Contact((-0.5,-0.2),(0,1),0), Contact((0.5,-0.2),(0,1),0)]
mnps = [Contact((0.5,0),(-1,0),0)]
mode = [CONTACT_MODE.FOLLOWING,CONTACT_MODE.SLIDING_LEFT, CONTACT_MODE.SLIDING_LEFT]
v_star = [-1,0,0]
'''
# Solve for the object velocity consistent with the chosen contact mode,
# then score the stability margin of that motion.
vel = qp_inv_mechanics_2d(np.array(v_star), np.array(x), mnps, envs, mode, 'vert', mnp_mu, env_mu, mnp_fn_max)
e_modes = np.array(get_contact_modes([], envs))
# Drop the all-lift-off environment mode (no contact at all).
e_modes = e_modes[~np.all(e_modes == CONTACT_MODE.LIFT_OFF, axis=1)]
preprocess = smsolver.preprocess(x, env_mu, mnp_mu, envs, mnps, e_modes, h_modes,
                                 object_weight, mnp_fn_max)
# vel_ = self.inverse_mechanics(x_near, vel, envs, mnps, mode)
stability_margin_score = smsolver.stability_margin(preprocess, vel, mode)
print(stability_margin_score)
|
class Variable:
    """Two labelled groups of demo variables with a readable string form."""

    def __init__(self):
        super().__init__()
        # Runtime keys/values are user-facing (Russian) labels; kept byte-identical.
        self.A = {"Группа 1": "Группа 1", "Пременная 1": 10, "Пременная 2": 20.2}
        self.B = {"Группа 2": "Группа 2", "Пременная 1": 10, "Пременная 2": 20.2}

    def __str__(self, *args, **kwargs):
        """Return one ' key = value' line for every entry of A, then B.

        The original accumulated into a local named `str`, shadowing the
        builtin; this produces the identical text without the shadowing
        (and without quadratic string concatenation).
        """
        entries = list(self.A.items()) + list(self.B.items())
        return "".join(" {} = {}\n".format(k, v) for k, v in entries)
# BUG FIX: the class above is named Variable, but the original instantiated
# an undefined name `d` (NameError at run time).
dd = Variable()
print(dd)
# for k, v in dd.A.items():
#     print(k, v)
from flask import Flask,jsonify,request
from main import Game
app = Flask(__name__)  # WSGI application instance
# Route: accept a POSTed base64 table image and return shot suggestions.
@app.route("/", methods = ["POST"])
def get_table():
    """Decode the posted image, analyse the table, and return per-ball data.

    The JSON reply pairs each detected ball position with its suggested
    shot angle: {"result": [{"x": ..., "y": ..., "angle": ...}, ...]}.
    """
    # Image arrives base64-encoded in the form field "img_str".
    img_str = request.form.get("img_str")
    game = Game(img_str)
    positions = game.get_balls_position()
    shot_angles = game.get_angles()
    response = []
    for idx, ball in enumerate(positions):
        response.append({"x": ball[0], "y": ball[1], "angle": shot_angles[idx]})
    print(response)
    return jsonify({"result": response})
if __name__ == '__main__':
    # Bind to the LAN address so devices on the same network can reach the API.
    app.run(debug=True, host= '192.168.1.15')
# Leftover manual-test snippet, kept as an inert string:
'''
game = Game('')
'''
#!/usr/bin/python
import rospy
import string
import threading
from sensor_msgs.msg import JointState
from std_srvs.srv import Trigger, TriggerResponse
from trajectory_msgs.msg import JointTrajectoryPoint, JointTrajectory
def jointTrajectoryCallback(msg):
    """Store the latest received trajectory (runs on the subscriber thread).

    Uses `with lock:` instead of the manual acquire/release pair so the lock
    is released even if the assignment ever raises — a leaked lock would
    deadlock the show_trajectory service permanently.
    """
    global joint_trajectory
    with lock:
        joint_trajectory = msg
def showTrajectoryCallback(req):
    """Replay the stored trajectory on poses/joint_states, waypoint by waypoint.

    Sleeps between consecutive points for their time_from_start difference so
    the replay runs at roughly the recorded speed. Returns a TriggerResponse
    indicating success or why the replay was refused.
    """
    js_msg = JointState()
    js_msg.name = ['arm_joint_' + str(i+1) for i in range(5)]
    # Hold the lock for the whole replay so the subscriber cannot swap the
    # trajectory mid-playback. `with` also guarantees the lock is released if
    # publish/sleep raises — the original leaked the lock on any exception,
    # which would deadlock both callbacks from then on.
    with lock:
        if len(joint_trajectory.points) == 0:
            return TriggerResponse(False, 'No one or empty trajectory have been received!')
        for i, pose in enumerate(joint_trajectory.points):
            js_msg.position = pose.positions
            js_pub.publish(js_msg)
            if i != len(joint_trajectory.points) - 1:
                # Time gap until the next waypoint.
                dt = joint_trajectory.points[i+1].time_from_start.to_sec() - pose.time_from_start.to_sec()
                rospy.sleep(dt)
            else:
                break
    return TriggerResponse(True, 'Successfully done!')
if __name__=="__main__":
rospy.init_node("trajectory_vizualizator_node")
lock = threading.Lock()
joint_trajectory = JointTrajectory()
joint_trajectory.points = []
jt_sub = rospy.Subscriber("joint_trajectory", JointTrajectory, jointTrajectoryCallback)
js_pub = rospy.Publisher("poses/joint_states", JointState, queue_size=1, latch=True)
show_poses = rospy.Service('show_trajectory', Trigger, showTrajectoryCallback)
rospy.spin()
|
###MDP assignment
###Programmed by: Nick Miller
###Assistance by: Alex Cody
###Alex helped me print the U values and the policy properly-
import random
class MDP:
    """4x4 grid-world MDP (16 states, 4 actions) solved by value/policy iteration.

    Actions: 0=up, 1=down, 2=left, 3=right. A move succeeds with p=0.7, goes
    in the opposite direction with p=0.2, and stays put with p=0.1; moves that
    would leave the grid redirect to the in-bounds opposite cell instead.
    NOTE(review): Python 2 source (print statements, backtick repr below).
    """
    def __init__(self):
        self.states = [0]*16
        self.discount = .95
        for i in range(16):
            self.states[i] = i
        self.actions = [0,1,2,3] #Actions [Up,down,left,right] as suggested in the right up
        # Reward function: state 12 -> 200, state 10 -> 100,
        # other odd states -> 50, remaining even states -> 0.
        self.reward = [0]*len(self.states)
        for i in range(16):
            if(i==12):
                self.reward[i] = 200
            elif(i==10):
                self.reward[i] = 100
            elif(i%2==1): #odd numbers get 50, even numbers stay at 0
                self.reward[i] = 50
        ## Transition function T[sprime][s][a]: probability of landing in
        ## sprime after taking action a in state s.
        self.transition = [[[0.0 for i in range(len(self.actions))] for i in range(len(self.states))] for i in range(len(self.states))]
        for states in self.states:
            for action in self.actions:
                # Next state if the action succeeds (gets p=0.7).
                sprime = 0
                if(action==0):
                    sprime = states + 4
                elif(action==1):
                    sprime = states - 4
                elif(action==2):
                    sprime = states - 1
                elif(action==3):
                    sprime = states + 1
                if(sprime>=0 and sprime<len(self.states)):
                    # Guard against left/right moves wrapping across rows.
                    if(not(action==2 and states%4==0) and not(action==3 and states%4==3)):
                        self.transition[sprime][states][action] += .7
                else:
                    # Out of bounds: redirect the mass to the opposite cell.
                    # NOTE(review): nesting reconstructed from semantics —
                    # this `else` is paired with the bounds check above.
                    if(action==0):
                        self.transition[states-4][states][action] += .7
                    elif(action==1):
                        self.transition[states+4][states][action] += .7
                    elif(action==2 and states%4!=0):
                        self.transition[states+1][states][action] += .7
                    elif(action==3 and states%4!=3):
                        self.transition[states-1][states][action] += .7
                # Next state if the action fails (opposite direction, p=0.2).
                sprime = 0
                if(action==0):
                    sprime = states - 4
                elif(action==1):
                    sprime = states + 4
                elif(action==2):
                    sprime = states + 1
                elif(action==3):
                    sprime = states - 1
                if(sprime>=0 and sprime<len(self.states)):
                    #Checking for a valid move
                    if(not(action==2 and states%4==3) and not(action==3 and states%4==0)):
                        self.transition[sprime][states][action] += .2
                else:
                    if(action==0):
                        self.transition[states+4][states][action] += .2
                    elif(action==1):
                        self.transition[states-4][states][action] += .2
                    elif(action==2):
                        self.transition[states-1][states][action] += .2
                    elif(action==3):
                        self.transition[states+1][states][action] += .2
                # No move at all: stay in place with p=0.1.
                sprime = states
                self.transition[sprime][states][action] += .1
    def ValueIteration(self,maxError):
        """Run value iteration until the Bellman error bound is below maxError.

        Returns (policy, U): the greedy policy and the utility of each state.
        """
        U = [0]*len(self.states)
        Uprime = [0]*len(self.states)
        discount = .95  # NOTE(review): unused local; self.discount is used below
        while(True):
            #copy u1 to u
            for s in range(len(Uprime)):
                U[s] = Uprime[s]
            delta = 0
            # Bellman backup: expected utility of each action, keep the max.
            for state in range(len(self.states)):
                sumOverActions = [0]*len(self.actions)
                for action in range(len(self.actions)):
                    sumOverActions[action] = 0
                    for sprime in range(len(self.states)):
                        sumOverActions[action] += self.transition[sprime][state][action] * U[sprime]
                Uprime[state] = self.reward[state] + self.discount * max(sumOverActions)
                delta = max(delta,abs(Uprime[state]-U[state]))
            # Standard stopping criterion: delta < eps*(1-gamma)/gamma.
            if(delta<((maxError*(1.0-self.discount))/self.discount)):
                break
        # Extract the greedy policy with respect to the converged U.
        policy = [0]*len(self.states)
        for states in range(len(self.states)):
            policy[states] = 0
            valueOfActions = [0]*len(self.actions)
            for action in range(len(self.actions)):
                valueOfActions[action] = 0
                for sprime in range(len(self.states)):
                    valueOfActions[action] += self.transition[sprime][states][action] * U[sprime]
            policy[states] = valueOfActions.index(max(valueOfActions))
        return policy,U
    def PolicyIteration(self,iterations):
        """Modified policy iteration: `iterations` evaluation sweeps per step.

        Starts from the all-left policy; alternates approximate evaluation and
        greedy improvement until the policy stops changing. Returns (policy, U).
        """
        U = [0]*len(self.states)
        policy = [0]*len(self.states)
        for s in range(len(self.states)):
            policy[s] = 2
        unchanged = False
        while(not unchanged):
            # Approximate policy evaluation by repeated in-place sweeps.
            for k in range(1,iterations):
                for s in range(len(self.states)):
                    total = 0
                    for s1 in range(len(self.states)):
                        total += self.transition[s1][s][policy[s]]*U[s1]
                    U[s] = self.reward[s] + self.discount * total
            #Now that I have U, I can find the optimal policy
            #If I find the optimal policy, I will break out of the loop
            #If I don't, based on the last if statement, I will remain in the loop
            unchanged = True
            for state in range(len(self.states)):
                sumOverActions = [0]*len(self.actions)
                for action in range(len(self.actions)):
                    for sprime in range(len(self.states)):
                        sumOverActions[action] += self.transition[sprime][state][action] * U[sprime]
                maxA = max(sumOverActions)
                bestA = sumOverActions.index(maxA)
                if maxA > sumOverActions[policy[state]]:
                    policy[state] = bestA
                    unchanged = False
        return policy,U
    def printPolicy(self,actions):
        #Will print the policies nicely, using characters to represent direction
        # (^ up, v down, < left, > right); printed bottom row last.
        toPrint = ""
        for i in range(len(actions)-1,-1,-1):
            if i%4==3:
                toPrint += "\n"
            if(actions[i]==0):
                toPrint += " ^ "
            elif(actions[i]==1):
                toPrint += " v "
            elif(actions[i]==2):
                toPrint += " < "
            elif(actions[i]==3):
                toPrint += " > "
        print toPrint
    def printValueFunction(self,utilities):
        ##Will print the utility nicely (rounded ints in grid layout).
        toPrint = ""
        for i in range(len(utilities)-1,-1,-1):
            if i%4==3:
                toPrint += "\n"
            # Backtick repr is Python 2 shorthand for repr(...).
            toPrint += " " + `int(round(utilities[(4*abs((3-i)/4))+(3-(i%4))]))` + " "
        print toPrint
print toPrint
def main():
    """Solve the grid MDP with both algorithms; print utilities and policies."""
    mdp = MDP()
    valueIteration = mdp.ValueIteration(pow(10,-10))
    actionarray = valueIteration[0]
    utilityarray = valueIteration[1]
    mdp.printValueFunction(utilityarray)
    print ""
    mdp.printPolicy(actionarray)
    policyIteration = mdp.PolicyIteration(100)
    actionarray = policyIteration[0]
    utilityarray = policyIteration[1]
    mdp.printValueFunction(utilityarray)
    print ""
    mdp.printPolicy(actionarray)
main()
|
"""Generates code to perform xml decoding.
"""
# Module imports.
from operator import add
from pycim_mp.core.generators.base_generator import BaseGenerator
from pycim_mp.core.generators.generator_utils import *
from pycim_mp.core.generators.python.utils import *
# Module exports.
__all__ = ['DecodersGenerator']
# Module provenance info.
__author__="markmorgan"
__copyright__ = "Copyright 2010, Insitut Pierre Simon Laplace - Prodiguer"
__date__ ="$Jun 28, 2010 2:52:22 PM$"
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Sebastien Denvil"
__email__ = "sdipsl@ipsl.jussieu.fr"
__status__ = "Production"
# Generator key.
_GEN_KEY = 'decoders'
# Generator language identifier.
_GEN_LANG = 'python'
def _get_template(filename):
    """Helper function to return templates.

    Looks `filename` up under this generator's language/key namespace.
    """
    return get_template(filename, _GEN_LANG, _GEN_KEY)
class DecodersGenerator(BaseGenerator):
    """Generates code to perform xml decoding.
    """
    def __init__(self, ontology, opts):
        """Constructor.

        Keyword Arguments:
        ontology - ontology from which code is generated.
        opts - code generation options.
        """
        # Constructor chaining.
        super(DecodersGenerator, self).__init__(ontology, opts)
def on_ontology_parse(self, ont):
"""Event handlers for the ontology parse event.
Keyword Arguments:
ont - ontology being processed.
"""
super(DecodersGenerator, self).on_ontology_parse(ont)
# Create code output directory.
dir = get_ontology_directory(ont, self.opts.out_dir, 'decoding', self.opts.out_suffix)
create_directory(dir)
# Create python package init file.
code = self.__emit_root_package_init_file()
file = dir + '/' + get_package_init_file_name()
write_file(code, file)
def on_package_parse(self, pkg):
"""Event handlers for the package parse event.
Keyword Arguments:
pkg - package being processed.
"""
super(DecodersGenerator, self).on_package_parse(pkg)
id = emit_indent()
lr = emit_line_return()
def get_decoder_functions():
fns = ''
for cls in pkg.classes:
dcs = self.__get_decodings(cls)
fn = _get_template('decoder_function.txt')
fn = fn.replace('{class-name}', get_class_name(cls))
fn = fn.replace('{class-function-name}', get_class_functional_name(cls))
fn = fn.replace('{class-doc-name}', get_class_doc_string_name(cls))
fn = fn.replace('{class-decodings}', dcs)
fn += emit_line_return(3)
fns += fn
return fns
# Open template.
code = _get_template('decoder.txt')
# Generate code.
code = inject_standard_template_params(self.ontology, self.opts, code)
code = code.replace('{module-exports}', get_package_exports(pkg))
code = code.replace('{module-imports}', get_package_imports(pkg))
code = code.replace('{decoding-functions}', get_decoder_functions())
# Create decoder.
file = self.__get_decoder_file_name(pkg)
write_file(code, file)
def __get_decodings(self, cls):
"""Returns class level decodings.
Keyword Arguments:
cls - class being processed.
"""
code = ''
for p in cls.all_properties:
for dc in cls.get_property_decodings(p):
code += self.__emit_decoding(p, dc.decoding, dc.type)
return code
def __emit_decoding(self, prp, decoding, type):
    """Emits code corresponding to a class property decoding.

    Keyword Arguments:
    prp - property being processed.
    decoding - decoding being applied.
    type - sub type being decoded.
    """
    def get_decoding_function():
        # Simple / enum types map onto a named convertor function.
        if prp.type.is_simple or prp.type.is_enum:
            return '\'{0}\''.format(get_type_functional_name(prp.type))
        # Complex classes map onto the class decoder function name.
        elif prp.type.is_class:
            type_name = prp.type.name if type is None else type
            return get_class_decoder_function_name(type_name)

    # Generate one decoding tuple entry, indented on its own line.
    return '{0}(\'{1}\', {2}, {3}, \'{4}\'),'.format(
        emit_line_return() + emit_indent(2),
        prp.name,
        prp.is_iterative,
        get_decoding_function(),
        decoding)
def __get_decoder_file_name(self, pkg):
    """Returns name of decoding file.

    Keyword Arguments:
    pkg - package being processed.
    """
    out_dir = get_ontology_directory(pkg.ontology, self.opts.out_dir, 'decoding', self.opts.out_suffix)
    return '{0}/{1}{2}'.format(out_dir, get_package_decoder_file_name(pkg), FILE_EXTENSION)
def __emit_root_package_init_file(self):
    """Emits the root package initialisation file.
    """
    lr = emit_line_return()

    # Open template.
    code = _get_template('decoder_root_package.txt')

    def get_module_exports():
        # Comma-separated list of quoted decoder function names.
        return ', '.join(
            '\'{0}\''.format(get_class_decoder_function_name(e))
            for e in self.ontology.entities)

    def get_module_imports():
        # One import line per ontology entity, separated by line returns.
        lines = [
            'from py{0}.v{1}.decoding.{2} import {3}'.format(
                get_ontology_name(self.ontology),
                get_ontology_version(self.ontology),
                get_package_decoder_file_name(e.package),
                get_class_decoder_function_name(e))
            for e in self.ontology.entities]
        return lr.join(lines)

    # Generate code.
    code = inject_standard_template_params(self.ontology, self.opts, code)
    code = code.replace('{module-imports}', get_module_imports())
    code = code.replace('{module-exports}', get_module_exports())
    return code
|
"""
Consider an alternative version of Pig Latin, in which we don't check to see if the first letter is a vowel, but rather we check to see if the word contains two different vowels. Thus, 'wine' would have 'way' added to the end, but 'wind' would be translated into 'indway'. How would you check for two different vowels in the word? (Hint: Sets can come in handy here.)
"""
def wordToAlternativePigLatin(word):
    """Translate *word* into "alternative" Pig Latin.

    A word containing at least two *different* vowels keeps its spelling and
    gets 'way' appended ('wine' -> 'wineway'); otherwise the first letter is
    moved to the end and 'ay' is appended ('wind' -> 'indway').

    Keyword Arguments:
    word - the word to translate; an empty string translates to "".
    """
    if not word:  # idiomatic emptiness test instead of len(word) < 1
        return ""
    vowels = set('aeiou')
    # set.intersection accepts any iterable, so this yields exactly the
    # distinct vowels occurring in the word.
    if len(vowels.intersection(word)) >= 2:
        return f"{word}way"
    return f"{word[1:]}{word[0]}ay"
# Exercise the translator on a few sample words.
for sample in ('air', 'mist', 'eat', 'python', 'computer', 'a', 'b'):
    print(wordToAlternativePigLatin(sample))
|
# Python 2 script: read rows of an Excel workbook with xlrd and dump each row
# as a SQL INSERT statement into a text file.
# NOTE(review): uses the Python 2 print statement and Python 2 str.encode
# semantics (returns str); this file will not run unmodified under Python 3.
import xlrd
from datetime import date
from datetime import datetime
import random

# Source workbook and sheet.
workbook = xlrd.open_workbook('testResult.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
# Output file that accumulates the generated INSERT statements.
# NOTE(review): 'file' shadows the Python 2 builtin and is closed only after
# the loop; a with-block would be safer. The SQL is built by concatenation,
# so quotes in cell values would break the statement - TODO confirm inputs.
file = open("testResult.txt","w")
# Hard-coded row count - presumably matches the sheet; rows beyond
# worksheet.nrows would raise IndexError. TODO confirm against the workbook.
for x in range(0, 250000):
    # Raw cell values: four text identifiers and two Excel serial dates.
    pid=worksheet.cell(x, 0).value
    rid=worksheet.cell(x, 1).value
    proid=worksheet.cell(x, 2).value
    did=worksheet.cell(x, 3).value
    datee=worksheet.cell(x, 4).value
    datee2=worksheet.cell(x, 5).value
    # Encode the unicode cell text to UTF-8 byte strings (Python 2 str).
    pid=pid.encode('utf-8')
    rid=rid.encode('utf-8')
    proid=proid.encode('utf-8')
    did=did.encode('utf-8')
    # Convert Excel serial date numbers to datetime using the workbook's
    # date mode, then keep only the date part.
    dateR = xlrd.xldate.xldate_as_datetime(datee,workbook.datemode)
    dateR2 = xlrd.xldate.xldate_as_datetime(datee2,workbook.datemode)
    dateRegistered = dateR.date()
    dateRegistered2 = dateR2.date()
    # Progress/debug output (Python 2 print statement).
    print pid,rid,proid,did, dateRegistered,dateRegistered2
    #registered = str(dateRegistered)
    #date=date.encode('utf-8')
    #book = xlrd.open_workbook("myfile.xls")
    #sh = book.sheet_by_index(0)
    #a1 = sh.cell_value(rowx=0, colx=0)
    #a1_as_datetime = datetime.datetime(*xlrd.xldate_as_tuple(date, workbook.datemode))
    # print registered
    #print pid,did,mid,dateRegistered
    # date=str(date).encode('utf-8')
    #reaction=reaction.replace(',', '')
    #reaction=reaction.replace(')', '')
    #reaction=reaction.replace('(', '')
    #-print (allergy)
    #print (aid)
    # print (date)
    # print str(drug)pid,rid,proid,did, dateRegistered,dateRegistered2
    # Emit one INSERT per row.
    file.write("Insert into testResult" +" " + "values ('"+(pid)+"', '"+(rid)+"','"+(proid)+"','"+(did)+"','"+str(dateRegistered)+"','"+str(dateRegistered2)+"');\n")
file.close()
|
'''
A Keras port of the original Caffe SSD300 network.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from keras.models import Model
from keras.layers import Input, Lambda, Activation, Conv2D, MaxPooling2D, ZeroPadding2D, Reshape, Concatenate, SeparableConv2D, Dropout, BatchNormalization
from keras.layers import DepthwiseConv2D, AveragePooling2D, Add, Convolution2D
import keras.backend as K
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
import sys, os
import light_networks.squeezenet.squeezenet_bypass as squeezenet
def ssd_300(image_size,
            n_classes,
            input_tensor=None,
            mode='training',
            min_scale=None,
            max_scale=None,
            scales=None,
            aspect_ratios_global=None,
            aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                     [1.0, 2.0, 0.5],
                                     [1.0, 2.0, 0.5]],
            two_boxes_for_ar1=True,
            steps=[8, 16, 32, 64, 100, 300],
            offsets=None,
            clip_boxes=False,
            variances=[0.1, 0.1, 0.2, 0.2],
            coords='centroids',
            normalize_coords=True,
            subtract_mean=[123, 117, 104],
            divide_by_stddev=None,
            swap_channels=[2, 1, 0],
            confidence_thresh=0.01,
            iou_threshold=0.45,
            top_k=200,
            nms_max_output_size=400,
            return_predictor_sizes=False):
    '''
    Build a Keras model with SSD300 architecture, see references.

    The base network here is a SqueezeNet feature extractor, extended by the
    SSD architecture as described in the paper.

    Most of the arguments that this function takes are only needed for the anchor
    box layers. In case you're training the network, the parameters passed here must
    be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
    trained weights, the parameters passed here must be the same as the ones used
    to produce the trained weights.

    Some of these arguments are explained in more detail in the documentation of the
    `SSDBoxEncoder` class.

    Note: Requires Keras v2.0 or later. Currently works only with the
    TensorFlow backend (v1.0 or later).

    Note: the mutable default arguments above are never mutated inside this
    function (they are only read or rebound), so sharing them across calls is safe.

    Arguments:
        image_size (tuple): The input image size in the format `(height, width, channels)`.
        input_tensor: Tensor with shape (batch, height, width, channels)
        n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
        mode (str, optional): One of 'training', 'inference' and 'inference_fast'. In 'training' mode,
            the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes,
            the raw predictions are decoded into absolute coordinates and filtered via confidence thresholding,
            non-maximum suppression, and top-k filtering. The difference between latter two modes is that
            'inference' follows the exact procedure of the original Caffe implementation, while
            'inference_fast' uses a faster prediction decoding procedure.
        min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images.
        max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images. All scaling factors between the smallest and the
            largest will be linearly interpolated. Note that the second to last of the linearly interpolated
            scaling factors will actually be the scaling factor for the last predictor layer, while the last
            scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
            if `two_boxes_for_ar1` is `True`.
        scales (list, optional): A list of floats containing scaling factors per convolutional predictor layer.
            This list must be one element longer than the number of predictor layers. The first `k` elements are the
            scaling factors for the `k` predictor layers, while the last element is used for the second box
            for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
            last scaling factor must be passed either way, even if it is not being used. If a list is passed,
            this argument overrides `min_scale` and `max_scale`. All scaling factors must be greater than zero.
        aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
            generated. This list is valid for all prediction layers.
        aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each prediction layer.
            This allows you to set the aspect ratios for each predictor layer individually, which is the case for the
            original SSD300 implementation. If a list is passed, it overrides `aspect_ratios_global`.
        two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1. Will be ignored otherwise.
            If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
            using the scaling factor for the respective layer, the second one will be generated using
            geometric mean of said scaling factor and next bigger scaling factor.
        steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
            pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
            the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
            If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
            If no steps are provided, then they will be computed such that the anchor box center points will form an
            equidistant grid within the image dimensions.
        offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either floats or tuples of two floats. These numbers represent for each predictor layer how many
            pixels from the top and left borders of the image the top-most and left-most anchor box center points should be
            as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
            of the step size specified in the `steps` argument. If the list contains floats, then that value will
            be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
            `(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size.
        clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
        variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
            its respective variance value.
        coords (str, optional): The box coordinate format to be used internally by the model (i.e. this is not the input format
            of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width,
            and height), 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
        normalize_coords (bool, optional): Set to `True` if the model is supposed to use relative instead of absolute coordinates,
            i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
        subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
            of any shape that is broadcast-compatible with the image shape. The elements of this array will be
            subtracted from the image pixel intensity values. For example, pass a list of three integers
            to perform per-channel mean normalization for color images.
        divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
            floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
            intensity values will be divided by the elements of this array. For example, pass a list
            of three integers to perform per-channel standard deviation normalization for color images.
        swap_channels (list, optional): Either `False` or a list of integers representing the desired order in which the input
            image channels should be swapped.
        confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
            positive class in order to be considered for the non-maximum suppression stage for the respective class.
            A lower value will result in a larger part of the selection process being done by the non-maximum suppression
            stage, while a larger value will result in a larger part of the selection process happening in the confidence
            thresholding stage.
        iou_threshold (float, optional): A float in [0,1]. All boxes that have a Jaccard similarity of greater than `iou_threshold`
            with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
            to the box's confidence score.
        top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
            non-maximum suppression stage.
        nms_max_output_size (int, optional): The maximal number of predictions that will be left over after the NMS stage.
        return_predictor_sizes (bool, optional): If `True`, this function not only returns the model, but also
            a list containing the spatial dimensions of the predictor layers. This isn't strictly necessary since
            you can always get their sizes easily via the Keras API, but it's convenient and less error-prone
            to get them this way. They are only relevant for training anyway (SSDBoxEncoder needs to know the
            spatial dimensions of the predictor layers), for inference you don't need them.

    Returns:
        model: The Keras SSD300 model.
        predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
            of the output tensor shape for each convolutional predictor layer. During
            training, the generator function needs this in order to transform
            the ground truth labels into tensors of identical structure as the
            output tensors of the model, which is in turn needed for the cost
            function.

    References:
        https://arxiv.org/abs/1512.02325v5
    '''

    n_predictor_layers = 6 # The number of predictor conv layers in the network is 6 for the original SSD300.
    n_classes += 1 # Account for the background class.
    img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]

    ############################################################################
    # Get a few exceptions out of the way.
    ############################################################################

    if aspect_ratios_global is None and aspect_ratios_per_layer is None:
        raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified.")
    if aspect_ratios_per_layer:
        if len(aspect_ratios_per_layer) != n_predictor_layers:
            raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}.".format(n_predictor_layers, len(aspect_ratios_per_layer)))

    if (min_scale is None or max_scale is None) and scales is None:
        raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
    if scales:
        if len(scales) != n_predictor_layers+1:
            raise ValueError("It must be either scales is None or len(scales) == {}, but len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
    else: # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
        scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)

    # FIX: "pased" -> "passed" in the error message below.
    if len(variances) != 4:
        raise ValueError("4 variance values must be passed, but {} values were received.".format(len(variances)))
    variances = np.array(variances)
    if np.any(variances <= 0):
        raise ValueError("All variances must be >0, but the variances given are {}".format(variances))

    # FIX: the messages said "at least one" but the checks require exactly one
    # value per predictor layer; idiom fix: `x is not None` instead of `not (x is None)`.
    if (steps is not None) and (len(steps) != n_predictor_layers):
        raise ValueError("You must provide exactly one step value per predictor layer.")

    if (offsets is not None) and (len(offsets) != n_predictor_layers):
        raise ValueError("You must provide exactly one offset value per predictor layer.")

    ############################################################################
    # Compute the anchor box parameters.
    ############################################################################

    # Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
    if aspect_ratios_per_layer:
        aspect_ratios = aspect_ratios_per_layer
    else:
        aspect_ratios = [aspect_ratios_global] * n_predictor_layers

    # Compute the number of boxes to be predicted per cell for each predictor layer.
    # We need this so that we know how many channels the predictor layers need to have.
    # FIX: use logical `and` instead of bitwise `&` on booleans.
    if aspect_ratios_per_layer:
        n_boxes = []
        for ar in aspect_ratios_per_layer:
            if (1 in ar) and two_boxes_for_ar1:
                n_boxes.append(len(ar) + 1) # +1 for the second box for aspect ratio 1
            else:
                n_boxes.append(len(ar))
    else: # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
        if (1 in aspect_ratios_global) and two_boxes_for_ar1:
            n_boxes = len(aspect_ratios_global) + 1
        else:
            n_boxes = len(aspect_ratios_global)
        n_boxes = [n_boxes] * n_predictor_layers

    if steps is None:
        steps = [None] * n_predictor_layers
    if offsets is None:
        offsets = [None] * n_predictor_layers

    ############################################################################
    # Define functions for the Lambda layers below.
    ############################################################################

    def identity_layer(tensor):
        return tensor

    def input_mean_normalization(tensor):
        return tensor - np.array(subtract_mean)

    def input_stddev_normalization(tensor):
        return tensor / np.array(divide_by_stddev)

    def input_channel_swap(tensor):
        if len(swap_channels) == 3:
            return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]]], axis=-1)
        elif len(swap_channels) == 4:
            return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]], tensor[...,swap_channels[3]]], axis=-1)

    #############################################################################
    # Helper block for the SqueezeNet-based SSD extension layers.
    # (The previous comment said "Shufflenetv1", but the code below extends a
    # SqueezeNet backbone.)
    #############################################################################

    def _conv_blockSSD(inputs, filters, block_id=11):
        # 1x1 bottleneck conv followed by a stride-2 3x3 conv; returns both the
        # activated output and the raw conv tensor used as a predictor source.
        channel_axis = -1
        x = ZeroPadding2D(padding=(1, 1), name='conv_pad_%d_1' % block_id)(inputs)
        x = Conv2D(filters, (1,1),padding='valid',use_bias=False,strides=(1, 1),name='conv__%d_1'%block_id)(x)
        x = BatchNormalization(axis=channel_axis, name='conv_%d_bn_1'% block_id)(x)
        x = Activation('relu', name='conv_%d_relu_1'% block_id)(x)
        Conv = Conv2D(filters*2, (3,3), padding='valid', use_bias=False, strides=(2, 2), name='conv__%d_2' % block_id)(x)
        x = BatchNormalization(axis=channel_axis, name='conv_%d_bn_2' % block_id)(Conv)
        x = Activation('relu', name='conv_%d_relu_2' % block_id)(x)
        return x,Conv

    ############################################################################
    # Build the network.
    ############################################################################

    # FIX: identity comparison with None (`!=` relied on default object equality).
    if input_tensor is not None:
        x = Input(tensor=input_tensor, shape=(img_height, img_width, img_channels))
    else:
        x = Input(shape=(img_height, img_width, img_channels))

    # The following identity layer is only needed so that the subsequent lambda layers can be optional.
    x1 = Lambda(identity_layer, output_shape=(img_height, img_width, img_channels), name='identity_layer')(x)
    if not (divide_by_stddev is None):
        x1 = Lambda(input_stddev_normalization, output_shape=(img_height, img_width, img_channels), name='input_stddev_normalization')(x1)
    if not (subtract_mean is None):
        x1 = Lambda(input_mean_normalization, output_shape=(img_height, img_width, img_channels), name='input_mean_normalization')(x1)
    if swap_channels:
        x1 = Lambda(input_channel_swap, output_shape=(img_height, img_width, img_channels), name='input_channel_swap')(x1)

    # Get squeezenet architecture
    squeezenet_v1 = squeezenet.SqueezeNet(1000,
                                          inputs=(img_height, img_width, img_channels),
                                          include_top=False)
    FeatureExtractor = Model(inputs=squeezenet_v1.input, outputs=squeezenet_v1.get_layer('concatenate_8').output)
    merge9 = FeatureExtractor(x1)

    maxpool9 = MaxPooling2D(
        pool_size=(3, 3), strides=(2, 2), name='maxpool9', padding='same',
        data_format="channels_last")(merge9)
    fire10_squeeze = Convolution2D(
        64, (1, 1), activation='relu', kernel_initializer='glorot_uniform',
        padding='same', name='fire10_squeeze',
        data_format="channels_last")(maxpool9)
    fire10_expand1 = Convolution2D(
        256, (1, 1), activation='relu', kernel_initializer='glorot_uniform',
        padding='same', name='fire10_expand1',
        data_format="channels_last")(fire10_squeeze)
    fire10_expand2 = Convolution2D(
        256, (3, 3), activation='relu', kernel_initializer='glorot_uniform',
        padding='same', name='fire10_expand2',
        data_format="channels_last")(fire10_squeeze)
    merge10 = Concatenate(axis=-1)([fire10_expand1, fire10_expand2])

    layer, conv11_2 = _conv_blockSSD(merge10, 256, block_id=11)
    layer, conv12_2 = _conv_blockSSD(layer, 128, block_id=12)
    layer, conv13_2 = _conv_blockSSD(layer, 128, block_id=13)
    layer, conv14_2 = _conv_blockSSD(layer, 64, block_id=14)

    ### Build the convolutional predictor layers on top of the base network

    # We predict `n_classes` confidence values for each box, hence the confidence predictors have depth `n_boxes * n_classes`
    # Output shape of the confidence layers: `(batch, height, width, n_boxes * n_classes)`
    conv9_mbox_conf = Conv2D(n_boxes[0] * n_classes, (3, 3), padding='same', name='conv9_mbox_conf')(merge9)
    conv10_mbox_conf = Conv2D(n_boxes[1] * n_classes, (3, 3), padding='same', name='conv10_mbox_conf')(merge10)
    conv11_2_mbox_conf = Conv2D(n_boxes[2] * n_classes, (3, 3), padding='same', name='conv11_2_mbox_conf')(conv11_2)
    conv12_2_mbox_conf = Conv2D(n_boxes[3] * n_classes, (3, 3), padding='same', name='conv12_2_mbox_conf')(conv12_2)
    conv13_2_mbox_conf = Conv2D(n_boxes[4] * n_classes, (3, 3), padding='same', name='conv13_2_mbox_conf')(conv13_2)
    conv14_2_mbox_conf = Conv2D(n_boxes[5] * n_classes, (3, 3), padding='same', name='conv14_2_mbox_conf')(conv14_2)
    # We predict 4 box coordinates for each box, hence the localization predictors have depth `n_boxes * 4`
    # Output shape of the localization layers: `(batch, height, width, n_boxes * 4)`
    conv9_mbox_loc = Conv2D(n_boxes[0] * 4, (3, 3), padding='same', name='conv9_mbox_loc')(merge9)
    conv10_mbox_loc = Conv2D(n_boxes[1] * 4, (3, 3), padding='same', name='conv10_mbox_loc')(merge10)
    conv11_2_mbox_loc = Conv2D(n_boxes[2] * 4, (3, 3), padding='same', name='conv11_2_mbox_loc')(conv11_2)
    conv12_2_mbox_loc = Conv2D(n_boxes[3] * 4, (3, 3), padding='same', name='conv12_2_mbox_loc')(conv12_2)
    conv13_2_mbox_loc = Conv2D(n_boxes[4] * 4, (3, 3), padding='same', name='conv13_2_mbox_loc')(conv13_2)
    conv14_2_mbox_loc = Conv2D(n_boxes[5] * 4, (3, 3), padding='same', name='conv14_2_mbox_loc')(conv14_2)

    ### Generate the anchor boxes (called "priors" in the original Caffe/C++ implementation, so I'll keep their layer names)

    # Output shape of anchors: `(batch, height, width, n_boxes, 8)`
    conv9_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[0], next_scale=scales[1], aspect_ratios=aspect_ratios[0],
                                      two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[0], this_offsets=offsets[0], clip_boxes=clip_boxes,
                                      variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv9_mbox_priorbox')(conv9_mbox_loc)
    conv10_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[1], next_scale=scales[2], aspect_ratios=aspect_ratios[1],
                                       two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[1], this_offsets=offsets[1], clip_boxes=clip_boxes,
                                       variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv10_mbox_priorbox')(conv10_mbox_loc)
    conv11_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[2], next_scale=scales[3], aspect_ratios=aspect_ratios[2],
                                         two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[2], this_offsets=offsets[2], clip_boxes=clip_boxes,
                                         variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv11_2_mbox_priorbox')(conv11_2_mbox_loc)
    conv12_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[3], next_scale=scales[4], aspect_ratios=aspect_ratios[3],
                                         two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[3], this_offsets=offsets[3], clip_boxes=clip_boxes,
                                         variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv12_2_mbox_priorbox')(conv12_2_mbox_loc)
    conv13_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[4], next_scale=scales[5], aspect_ratios=aspect_ratios[4],
                                         two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[4], this_offsets=offsets[4], clip_boxes=clip_boxes,
                                         variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv13_2_mbox_priorbox')(conv13_2_mbox_loc)
    conv14_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[5], next_scale=scales[6], aspect_ratios=aspect_ratios[5],
                                         two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[5], this_offsets=offsets[5], clip_boxes=clip_boxes,
                                         variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv14_2_mbox_priorbox')(conv14_2_mbox_loc)

    ### Reshape

    # Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
    # We want the classes isolated in the last axis to perform softmax on them
    conv9_mbox_conf_reshape = Reshape((-1, n_classes), name='conv9_mbox_conf_reshape')(conv9_mbox_conf)
    conv10_mbox_conf_reshape = Reshape((-1, n_classes), name='conv10_mbox_conf_reshape')(conv10_mbox_conf)
    conv11_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv11_2_mbox_conf_reshape')(conv11_2_mbox_conf)
    conv12_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv12_2_mbox_conf_reshape')(conv12_2_mbox_conf)
    conv13_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv13_2_mbox_conf_reshape')(conv13_2_mbox_conf)
    conv14_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv14_2_mbox_conf_reshape')(conv14_2_mbox_conf)
    # Reshape the box predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
    # We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
    conv9_mbox_loc_reshape = Reshape((-1, 4), name='conv9_mbox_loc_reshape')(conv9_mbox_loc)
    conv10_mbox_loc_reshape = Reshape((-1, 4), name='conv10_mbox_loc_reshape')(conv10_mbox_loc)
    conv11_2_mbox_loc_reshape = Reshape((-1, 4), name='conv11_2_mbox_loc_reshape')(conv11_2_mbox_loc)
    conv12_2_mbox_loc_reshape = Reshape((-1, 4), name='conv12_2_mbox_loc_reshape')(conv12_2_mbox_loc)
    conv13_2_mbox_loc_reshape = Reshape((-1, 4), name='conv13_2_mbox_loc_reshape')(conv13_2_mbox_loc)
    conv14_2_mbox_loc_reshape = Reshape((-1, 4), name='conv14_2_mbox_loc_reshape')(conv14_2_mbox_loc)
    # Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
    conv9_mbox_priorbox_reshape = Reshape((-1, 8), name='conv9_mbox_priorbox_reshape')(conv9_mbox_priorbox)
    conv10_mbox_priorbox_reshape = Reshape((-1, 8), name='conv10_mbox_priorbox_reshape')(conv10_mbox_priorbox)
    conv11_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv11_2_mbox_priorbox_reshape')(conv11_2_mbox_priorbox)
    conv12_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv12_2_mbox_priorbox_reshape')(conv12_2_mbox_priorbox)
    conv13_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv13_2_mbox_priorbox_reshape')(conv13_2_mbox_priorbox)
    conv14_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv14_2_mbox_priorbox_reshape')(conv14_2_mbox_priorbox)

    ### Concatenate the predictions from the different layers

    # Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
    # so we want to concatenate along axis 1, the number of boxes per layer
    # Output shape of `mbox_conf`: (batch, n_boxes_total, n_classes)
    mbox_conf = Concatenate(axis=1, name='mbox_conf')([conv9_mbox_conf_reshape,
                                                       conv10_mbox_conf_reshape,
                                                       conv11_2_mbox_conf_reshape,
                                                       conv12_2_mbox_conf_reshape,
                                                       conv13_2_mbox_conf_reshape,
                                                       conv14_2_mbox_conf_reshape])

    # Output shape of `mbox_loc`: (batch, n_boxes_total, 4)
    mbox_loc = Concatenate(axis=1, name='mbox_loc')([conv9_mbox_loc_reshape,
                                                     conv10_mbox_loc_reshape,
                                                     conv11_2_mbox_loc_reshape,
                                                     conv12_2_mbox_loc_reshape,
                                                     conv13_2_mbox_loc_reshape,
                                                     conv14_2_mbox_loc_reshape])

    # Output shape of `mbox_priorbox`: (batch, n_boxes_total, 8)
    mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')([conv9_mbox_priorbox_reshape,
                                                               conv10_mbox_priorbox_reshape,
                                                               conv11_2_mbox_priorbox_reshape,
                                                               conv12_2_mbox_priorbox_reshape,
                                                               conv13_2_mbox_priorbox_reshape,
                                                               conv14_2_mbox_priorbox_reshape])

    # The box coordinate predictions will go into the loss function just the way they are,
    # but for the class predictions, we'll apply a softmax activation layer first
    mbox_conf_softmax = Activation('softmax', name='mbox_conf_softmax')(mbox_conf)

    # Concatenate the class and box predictions and the anchors to one large predictions vector
    # Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
    predictions = Concatenate(axis=2, name='predictions')([mbox_conf_softmax, mbox_loc, mbox_priorbox])

    if mode == 'training':
        model = Model(inputs=x, outputs=predictions)
    elif mode == 'inference':
        decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
                                               iou_threshold=iou_threshold,
                                               top_k=top_k,
                                               nms_max_output_size=nms_max_output_size,
                                               coords=coords,
                                               #normalize_coords=normalize_coords, #change this parameter for inference
                                               normalize_coords=False,
                                               img_height=img_height,
                                               img_width=img_width,
                                               name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    elif mode == 'inference_fast':
        decoded_predictions = DecodeDetectionsFast(confidence_thresh=confidence_thresh,
                                                   iou_threshold=iou_threshold,
                                                   top_k=top_k,
                                                   nms_max_output_size=nms_max_output_size,
                                                   coords=coords,
                                                   normalize_coords=normalize_coords,
                                                   img_height=img_height,
                                                   img_width=img_width,
                                                   name='decoded_predictions')(predictions)
        model = Model(inputs=x, outputs=decoded_predictions)
    else:
        raise ValueError("`mode` must be one of 'training', 'inference' or 'inference_fast', but received '{}'.".format(mode))

    return model
|
import chainer.links as L
import chainer.functions as F
from chainer import Chain, optimizers, Variable, serializers, initializers
from collections import deque
import copy
import gym
import matplotlib.pyplot as plt
import numpy as np
import sys
import pickle
import os
import glob
from time import sleep
import timeit
class NN(Chain):
    """Three-hidden-layer MLP mapping a state vector to per-action Q-values."""

    def __init__(self, n_in, n_out):
        super(NN, self).__init__(
            L1=L.Linear(n_in, 100),
            L2=L.Linear(100, 100),
            L3=L.Linear(100, 100),
            Q_value=L.Linear(100, n_out, initialW=initializers.Normal(scale=0.05))
        )

    def Q_func(self, x):
        # Three leaky-ReLU hidden layers followed by a linear output head.
        h = F.leaky_relu(self.L1(x))
        h = F.leaky_relu(self.L2(h))
        h = F.leaky_relu(self.L3(h))
        return F.identity(self.Q_value(h))
class DQN(object):
    """Deep Q-Network agent with a target network and experience replay."""

    def __init__(self, n_st, n_act, seed=0):
        """Build online/target networks, optimizer and replay buffer.

        Parameters
        ----------
        n_st : int
            dimensionality of the state vector
        n_act : int
            number of discrete actions
        seed : int
            numpy RNG seed for reproducible exploration
        """
        super(DQN, self).__init__()
        np.random.seed(seed)
        sys.setrecursionlimit(10000)
        self.n_st = n_st
        self.n_act = n_act
        self.model = NN(n_st, n_act)
        # Frozen copy used for bootstrap targets; refreshed periodically in train().
        self.target_model = copy.deepcopy(self.model)
        self.optimizer = optimizers.Adam()
        self.optimizer.setup(self.model)
        self.memory = deque()
        self.loss = 0
        self.step = 0
        self.gamma = 0.99            # discount factor
        self.memory_size = 10000     # replay buffer capacity
        self.batch_size = 100
        self.epsilon = 1             # epsilon-greedy exploration rate
        self.epsilon_decay = 0.001
        self.epsilon_min = 0
        self.exploration = 1000      # steps before epsilon starts decaying
        self.train_freq = 10         # train every N steps (once buffer is full)
        self.target_update_freq = 30 # sync target network every N steps

    def stock_experience(self, st, act, r, st_dash, ep_end):
        """Append one transition, evicting the oldest once capacity is hit."""
        self.memory.append((st, act, r, st_dash, ep_end))
        if len(self.memory) > self.memory_size:
            self.memory.popleft()

    def forward(self, st, act, r, st_dash, ep_end):
        """Compute the TD mean-squared-error loss for one minibatch."""
        s = Variable(st)
        s_dash = Variable(st_dash)
        Q = self.model.Q_func(s)
        Q_dash = self.target_model.Q_func(s_dash)
        max_Q_dash = np.asanyarray(list(map(np.max, Q_dash.data)))
        target = np.asanyarray(copy.deepcopy(Q.data), dtype=np.float32)
        for i in range(self.batch_size):
            # Terminal transitions get no bootstrapped future value.
            target[i, act[i]] = r[i] + (self.gamma * max_Q_dash[i]) * (not ep_end[i])
        loss = F.mean_squared_error(Q, Variable(target))
        self.loss = loss.data
        return loss

    def shuffle_memory(self):
        """Return the replay buffer as a randomly permuted array of rows."""
        # BUG FIX: rows mix arrays and scalars, so modern numpy refuses to
        # infer a dtype; request an object array explicitly.
        mem = np.array(self.memory, dtype=object)
        return np.random.permutation(mem)

    def parse_batch(self, batch):
        """Split a batch of transitions into typed column arrays."""
        st, act, r, st_dash, ep_end = [], [], [], [], []
        for i in range(self.batch_size):
            st.append(batch[i][0])
            act.append(batch[i][1])
            r.append(batch[i][2])
            st_dash.append(batch[i][3])
            ep_end.append(batch[i][4])
        st = np.array(st, dtype=np.float32)
        act = np.array(act, dtype=np.int8)
        r = np.array(r, dtype=np.float32)
        st_dash = np.array(st_dash, dtype=np.float32)
        # BUG FIX: np.bool was removed in numpy 1.24; the builtin bool yields
        # the same numpy boolean dtype.
        ep_end = np.array(ep_end, dtype=bool)
        return st, act, r, st_dash, ep_end

    def experience_replay(self):
        """Sample a random minibatch and take one optimizer step."""
        mem = self.shuffle_memory()
        perm = np.array(range(len(mem)))
        index = perm[0:self.batch_size]
        batch = mem[index]
        st, act, r, st_dash, ep_end = self.parse_batch(batch)
        self.model.cleargrads()
        loss = self.forward(st, act, r, st_dash, ep_end)
        loss.backward()
        self.optimizer.update()

    def get_action(self, st):
        """Epsilon-greedy action; returns (action, max-Q), max-Q 0 when random."""
        if np.random.rand() < self.epsilon:
            return np.random.randint(0, self.n_act), 0
        else:
            s = Variable(st)
            Q = self.model.Q_func(s)
            Q = Q.data[0]
            a = np.argmax(Q)
            return np.asarray(a, dtype=np.int8), max(Q)

    def reduce_epsilon(self):
        """Linearly decay epsilon once the exploration phase is over."""
        if self.epsilon > self.epsilon_min and self.exploration < self.step:
            self.epsilon -= self.epsilon_decay

    def train(self):
        """One agent step: maybe replay, decay epsilon, sync the target net."""
        if len(self.memory) >= self.memory_size:
            if self.step % self.train_freq == 0:
                self.experience_replay()
                self.reduce_epsilon()
        if self.step % self.target_update_freq == 0:
            self.target_model = copy.deepcopy(self.model)
        self.step += 1

    def save_model(self, outputfile):
        """Serialize the online network to an .npz file."""
        serializers.save_npz(outputfile, self.model)

    def load_model(self, inputfile):
        """Load online-network weights from an .npz file."""
        serializers.load_npz(inputfile, self.model)
class DQNMaster(DQN):
    """Parameter-server side of a distributed DQN: ingests slave experience,
    trains every step, and shares the model/epsilon via a network folder."""

    def __init__(self, n_st, n_act, seed=0):
        # BUG FIX: the caller's seed was previously discarded (seed=0 was
        # hard-coded in the super() call); forward the argument.
        super(DQNMaster, self).__init__(n_st, n_act, seed=seed)
        self.train_freq = 1  # the master trains on every step

    def load_experience(self, files, share_model_update_flag_file):
        """Merge pickled slave transitions into the replay buffer.

        Clears the model-update flag first, trims the buffer to capacity,
        and deletes each ingested file so it is not re-read.
        """
        if os.path.exists(share_model_update_flag_file):
            os.remove(share_model_update_flag_file)
        for file in files:
            memory = pickle.load(open(file, 'rb'))
            self.memory.extend(memory)
            while len(self.memory) > self.memory_size:
                self.memory.popleft()
            os.remove(file)
        print('load_experience !')

    def save_model_share(self, share_model_file, share_model_file_bk, share_model_update_flag_file):
        """Publish the current model to the share (keeping one backup), then
        raise the update flag so slaves know to reload."""
        if os.path.exists(share_model_file):
            if os.path.exists(share_model_file_bk):
                os.remove(share_model_file_bk)
            os.rename(share_model_file, share_model_file_bk)
        serializers.save_npz(share_model_file, self.model)
        sleep(2)  # give the network share time to settle before flagging
        update_flag = True
        pickle.dump(update_flag, open(share_model_update_flag_file, 'wb'))
        print('save_model_share !')

    def save_epsilon(self, share_epsilon_file):
        """Share the current exploration rate with the slaves."""
        pickle.dump(self.epsilon, open(share_epsilon_file, 'wb'))
        print('save_epsilon !')
if __name__ == "__main__":
    # Master-side loop: aggregates experience written by slave processes into
    # a shared network folder, trains, and republishes the model.
    env_name = "CartPole-v0"
    seed = 0
    env = gym.make(env_name)
    view_path = 'video/' + env_name
    n_st = env.observation_space.shape[0]
    if type(env.action_space) == gym.spaces.discrete.Discrete:
        # CartPole-v0, Acrobot-v0, MountainCar-v0
        n_act = env.action_space.n
        action_list = np.arange(0, n_act)
    elif type(env.action_space) == gym.spaces.box.Box:
        # Pendulum-v0
        action_list = [np.array([a]) for a in [-2.0, 2.0]]
        n_act = len(action_list)
    # new
    agent = DQNMaster(n_st, n_act, seed)
    # Shared network folder layout used to exchange data with the slaves.
    share_folder = '//ALB0218/Users/xinzhu_ye/PythonScript/share/'
    slave_number = 3
    share_memory_folder = share_folder + 'memory' + os.sep
    share_model_folder = share_folder + 'DQNmodel' + os.sep
    share_model_file = share_model_folder + 'DQNmodel.model'
    share_model_file_bk = share_model_folder + 'DQNmodel.model.bk'
    share_model_update_flag_file = share_model_folder + 'DQNmodel_update_flag.txt'
    end_flag_folder = share_folder + 'end_flag' + os.sep
    share_epsilon_folder = share_folder + 'epsilon' + os.sep
    share_epsilon_file = share_epsilon_folder + 'epsilon.txt'
    # Start from a clean share: drop stale memory, flag and epsilon files.
    share_memory_files = glob.glob(share_memory_folder + '*')
    for file in share_memory_files:
        os.remove(file)
    if os.path.exists(share_model_update_flag_file):
        os.remove(share_model_update_flag_file)
    end_flag_files = glob.glob(end_flag_folder + '*')
    for file in end_flag_files:
        os.remove(file)
    if os.path.exists(share_epsilon_file):
        os.remove(share_epsilon_file)
    list_t = []
    list_loss = []
    i = 0
    end_flag_count = len(glob.glob(end_flag_folder + '*'))
    # while not (end_flag_file1 in end_flag_files and end_flag_file2 in end_flag_files):
    # Run until every slave has dropped an end flag into the share.
    while end_flag_count < slave_number:
        share_memory_files = glob.glob(share_memory_folder + '*')
        sleep(5)
        if len(share_memory_files) > 0:
            # New slave experience arrived: ingest, train, publish the model.
            agent.load_experience(share_memory_files, share_model_update_flag_file)
            agent.train()
            agent.save_model_share(share_model_file, share_model_file_bk, share_model_update_flag_file)
            agent.save_epsilon(share_epsilon_file)
        i += 1
        print("episode_num" + str(i))
        observation = env.reset()
        # Local evaluation episode (rendered, at most 400 steps).
        for t in range(400):
            env.render()
            state = observation.astype(np.float32).reshape((1, n_st))
            act_i = agent.get_action(state)[0]
            action = action_list[act_i]
            observation, reward, ep_end, _ = env.step(action)
            state_dash = observation.astype(np.float32).reshape((1, n_st))
            if ep_end:
                print('max t:', t)
                print('loss:', agent.loss)
                list_t.append(t)
                list_loss.append(agent.loss)
                break
        end_flag_count = len(glob.glob(end_flag_folder + '*'))
    # Plot episode-length and loss history, then persist the final model.
    fig = plt.figure()
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.plot(list_t)
    ax2 = fig.add_subplot(2, 1, 2)
    ax2.plot(list_loss)
    plt.show()
    # env.Monitor.close()
    agent.save_model('DQN.model')
    print('yes!')
|
from rest_framework import routers
from bootcamp.news.serializers import NewsViewSet
# DRF router exposing the news API; NewsViewSet is served under "news/".
router = routers.DefaultRouter()
router.register("news", NewsViewSet)
|
# coding:utf-8
"""
环境:Mac Python3
pip install -U selenium
下载chromedriver,放到项目路径下
(https://npm.taobao.org/mirrors/chromedriver/2.33/)
https://sites.google.com/a/chromium.org/chromedriver/downloads
问题:
无法打开“chromedriver”,因为无法验证开发者。 仍然运行
macOS无法验证“chromedriver”的开发者。您确定要打开它吗? 打开
"""
import requests
import json
import os
from lxml import etree
from selenium import webdriver
import time
# Search keyword (a celebrity name) used for all Douban queries below.
query = '张柏芝'
# Destination directory for downloaded images.
downloadPath = './ts_data/img2'
# Path to the chromedriver binary matching the installed Chrome version.
chromedriver_url = '/Users/liampro/sdk/chrome/v89/chromedriver'
''' 下载图片 '''
# NOTE(review): the WebDriver starts at import time and is shared globally.
driver = webdriver.Chrome(chromedriver_url)
def download(src, id):
    """Download `src` into downloadPath, named `id` plus src's file extension.

    Skips the fetch when the target file already exists; connection errors
    are reported and swallowed so a batch run keeps going.
    """
    old_name = os.path.basename(src)
    ex = os.path.splitext(old_name)[1]  # e.g. '.webp'
    name = str(id) + ex                 # id may be an int (JSON photo ids)
    dest = os.path.join(downloadPath, name)
    if not os.path.exists(downloadPath):
        # BUG FIX: os.mkdir fails when the parent './ts_data' is missing;
        # makedirs creates the whole chain (exist_ok guards a race).
        os.makedirs(downloadPath, exist_ok=True)
    if os.path.exists(dest):
        print('已存在:' + str(id))
        return
    try:
        pic = requests.get(src, timeout=20)
        # Context manager guarantees the handle closes even if write fails.
        with open(dest, 'wb') as fp:
            fp.write(pic.content)
    except requests.exceptions.ConnectionError:
        print('图片无法下载:' + str(id))
def get_json_img():
    """Page through Douban's JSON photo-search API and download every image.

    Fetches 20 results per request, stepping `start` through the full result
    count (22471 at the time of writing).
    """
    for i in range(0, 22471, 20):
        url = 'https://www.douban.com/j/search_photo?q=' + query + '&limit=20&start=' + str(i)
        # e.g. https://www.douban.com/j/search_photo?q=张柏芝&limit=20&start=20
        html = requests.get(url).text
        print('html:' + html)
        # BUG FIX: json.loads() lost its `encoding` parameter in Python 3.9
        # (TypeError since then); requests already decoded the body to str.
        response = json.loads(html)
        for image in response['images']:
            print(image['src'])  # the image URL about to be fetched
            download(image['src'], image['id'])
def get_xpath_img_by_page(url=''):
    """Render one Douban search-result page and download every cover image."""
    driver.get(url)
    tree = etree.HTML(driver.page_source)
    # XPath pair: the cover image URL and the matching title anchor element.
    src_expr = "//div[@class='item-root']/a[@class='cover-link']/img[@class='cover']/@src"
    title_expr = "//div[@class='item-root']/div[@class='detail']/div[@class='title']/a[@class='title-text']"
    for img_src, title_node in zip(tree.xpath(src_expr), tree.xpath(title_expr)):
        print('\t'.join([str(img_src), str(title_node.text)]))
        download(img_src, title_node.text)
        time.sleep(0.005)
#driver.close()
def multiple_page_by_xpath():
    """Walk the paginated search results and download images from each page.

    Iterates start offsets 0, 15, ..., 60 (Douban pages 15 results at a
    time) and closes the shared WebDriver when done.
    """
    for i in range(0, 61, 15):
        url = 'https://movie.douban.com/subject_search?search_text=' + query + '&cat=1002'+ '&start=' + str(i)
        # BUG FIX: the original passed i as a second print() argument, so the
        # %d placeholder was never substituted; use %-formatting instead.
        print('分页:start:%d' % i)
        time.sleep(0.005)
        get_xpath_img_by_page(url)
        time.sleep(0.5)  # throttle between pages
    driver.close()
#test one
#download('https://img2.doubanio.com/view/celebrity/s_ratio_celebrity/public/p1394446025.33.webp','张柏芝 Cecilia Cheung')
#test more
#url = 'https://movie.douban.com/subject_search?search_text=' + query + '&cat=1002'
#get_xpath_img_by_page(url)
#driver.close()
# Entry point: crawl all result pages for `query` and download each cover.
multiple_page_by_xpath()
""" data:
<div class="item-root">
<a href="https://movie.douban.com/celebrity/1003495/" data-moreurl="" class="cover-link">
<img src="https://img9.doubanio.com/view/celebrity/s_ratio_celebrity/public/p1394446025.33.webp" alt="张柏芝 Cecilia Cheung" class="cover">
</a>
<div class="detail">
<div class="title"><a href="https://movie.douban.com/celebrity/1003495/" data-moreurl="" class="title-text">张柏芝 Cecilia Cheung</a></div>
<div class="meta abstract" style="overflow: hidden;"><div class="meta abstract" style="line-height: 16.8px;">11519 人收藏</div>
</div>
<div class="meta abstract_2" style="overflow: hidden;"><div class="meta abstract_2" style="line-height: 18px;">演员 / 配音 / 制片人 / 1980-05-24 / 喜剧之王 / 新喜剧之王 / 少林足球</div></div></div>
</div>
""" |
#!/usr/bin/env python
import time
import serial
import paho.mqtt.client as mqtt
from random import randrange, uniform
import time
import json
import trionesControl.trionesControl as tc
import pygatt
# Step counter advanced by on_message1 as the motion controller acks each move.
counter = 0
# True once a multi-step task (watering/seeding/probing) has finished.
complete = False
# Arm position presets for the two shelf levels — presumably heights in the
# controller's units; TODO confirm against the motion controller firmware.
level_1 = 40
level_0 = 10
# Set by on_message1 when the controller reports "failed"; cleared by the
# retry loops in on_message.
error_code = False
# Serial link to the microcontroller handling sensors and the gripper.
ser = serial.Serial(
        port='/dev/ttyUSB0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0
        baudrate = 9600,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS,
        timeout=1
)
# Callback fired once the cloud MQTT broker acknowledges the connection (CONNACK).
def on_connect(client, userdata, flags, rc):
    """Log the broker's connect result code (0 means success)."""
    print("Connected with result code " + str(rc))
def on_connect1(client, userdata, flags, rc):
    """Log local-broker connection and (re)subscribe to the response topic."""
    print("Client 1 Connected with result code " + str(rc))
    # Subscribing here means the subscription survives reconnects.
    client1.subscribe("test/response")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Dispatch one JSON task received from the cloud broker.

    Task types: AD (ambient readings), LC (lighting), WT (watering),
    SD (seeding), PD (soil-humidity probe), MC (manual control).
    Multi-step tasks publish arm waypoints to client1 ("test/request") and
    busy-wait on the module-global `counter`, which on_message1 advances on
    each controller ack; `error_code` aborts the current wait.
    """
    global complete
    global counter
    global level_1
    global error_code
    instruction = msg.payload.decode('utf-8')
    print(instruction)
    data = json.loads(instruction)
    if(data["task_type"] == "AD"): #abiant measure
        # Ask the serial sensor board for temp/humidity/light; reply is CSV.
        ser.write("temphumid".encode())
        time.sleep(5)
        line = ser.read(100).decode()
        return_data = line.split(",")
        client.publish("response", json.dumps({
            'task_id': data["task_id"],
            'succeeded': True,
            'ambient_humidity': return_data[0],
            'ambient_light_intensity': return_data[2],
            'ambient_temperature': return_data[1]
        }))
    elif(data["task_type"] == "LC"):#lighting
        print(data)
        # A fresh BLE connection is made per lighting task.
        device = tc.connect('58:82:04:00:09:2E')
        tc.powerOn(device)
        tc.setRGB(data["desired_light_red"], data["desired_light_green"], data["desired_light_blue"],device)
        client.publish("response", json.dumps({
            'task_id': data["task_id"],
            'succeeded': True,
        }))
    elif(data["task_type"] == "WT"):#watering
        #move motor
        #engage watering
        if(data["level"] == 1):
            # Waypoints as "height,radius,degree" strings: approach, probe
            # down, retract, then the watering position.
            instruction1 = str(level_1)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            instruction2 = str(level_1-4)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            instruction3 = str(level_1)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            instruction4 = str(level_1)+ "," + str(data["radius"]-5)+ "," + str(data["degree"])
            print(instruction1)
            print(instruction2)
            print(instruction3)
            # Step machine: send a waypoint, wait for on_message1 to bump
            # `counter`, then trigger the matching serial action.
            while complete == False:
                if(counter == 0):
                    client1.publish("test/request",instruction1)
                    while(counter != 1):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("soil".encode())
                elif(counter == 1):
                    client1.publish("test/request", instruction2)
                    while(counter != 2):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("a".encode())
                    time.sleep(3)
                    line = ser.read(100).decode()
                    data_line = line.split("\r\n")
                    print(data_line[0])
                elif(counter == 2):
                    client1.publish("test/request", instruction3)
                    while(counter != 3):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("a".encode())
                elif(counter == 3):
                    counter = 0
                    complete = True
            complete = False
            # NOTE(review): data_line is only assigned in the counter==1 step;
            # if that step aborted via error_code this raises NameError — confirm.
            humid = int(data_line[0])
            dif_humid = data["target_humidity"] - humid
            duration = dif_humid / 5
            client1.publish("test/request", instruction4)
            while(counter != 1):
                time.sleep(1)
                if (error_code == True):
                    error_code = False
                    break
            counter = 0
            # "-1,<duration>,0" — presumably a timed-pump command; confirm protocol.
            com_temp = "-1," + str(duration)+",0"
            client1.publish("test/request", com_temp)
            while(counter != 1):
                time.sleep(1)
            counter = 0
        # Success is reported even when level != 1 and nothing was done.
        client.publish("response", json.dumps({
            'task_id': data["task_id"],
            'succeeded': True,
        }))
    elif(data["task_type"] == "SD"):#seeding
        #move motor
        #engage seeding
        print("debug data")
        print (data["seed_container_level"])
        if(data["level"] == 1):
            #move to level 1 and move back humidity sensor
            instruction4 = str(level_1)+ "," + str(data["radius"])+ "," + "0"
            #shove seed into soil
            instruction5 = str(level_1)+ "," + str(data["radius"])+ "," + str(data["degree"])
            #raise it up
            instruction6 = str(level_1 - 8)+ "," + str(data["radius"])+ "," + str(data["degree"])
            instruction7 = str(level_1)+ "," + str(data["radius"])+ "," + str(data["degree"])
            print("here level 1")
        if(data["seed_container_level"] == 0):
            # to level 0
            instruction0 = str(level_0)+ "," + str(data["seed_container_radius"])+ "," + "0"
            # to seed position
            instruction1 = str(level_0)+ "," + str(data["seed_container_radius"])+ "," + str(data["seed_container_degree"])
            # down pickup seed
            instruction2 = str(level_0 - 4)+ "," + str(data["seed_container_radius"])+ "," + str(data["seed_container_degree"])
            # up to move back humidity sensor
            instruction3 = str(level_0)+ "," + str(data["seed_container_radius"])+ "," + str(data["seed_container_degree"])
            print("here level 0")
            print(instruction0)
        # NOTE(review): instruction0-3 exist only when seed_container_level == 0
        # and instruction4-7 only when level == 1; other combinations raise
        # NameError below — confirm these preconditions are guaranteed upstream.
        while complete == False:
            if(counter == 0):
                client1.publish("test/request",instruction0)
                while(counter != 1):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
                ser.write("grip_open".encode())
            elif(counter == 1):
                client1.publish("test/request", instruction1)
                while(counter != 2):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
            elif(counter == 2):
                client1.publish("test/request", instruction2)
                while(counter != 3):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
                time.sleep(1)
                ser.write("grip_close".encode())
                time.sleep(3)
            elif(counter == 3):
                client1.publish("test/request", instruction3)
                while(counter != 4):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
            elif(counter == 4):
                client1.publish("test/request", instruction4)
                while(counter != 5):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
            elif(counter == 5):
                client1.publish("test/request", instruction5)
                while(counter != 6):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
                ser.write("grip_open".encode())
                time.sleep(5)
            elif(counter == 6):
                time.sleep(1)
                ser.write("grip_close".encode())
                client1.publish("test/request", instruction6)
                while(counter != 7):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
            elif(counter == 7):
                client1.publish("test/request", instruction7)
                while(counter != 8):
                    time.sleep(1)
                    if (error_code == True):
                        error_code = False
                        break
                time.sleep(1)
                ser.write("grip_close".encode())
            elif(counter == 8):
                counter = 0
                complete = True
        complete = False
        print("seeding completed")
        client.publish("response", json.dumps({
            'task_id': data["task_id"],
            'succeeded': True,
        }))
    elif(data["task_type"] == "PD"):#soil humidity
        #move motor
        #engage humidity
        if(data["level"] == 1):
            # Waypoints: approach, probe down, retract (same shape as WT).
            instruction1 = str(level_1)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            instruction2 = str(level_1-4)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            instruction3 = str(level_1)+ "," + str(data["radius"]+4)+ "," + str(data["degree"])
            print(instruction1)
            print(instruction2)
            print(instruction3)
            while complete == False:
                if(counter == 0):
                    client1.publish("test/request",instruction1)
                    while(counter != 1 and error_code == False):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("soil".encode())
                elif(counter == 1):
                    client1.publish("test/request", instruction2)
                    while(counter != 2):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("a".encode())
                    time.sleep(3)
                    line = ser.read(100).decode()
                    data_line = line.split("\r\n")
                    print(data_line[0])
                elif(counter == 2):
                    client1.publish("test/request", instruction3)
                    while(counter != 3):
                        time.sleep(1)
                        if (error_code == True):
                            error_code = False
                            break
                    ser.write("a".encode())
                elif(counter == 3):
                    counter = 0
                    complete = True
            complete = False
            print("humidity completed")
            # NOTE(review): data_line may be unset if the counter==1 step was
            # skipped via error_code — confirm.
            client.publish("response", json.dumps({
                'task_id': data["task_id"],
                'succeeded': True,
                'humidity' : data_line[0]
            }))
    elif(data["task_type"] == "MC"):#manual control
        #move motor
        #engage humidity
        client.publish("response", json.dumps({
            'task_id': data["task_id"],
            'succeeded': True,
        }))
def on_message1(client, userdata, msg):
    """Handle motion-controller replies: advance the step counter on success,
    or raise the shared error flag on a "failed" reply."""
    global error_code
    global counter
    reply = msg.payload.decode('utf-8')
    print(reply)
    if reply == "failed":
        print("failed to connect, sleeping for 1 second")
        time.sleep(1)
        error_code = True
        time.sleep(1)
    else:
        counter += 1
    print(counter)
# Connect the BLE light strip once at startup (LC tasks reconnect per task).
device = tc.connect('58:82:04:00:09:2E')
tc.powerOn(device)
# Cloud-facing client: receives tasks on "request", answers on "response".
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLS)
# NOTE(review): credentials are hard-coded; move them to config/env vars.
client.username_pw_set('plantCondo', '!9KWdW#egQ7ch8L')
client.connect("84da454f982d4061a3e9339908532687.s1.eu.hivemq.cloud", 8883)
client.subscribe("request")
# Local client: talks to the motion controller via the LAN broker.
client1 = mqtt.Client()
client1.on_connect = on_connect1
client1.on_message = on_message1
client1.connect("192.168.0.164", 1883)
client1.subscribe("test/response")
# client1 runs in a background thread; the cloud client blocks this thread.
client1.loop_start()
client.loop_forever()
|
## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
##
## Sevopedia
## sevotastic.blogspot.com
## sevotastic@yahoo.com
##
from CvPythonExtensions import *
import CvUtil
import ScreenInput
import CvScreenEnums
import random
import string
# globals
gc = CyGlobalContext()
ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
class CvPediaLeader:
	"Civilopedia Screen for Leaders"

	def __init__(self, main):
		"Cache the parent pedia screen and precompute all panel geometry."
		self.iLeader = -1
		self.top = main
		#Rhye - start
		# Leaderhead portrait pane, anchored relative to the pedia page.
		self.X_LEADERHEAD_PANE = self.top.X_PEDIA_PAGE + 20
		self.Y_LEADERHEAD_PANE = 55
		self.W_LEADERHEAD_PANE = 325
		self.H_LEADERHEAD_PANE = 390
		# Animated leaderhead, centered inside the pane (+3px vertical nudge).
		self.W_LEADERHEAD = 320
		self.H_LEADERHEAD = 380
		self.X_LEADERHEAD = self.X_LEADERHEAD_PANE + ((self.W_LEADERHEAD_PANE - self.W_LEADERHEAD) / 2)
		self.Y_LEADERHEAD = self.Y_LEADERHEAD_PANE + ((self.H_LEADERHEAD_PANE - self.H_LEADERHEAD) / 2) +3
		# Civilization button to the right of the pane.
		self.W_CIV = 64
		self.H_CIV = 64
		self.X_CIV = self.X_LEADERHEAD_PANE + self.W_LEADERHEAD_PANE + 10
		self.Y_CIV = self.Y_LEADERHEAD_PANE + 5
		# Favorite-civic panel (currently unused; placeCivic is disabled below).
		self.X_CIVIC = self.X_LEADERHEAD_PANE + self.W_LEADERHEAD_PANE + 10
		self.Y_CIVIC = self.Y_LEADERHEAD_PANE
		self.W_CIVIC = 1000 - self.X_CIVIC
		self.H_CIVIC = 80
		# History text panel below the leaderhead pane.
		self.X_HISTORY = self.X_LEADERHEAD_PANE
		self.Y_HISTORY = self.Y_LEADERHEAD_PANE + self.H_LEADERHEAD_PANE + 5
		self.W_HISTORY = 1000 - self.X_HISTORY
		self.H_HISTORY = 700 - self.Y_HISTORY
		# Traits panel (currently unused; placeTraits is disabled below).
		self.X_TRAITS = self.X_LEADERHEAD_PANE + self.W_LEADERHEAD_PANE + 10
		self.Y_TRAITS = self.Y_CIVIC + self.H_CIVIC
		self.W_TRAITS = 1000 - self.X_TRAITS
		self.H_TRAITS = self.Y_HISTORY - self.Y_TRAITS
		#Rhye - end

	# Screen construction function
	def interfaceScreen(self, iLeader):
		"Build and show the pedia page for the given leader id."
		self.iLeader = iLeader
		self.top.deleteAllWidgets()
		screen = self.top.getScreen()
		bNotActive = (not screen.isActive())
		if bNotActive:
			self.top.setPediaCommonWidgets()
		# Header...
		szHeader = u"<font=4b>" + gc.getLeaderHeadInfo(self.iLeader).getDescription().upper() + u"</font>"
		szHeaderId = self.top.getNextWidgetName()
		screen.setLabel(szHeaderId, "Background", szHeader, CvUtil.FONT_CENTER_JUSTIFY, self.top.X_SCREEN, self.top.Y_TITLE, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_GENERAL, -1, -1)
		# Top menu link back to the leader list.
		screen.setText(self.top.getNextWidgetName(), "Background", self.top.MENU_TEXT, CvUtil.FONT_LEFT_JUSTIFY, self.top.X_MENU, self.top.Y_MENU, 0, FontTypes.TITLE_FONT, WidgetTypes.WIDGET_PEDIA_MAIN, CivilopediaPageTypes.CIVILOPEDIA_PAGE_LEADER, -1)
		# Only rebuild the link sidebar when coming from another screen type.
		if self.top.iLastScreen != CvScreenEnums.PEDIA_LEADER or bNotActive:
			if self.top.iLastScreen != CvScreenEnums.PEDIA_MAIN:
				self.placeLinks()
			self.top.iLastScreen = CvScreenEnums.PEDIA_LEADER
		# Leaderhead
		leaderPanelWidget = self.top.getNextWidgetName()
		screen.addPanel( leaderPanelWidget, "", "", true, true,
			self.X_LEADERHEAD_PANE, self.Y_LEADERHEAD_PANE, self.W_LEADERHEAD_PANE, self.H_LEADERHEAD_PANE, PanelStyles.PANEL_STYLE_BLUE50 )
		# Remember the widget name so handleInput can match click events.
		self.leaderWidget = self.top.getNextWidgetName()
		screen.addLeaderheadGFC(self.leaderWidget, self.iLeader, AttitudeTypes.ATTITUDE_PLEASED,
			self.X_LEADERHEAD, self.Y_LEADERHEAD, self.W_LEADERHEAD, self.H_LEADERHEAD, WidgetTypes.WIDGET_GENERAL, -1, -1)
		self.placeHistory()
		#self.placeCivic() #Rhye
		self.placeCiv()
		#self.placeTraits() #Rhye

	def placeCiv(self):
		"Place a jump-to-civ button for every civ led by this leader."
		screen = self.top.getScreen()
		for iCiv in range(gc.getNumCivilizationInfos()):
			civ = gc.getCivilizationInfo(iCiv)
			if civ.isLeaders(self.iLeader):
				screen.setImageButton(self.top.getNextWidgetName(), civ.getButton(), self.X_CIV, self.Y_CIV, self.W_CIV, self.H_CIV, WidgetTypes.WIDGET_PEDIA_JUMP_TO_CIV, iCiv, 1)

	def placeTraits(self):
		"Panel listing the leader's traits (currently not called)."
		screen = self.top.getScreen()
		panelName = self.top.getNextWidgetName()
		screen.addPanel( panelName, localText.getText("TXT_KEY_PEDIA_TRAITS", ()), "", true, false,
			self.X_TRAITS, self.Y_TRAITS, self.W_TRAITS, self.H_TRAITS, PanelStyles.PANEL_STYLE_BLUE50 )
		listName = self.top.getNextWidgetName()
		szSpecialText = CyGameTextMgr().parseLeaderTraits(self.iLeader, -1, False, True)
		screen.addMultilineText(listName, szSpecialText, self.X_TRAITS+5, self.Y_TRAITS+5, self.W_TRAITS-10, self.H_TRAITS-10, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY)

	def placeCivic(self):
		"Panel showing the leader's favorite civic (currently not called)."
		screen = self.top.getScreen()
		panelName = self.top.getNextWidgetName()
		screen.addPanel( panelName, localText.getText("TXT_KEY_PEDIA_FAV_CIVIC", ()), "", true, true,
			self.X_CIVIC, self.Y_CIVIC, self.W_CIVIC, self.H_CIVIC, PanelStyles.PANEL_STYLE_BLUE50 )
		listName = self.top.getNextWidgetName()
		screen.attachListBoxGFC( panelName, listName, "", TableStyles.TABLE_STYLE_EMPTY )
		screen.enableSelect(listName, False)
		iCivic = gc.getLeaderHeadInfo(self.iLeader).getFavoriteCivic()
		if (-1 != iCivic):
			szCivicText = gc.getCivicInfo(iCivic).getDescription()
			screen.appendListBoxString( listName, szCivicText, WidgetTypes.WIDGET_GENERAL, -1, -1, CvUtil.FONT_LEFT_JUSTIFY )

	def placeHistory(self):
		"Panel with the leader's civilopedia (history) text."
		screen = self.top.getScreen()
		panelName = self.top.getNextWidgetName()
		screen.addPanel( panelName, "", "", true, true,
			self.X_HISTORY, self.Y_HISTORY, self.W_HISTORY, self.H_HISTORY, PanelStyles.PANEL_STYLE_BLUE50 )
		historyTextName = self.top.getNextWidgetName()
		CivilopediaText = gc.getLeaderHeadInfo(self.iLeader).getCivilopedia()
		CivilopediaText = u"<font=2>" + CivilopediaText + u"</font>"
		screen.attachMultilineText( panelName, historyTextName, CivilopediaText,
			WidgetTypes.WIDGET_GENERAL,-1,-1, CvUtil.FONT_LEFT_JUSTIFY )

	def placeLinks(self):
		"Rebuild the pedia sidebar link lists."
		self.top.placeLinks()
		self.top.placeLeaders()

	# Will handle the input for this screen...
	def handleInput (self, inputClass):
		"Route input; returns 1 when the event closed the screen, else 0."
		if (inputClass.getData() == int(InputTypes.KB_RETURN) or inputClass.getData() == int(InputTypes.KB_ESCAPE)):
			self.top.getScreen().hideScreen()
			return 1
		if (inputClass.getNotifyCode() == NotifyCode.NOTIFY_CLICKED):
			# BUG FIX: the widget name is stored as self.leaderWidget in
			# interfaceScreen(); self.LEADER_WIDGET was never assigned and
			# raised AttributeError on any click on the leaderhead.
			if (inputClass.getFunctionName() == self.leaderWidget):
				if (inputClass.getFlags() & MouseFlags.MOUSE_LBUTTONUP):
					self.top.getScreen().performLeaderheadAction(self.leaderWidget, 0)
		return 0
|
import unittest
from hummingbot.connector.exchange.ascend_ex import ascend_ex_utils as utils
class AscendExUtilTestCases(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.hb_trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.ex_trading_pair = f"{cls.base_asset}{cls.quote_asset}"
def test_is_pair_information_valid(self):
invalid_info_1 = {
"statusCode": None,
}
self.assertFalse(utils.is_pair_information_valid(invalid_info_1))
invalid_info_2 = {
"statusCode": "",
}
self.assertFalse(utils.is_pair_information_valid(invalid_info_2))
invalid_info_3 = {
"statusCode": "Err",
}
self.assertFalse(utils.is_pair_information_valid(invalid_info_3))
invalid_info_4 = {
"statusCode": "Normal",
}
self.assertTrue(utils.is_pair_information_valid(invalid_info_4))
|
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from userena.forms import SignupFormTos
from django.conf import settings
from .models import AllowedMailDomain, AllowedEMailAddress
class MailDomainValidationForm(SignupFormTos):
    """Signup form that restricts registration to whitelisted mail domains
    or individually whitelisted e-mail addresses."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: super() must receive the *current* class; passing
        # SignupFormTos skipped SignupFormTos's own __init__ in the MRO.
        super(MailDomainValidationForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        """Add test to see whether email domain is allowed.

        Runs the parent chain's validations first, then rejects the address
        unless its domain — or the exact address — is whitelisted.

        Raises:
            ValidationError: when neither whitelist matches.
        """
        # do validations that already have been specified
        # BUG FIX: super(SignupFormTos, self) would skip any clean_email
        # defined on SignupFormTos itself; start the lookup after *this* class.
        super(MailDomainValidationForm, self).clean_email()
        data = self.cleaned_data['email']
        domain_whitelist = [d.domain.lower()
                            for d in AllowedMailDomain.objects.all()]
        domain = data.split('@')[1].lower()
        if domain not in domain_whitelist:
            email_whitelist = [e.email.lower()
                               for e in AllowedEMailAddress.objects.all()]
            email = data.lower()
            if email not in email_whitelist:
                msg = 'Automatic registration not possible for "%(email)s".\n' + \
                      'Please send an email to %(adminemail)s to request an ' \
                      'account for the ANDI portal.'
                raise ValidationError(
                    _(msg), code='invalid',
                    params={'email': data,
                            'adminemail': settings.ADMIN_EMAIL_ADDRESS},
                )
        return data
|
import psycopg2
import time
import sys
import stomp
import threading
import datetime
from commons import RandomUtils
from db_tools.postgresql import PG_Client
from mq_tools.amq import Amq_Conn
from mq_tools.amq.Amq_Conn import MessageListener
from mq_tools.amq.Amq_Conn import AmqConnetion
from main_test.egms import egms_location_msg_fun as locationMsgUtils
print('发送消息 .....')
'''
total = 10081;
pageSize = 100;
page = int(total/pageSize) + 1;
page = int(page)
for pageNo in range(1 , page) :
tagInfoList = locationMsgUtils.getTagListByPages(pageNo);
if tagInfoList and len(tagInfoList) > 0 :
for tagInfoItem in tagInfoList:
i_id = tagInfoItem['i_id']
locationMsgUtils.sendOnlineMessage(i_id);
'''
# Keep-alive worker: loops forever, re-sending location data for its tags.
class OneThread(threading.Thread):
    """Thread that repeatedly pushes location messages for a fixed tag list."""

    tagInfoList = None  # list of tag dicts, injected via setParams()

    def __init__(self):
        threading.Thread.__init__(self)

    def setParams(self, tagInfoList):
        self.tagInfoList = tagInfoList

    def run(self):
        """Send one location message per tag, sleep 5s, repeat forever.

        Exits immediately when the tag list is empty or was never set.
        """
        tags = self.tagInfoList
        while True:
            if not tags:
                return
            for tag in tags:
                locationMsgUtils.sendLocationData(tag['i_id'])
            print('第一轮消息发送成功。。。')
            time.sleep(5)
total = 10081;   # presumably the total tag count on the server — confirm
pageSize = 500;
page = int(total / pageSize) + 1;
page = int(page)
# Launch one keep-alive sender thread per page of tags.
# NOTE(review): range starts at 1 — confirm the service's pages are 1-based
# (page 0 is never fetched here).
for pageNo in range(1, page):
    tagInfoList = locationMsgUtils.getTagListByPages(pageNo);
    # NOTE(review): prints the page *count*, not pageNo — likely meant pageNo.
    print(' page : ' , page , ' 线程')
    # create a new worker thread for this page's tags
    thread2 = OneThread()
    thread2.setParams(tagInfoList);
    # start the thread
    thread2.start()
print('发送完毕')
# locationMsgUtils.sendOnlineMessage();
|
# D. Расстояние по Левенштейну
def main():
    """Read two words from stdin and print their Levenshtein distance."""
    first, second = input(), input()
    rows, cols = len(first), len(second)
    # dp[i][j] = edit distance between first[:i] and second[:j];
    # row 0 / column 0 are the costs of building from the empty string.
    dp = [list(range(cols + 1))] + [[i] + [0] * cols for i in range(1, rows + 1)]
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            substitution = dp[i - 1][j - 1] + (first[i - 1] != second[j - 1])
            dp[i][j] = min(dp[i - 1][j] + 1, dp[i][j - 1] + 1, substitution)
    print(dp[rows][cols])


if __name__ == '__main__':
    main()
|
import re
# Literal pattern; re.search reports the first occurrence anywhere in the string.
pattern = r'eggs'
if re.search(pattern, 'abceggseggseggsabc'):
    print('Match Found')
# findall returns every non-overlapping occurrence (three here).
print(re.findall(pattern, 'abceggseggseggsabc'))
|
# 1.Car
class Car:
    """A simple car record: make, model and engine designation."""

    def __init__(self, name, model, engine):
        self.name = name
        self.model = model
        self.engine = engine

    def get_info(self):
        """Return a human-readable one-line description of the car."""
        description = "This is {} {} with engine {}".format(self.name, self.model, self.engine)
        return description
# 2.Shop
class Shop:
    """A named shop holding a collection of items."""

    def __init__(self, name, items):
        self.name = name
        self.items = items

    def get_items_count(self):
        """Return how many items the shop currently holds."""
        return len(self.items)
# 3.Hero
class Hero:
    """A combat unit with hit points that can take damage and heal."""

    def __init__(self, name, health):
        self.name = name
        self.health = health

    def defend(self, damage):
        """Subtract damage; clamp health at 0 and report defeat if it drops."""
        remaining = self.health - damage
        if remaining > 0:
            self.health = remaining
            return None
        self.health = 0
        return f"{self.name} was defeated"

    def heal(self, amount):
        """Restore the given amount of health (no upper cap)."""
        self.health = self.health + amount
# 4.Steam_User
class SteamUser:
    """A Steam account with a game library and accumulated play time."""

    def __init__(self, username, games, played_hours=0):
        self.username = username
        self.games = games
        self.played_hours = played_hours

    def play(self, game, hours):
        """Log play time for an owned game; reject games not in the library."""
        if game not in self.games:
            return f"{game} is not in library"
        self.played_hours += hours
        return f"{self.username} is playing {game}"

    def buy_game(self, game):
        """Add a game to the library unless it is already owned."""
        if game in self.games:
            return f"{game} is already in your library"
        self.games.append(game)
        return f"{self.username} bought {game}"

    def stats(self):
        """Summarize library size and total play time."""
        return f"{self.username} has {len(self.games)} games. Total play time: {self.played_hours}"
# 5.Programmer
class Programmer:
    """A developer with one working language and a numeric skill score."""

    def __init__(self, name, language, skills):
        self.name = name
        self.language = language
        self.skills = skills

    def watch_course(self, course_name, language, skills_earned):
        """Gain skills from a course, but only if it matches the known language."""
        if language != self.language:
            return f"{self.name} does not know {language}"
        self.skills += skills_earned
        return f"{self.name} watched {course_name}"

    def change_language(self, new_language, skills_needed):
        """Switch languages when skilled enough; report progress otherwise."""
        if self.skills < skills_needed:
            return f"{self.name} needs {skills_needed-self.skills} more skills"
        if new_language == self.language:
            return f"{self.name} already knows {self.language}"
        previous = self.language
        self.language = new_language
        return f"{self.name} switched from {previous} to {new_language}"
|
import sys
import pandas as pd
import os
import subprocess
import xml.etree.ElementTree as ET
# Command-line contract: <script> <job_index> <num_jobs> (strided work split).
print("Script:", sys.argv[0])
print("Job Index:", sys.argv[1])
print("Num Jobs:", sys.argv[2])
JOB_INDEX = sys.argv[1]
NUM_JOBS = sys.argv[2]
# Retrieve list of required ncbi ids from the web of life lookup table
taxids = pd.read_csv("./ncbi_taxids.txt",
                     sep="\t")
# De-duplicate, then sort so every job sees the ids in the same order.
ncbi_ids = set([str(x) for x in taxids['ncbi_id'].tolist()])
print("Num Ids:", len(ncbi_ids))
ncbi_ids = sorted(list(ncbi_ids))
# Job k processes ids k, k+N, k+2N, ... so jobs never overlap.
for i in range(int(JOB_INDEX), len(ncbi_ids), int(NUM_JOBS)):
    ncbi_id = ncbi_ids[i]
    print(i, '/', len(ncbi_ids), ncbi_id)
    print(ncbi_id)
    # .fin marks a finished NetMHC run; .done marks an already-parsed one.
    if not os.path.exists("./netmhc/" + str(ncbi_id) + ".fin"):
        print("Skipping " + str(ncbi_id) + " (No .fin)")
        continue
    if os.path.exists("./netmhc/" + str(ncbi_id) + ".done"):
        print("Skipping " + str(ncbi_id) + " (Existence of .done says it's already parsed)")
        continue
    # Extract strong-binder lines ("<=SB") and pipe them through the parser;
    # shell=True is required for the pipeline/redirects, and ncbi_id comes
    # from a local lookup file, not untrusted input.
    cmd = subprocess.run('grep "<=SB" ./netmhc/' + str(ncbi_id) + '.mhc | python parse_strong_binders.py 2> ./netmhc/' + str(ncbi_id) + '.parseErr > ./netmhc/' + str(ncbi_id) + '.core',
                         shell=True)
    cmd = subprocess.run("touch ./netmhc/" + str(ncbi_id) + ".done", shell=True)
|
from builtins import range
from ..base import MLClassifierBase
from ..utils import get_matrix_in_format
from sklearn.neighbors import NearestNeighbors
import scipy.sparse as sparse
import numpy as np
class BinaryRelevanceKNN(MLClassifierBase):
    """Binary Relevance adapted kNN Multi-Label Classifier.

    Shared base for the BRkNN-a / BRkNN-b variants: fits a nearest-neighbour
    index over the input space and computes per-label confidences as the
    fraction of a sample's k neighbours that carry each label. Subclasses
    turn those confidences into predictions via ``predict_variant``.
    """

    def __init__(self, k=10):
        """Initializes the classifier

        Attributes
        ----------
        k : int (default is 10)
            number of neighbours
        """
        super(BinaryRelevanceKNN, self).__init__()
        self.k = k  # Number of neighbours
        self.copyable_attrs = ['k']  # parameters copied when cloning (MLClassifierBase convention)

    def fit(self, X, y):
        """Fit classifier with training data

        Internally this method uses a sparse CSC representation for y
        (:class:`scipy.sparse.csc_matrix`).

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse
            input features, can be a dense or sparse matrix of size
            :code:`(n_samples, n_features)`
        y : numpy.ndarray or scipy.sparse {0,1}
            binary indicator matrix with label assignments.

        Returns
        -------
        skmultilearn.adapt.brknn.BinaryRelevanceKNN
            fitted instance of self
        """
        self.train_labelspace = get_matrix_in_format(y, 'csc')
        self.num_instances = self.train_labelspace.shape[0]
        self.num_labels = self.train_labelspace.shape[1]
        # n_neighbors must be passed by keyword: scikit-learn made all
        # NearestNeighbors constructor parameters keyword-only in 1.0, so the
        # old positional call NearestNeighbors(self.k) raises a TypeError on
        # current releases.
        self.knn = NearestNeighbors(n_neighbors=self.k).fit(X)
        return self

    def compute_confidences(self):
        """Helper function to compute for the confidences

        For each test sample, sums the label rows of its k neighbours and
        divides by k, yielding per-label confidences in [0, 1]: the fraction
        of neighbours that carry each label. Stored in ``self.confidences``.
        """
        self.confidences = np.vstack([self.train_labelspace[n,:].tocsc().sum(axis=0) / self.k for n in self.neighbors])
        return self.confidences

    def predict(self, X):
        """Predict labels for X

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse.csc_matrix
            input features of shape :code:`(n_samples, n_features)`

        Returns
        -------
        scipy.sparse of int
            binary indicator matrix with label assignments with shape
            :code:`(n_samples, n_labels)`
        """
        # Look up neighbours once, derive confidences, then delegate the
        # actual label assignment to the subclass-specific variant.
        self.neighbors = self.knn.kneighbors(X, self.k, return_distance=False)
        self.compute_confidences()
        return self.predict_variant(X)
class BRkNNaClassifier(BinaryRelevanceKNN):
    """Binary Relevance multi-label classifier based on k-Nearest
    Neighbours method.

    This version of the classifier assigns the labels that are assigned
    to at least half of the neighbors.

    Attributes
    ----------
    k : int
        number of neighbours
    """

    def predict_variant(self, X):
        """Predict labels for X

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse.csc_matrix
            input features of shape :code:`(n_samples, n_features)`

        Returns
        -------
        scipy.sparse of int
            binary indicator matrix with label assignments with shape
            :code:`(n_samples, n_labels)`
        """
        # Each confidence is the fraction of neighbours carrying the label;
        # rounding to the nearest integer (np.rint, round-half-to-even)
        # assigns the label when roughly half or more of the neighbours have it.
        rounded_votes = np.rint(self.confidences)
        # TODO: find out if moving the sparsity to compute confidences boots speed
        return sparse.csr_matrix(rounded_votes, dtype='i8')
class BRkNNbClassifier(BinaryRelevanceKNN):
    """Binary Relevance multi-label classifier based on k-Nearest
    Neighbours method.

    This version of the classifier assigns the most popular m labels of
    the neighbors, where m is the average number of labels assigned to
    the object's neighbors.

    Attributes
    ----------
    k : int
        number of neighbours
    """

    def predict_variant(self, X):
        """Predict labels for X

        Parameters
        ----------
        X : numpy.ndarray or scipy.sparse.csc_matrix
            input features of shape :code:`(n_samples, n_features)`

        Returns
        -------
        scipy.sparse of int
            binary indicator matrix with label assignments with shape
            :code:`(n_samples, n_labels)`
        """
        # m for each test sample: the neighbours' label counts (row sums of
        # the training label space), averaged and rounded to the nearest int.
        self.avg_labels = [int(np.average(self.train_labelspace[n,:].sum(axis=1)).round()) for n in self.neighbors]
        prediction = sparse.lil_matrix((X.shape[0], self.num_labels), dtype='i8')
        # Partition each confidence row so larger values end up at the back,
        # then take the last avg_labels[i] indices per row as the top labels.
        # NOTE(review): kth is the minimum m across all samples (capped at
        # n_labels), so for samples whose m exceeds kth the tail of the
        # partition is not guaranteed to be their exact top-m -- confirm
        # this matches the intended BRkNN-b semantics.
        top_labels = np.argpartition(self.confidences, kth=min(self.avg_labels + [len(self.confidences[0])]), axis=1).tolist()
        for i in range(X.shape[0]):
            # Set the m highest-confidence labels for sample i.
            for j in top_labels[i][-self.avg_labels[i]:]:
                prediction[i,j] += 1
        return prediction
|
from migen import Module, Signal, If, Instance, ClockSignal
from litex.soc.integration.doc import ModuleDoc
from litex.soc.interconnect.csr import AutoCSR, CSRStatus, CSRStorage, CSRField
class SBLED(Module, AutoCSR):
    """CSR front-end for the ICE40's RGB LED hardware: the ``SB_LEDDA_IP``
    fade/"breathe" engine feeding the ``SB_RGBA_DRV`` constant-current
    driver, with per-board-revision remapping of the colour channels."""

    def __init__(self, revision, pads):
        # Final per-channel PWM levels fed into SB_RGBA_DRV. Each channel is
        # selected below to come either from SB_LEDDA_IP or from a RAW bit.
        rgba_pwm = Signal(3)
        self.intro = ModuleDoc("""RGB LED Controller

        The ICE40 contains two different RGB LED control devices. The first is a
        constant-current LED source, which is fixed to deliver 4 mA to each of the
        three LEDs. This block is called ``SB_RGBA_DRV``.

        The other is used for creating interesting fading effects, particularly
        for "breathing" effects used to indicate a given state. This block is called
        ``SB_LEDDA_IP``. This block feeds directly into ``SB_RGBA_DRV``.

        The RGB LED controller available on this device allows for control of these
        two LED control devices. Additionally, it is possible to disable ``SB_LEDDA_IP``
        and directly control the individual LEDs.
        """)

        self.dat = CSRStorage(8, description="""
            This is the value for the ``SB_LEDDA_IP.DAT`` register. It is directly
            written into the ``SB_LEDDA_IP`` hardware block, so you should
            refer to http://www.latticesemi.com/view_document?document_id=50668.
            The contents of this register are written to the address specified in
            ``ADDR`` immediately upon writing this register.""")

        self.addr = CSRStorage(4, description="""
            This register is directly connected to ``SB_LEDDA_IP.ADDR``. This
            register controls the address that is updated whenever ``DAT`` is
            written. Writing to this register has no immediate effect -- data
            isn't written until the ``DAT`` register is written.""")

        # Field order fixes the bit indices used below:
        # exe=0, curren=1, rgbleden=2, rraw=3, graw=4, braw=5.
        self.ctrl = CSRStorage(fields=[
            CSRField("exe", description="Connected to ``SB_LEDDA_IP.LEDDEXE``. Set this to ``1`` to enable the fading pattern."),
            CSRField("curren", description="Connected to ``SB_RGBA_DRV.CURREN``. Set this to ``1`` to enable the current source."),
            CSRField("rgbleden", description="Connected to ``SB_RGBA_DRV.RGBLEDEN``. Set this to ``1`` to enable the RGB PWM control logic."),
            CSRField("rraw", description="Set this to ``1`` to enable raw control of the red LED via the ``RAW.R`` register."),
            CSRField("graw", description="Set this to ``1`` to enable raw control of the green LED via the ``RAW.G`` register."),
            CSRField("braw", description="Set this to ``1`` to enable raw control of the blue LED via the ``RAW.B`` register."),
        ], description="Control logic for the RGB LED and LEDDA hardware PWM LED block.")

        self.raw = CSRStorage(fields=[
            CSRField("r", description="Raw value for the red LED when ``CTRL.RRAW`` is ``1``."),
            CSRField("g", description="Raw value for the green LED when ``CTRL.GRAW`` is ``1``."),
            CSRField("b", description="Raw value for the blue LED when ``CTRL.BRAW`` is ``1``."),
        ], description="""
            Normally the hardware ``SB_LEDDA_IP`` block controls the brightness of the LED,
            creating a gentle fading pattern. However, by setting the appropriate bit in ``CTRL``,
            it is possible to manually control the three individual LEDs.""")

        # PWM outputs of the SB_LEDDA_IP fade engine (one per colour channel).
        ledd_value = Signal(3)

        # Per-channel mux: when the matching CTRL.*RAW bit is set, drive the
        # channel from the RAW register; otherwise from SB_LEDDA_IP. The
        # raw/ledd index -> rgba_pwm channel permutation differs per board
        # revision -- NOTE(review): presumably because the LED pads are wired
        # to different RGBA_DRV outputs on each revision; confirm against the
        # board schematics.
        if revision == "pvt" or revision == "dvt":
            self.comb += [
                If(self.ctrl.storage[3], rgba_pwm[1].eq(self.raw.storage[0])).Else(rgba_pwm[1].eq(ledd_value[0])),
                If(self.ctrl.storage[4], rgba_pwm[0].eq(self.raw.storage[1])).Else(rgba_pwm[0].eq(ledd_value[1])),
                If(self.ctrl.storage[5], rgba_pwm[2].eq(self.raw.storage[2])).Else(rgba_pwm[2].eq(ledd_value[2])),
            ]
        elif revision == "evt":
            self.comb += [
                If(self.ctrl.storage[3], rgba_pwm[1].eq(self.raw.storage[0])).Else(rgba_pwm[1].eq(ledd_value[0])),
                If(self.ctrl.storage[4], rgba_pwm[2].eq(self.raw.storage[1])).Else(rgba_pwm[2].eq(ledd_value[1])),
                If(self.ctrl.storage[5], rgba_pwm[0].eq(self.raw.storage[2])).Else(rgba_pwm[0].eq(ledd_value[2])),
            ]
        elif revision == "hacker":
            self.comb += [
                If(self.ctrl.storage[3], rgba_pwm[2].eq(self.raw.storage[0])).Else(rgba_pwm[2].eq(ledd_value[0])),
                If(self.ctrl.storage[4], rgba_pwm[1].eq(self.raw.storage[1])).Else(rgba_pwm[1].eq(ledd_value[1])),
                If(self.ctrl.storage[5], rgba_pwm[0].eq(self.raw.storage[2])).Else(rgba_pwm[0].eq(ledd_value[2])),
            ]
        else:
            # Default wiring: identity mapping between channel index and pad.
            self.comb += [
                If(self.ctrl.storage[3], rgba_pwm[0].eq(self.raw.storage[0])).Else(rgba_pwm[0].eq(ledd_value[0])),
                If(self.ctrl.storage[4], rgba_pwm[1].eq(self.raw.storage[1])).Else(rgba_pwm[1].eq(ledd_value[1])),
                If(self.ctrl.storage[5], rgba_pwm[2].eq(self.raw.storage[2])).Else(rgba_pwm[2].eq(ledd_value[2])),
            ]

        # Constant-current RGB driver: takes the three muxed PWM signals and
        # drives the physical LED pads.
        self.specials += Instance("SB_RGBA_DRV",
            i_CURREN = self.ctrl.storage[1],
            i_RGBLEDEN = self.ctrl.storage[2],
            i_RGB0PWM = rgba_pwm[0],
            i_RGB1PWM = rgba_pwm[1],
            i_RGB2PWM = rgba_pwm[2],
            o_RGB0 = pads.r,
            o_RGB1 = pads.g,
            o_RGB2 = pads.b,
            p_CURRENT_MODE = "0b1",
            p_RGB0_CURRENT = "0b000011",
            p_RGB1_CURRENT = "0b000011",
            p_RGB2_CURRENT = "0b000011",
        )

        # Hardware fade engine. Register writes are strobed by dat.re (both
        # chip-select and data-enable), using the DAT/ADDR CSRs defined above.
        self.specials += Instance("SB_LEDDA_IP",
            i_LEDDCS = self.dat.re,
            i_LEDDCLK = ClockSignal(),
            i_LEDDDAT7 = self.dat.storage[7],
            i_LEDDDAT6 = self.dat.storage[6],
            i_LEDDDAT5 = self.dat.storage[5],
            i_LEDDDAT4 = self.dat.storage[4],
            i_LEDDDAT3 = self.dat.storage[3],
            i_LEDDDAT2 = self.dat.storage[2],
            i_LEDDDAT1 = self.dat.storage[1],
            i_LEDDDAT0 = self.dat.storage[0],
            i_LEDDADDR3 = self.addr.storage[3],
            i_LEDDADDR2 = self.addr.storage[2],
            i_LEDDADDR1 = self.addr.storage[1],
            i_LEDDADDR0 = self.addr.storage[0],
            i_LEDDDEN = self.dat.re,
            i_LEDDEXE = self.ctrl.storage[0],
            # o_LEDDON = led_is_on, # Indicates whether LED is on or not
            # i_LEDDRST = ResetSignal(), # This port doesn't actually exist
            o_PWMOUT0 = ledd_value[0],
            o_PWMOUT1 = ledd_value[1],
            o_PWMOUT2 = ledd_value[2],
            o_LEDDON = Signal(),
        )
|
#!/usr/bin/python3
##########################################################################
# Copyright (c) 2019 ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich,
# Attn: Systems Group.
#
# Script to control the Toradex boards over the USB UART
#
##########################################################################
import os
import sys
import time
import usb
import argparse
import cmd
class AOSBoard:
    """Controls a Toradex board through the CBUS pins of its FTDI USB UART."""

    BITMODE_CBUS = 0x20              # FTDI bitmode selector: CBUS bit-bang
    SIO_SET_BITMODE_REQUEST = 0x0b   # FTDI vendor request: SET_BITMODE

    def __init__(self, dev):
        # dev: pyusb device handle for the board's FTDI chip.
        self._dev = dev

    def _ftdi_set_bitmode(self, bitmask):
        """
        FTDIs CBUS bitmode expect the following value:
        CBUS Bits
        3210 3210
        |------ Output Control 0->LO, 1->HI
        |----------- Input/Output 0->Input, 1->Output
        This script assumes:
        - CBUS3 connected to RESET_EXT#
        - CBUS2 connected to OE# (recovery mode)
        """
        bm_request_type = usb.util.build_request_type(
            usb.util.CTRL_OUT,
            usb.util.CTRL_TYPE_VENDOR,
            usb.util.CTRL_RECIPIENT_DEVICE
        )
        # wValue of the FTDI SET_BITMODE request is 16 bits: the low byte
        # carries the CBUS direction/level bitmask, the high byte selects the
        # bitmode. The previous `<< 32` shifted the mode entirely out of the
        # 16-bit wValue, so CBUS bitmode was never actually selected.
        wValue = bitmask | (AOSBoard.BITMODE_CBUS << 8)
        self._dev.ctrl_transfer(bm_request_type, AOSBoard.SIO_SET_BITMODE_REQUEST, wValue)

    def on(self):
        # Set CBUS3 tristate, module run...
        self._ftdi_set_bitmode(0x00)

    def off(self):
        # Set CBUS3 low, module in reset...
        self._ftdi_set_bitmode(0x80)

    def reset(self):
        """Power-cycle the board: hold reset briefly, then release it."""
        self.off()
        time.sleep(0.1)
        self.on()

    def enter_recovery(self):
        """Reset the board with the recovery (OE#) line held low."""
        # Set recovery bit low
        self._ftdi_set_bitmode(0x40)
        time.sleep(0.2)
        # Set reset bit low
        self._ftdi_set_bitmode(0xC0)
        time.sleep(0.1)
        # Set reset bit tristate
        self._ftdi_set_bitmode(0x40)
        time.sleep(0.2)
        # Set recovery bit tristate
        self._ftdi_set_bitmode(0x00)
class BoardShell(cmd.Cmd):
    """Tiny interactive shell that forwards power-control commands to a board."""

    def __init__(self, board):
        super().__init__()
        self._board = board
        self.prompt = "aos-ctrl> "

    def do_on(self, line):
        "Turn the board on."
        self._board.on()

    def do_off(self, line):
        "Turn the board off."
        self._board.off()

    def do_reset(self, line):
        "Reset the board."
        self._board.reset()

    def do_recovery(self, line):
        "Reset the board and enter recovery mode."
        self._board.enter_recovery()

    def do_exit(self, line):
        "Exit the command prompt."
        return True

    def postcmd(self, stop, line):
        # Pass the stop flag through unchanged; do_exit's True ends cmdloop().
        return stop
def main(args):
    """Find the requested FTDI UART and run the board-control shell.

    Selects the first FTDI FT232 (vid 0x0403, pid 0x6001); when args.board is
    given, only a device with that serial number matches. If args.command is
    set, that single command is executed, otherwise the interactive loop runs.
    Exits with status 1 when no matching device is found.
    """
    dev = usb.core.find(
        custom_match=lambda d:
            d.idVendor == 0x0403 and
            d.idProduct == 0x6001 and
            (
                (args.board is None) or
                (d.serial_number == args.board)
            )
    )
    if dev is None:
        print("Board with serial '%s' not found." % (args.board), file=sys.stderr)
        # sys.exit instead of the built-in exit(): the builtin comes from the
        # site module and isn't guaranteed in non-interactive contexts.
        sys.exit(1)
    board = AOSBoard(dev)
    shell = BoardShell(board)
    if args.command is None:
        shell.cmdloop()
    else:
        shell.onecmd(args.command)
if __name__ == "__main__":
    # Command-line entry point: build the CLI and hand the result to main().
    cli = argparse.ArgumentParser(description="Controls the Toradex Colibri boards")
    cli.add_argument("--board", "-b", metavar="SERIAL", type=str,
                     help="Serial of the board to control")
    cli.add_argument("command", metavar="CMD", type=str, nargs="?",
                     help="Commands to run")
    main(cli.parse_args())
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# 批量查询企业的工商信息,注册号.组织机构代码等
import requests
import ssl
import time
import re
import sys
import urllib
from openpyxl import Workbook, load_workbook
def get_info(get_url):
    """Query xin.baidu.com's company search and return (pid_list, q_name_list).

    Both lists are scraped from the result page with regexes and are empty
    when nothing matched.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
        'Host': 'xin.baidu.com'
    }
    cookies = {
        'BDUSS': 'JPUmFMdnhDfkwwTG52dXk1dUVzVWZRcjlGVmdnQno5ckRwa2x6c3Jtc3NZcEJkSUFBQUFBJCQAAAAAAAAAAAEAAAAB-b5YssvE8bmkvt~N-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACzVaF0s1WhdRH'
    }
    res_obj = requests.get(url=get_url, headers=headers,
                           cookies=cookies)  # fetch the search-result page
    pid_z = re.compile(r'pid=(.+?)"')  # regex for the company pid
    q_name_z = re.compile(r'titleName":"(.+?)"')  # regex for the company name
    # The page embeds data as \uXXXX escapes; unicode_escape decodes them
    # before the regexes run.
    pid_list = pid_z.findall(res_obj.text.encode(
        'utf-8').decode('unicode_escape'))  # match pids, returns a list
    q_name_list = q_name_z.findall(res_obj.text.encode(
        'utf-8').decode('unicode_escape'))  # match company names, returns a list
    return pid_list, q_name_list
def get_info1(get_url1, q_name, name_word2):
    """Fetch a company's detail page and write one row to the output sheet.

    Parameters
    ----------
    get_url1 : str
        detail-page URL built from the company pid
    q_name : str
        company name found by the search ('None' when nothing matched)
    name_word2 : str
        company name as read from the input spreadsheet

    NOTE(review): depends on module-level globals set elsewhere in this
    script: pid, sheet, wb, row_num and get_url (used as the Referer).
    """
    headers1 = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1; PAR-AL00 Build/HUAWEIPAR-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044304 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070333) NetType/WIFI Language/zh_CN Process/tools',
        'Host': 'xin.baidu.com',
        'Referer': get_url
    }
    if pid[0] == 'None':  # search found no company: record ID and name only, then bail out
        sheet.cell(row_num, 1, row_num-1)  # insert ID
        sheet.cell(row_num, 2, name_word2)  # insert the company name
        wb.save(sys.path[0] + '/001.xlsx')  # save in Excel format
        return
    res_obj1 = requests.get(url=get_url1, headers=headers1)
    res_obj1_text = re.sub(r"[ \f\n\r\t\v]", r"", res_obj1.text, )  # strip spaces and line breaks
    qy_info_all = re.finditer(r'content">(.*?)<', res_obj1_text)  # match the company-info fields
    num = 2  # initial output column for scraped fields (advanced per field below)
    sheet.cell(row_num, 1, row_num-1)  # insert ID
    sheet.cell(row_num, 2, name_word2)  # insert the company name
    name_word2 = name_word2.replace(' ', '')  # drop regular spaces
    name_word2 = name_word2.replace('　', '')  # drop full-width spaces
    name_word2 = name_word2.replace(u"(", "(")  # normalise full-width parentheses
    name_word2 = name_word2.replace(u")", ")")
    print(name_word2)
    if q_name != name_word2:  # names differ: also record the name the search found
        sheet.cell(row_num, 3, q_name)  # write the searched-for name into column 3
    for qy_info in qy_info_all:  # insert every scraped company-info field
        num += 1
        res_obj2_text = re.sub(r'content">|<', r"",
                               qy_info.group(), )  # strip the surrounding markup
        print(row_num, res_obj2_text)
        sheet.cell(row_num, num+1, res_obj2_text)  # insert the field value
    sheet.cell(row_num, 21, get_url1)  # last column: the query URL
    wb.save(sys.path[0] + '/001.xlsx')  # save in Excel format
def excel_info():
    """Open the input workbook (1.xlsx) and create the output workbook.

    Populates the module-level globals used by the scraping loop below:
    table/rows (input sheet and its row count), wb/sheet (output workbook
    and active sheet), and row_num (current output row, starting past the
    header). get_url and name_word2 are declared here but assigned later.
    """
    # Open the source spreadsheet containing the company names to look up.
    excel = load_workbook(sys.path[0] + '/1.xlsx')
    global rows, table, sheet, wb, get_url, row_num, name_word2  # shared with get_info1 and the main loop
    # Subscript access replaces Workbook.get_sheet_by_name(), which was
    # deprecated and has been removed in current openpyxl releases.
    table = excel['Sheet1']
    rows = table.max_row  # number of input rows
    # cols = table.max_column
    excel_hed = [('ID', '公司名', '搜索到的公司名', '注册号', '组织机构代码', '税务登记证号', '法定代表人', '经营状态', '成立日期', '营业期限',
                  '审核/年检日期', '注册资本', '企业类型', '机构类型', '所属行业', '行政区划', '电话号码', '登记机关', '注册地址', '经营范围', '查询网址')]  # header row
    # ------------ openpyxl: build the output workbook ------------------
    wb = Workbook()  # new output workbook
    sheet = wb.active  # default sheet
    sheet.title = "工商企业信息"  # rename the sheet (runtime string kept as-is)
    row_num = 1  # row 1 holds the header; the loop below starts at row 2
    for row1 in excel_hed:  # write the header row
        sheet.append(row1)
# --- main script: look up each company and write its registration info ---
ssl._create_default_https_context = ssl._create_unverified_context  # ignore SSL certificate errors
excel_info()  # read the input workbook and create the output workbook
for row in range(1, rows):  # one iteration per company row in the input sheet
    row_num += 1
    name_word2 = table.cell(row=row+1, column=1).value  # company name from the input sheet
    name_word = urllib.parse.quote(name_word2)  # URL-encode the (Chinese) name
    get_url = 'https://xin.baidu.com/s/l?q=' + name_word  # build the search URL
    # Run the search; get back the pid and the company name that was actually
    # found (compared later against the input name to detect mismatches).
    pid, q_name = get_info(get_url)
    if pid == []:  # company not found: mark pid/name as empty sentinels
        pid = ['None']
        q_name = ['None']
    get_url1 = 'https://xin.baidu.com/m/basic?pid=' + \
        pid[0]  # detail-page URL for this pid
    print(get_url1)
    get_info1(get_url1, q_name[0], name_word2)  # fetch the detail page via the pid
    time.sleep(3)  # sleep 3 seconds before the next iteration
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.