text
stringlengths 8
6.05M
|
|---|
# Generated by Django 2.2.10 on 2020-02-24 10:47
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated Django schema migration: relaxes Message.course to be
# nullable and (re)declares the MessageStatus.message foreign key.
class Migration(migrations.Migration):

    # Must run after the previous auto-generated 'user' migration.
    dependencies = [
        ('user', '0002_auto_20200223_2349'),
    ]

    operations = [
        # Message.course is now optional (NULL allowed, default None);
        # deleting the Course cascades to its messages. Reverse accessor:
        # course.notices.
        migrations.AlterField(
            model_name='message',
            name='course',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notices', to='course.Course'),
        ),
        # Deleting a Message cascades to its MessageStatus rows
        # (reverse accessor: message.status).
        migrations.AlterField(
            model_name='messagestatus',
            name='message',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='user.Message'),
        ),
    ]
|
from Jumpscale import j
import netaddr
import random
import nacl
import os
def chat(bot):
    """
    Chatflow that deploys (or restores) a 3bot container on the grid.

    Walks the user through: identity lookup, optional restore password,
    ssh key + corex credentials, node selection, network configuration,
    optional backup setup, reservation registration, and finally shows
    the wireguard config and the access URL.

    :param bot: chatflow session object (provides *_ask / md_show / ...)
    """
    explorer = j.clients.explorer.explorer
    cl = j.clients.s3.get("deployer")
    AWS_ID = cl.accesskey_
    AWS_SECRET = cl.secretkey_
    user_info = bot.user_info()
    name = user_info["username"]
    email = user_info["email"]
    ips = ["IPv6", "IPv4"]
    choose = ["Deploy a new 3bot", "Restore my 3bot"]
    ip_range_choose = ["Specify IP Range", "Choose IP Range for me"]
    expiration = j.data.time.epoch + (60 * 60 * 24)  # reservation valid for one day
    backup_directory = name.replace(".", "_")  # backup folder name must not contain dots
    env = dict()
    secret_env = dict()
    if not name or not email:
        bot.md_show("Username or email not found in session. Please log in properly")
        # BUGFIX: execution previously continued with an invalid session;
        # abort here since every later step needs the user identity.
        return
    user_choice = bot.single_choice("This wizard will help you deploy or restore your 3bot.", choose)
    identity = explorer.users.get(name=name, email=email)
    identity_pubkey = identity.pubkey
    if user_choice == "Restore my 3bot":
        password = bot.secret_ask("Please enter the password you configured to backup your 3bot")
        # The restore hash is the backup password keyed with the user's public
        # key, so only this identity can restore the backup.
        hash_restore = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
    # ask user about corex user:password and ssh-key to give him full access to his container
    pub_key = None
    while not pub_key:
        pub_key = bot.string_ask(
            """Please add your public ssh key, this will allow you to access the deployed container using ssh.
Just copy your key from `~/.ssh/id_rsa.pub`"""
        )
    form = bot.new_form()
    user_corex = form.string_ask(
        "Please create a username for your 3bot (this will allow you secure access to the 3bot from your web browser)"
    )
    password = form.secret_ask("Please create a password for your 3bot")
    form.ask()
    # create new reservation
    reservation = j.sal.zosv2.reservation_create()
    ip_version = bot.single_choice("Do you prefer to access your 3bot using IPv4 or IPv6? If unsure, choose IPv4", ips)
    # Prefer an SSD-backed node (sru); fall back to HDD capacity (hru).
    node_selected = j.sal.chatflow.nodes_get(1, cru=4, sru=8, ip_version=ip_version)
    if len(node_selected) != 0:
        node_selected = node_selected[0]
    else:
        node_selected = j.sal.chatflow.nodes_get(1, cru=4, hru=8, ip_version=ip_version)
        # BUGFIX: condition was inverted (`!= 0`): the "no empty node" message
        # was shown exactly when a fallback node WAS found, and the code fell
        # through to index an empty list when none was available.
        if len(node_selected) == 0:
            res = "# We are sorry we don't have empty Node to deploy your 3bot"
            res = j.tools.jinja2.template_render(text=res, **locals())
            bot.md_show(res)
            return
        node_selected = node_selected[0]
    # Encrypt AWS ID and AWS Secret to send it in secret env
    aws_id_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_ID)
    aws_secret_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_SECRET)
    user_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, user_corex.value)
    password_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, password.value)
    # Create network of reservation and add peers
    if user_choice == "Restore my 3bot":
        hash_encrypt = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_restore)
        env.update({"restore": "True"})
        secret_env.update({"HASH": hash_encrypt})
    reservation, config = j.sal.chatflow.network_configure(
        bot, reservation, [node_selected], customer_tid=identity.id, ip_version=ip_version
    )
    ip_address = config["ip_addresses"][0]
    backup = bot.single_choice("Do you want your 3bot to be automatically backed up?", ["Yes", "No"])
    if backup == "Yes":
        password = bot.secret_ask(
            """The password you add here will be used to encrypt your backup to keep your 3bot safe.
please make sure to keep this password safe so you can later restore your 3bot.
Remember, this password will not be saved anywhere, so there cannot be recovery for it"""
        )
        hash_backup = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
        hash_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_backup)
        secret_env.update({"HASH": hash_encrypted})
        env.update({"backup": "True", "FOLDER": backup_directory})
    env.update({"pub_key": pub_key})
    secret_env.update(
        {
            "AWS_ID": aws_id_encrypted,
            "AWS_SECRET": aws_secret_encrypted,
            "corex_password": password_corex_encrypted,
            "corex_user": user_corex_encrypted,
        }
    )
    container_flist = "https://hub.grid.tf/bola_nasr_1/threefoldtech-3bot-corex.flist"
    entry_point = "/usr/bin/zinit init -d"
    storage_url = "zdb://hub.grid.tf:9900"
    # Add volume and create container schema
    vol = j.sal.zosv2.volume.create(reservation, node_selected.node_id, size=8)
    rid = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
    # create container
    cont = j.sal.zosv2.container.create(
        reservation=reservation,
        node_id=node_selected.node_id,
        network_name=config["name"],
        ip_address=ip_address,
        flist=container_flist,
        storage_url=storage_url,
        env=env,
        entrypoint=entry_point,
        cpu=4,
        memory=4096,
        secret_env=secret_env,
    )
    # Persist /sandbox/var on the dedicated volume so 3bot data survives restarts.
    j.sal.zosv2.volume.attach_existing(cont, vol, rid, "/sandbox/var")
    resv_id = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
    res = """# reservation sent. ID: {}
""".format(
        resv_id
    )
    bot.md_show(res)
    filename = "{}_{}.conf".format(name, resv_id)
    res = """
## Use the following template to configure your wireguard connection. This will give you access to your 3bot.
# Make sure you have wireguard ```https://www.wireguard.com/install/``` installed
## ```wg-quick up /etc/wireguard/{}```
Click next
to download your configuration
""".format(
        filename
    )
    res = j.tools.jinja2.template_render(text=j.core.text.strip(res), **locals())
    bot.md_show(res)
    res = j.tools.jinja2.template_render(text=config["wg"], **locals())
    bot.download_file(res, filename)
    res = "# Open your browser at ```{}:1500```".format(ip_address)
    res = j.tools.jinja2.template_render(text=res, **locals())
    bot.md_show(res)
|
# cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
import sdm
import sdm_utils
from numpy import *
def mem_write_x_at_x(count=10):
    # Write `count` random bitstrings into the SDM, each written at itself
    # (autoassociative storage: the same bitstring is address and data).
    for i in range (count):
        b=sdm.Bitstring()
        sdm.thread_write(b,b)
def mem_write_x_at_random(count=10):
    """Perform `count` writes, each pairing two independent random
    bitstrings (unlike the autoassociative x-at-x writes)."""
    for _ in range(count):
        first = sdm.Bitstring()
        second = sdm.Bitstring()
        sdm.thread_write(first, second)
def linhares_fig7_1():
    """Reproduce figure 7.1: initialize a fresh SDM, compute the table 7.1
    curve and plot it."""
    import sdm
    import sdm_utils
    import pylab
    sdm.initialize()
    curve = sdm_utils.table_7_1()
    pylab.plot(curve)
    pylab.show()
def linhares_critical1():
    #cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
    # NOTE: Python-2-only (`print` statement below); time.clock() was
    # removed in Python 3.8.
    import sdm
    import sdm_utils
    import time
    start=time.clock()
    #sdm.initialize()
    # Resume from a saved snapshot (filename suggests 45000 x-at-x items
    # already stored -- TODO confirm), then add 5000 more.
    sdm.initialize_from_file("/Users/AL/Desktop/mem45000_n1000_10000x_at_x.sdm")
    mem_write_x_at_x(5000)
    v = sdm.Bitstring()
    sdm.thread_write(v,v)
    print ("computing distances graph")
    print (time.clock()-start, "seconds")
    # Critical-distance sweep around probe v (presumably distances 0..1000
    # in steps of 1 -- verify against sdm_utils.critical_distance2).
    a = sdm_utils.critical_distance2(0, 1000, 1, v)
    print (time.clock()-start)
    print "saving file"
    sdm.save_to_file("/Users/AL/Desktop/mem50000_n1000_10000x_at_x.sdm")
    import pylab
    pylab.plot(a)
    pylab.show()
def scan_for_distances():
    # Python 2 only: uses cPickle and `print` statements.
    import time, cPickle;
    sdm.initialize()
    v = sdm.Bitstring()
    # Reinforce probe v with ten writes of v at v.
    for i in range (0,10,1):
        sdm.thread_write(v,v)
    import pylab
    # Grow the memory in steps of 1000 items up to 50000; after each step,
    # measure the critical-distance curve and pickle it to disk.
    for i in range (1000,51000,1000):
        print 'Computing distances for '+str(i)+' items registered'
        #add 1000 itens to memory
        mem_write_x_at_x(1000)
        a = sdm_utils.critical_distance2(0, 1000, 1, v, read=sdm.thread_read_chada)
        #get new distance values in a
        #save a
        cPickle.dump(a, open (str(i)+'10writes_Chada_Read.cPickle', 'wb'))
        print 'saved '+str(i)+'.cPickle'
    #print 'now lets see..'
    #for i in range (1000,11000,1000):
    #    print (cPickle.load(open(str(i)+'.cPickle','rb')))
    #from pylab import *
def TestFig1():
    # Python 2 only (cPickle). Load every saved critical-distance curve
    # (1000..50000 stored items) and plot them on one figure.
    import os, cPickle
    #os.chdir ("results/6_iter_readng/1000D/DrK_Read/x_at_x/")
    import pylab
    for i in range (1000,51000,1000):
        a = (cPickle.load(open(str(i)+'_10writes.cPickle','rb')))
        pylab.plot(a)
    pylab.show()
from matplotlib.pylab import *
def Plot_Heatmap (data=[]):
    # Render `data` (2-D array; y-axis = thousands of stored items, x-axis =
    # original distance, per the axis labels below) as a heatmap with a
    # colorbar plus a contour overlay. Python 2 only (`print` statements).
    # NOTE(review): mutable default `data=[]` is shared across calls --
    # callers should always pass their own array.
    # Make plot with vertical (default) colorbar
    maxd = int(data.max())
    mind = int(data.min())
    avgd = int ((maxd+mind) / 2);
    print 'minimum value=',mind
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #use aspect=20 when N=1000
    #use aspect=5 when N=256
    cax = ax.imshow(data, cmap=cm.YlGnBu, aspect=5.0, interpolation=None, norm=None, origin='lower')
    ax.set_title('Critical Distance Behavior', fontsize=58)
    ax.grid(True, label='Distance')
    ax.set_xlabel('original distance', fontsize=100)
    ax.set_ylabel("# items previously stored (000's)")
    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar = fig.colorbar(cax, ticks=[mind, avgd, maxd]) #had ZERO here before
    cbar.ax.set_yticklabels([str(mind), str(avgd), str(maxd)])
    cbar.ax.set_ylabel('distance obtained after 20 iteractive-readings', fontsize=24)
    #########CONTOUR DELINEATES THE CRITICAL DISTANCE
    # We are using automatic selection of contour levels;
    # this is usually not such a good idea, because they don't
    # occur on nice boundaries, but we do it here for purposes
    # of illustration.
    CS = contourf(data, 100, levels = [mind,avgd,maxd], alpha=0.1, cmap=cm.YlGnBu, origin='lower')
    # Note that in the following, we explicitly pass in a subset of
    # the contour levels used for the filled contours. Alternatively,
    # We could pass in additional levels to provide extra resolution,
    # or leave out the levels kwarg to use all of the original levels.
    CS2 = contour(CS, levels=[88], colors = 'gray', origin='lower', hold='on', linestyles='dashdot')
    title('Critical Distance Behavior', fontsize=40)
    xlabel('original distance', fontsize=24)
    ylabel("# items previously stored (000's)", fontsize=24)
    # Add the contour line levels to the colorbar
    #cbar.add_lines(CS2)
    show()
from matplotlib.pylab import *
import os, cPickle
def GetDataForPlots(folder='',filenameext='MUST_BE_PROVIDED'):
    # Load 50 pickled result files (1000..50000 stored items), concatenate
    # them, keep column 1, and reshape into a (50, 256) array.
    # Python 2 only (cPickle, `print` statements).
    # NOTE(review): p=q=r=s=[] binds all four names to the SAME list; this is
    # harmless here only because q and r are rebound below and s is unused.
    p=q=r=s=[]
    if len(folder)>0: os.chdir (folder)
    for i in range(1,51):
        S = 'N=256_iter_read=2_'+str(i*1000)+filenameext+'.cPickle'
        p.append( (cPickle.load(open(S,'rb') ) ) )
    q=concatenate(p,axis=0)
    r = q[:,1]  # keep only column 1 of the concatenated results
    print len(r)
    print '& shape (r)=',shape(r)
    r.shape=(50,256) #if N=256
    #r.shape=(50,1000)
    print 'r=',r
    return r
def now():
    # Convenience entry point: load the current dataset and show the heatmap.
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/1_write", '')
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/10_writes", '_10writes')
    data=GetDataForPlots('','saved items_x_at_x_0_writes_DrK_cubed')
    Plot_Heatmap (data)
|
from django.conf.urls import *
from tagging_autocomplete.views import list_tags
# URLconf for the tagging-autocomplete app: one endpoint serving the tag
# list consumed by autocomplete widgets.
urlpatterns = [
    # 'tagging_autocomplete.views',
    url(r'^list$', list_tags, name='tagging_autocomplete-list'),
]
|
import numpy
from numpy import array
from numpy import mean
from numpy import cov, var
from PIL import Image
from numpy.linalg import eigh, norm
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
import math
# --- Load data and build the eigenface (PCA) basis ---------------------------
# ts.txt:  one "imagepath-label" entry per line (training set)
# tss.txt: one image path per line (test set)
f = open('ts.txt', 'r')
train_images = []
tf = open('tss.txt', 'r')
test_images = []
line_list1 = f.readlines()
#line_list1.pop()
line_list2 = tf.readlines()
for line in line_list1:
    line = line.split("-")
    # (flattened 64x64 grayscale pixel vector, class label)
    train_images.append(
        ((numpy.asarray(Image.open(line[0]).convert('L').resize((64, 64))).flatten()), line[1]))
for line in line_list2:
    test_images.append(numpy.asarray(Image.open(line.split('\n')[0]).convert('L').resize((64, 64))).flatten())
images = []
for (image, name) in train_images:
    images.append(image)
matrix = numpy.asarray(images)
#print(matrix)
avg = mean(matrix.T, axis=1)      # per-pixel mean over the training set
center = matrix - avg             # mean-centred training data
variance = cov(center.T)          # pixel covariance matrix
values, vectors = eigh(variance)  # eigh returns eigenvalues in ascending order
# Keep 32 components. NOTE(review): numpy.flip with no axis reverses BOTH
# axes of `vectors` -- verify this selects the intended principal components.
feat_vec = numpy.flip(vectors)[:,:32]
norm_line = feat_vec.T.dot(center.T)  # project training data onto the basis
vec = feat_vec
line = norm_line.T
avg = avg
# --- Per-class Gaussian parameters in eigenspace -----------------------------
# Group projected training vectors by label, then compute a per-component
# mean and variance for each class (diagonal-covariance model).
classed_eigen = dict()
for index, arr in enumerate(line):
    if train_images[index][1] not in classed_eigen:
        classed_eigen[train_images[index][1]] = list()
    classed_eigen[train_images[index][1]].append(arr)
for key in classed_eigen:
    classed_eigen[key] = numpy.asarray(classed_eigen[key])
avgg = {}  # label -> per-component mean
vari = {}  # label -> per-component variance
for name in classed_eigen:
    arr = classed_eigen[name]
    mu = [mean(col) for col in arr.T]
    sigma_sq = var(arr.T, axis=1)
    if name not in avgg:
        avgg[name] = 0
        vari[name] = 0
    avgg[name] = mu
    vari[name] = sigma_sq
meuu = avgg
sigsq = vari
# --- Classify test images with a naive-Bayes (diagonal Gaussian) rule --------
matr = numpy.asarray(test_images)
#print(matr)
cc = matr - avg                   # centre test data with the TRAINING mean
test_norm_line = vec.T.dot(cc.T)  # project into the same eigenspace
test_line = test_norm_line.T
prod = 1
max_val = -9999
max_class = list()
for vec in test_line:  # NOTE(review): rebinds `vec` (previously the PCA basis)
    temp_name = 'X'
    max_val = -9999
    for name in meuu:
        prod = 1
        # Likelihood product over independent components;
        # 2 * 3.14 approximates 2*pi in the Gaussian normaliser.
        for index in range(len(vec)):
            p_x_1 = (2 * 3.14 * sigsq[name][index]) ** 0.5
            ra = (-(vec[index] - meuu[name][index]) ** 2) / (2*sigsq[name][index])
            p_x_2 = math.exp(ra)
            p_x = p_x_2/p_x_1
            prod *= p_x
        # Keep the class with the highest likelihood product.
        if prod > max_val:
            max_val = prod
            temp_name = name
    max_class.append(temp_name)
names = max_class
#print((len(train_images)/6), ' Images per Class have been used to Train the Model')
#print('Using ', len(test_images), ' Images per Class have been used to Test the Model')
#print('\n Training Data Size: ', len(train_images))
#print('Testing Data Size: ', len(test_images))
droness = list()
fjets = list()
helicopts = list()
missiles = list()
pplanes = list()
rockets = list()
#dronesfound = 0;fjetsfound = 0;helicoptersfound = 0
#missilesfound = 0;pplanesfound = 0;rocketsfound = 0
# Per-class confusion-matrix counters, suffixed by class initial:
# d=drone, f=fighterjet, h=helicopter, m=missile, p=passengerplane, r=rocket.
tnd = 0; fnd = 0; fpd = 0; tpd = 0;
tnf = 0; fnf = 0; fpf = 0; tpf = 0;
tnh = 0; fnh = 0; fph = 0; tph = 0;
tnm = 0; fnm = 0; fpm = 0; tpm = 0;
tnp = 0; fnp = 0; fpp = 0; tpp = 0;
tnr = 0; fnr = 0; fpr = 0; tpr = 0;
# --- Confusion-matrix accumulation -------------------------------------------
# The test set is ordered in six equal consecutive segments, one per class:
# drones, fighterjets, helicopters, missiles, passengerplanes, rockets.
# The original expanded every (true class, predicted class) pair by hand
# (~270 copy-pasted lines); this is the identical bookkeeping written once.
_class_keys = ['d', 'f', 'h', 'm', 'p', 'r']
_tp = {k: 0 for k in _class_keys}
_fp = {k: 0 for k in _class_keys}
_fn = {k: 0 for k in _class_keys}
_tn = {k: 0 for k in _class_keys}
for count in range(len(test_images)):
    # Segment index 0..5 gives the true class:
    # count < n/6 -> 0, n/6 <= count < 2n/6 -> 1, ... (floor(6*count/n)).
    true_key = _class_keys[(count * 6) // len(test_images)]
    pred_key = (names[count])[0]
    if pred_key not in _class_keys:
        continue  # mirror the original: unrecognised predictions are not counted
    if pred_key == true_key:
        _tp[true_key] += 1
    else:
        _fn[true_key] += 1
        _fp[pred_key] += 1
    # Every class that is neither the true nor the predicted label of this
    # sample scores a true negative (5 classes on a hit, 4 on a miss --
    # exactly as in the original expanded branches).
    for other in _class_keys:
        if other != true_key and other != pred_key:
            _tn[other] += 1
    #print('' ,(count+1), 'is a ', names[count])
# Unpack into the flat counter names consumed by the report below.
tnd, fnd, fpd, tpd = _tn['d'], _fn['d'], _fp['d'], _tp['d']
tnf, fnf, fpf, tpf = _tn['f'], _fn['f'], _fp['f'], _tp['f']
tnh, fnh, fph, tph = _tn['h'], _fn['h'], _fp['h'], _tp['h']
tnm, fnm, fpm, tpm = _tn['m'], _fn['m'], _fp['m'], _tp['m']
tnp, fnp, fpp, tpp = _tn['p'], _fn['p'], _fp['p'], _tp['p']
tnr, fnr, fpr, tpr = _tn['r'], _fn['r'], _fp['r'], _tp['r']
# --- Report: one 2x2 confusion matrix per class, printed two per row ---------
print('\n Confusion Matrix for Drones Confusion Matrix for FighterJets\n')
print(' TN : ',tnd,' FP : ',fpd,' TN : ',tnf,' FP : ',fpf)
print(' FN : ',fnd,' TP : ',tpd,' FN : ',fnf,' TP : ',tpf)
print('\n')
print('\n Confusion Matrix for Helicopters Confusion Matrix for Missiles\n')
print(' TN : ',tnh,' FP : ',fph,' TN : ',tnm,' FP : ',fpm)
print(' FN : ',fnh,' TP : ',tph,' FN : ',fnm,' TP : ',tpm)
print('\n')
print('\n Confusion Matrix for PassengerPlanes Confusion Matrix for Rockets\n')
print(' TN : ',tnp,' FP : ',fpp,' TN : ',tnr,' FP : ',fpr)
print(' FN : ',fnp,' TP : ',tpp,' FN : ',fnr,' TP : ',tpr)
print('\n')
|
# author: Wenrui Zhang
# email: wenruizhang@ucsb.edu
#
# install required package using: pip install -r requirements.txt
# run the code: python main.py
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
import sklearn.model_selection as model_s
from sklearn import neighbors
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix
# The eight Pokemon classes every classifier below predicts, and the
# three-letter abbreviations used as confusion-matrix axis labels.
classes = ['Bulbasaur', 'Sudowoodo', 'Charmander', 'Gastly', 'Jigglypuff', 'Pidgey', 'Pikachu', 'Squirtle']
short_classes = ['Bul', 'Sud', 'Cha', 'Gas', 'Jig', 'Pid', 'Pik', 'Squ']
def preprocessing(data, labels=None):  # preprocess data
    """Normalise raw samples; optionally encode labels and split the data.

    With `labels` (training mode): returns (train_x, validate_x, train_y,
    validate_y) from an 80/20 split. Without: returns the scaled samples.
    """
    global classes
    # Column 9 holds gender as 'F'/'M'; map it to 0.0/1.0 so every feature
    # is numeric, then standardise all features.
    samples = []
    for sample in data:
        row = list(sample)
        row[9] = float(0) if sample[9] == 'F' else float(1)
        samples.append(np.asarray([float(v) for v in row], dtype=np.float32))
    samples = StandardScaler().fit_transform(samples)
    if labels is None:  # testing mode
        return samples
    # Training mode: labels become integers 0-7, then split train/validation.
    encoder = LabelEncoder()
    encoder.fit(classes)
    encoded = encoder.transform(labels)
    tr_x, va_x, tr_y, va_y = model_s.train_test_split(samples, encoded, test_size=0.2, random_state=100)
    return tr_x, va_x, tr_y, va_y
def k_nearest_neighbor(train_x, train_y, validate_x, validate_y):
    """Grid-search neighbour count and weighting for KNN, report the best
    validation score, and save its confusion matrix to CM_KNN.png."""
    best_score, best_k, best_weighting = 0, None, None
    for k in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
        for weighting in ['uniform', 'distance']:
            model = neighbors.KNeighborsClassifier(k, weights=weighting)
            model.fit(train_x, train_y)
            current = model.score(validate_x, validate_y)
            # ties go to the most recent configuration, as in the original
            if current >= best_score:
                best_score, best_k, best_weighting = current, k, weighting
            print("number of neighbors: ", k, ", weights: ", weighting)
            print(current)
    print("final result of KNN")
    print("number of neighbors: ", best_k, ", weights: ", best_weighting)
    print(best_score)
    # refit the winning configuration for the confusion-matrix plot
    model = neighbors.KNeighborsClassifier(best_k, weights=best_weighting)
    model.fit(train_x, train_y)
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of k-Nearest Neighbors")
    plt.savefig("CM_KNN.png")
def naive_bayes(train_x, train_y, validate_x, validate_y):
    """Fit a Gaussian naive-Bayes classifier, print its validation score,
    and save the confusion matrix to CM_GNB.png."""
    model = GaussianNB()
    model.fit(train_x, train_y)
    accuracy = model.score(validate_x, validate_y)
    print("result of Gaussian Naive Bayes")
    print(accuracy)
    # confusion matrix on the validation split
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of Gaussian Naive Bayes")
    plt.savefig("CM_GNB.png")
def svm(train_x, train_y, validate_x, validate_y):
    """Grid-search an SVC over kernel/C/gamma, print the best validation
    score and parameters, and save the confusion matrix to CM_SVM.png."""
    search_space = {
        'kernel': ('linear', 'rbf', 'poly'),
        'C': [1, 10, 100, 1000],
        'gamma': [0.1, 0.01, 0.001, 0.0001],
    }
    # GridSearchCV refits the best combination on the training data
    model = model_s.GridSearchCV(SVC(), search_space)
    model.fit(train_x, train_y)
    accuracy = model.score(validate_x, validate_y)
    print("result of SVM")
    print(accuracy)
    print(model.best_params_)  # winning hyper-parameter combination
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of SVM")
    plt.savefig("CM_SVM.png")
def decision_tree(train_x, train_y, validate_x, validate_y):
    """Fit a decision tree, print its validation score, and save the
    confusion matrix to CM_DT.png."""
    model = DecisionTreeClassifier()
    model.fit(train_x, train_y)
    accuracy = model.score(validate_x, validate_y)
    print("result of decision tree")
    print(accuracy)
    # confusion matrix on the validation split
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of decision tree")
    plt.savefig("CM_DT.png")
def lda(train_x, train_y, validate_x, validate_y):
    """Fit a linear discriminant analysis classifier, print its validation
    score, and save the confusion matrix to CM_LDA.png."""
    model = LinearDiscriminantAnalysis()
    model.fit(train_x, train_y)
    accuracy = model.score(validate_x, validate_y)
    print("result of LDA")
    print(accuracy)
    # confusion matrix on the validation split
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of LDA")
    plt.savefig("CM_LDA.png")
def random_forest(train_x, train_y, validate_x, validate_y):
    """Fit a random forest, print its validation score, and save the
    confusion matrix to CM_RF.png."""
    model = RandomForestClassifier()
    model.fit(train_x, train_y)
    accuracy = model.score(validate_x, validate_y)
    print("result of random forest")
    print(accuracy)
    # confusion matrix on the validation split
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of random forest")
    plt.savefig("CM_RF.png")
def mlp(train_x, train_y, validate_x, validate_y):
    """Try several MLP architectures, report the best validation accuracy,
    save its confusion matrix to CM_MLP.png, and return the best layout."""
    layouts = [(400,), (800,), (1200,), (1600,), (2000,), (200, 200,),
               (400, 400,), (800, 800,), (400, 400, 400,), (400, 400, 400, 400,)]
    best_score, best_layout = 0, None
    for layout in layouts:
        model = MLPClassifier(hidden_layer_sizes=layout)
        model.fit(train_x, train_y)
        current = model.score(validate_x, validate_y)
        # strict improvement required, matching the original tie handling
        if current > best_score:
            best_score, best_layout = current, layout
        print("network size: ", layout)
        print(current)
    print("result of MLP")
    print("network size: ", best_layout)
    print(best_score)
    # best result is reported; refit the winning architecture for the plot
    model = MLPClassifier(hidden_layer_sizes=best_layout)
    model.fit(train_x, train_y)
    disp = plot_confusion_matrix(model, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of MLP")
    plt.savefig("CM_MLP.png")
    return best_layout
def mlp_predict(train_x, train_y, test_x, best_hidden):
    """Fit an MLP with the chosen architecture on all training data, predict
    the test set, and save the decoded class names to pokemon_test_y.npy."""
    global classes
    model = MLPClassifier(hidden_layer_sizes=best_hidden)
    model.fit(train_x, train_y)
    raw_predictions = model.predict(test_x)
    # decode integer predictions back to Pokemon names
    encoder = LabelEncoder()
    encoder.fit(classes)
    decoded = encoder.inverse_transform(raw_predictions)
    np.save("pokemon_test_y.npy", decoded)
    print(decoded)
# --- Driver: load the dataset, evaluate every classifier, then predict -------
# load data
train_x = np.load("pokemon_train_x.npy")
train_y = np.load("pokemon_train_y.npy")
test_x = np.load("pokemon_test_x.npy")
train_x, validate_x, train_y, validate_y = preprocessing(train_x, train_y)
k_nearest_neighbor(train_x, train_y, validate_x, validate_y)
naive_bayes(train_x, train_y, validate_x, validate_y)
svm(train_x, train_y, validate_x, validate_y)
decision_tree(train_x, train_y, validate_x, validate_y)
lda(train_x, train_y, validate_x, validate_y)
random_forest(train_x, train_y, validate_x, validate_y)
best_hidden = mlp(train_x, train_y, validate_x, validate_y)
# retrain on ALL labelled data (train + validation) for the final prediction
x = np.concatenate((train_x, validate_x), axis=0)
y = np.concatenate((train_y, validate_y), axis=0)
test_x = preprocessing(test_x)
mlp_predict(x, y, test_x, best_hidden)
|
from Pages.drag_drop import DragDropPage
from Utils.locators import DragDropLocators
import time
from Utils.Logger import Logging
import allure
from allure_commons.types import AttachmentType
@allure.severity(allure.severity_level.NORMAL)
class Test_DragDrop:
    # Selenium/pytest UI test: performs a drag-and-drop on the target page
    # and attaches a screenshot to the Allure report.
    logger = Logging.loggen()

    ##################
    @allure.severity(allure.severity_level.BLOCKER)
    def test_drag_drop(self, test_setup):
        # `test_setup` is a pytest fixture providing a configured WebDriver.
        self.logger.info("*************** Test_001_Drag And Drop *****************")
        self.logger.info("*************** Drag & Drop Test Started *****************")
        self.driver = test_setup
        self.driver.get(DragDropLocators.DragDropUrl)
        self.obj = DragDropPage(self.driver)
        self.obj.drag_and_drop()
        self.logger.info("**** Drag & Drop Test Passed ****")
        # NOTE(review): fixed sleep before the screenshot; an explicit wait
        # on the drop target would be more robust.
        time.sleep(3)
        self.driver.save_screenshot(".\\Screenshots\\" + "test_drag&drop.png")
        allure.attach(self.driver.get_screenshot_as_png(), name="testDrag&Drop", attachment_type=AttachmentType.PNG)
        # close browser
        self.driver.close()
# pytest -v -s --alluredir=".\AllureReports\Drag&Drop" Tests\test_drag_drop.py
# pytest -v --html=PytestReports\drag&drop_report.html Tests\test_drag_drop.py
|
from django.shortcuts import render, get_object_or_404
from decimal import Decimal
from django.conf import settings
from django.urls import reverse
from paypal.standard.forms import PayPalPaymentsForm
from django.views.decorators.csrf import csrf_exempt
import pymysql
# Module-level MySQL connection shared by the views below.
# NOTE(review): credentials are hard-coded placeholders -- move them to
# settings/environment variables. A single process-global connection also
# has no reconnect handling; `a` below is an unused module-level cursor.
connection = pymysql.connect(host='localhost',user='root',password='Enter Your DB Password',db='busroad')
a =connection.cursor()
@csrf_exempt
def payment_done(request):
    # PayPal redirects the user here after a successful payment (CSRF exempt
    # because the request does not originate from one of our own forms).
    return render(request, 'main/done.html')
@csrf_exempt
def payment_canceled(request):
    # PayPal redirects the user here when the payment is cancelled (CSRF
    # exempt because the request does not originate from our own forms).
    return render(request, 'main/canceled.html')
def payment_process(request):
    """Look up the requested bus route and render the PayPal payment form.

    BUGFIX: `fromStation`, `toStation` and `dte` were referenced without ever
    being defined in this scope, so the view always raised NameError. They
    are now read from the request parameters.
    """
    # TODO(review): confirm the actual field names submitted by the search form.
    params = request.POST if request.method == 'POST' else request.GET
    fromStation = params.get('fromStation')
    toStation = params.get('toStation')
    dte = params.get('date')
    cursor = connection.cursor()
    # Parameterised query -- user input is never interpolated into the SQL.
    cursor.execute("select * from buses_routes where source= %s AND destination = %s AND date = %s",(fromStation, toStation, dte))
    bus = cursor.fetchall()
    host = request.get_host()
    print("Total number of rows in Laptop is: ", cursor.rowcount)
    paypal_dict = {
        'business' : settings.PAYPAL_RECEIVER_EMAIL,
        'amount' : '120',  # NOTE(review): fare is hard-coded
        'currency_code': 'USD',
        'notify_url': 'http://{}{}'.format(host,reverse('paypal-ipn')),
        'return_url': 'http://{}{}'.format(host, reverse('payment:done')),
        'cancel_return': 'http://{}{}'.format(host, reverse('payment:canceled')),
    }
    form = PayPalPaymentsForm(initial=paypal_dict)
    return render(request, 'main/process.html', {'bus':bus,'form':form})
|
"""
:mod:`pysgutils.sg_pt`
~~~~~~~~~~~~~~~~~~~~~~
Python port of sg_pt.h from sg3_utils
Comments from sg_pt.h:
Copyright (c) 2005-2014 Douglas Gilbert.
All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the BSD_LICENSE file.
"""
from __future__ import absolute_import
import os
import ctypes
import enum
import errno
import sys
import six
import weakref
from . import sg_lib, libsgutils2, _impl_check
# Opaque handle type: wraps the C library's 'struct sg_pt_base *'.
class SGPTBase(ctypes.c_void_p):
    """
    This declaration hides the fact that each implementation has its own
    structure "derived" (using a C++ term) from this one. It compiles
    because 'struct sg_pt_base' is only referenced (by pointer: 'objp')
    in this interface. An instance of this structure represents the
    context of one SCSI command.
    """
def scsi_pt_version():
    """Return the pass-through interface version string of libsgutils2.

    The format of the version string is like this: "2.01 20090201".
    The leading digit will be incremented if this interface changes
    in a way that may impact backward compatibility."""
    return libsgutils2.scsi_pt_version().decode('utf-8')
@_impl_check
def scsi_pt_open_device(device_name, read_only=False, verbose=False):
    """Open a pass-through device and return its file descriptor (>= 0).

    The C call returns a negated errno on failure; this wrapper converts
    that into a raised OSError instead of a negative return value.
    """
    fd = libsgutils2.scsi_pt_open_device(device_name.encode('utf-8'), read_only, verbose)
    if fd >= 0:
        return fd
    raise OSError(-fd, sg_lib.safe_strerror(-fd))
@_impl_check
def scsi_pt_open_flags(device_name, flags=os.O_RDWR, verbose=False):
    """Open a pass-through device with Unix-style open flags OR-ed together
    (similar to scsi_pt_open_device; in Win32, O_EXCL is translated to its
    equivalent). Returns a valid file descriptor (>= 0).

    The C call returns -1 or a negated errno on failure; this wrapper
    converts that into a raised OSError.
    """
    fd = libsgutils2.scsi_pt_open_flags(device_name.encode('utf-8'), flags, verbose)
    if fd >= 0:
        return fd
    raise OSError(-fd, sg_lib.safe_strerror(-fd))
@_impl_check
def scsi_pt_close_device(device_fd):
    """Close a pass-through file descriptor.

    The C call returns a negated errno on failure (Unix); this wrapper
    converts that into a raised OSError.
    """
    status = libsgutils2.scsi_pt_close_device(device_fd)
    if status < 0:
        raise OSError(-status, sg_lib.safe_strerror(-status))
@_impl_check
def construct_scsi_pt_obj():
    """Create an object used to issue one or more SCSI commands (or task
    management functions), wrapped as an SGPTBase handle.

    Destroy it with destruct_scsi_pt_obj() when no longer needed.
    Raises MemoryError when the C constructor returns NULL.
    """
    handle = libsgutils2.construct_scsi_pt_obj()
    if not handle:
        raise MemoryError("Construction of scsi pt object is failed")
    return SGPTBase(handle)
@_impl_check
def clear_scsi_pt_obj(objp):
    """Clear state information held in *objp . This allows this object to be
    used to issue more than one SCSI command."""
    # objp: SGPTBase handle from construct_scsi_pt_obj()
    libsgutils2.clear_scsi_pt_obj(objp)
@_impl_check
def set_scsi_pt_cdb(objp, cdb):
    """Set the CDB (command descriptor block)"""
    # The buffer length is passed alongside the buffer, as the C API requires.
    libsgutils2.set_scsi_pt_cdb(objp, cdb, len(cdb))
@_impl_check
def set_scsi_pt_sense(objp, sense):
    """Set the sense buffer and the maximum length that it can handle"""
    # len(sense) is the capacity the device may fill with sense data.
    libsgutils2.set_scsi_pt_sense(objp, sense, len(sense))
@_impl_check
def set_scsi_pt_data_in(objp, dxferp):
    """Set a pointer and length to be used for data transferred from device"""
    # dxferp: writable buffer that receives the data-in transfer.
    libsgutils2.set_scsi_pt_data_in(objp, dxferp, len(dxferp))
@_impl_check
def set_scsi_pt_data_out(objp, dxferp):
    """Set a pointer and length to be used for data transferred to device"""
    # dxferp: buffer whose contents are sent to the device.
    libsgutils2.set_scsi_pt_data_out(objp, dxferp, len(dxferp))
@_impl_check
def set_scsi_pt_packet_id(objp, packet_id):
    """The following "set_"s implementations may be dummies"""
    libsgutils2.set_scsi_pt_packet_id(objp, packet_id)
@_impl_check
def set_scsi_pt_tag(objp, tag):
libsgutils2.set_scsi_pt_tag(objp, tag)
@_impl_check
def set_scsi_pt_task_management(objp, tmf_code):
libsgutils2.set_scsi_pt_task_management(objp, tmf_code)
@_impl_check
def set_scsi_pt_task_attr(objp, attribute, priority):
libsgutils2.set_scsi_pt_task_attr(objp, attribute, priority)
class SCSIPTFlags(enum.IntEnum):
    """OS-dependent flags for set_scsi_pt_flags().

    The C library defines a guard macro when set_scsi_pt_flags() is present;
    older library versions may lack the function.  If neither QUEUE_AT_HEAD
    nor QUEUE_AT_TAIL is given — or both are — the pass-through default
    queueing behaviour is used.  Values are bit flags and may be OR-ed.
    """
    NONE = 0
    FUNCTION = 1
    QUEUE_AT_TAIL = 0x10
    QUEUE_AT_HEAD = 0x20
@_impl_check
def set_scsi_pt_flags(objp, flags):
    """Set (potentially OS dependent) flags for the pass-through mechanism.

    Apart from contradictory combinations, SCSIPTFlags values can be
    OR-ed together.
    """
    libsgutils2.set_scsi_pt_flags(objp, flags)
@_impl_check
def do_scsi_pt(objp, fd, timeout_secs, verbose=False):
    """Submit the prepared command on *fd*; map C return codes to exceptions.

    C return-value convention:
      * negative -> OS error prior to / during submission (negated errno,
        including -EINTR for interrupted system calls) -> OSError;
      * 0        -> command sent (at the very least) -> returns None;
      * 1        -> SCSI_PT_DO_BAD_PARAMS -> ValueError;
      * 2        -> SCSI_PT_DO_TIMEOUT -> TimeoutError (Python 3) or
                    OSError(ETIMEDOUT) (Python 2).
    """
    ret = libsgutils2.do_scsi_pt(objp, fd, timeout_secs, verbose)
    if ret < 0:
        raise OSError(-ret, sg_lib.safe_strerror(-ret))
    elif ret == 1:
        raise ValueError("SCSI_PT_DO_BAD_PARAMS (1)")
    elif ret == 2:
        # TimeoutError only exists on Python 3.
        if sys.version_info > (3,):
            # noinspection PyCompatibility
            raise TimeoutError("SCSI_PT_DO_TIMEOUT (2)")
        else:
            raise OSError(errno.ETIMEDOUT, "SCSI_PT_DO_TIMEOUT (2)")
    # NOTE(review): positive codes > 2 fall through and return None
    # silently — confirm that is the intended behaviour.
class SCSIPTResult(enum.IntEnum):
    """Result categories reported by get_scsi_pt_result_category()."""
    GOOD = 0
    #: other than GOOD and CHECK CONDITION
    STATUS = 1
    SENSE = 2
    TRANSPORT_ERR = 3
    OS_ERR = 4
@_impl_check
def get_scsi_pt_result_category(objp):
    """Highest-numbered applicable category, as a SCSIPTResult member."""
    return SCSIPTResult(libsgutils2.get_scsi_pt_result_category(objp))
@_impl_check
def get_scsi_pt_resid(objp):
    """Data-transfer residual count; the C library reports 0 if unavailable."""
    return libsgutils2.get_scsi_pt_resid(objp)
@_impl_check
def get_scsi_pt_status_response(objp):
    """SCSI status value from the device that received the command,
    wrapped in sg_lib.SCSIStatusCode."""
    return sg_lib.SCSIStatusCode(libsgutils2.get_scsi_pt_status_response(objp))
@_impl_check
def get_scsi_pt_sense_len(objp):
    """Actual sense length returned.  If sense data is present but the
    actual length is not known, the C library returns 'max_sense_len'."""
    return libsgutils2.get_scsi_pt_sense_len(objp)
@_impl_check
def get_scsi_pt_os_err(objp):
    """OS error number from the last command; 0 if unavailable."""
    return libsgutils2.get_scsi_pt_os_err(objp)
@_impl_check
def get_scsi_pt_os_err_str(objp):
    """Human-readable OS error text (UTF-8 decoded, up to 512 bytes)."""
    buffer = ctypes.create_string_buffer(512)
    libsgutils2.get_scsi_pt_os_err_str(objp, 512, ctypes.byref(buffer))
    return buffer.value.decode('utf-8')
@_impl_check
def get_scsi_pt_transport_err(objp):
    """Transport error code; 0 if unavailable."""
    return libsgutils2.get_scsi_pt_transport_err(objp)
@_impl_check
def get_scsi_pt_transport_err_str(objp):
    """Human-readable transport error text (UTF-8 decoded, up to 512 bytes)."""
    buffer = ctypes.create_string_buffer(512)
    libsgutils2.get_scsi_pt_transport_err_str(objp, 512, ctypes.byref(buffer))
    return buffer.value.decode('utf-8')
@_impl_check
def get_scsi_pt_duration_ms(objp):
    """Command duration in milliseconds, or None when the C library reports
    -1 ("not available")."""
    ret = libsgutils2.get_scsi_pt_duration_ms(objp)
    if ret == -1:
        return None
    else:
        return ret
@_impl_check
def destruct_scsi_pt_obj(objp):
    """Should be invoked once per objp after other processing is complete in
    order to clean up resources.  For every successful construct_scsi_pt_obj()
    call there should be one destruct_scsi_pt_obj()."""
    libsgutils2.destruct_scsi_pt_obj(objp)
@_impl_check
def scsi_pt_win32_direct(objp, state_direct):
    """Request the Win32 SPT direct interface when state_direct is 1, or the
    indirect interface when 0.  The default is selected at library compile
    time and is usually indirect."""
    try:
        libsgutils2.scsi_pt_win32_direct(objp, state_direct)
    except AttributeError:
        # Symbol absent on non-Win32 builds of the library; silently ignore.
        pass
@_impl_check
def scsi_pt_win32_spt_state():
    """True when the Win32 SPT direct interface is active; implicitly
    returns None when the symbol is absent (non-Win32 builds)."""
    try:
        return libsgutils2.scsi_pt_win32_spt_state() != 0
    except AttributeError:
        pass
class TransportError(RuntimeError):
    """Raised when the pass-through layer reports a transport-level error.

    The numeric code is kept on ``err`` so callers can inspect it
    programmatically (mirrors SCSIError, which keeps ``status_code``).
    """

    def __init__(self, err, message):
        super().__init__("[Error {}] {}".format(err, message))
        # Consistency fix: previously the code was only embedded in the text.
        self.err = err
class SCSIError(RuntimeError):
    """Raised when a device returns a non-GOOD SCSI status."""

    def __init__(self, status_code, message):
        # Keep the raw status available to callers before building the text.
        self.status_code = status_code
        super().__init__("[SCSI Status {}] {}".format(status_code, message))
class SCSIPTDevice(object):
    """An open SCSI pass-through device.

    A class-wide stack tracks the "current" device so callers (e.g.
    SCSIPTObject.do_scsi_pt) can default to the innermost device entered
    via ``with`` / enter().
    """

    # Weak registry of live instances keyed by id().
    _refs = weakref.WeakValueDictionary()
    # Sentinel None at the bottom: current() returns None when nothing is entered.
    _stack = [None]

    def __init__(self, device_name, read_only_or_flags=False, verbose=False, **kwargs):
        """Open *device_name*.

        read_only_or_flags: a bool selects scsi_pt_open_device(read_only=...),
        an int selects scsi_pt_open_flags(flags=...).  The legacy keyword
        arguments ``flags`` and ``read_only`` are still honoured.
        """
        # Known state first so __del__ is safe even if opening fails below.
        self._fd = None
        if 'flags' in kwargs:
            read_only_or_flags = kwargs['flags']
        elif 'read_only' in kwargs:
            read_only_or_flags = kwargs['read_only']
        if isinstance(read_only_or_flags, bool):
            self._fd = scsi_pt_open_device(device_name, read_only_or_flags, verbose)
        elif isinstance(read_only_or_flags, six.integer_types):
            self._fd = scsi_pt_open_flags(device_name, read_only_or_flags, verbose)
        else:
            raise ValueError("read_only_or_flags must be one of bool or integer value")
        self.device_name = device_name
        self._refs[id(self)] = self

    def __repr__(self):
        return "<{}: {}, fd: {}>".format(type(self).__qualname__, self.device_name, self._fd)

    def __del__(self):
        # getattr guard: __init__ may have raised before _fd existed.
        if getattr(self, '_fd', None) is not None:
            self.close()

    def close(self):
        """Close the underlying descriptor; safe to call once."""
        scsi_pt_close_device(self._fd)
        self._fd = None

    def enter(self):
        """Push this device onto the "current device" stack; returns self."""
        self._stack.append(self)
        return self

    def exit(self):
        """Pop this device off the "current device" stack."""
        self._stack.pop()

    @classmethod
    def current(cls):
        """Innermost entered device, or None when the stack is empty."""
        return cls._stack[-1]

    def __enter__(self):
        # BUG FIX: previously returned enter()'s implicit None, so
        # ``with SCSIPTDevice(...) as d:`` bound d to None.
        return self.enter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        # BUG FIX: previously returned the popped (truthy) device object,
        # which made Python suppress any exception raised in the with-block.
        self.exit()
        return False
class SCSIPTObject(object):
    """High-level wrapper around a libsgutils2 pass-through object.

    Every value pushed into the C object is mirrored in a Python-side
    shadow attribute so the setters' values can be read back (most of the
    C API is write-only for these fields).
    """
    # Weak registry of live instances keyed by id().
    _refs = weakref.WeakValueDictionary()
    # Default command timeout (seconds) used by do_scsi_pt().
    timeout = 5
    class TaskAttr(object):
        """Dict-like view of task attributes; writes go through to the C
        object, reads come from the local shadow dict only."""
        def __init__(self, pt_obj):
            self._pt_obj = pt_obj
            self._attrs = dict()
        def __getitem__(self, item):
            return self._attrs.get(item, None)
        def __setitem__(self, key, value):
            set_scsi_pt_task_attr(self._pt_obj, key, value)
            self._attrs[key] = value
    def __init__(self):
        self._pt_obj = construct_scsi_pt_obj()
        self._cdb = None
        self._sense = None
        self._data_in = None
        self._data_out = None
        self._packet_id = None
        self._tag = None
        self._task_management = None
        self.task_attr = self.TaskAttr(self._pt_obj)
        self._flags = SCSIPTFlags.NONE
        try:
            # Only meaningful on Win32 builds of the library.
            self._win32_direct = scsi_pt_win32_spt_state()
        except NotImplementedError:
            self._win32_direct = None
        self._refs[id(self)] = self
    def clear(self):
        # Reset C-side state so the object can issue another command.
        clear_scsi_pt_obj(self._pt_obj)
    def __del__(self):
        # Pair every construct_scsi_pt_obj() with a destruct (frees C memory).
        destruct_scsi_pt_obj(self._pt_obj)
    # --- write-through properties; getters return the Python-side shadow ---
    @property
    def cdb(self):
        return self._cdb
    @cdb.setter
    def cdb(self, val):
        set_scsi_pt_cdb(self._pt_obj, val)
        if isinstance(val, sg_lib.SCSICommand):
            self._cdb = val
        else:
            # Normalise raw buffers to the richer SCSICommand wrapper.
            self._cdb = sg_lib.SCSICommand(bytes(val))
    @property
    def sense(self):
        return self._sense
    @sense.setter
    def sense(self, val):
        set_scsi_pt_sense(self._pt_obj, val)
        self._sense = val
    @property
    def data_in(self):
        return self._data_in
    @data_in.setter
    def data_in(self, val):
        set_scsi_pt_data_in(self._pt_obj, val)
        self._data_in = val
    @property
    def data_out(self):
        return self._data_out
    @data_out.setter
    def data_out(self, val):
        set_scsi_pt_data_out(self._pt_obj, val)
        self._data_out = val
    @property
    def packet_id(self):
        return self._packet_id
    @packet_id.setter
    def packet_id(self, val):
        set_scsi_pt_packet_id(self._pt_obj, val)
        self._packet_id = val
    @property
    def tag(self):
        return self._tag
    @tag.setter
    def tag(self, val):
        set_scsi_pt_tag(self._pt_obj, val)
        self._tag = val
    @property
    def task_management(self):
        return self._task_management
    @task_management.setter
    def task_management(self, val):
        set_scsi_pt_task_management(self._pt_obj, val)
        self._task_management = val
    # --- read-only views over the C object's post-command state ---
    @property
    def result_category(self):
        return get_scsi_pt_result_category(self._pt_obj)
    @property
    def resid(self):
        return get_scsi_pt_resid(self._pt_obj)
    @property
    def status_response(self):
        return get_scsi_pt_status_response(self._pt_obj)
    @property
    def sense_len(self):
        return get_scsi_pt_sense_len(self._pt_obj)
    @property
    def os_err(self):
        return get_scsi_pt_os_err(self._pt_obj)
    @property
    def os_err_str(self):
        return get_scsi_pt_os_err_str(self._pt_obj)
    @property
    def transport_err(self):
        return get_scsi_pt_transport_err(self._pt_obj)
    @property
    def transport_err_str(self):
        return get_scsi_pt_transport_err_str(self._pt_obj)
    @property
    def duration_ms(self):
        return get_scsi_pt_duration_ms(self._pt_obj)
    @property
    def win32_direct(self):
        return self._win32_direct
    @win32_direct.setter
    def win32_direct(self, val):
        scsi_pt_win32_direct(self._pt_obj, val)
        self._win32_direct = val
    def do_scsi_pt(self, timeout=None, device=None, verbose=False):
        """Send the prepared command and raise on any reported failure.

        *device* defaults to SCSIPTDevice.current() (the innermost device
        entered via ``with``).  Raises ValueError when no device is
        available, otherwise OSError / TransportError / SCSIError according
        to the result category.
        """
        if device is None:
            device = SCSIPTDevice.current()
        if device is None:
            raise ValueError("Device is not specified")
        if timeout is None:
            timeout = self.timeout
        do_scsi_pt(self._pt_obj, device._fd, timeout, verbose)
        result = self.result_category
        if result == SCSIPTResult.OS_ERR or self.os_err:
            raise OSError(self.os_err, self.os_err_str)
        elif result == SCSIPTResult.TRANSPORT_ERR or self.transport_err:
            raise TransportError(self.transport_err, self.transport_err_str)
        elif result == SCSIPTResult.SENSE:
            raise SCSIError(self.status_response, str(self.sense))
        elif result == SCSIPTResult.STATUS:
            raise SCSIError(self.status_response, '')
|
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
def initial_plot(f):
    """Plot f over [-5, 5]^2: a 3-D surface (left) and labelled contours (right).

    *f* is called once with a list [X, Y] of meshgrid arrays and must return
    an array Z of the same shape.
    """
    matplotlib.rcParams['xtick.direction'] = 'out'
    matplotlib.rcParams['ytick.direction'] = 'out'
    delta = 0.025
    x = np.arange(-5.0, 5.0, delta)
    y = np.arange(-5.0, 5.0, delta)
    X, Y = np.meshgrid(x, y)
    Z = f([X, Y])
    fig = plt.figure(figsize=(20, 6))
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
    cset = ax.contour(X, Y, Z, zdir='z', offset=0, cmap=cm.coolwarm)
    ax.set_xlim3d(-5, 5)
    ax.set_ylim3d(-5, 5)
    ax.set_zlim3d(0, 10)
    ax2 = fig.add_subplot(1, 2, 2)
    levels = [5, 10, 15, 25, 50, 100, 150]
    CS = ax2.contour(X, Y, Z, levels)
    ax2.clabel(CS, inline=1, fontsize=10, cmap=cm.coolwarm)
    ax2.set_xlabel('$x_0$')
    ax2.set_ylabel('$x_1$')
    # FIX: raw string — '\m' is an invalid escape sequence in a normal
    # literal (SyntaxWarning on modern Pythons); runtime value is identical.
    ax2.set_title(r'$f(\mathbf{x})$')
def plot_gradient(f, f_grad):
    """Contour plot of f with a quiver overlay of f_grad sampled on a coarse
    (every 40th point) grid.

    *f* and *f_grad* are each called with a list [X, Y] of meshgrid arrays;
    f_grad must return a pair (dZ/dx0, dZ/dx1).
    """
    delta = 0.025
    x = np.arange(-5.0, 5.0, delta)
    y = np.arange(-5.0, 5.0, delta)
    X, Y = np.meshgrid(x, y)
    Z = f([X, Y])
    # Coarse grid for arrows so the quiver stays readable.
    X_2, Y_2 = np.meshgrid(x[::40], y[::40])
    Z_grad = f_grad([X_2, Y_2])
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(1, 1, 1)
    levels = [5, 10, 15, 25, 50, 100, 150]
    CS = ax.contour(X, Y, Z, levels)
    ax.clabel(CS, inline=1, fontsize=10, cmap=cm.coolwarm)
    ax.set_xlabel('$x_0$')
    ax.set_ylabel('$x_1$')
    # FIX: raw string — '\m' is an invalid escape sequence in a normal
    # literal (SyntaxWarning on modern Pythons); runtime value is identical.
    ax.set_title(r'$f(\mathbf{x})$')
    plt.quiver(X_2, Y_2, Z_grad[0], Z_grad[1])
def trajectory_visualization(f, n_iter, trajectory):
    """Contour plot of f over [-5, 5]^2 with the first *n_iter* points of
    *trajectory* (array of (x0, x1) rows) drawn on top in red."""
    step = 0.025
    xs = np.arange(-5.0, 5.0, step)
    ys = np.arange(-5.0, 5.0, step)
    grid_x, grid_y = np.meshgrid(xs, ys)
    grid_z = f([grid_x, grid_y])
    fig = plt.figure(figsize=(20, 9))
    axis = fig.add_subplot(1, 2, 2)
    contour_levels = [5, 10, 15, 25, 50, 100, 150]
    contours = axis.contour(grid_x, grid_y, grid_z, contour_levels)
    axis.clabel(contours, inline=1, fontsize=10, cmap=cm.coolwarm)
    axis.set_xlabel('$x_0$')
    axis.set_ylabel('$x_1$')
    axis.set_title('{}'.format(n_iter))
    axis.plot(trajectory[:n_iter, 0], trajectory[:n_iter, 1], '-o', markersize=10, color='red')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from apps.web import views
from apps.api.views import GameViewSet
from rest_framework import routers
# DRF router auto-generates list/detail routes for the games endpoint.
router = routers.DefaultRouter()
router.register(r'games', GameViewSet)
# NOTE(review): patterns() is the pre-Django-1.8 style (removed in 1.10);
# this file presumably targets an old Django — confirm before upgrading.
# NOTE(review): include(views) receives a module object rather than a
# urlconf / patterns list — verify that apps.web.views defines urlpatterns.
urlpatterns = patterns('',
    url(r'^', include(views)),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/v1/', include(router.urls)),
    url(r'^api/v1/stats/', include("apps.api.stats.urls")),
    url(r'^api/v1/wars/', include("apps.api.wars.urls")),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
# Custom 404 handler (dotted path resolved by Django).
handler404 = 'apps.web.views.error404'
|
"""
Heber Cooke 10/31/2019
Chapter 6 Exercise 9
Write a program that computes and prints the average of the numbers in a text file.
You should make use of two higher-order functions to simplify the design.
"""
import random
inpu = input("Enter a file name or C to create one ")
if inpu == 'C' or inpu == 'c': # create a txt file
    # FIX: use context managers so the file handles are always closed,
    # even if a write/read raises.
    with open("numbers.txt", 'w') as f:
        for i in range(100): # put 100 random integers in the txt file
            f.write(str(random.randint(1, 100)))
            f.write(' ')
    # Reopen the created file and read it back as whitespace-separated tokens.
    with open('numbers.txt', 'r') as f:
        s = f.read().split()
else:
    with open(inpu, 'r') as f:
        s = f.read().split()
def total(s):
    """Return the sum of an iterable of numeric strings.

    Idiom fix: built-in sum() over a generator replaces the manual
    accumulator loop; behaviour is unchanged (empty input sums to 0).
    """
    return sum(int(i) for i in s)
def count(s):
    """Return the number of items in *s*."""
    return len(s)
def average(t, c):
    """Return the mean given a total *t* and a count *c* (true division)."""
    return t / c
# Aliases satisfy the exercise's "higher-order functions" requirement:
# the functions are passed around / invoked as first-class values.
t = total
c = count
a = average
print("Total",t(s))
print("Count",c(s))
print("Average", a(t(s),c(s)))
|
# Generated by Django 2.2.8 on 2020-06-09 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax Teach.time to an optional CharField(10)."""
    dependencies = [
        ('myapp', '0002_teach_time'),
    ]
    operations = [
        migrations.AlterField(
            model_name='teach',
            name='time',
            # blank=True allows empty form input; null=True allows NULL in DB.
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
|
#!/usr/bin/python
"""
Author Paula Dwan
Email paula.dwan@gmail.com
Student ID 13208660
Subject COMP47270 (Computational Network Analysis and Modeling)
Date Jan-2015
Lecturer Dr. Neil Hurley
LABORATORY | CASE STUDY 2 : laplacian.py
"""
# import the networkx network analysis package
import networkx as nx
# import the graphvisualisation package graphviz
from networkx import graphviz_layout
import pygraphviz
# import the plotting functionality from matplotlib
import matplotlib.pyplot as plt
#import Delaunay tesselation
from scipy.spatial import Delaunay
# import kmeans
from scipy.cluster.vq import vq, kmeans, whiten
import numpy as np
import scipy as sp
import random
def placement():
    """Demonstrate spectral node placement and clustering (Python 2).

    Builds a random planar graph via Delaunay tesselation, places nodes by
    the 2nd/3rd eigenvectors of the normalised Laplacian, then k-means
    clusters the nodes using (a) eigenvector features and (b) adjacency
    columns, plotting each stage in figures 1-3.
    """
    num_nodes = 100
    x = [random.random() for i in range(num_nodes)]
    y = [random.random() for i in range(num_nodes)]
    x = np.array(x)
    y = np.array(y)
    # Make a graph with num_nodes nodes and zero edges
    # Plot the nodes using x,y as the node positions
    G = nx.empty_graph(num_nodes)
    print "G.number_of_nodes() = ", G.number_of_nodes(), "\n"
    pos = dict()
    for i in range(num_nodes):
        pos[i] = x[i],y[i]
    plot_graph(G, pos, 1)
    # Now add some edges - use Delaunay tesselation to produce a planar graph.
    # Delaunay tesselation covers the convex hull of a set of points with
    # triangular simplices (in 2D)
    #
    # Aside : Paula 13-Jan-2015
    # planar graph - graph that can be plotted in 2-D with no overlaps.
    points = np.column_stack((x,y))
    dl = Delaunay(points)
    tri = dl.simplices
    # Each triangle contributes its 3 edges in both directions (6 entries).
    edges = np.zeros((2, 6*len(tri)),dtype=int)
    data = np.ones(6*len(points))
    j=0
    for i in range(len(tri)):
        edges[0][j]=tri[i][0]
        edges[1][j]=tri[i][1]
        j = j+1
        edges[0][j]=tri[i][1]
        edges[1][j]=tri[i][0]
        j = j+1
        edges[0][j]=tri[i][0]
        edges[1][j]=tri[i][2];
        j = j+1
        edges[0][j]=tri[i][2]
        edges[1][j]=tri[i][0];
        j = j+1
        edges[0][j]=tri[i][1]
        edges[1][j]=tri[i][2]
        j=j+1
        edges[0][j]=tri[i][2]
        edges[1][j]=tri[i][1]
        j=j+1
    data=np.ones(6*len(tri))
    A = sp.sparse.csc_matrix((data,(edges[0,:],edges[1,:])))
    # Duplicate edges were summed by csc_matrix; clamp weights back to 1.
    for i in range(A.nnz):
        A.data[i] = 1.0
    G = nx.to_networkx_graph(A)
    plot_graph(G,pos,2)
    # Use the eigenvectors of the normalised Laplacian to calculate placement positions
    # for the nodes in the graph
    # eigen_pos holds the positions
    eigen_pos = dict()
    deg = A.sum(0)
    diags = np.array([0])
    D = sp.sparse.spdiags(deg,diags,A.shape[0],A.shape[1]) # diagonal matrix of degrees
    Dinv = sp.sparse.spdiags(1/deg,diags,A.shape[0],A.shape[1]) # inverse of
    # Normalised laplacian : multiply by 1 / Deg previously
    L = Dinv*(D - A)
    E, V = sp.sparse.linalg.eigs(L,3,None,100.0,'SM') # 100x100 martrix --> compress into 100 vector
    V = V.real
    for i in range(num_nodes):
        eigen_pos[i] = V[i,1].real,V[i,2].real
    # for n,nbrsdict in G.adjacency_iter():
    #     for nbr,eattr in nbrsdict.items():
    #        if 'weight' in eattr:
    #            print n,nbr,eattr['weight']
    plot_graph(G,eigen_pos,3)
    # Now let's see if the eigenvectors are good for clustering
    # Use k-means to cluster the points in the vector V
    features = np.column_stack((V[:,1], V[:,2]))
    print "cluster_nodes for e-vector values :-"
    cluster_nodes(G,features,pos,eigen_pos) # e-vectors
    # Finally, use the columns of A directly for clustering
    raw_input("Press Enter to Continue ...\n")
    print "cluster_nodes for Delaunay tesselation values :-"
    cluster_nodes(G,A.todense(),pos,eigen_pos) # Delaunay tesselationvalues
    raw_input("Press Enter to Continue ...\n")
def plot_graph(G,pos,fignum):
    """Draw *G* into figure *fignum* with nodes at *pos* and index labels
    offset slightly above-right of each node (Python 2 / old networkx API:
    the ``hold`` keyword was removed in later releases)."""
    label = dict()
    labelpos=dict()
    for i in range(G.number_of_nodes()):
        label[i] = i
        # Offset labels so they do not sit on top of the node markers.
        labelpos[i] = pos[i][0]+0.02, pos[i][1]+0.02
    fig=plt.figure(fignum,figsize=(8,8))
    fig.clf()
    nx.draw_networkx_nodes(G,
                           pos,
                           node_size=40,
                           hold=False,
                           )
    nx.draw_networkx_edges(G,pos, hold=True)
    nx.draw_networkx_labels(G,
                            labelpos,
                            label,
                            font_size=10,
                            hold=True,
                            )
    fig.show(1)
def cluster_nodes(G, feat, pos, eigen_pos):#
    """k-means (k=3) cluster the rows of *feat* and recolour clusters W0
    (magenta) and W1 (blue) in figures 2 and 3.

    NOTE(review): W2 is printed but never re-coloured in either figure —
    its nodes keep the default colour; confirm that is intended.
    """
    book,distortion = kmeans(feat,3)
    codes,distortion = vq(feat, book)
    nodes = np.array(range(G.number_of_nodes()))
    W0 = nodes[codes==0].tolist()
    W1 = nodes[codes==1].tolist()
    W2 = nodes[codes==2].tolist()
    print "W0 = ", W0
    print "W1 = ", W1
    print "W2 = ", W2
    plt.figure(3) # position of nodes as per e-vectors
    nx.draw_networkx_nodes(G,
                           eigen_pos,
                           node_size=40,
                           hold=True,
                           nodelist=W0,
                           node_color='m'
                           )
    nx.draw_networkx_nodes(G,
                           eigen_pos,
                           node_size=40,
                           hold=True,
                           nodelist=W1,
                           node_color='b'
                           )
    plt.figure(2) # positions of nodes per Delaney tesselation
    nx.draw_networkx_nodes(G,
                           pos,
                           node_size=40,
                           hold=True,
                           nodelist=W0,
                           node_color='m'
                           )
    nx.draw_networkx_nodes(G,
                           pos,
                           node_size=40,
                           hold=True,
                           nodelist=W1,
                           node_color='b'
                           )
if __name__ == '__main__':
    # Script entry point: run the full placement/clustering demo.
    placement()
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
from gi.repository.GdkPixbuf import Pixbuf
class openSessionWindow(Gtk.Window):
    """A small "Open Session" dialog: title label, name entry with Browse,
    and Open/Cancel buttons, stacked in a ListBox."""
    def __init__(self):
        Gtk.Window.__init__(self, title="Open Session Overlay")
        self.set_border_width(10)
        # NOTE(review): hb is created but never attached to the window
        # (no self.set_titlebar(hb) call) — the header bar is never shown.
        hb = Gtk.HeaderBar(title="Open Session")
        # Quit the GTK main loop when the window is closed.
        self.connect("destroy", Gtk.main_quit)
        hbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.add(hbox)
        listbox = Gtk.ListBox()
        listbox.add(Gtk.Label(' Open an Existing Session '))
        listbox.add(self.sessionName())
        listbox.add(self.bottomBttn())
        hbox.pack_start(listbox, False, True, 0)
    def sessionName(self):
        """Build the row holding the label, session-name entry and Browse.

        NOTE(review): returns a nested Gtk.ListBox, whereas bottomBttn()
        returns a Gtk.ListBoxRow — probably meant to be a ListBoxRow too.
        """
        row = Gtk.ListBox()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        row.add(hbox)
        # NOTE(review): despite the name, vbox is a HORIZONTAL box.
        vbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
        hbox.pack_start(vbox, False, True, 0)
        label2 = Gtk.Label()
        label2.set_markup("Session Name")
        vbox.pack_start(label2, False, True, 0)
        entry1 = Gtk.Entry()
        entry1.set_text('Session Name')
        vbox.pack_start(entry1, False, True, 0)
        browse1 = Gtk.Button.new_with_label("Browse")
        vbox.pack_start(browse1, False, True, 0)
        return row
    def bottomBttn(self):
        """Build the row with the Open and Cancel buttons (no handlers
        are connected to either button here)."""
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        row.add(hbox)
        btn = Gtk.Button.new_with_label("Open")
        hbox.pack_start(btn, True, True, 0)
        btn = Gtk.Button.new_with_label("Cancel")
        hbox.pack_start(btn, True, True, 0)
        return row
# Build the dialog, realise all child widgets, and enter the GTK main loop
# (blocks until the window's "destroy" signal fires).
window = openSessionWindow()
window.show_all()
Gtk.main()
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/28 15:30
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
import cv2
import numpy as np
# 300x300 black canvas, 3 channels (OpenCV uses BGR channel order).
canvas = np.zeros((300, 300, 3), dtype='uint8')
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
# Two diagonals: thin green, 5px red.
cv2.line(canvas, (0, 0), (300, 300), green)
cv2.line(canvas, (300, 0), (0, 300), red, 5)
# thickness=-1 draws a filled rectangle; 3 draws a 3px outline.
cv2.rectangle(canvas, (100, 100), (200, 200), green, -1)
cv2.rectangle(canvas, (200, 150), (300, 250), red, 3)
cv2.circle(canvas, (89, 89), 50, white, 1)
cv2.imshow('Canvas', canvas)
# Block until a key is pressed so the window stays visible.
cv2.waitKey(0)
|
"""empty message
Revision ID: 76453dfdcd53
Revises: 2e68164a73a9
Create Date: 2020-03-29 00:39:54.361536
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76453dfdcd53'
down_revision = '2e68164a73a9'
branch_labels = None
depends_on = None
def upgrade():
    """Create department, class, courseoff and corequisite tables
    (parents before children, matching the FK dependencies)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('department',
    sa.Column('name', sa.String(length=32), nullable=False),
    sa.PrimaryKeyConstraint('name')
    )
    op.create_table('class',
    sa.Column('subject', sa.String(length=32), nullable=True),
    sa.Column('num', sa.Integer(), nullable=False),
    # NOTE(review): asdecimal=1 passes an int where SQLAlchemy documents a
    # bool (asdecimal=True) — works via truthiness but should be a bool.
    sa.Column('unit', sa.Float(precision=2, asdecimal=1), nullable=False),
    sa.Column('alp', sa.Boolean(), nullable=False),
    sa.Column('cz', sa.Boolean(), nullable=False),
    sa.Column('ns', sa.Boolean(), nullable=False),
    sa.Column('qs', sa.Boolean(), nullable=False),
    sa.Column('ss', sa.Boolean(), nullable=False),
    sa.Column('cci', sa.Boolean(), nullable=False),
    sa.Column('ei', sa.Boolean(), nullable=False),
    sa.Column('sts', sa.Boolean(), nullable=False),
    sa.Column('fl', sa.Boolean(), nullable=False),
    sa.Column('r', sa.Boolean(), nullable=False),
    sa.Column('w', sa.Boolean(), nullable=False),
    sa.Column('rating', sa.Float(precision=2, asdecimal=1), nullable=True),
    sa.Column('desc', sa.String(length=256), nullable=True),
    sa.ForeignKeyConstraint(['subject'], ['department.name'], ),
    sa.PrimaryKeyConstraint('num')
    )
    op.create_table('courseoff',
    sa.Column('subject', sa.String(length=256), nullable=False),
    sa.Column('course_num', sa.Integer(), nullable=False),
    sa.Column('type', sa.String(length=8), nullable=False),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('mon', sa.Boolean(), nullable=False),
    sa.Column('tues', sa.Boolean(), nullable=False),
    sa.Column('wed', sa.Boolean(), nullable=False),
    sa.Column('thur', sa.Boolean(), nullable=False),
    sa.Column('fri', sa.Boolean(), nullable=False),
    sa.Column('start_time', sa.Time(), nullable=False),
    sa.Column('end_time', sa.Time(), nullable=False),
    sa.ForeignKeyConstraint(['subject', 'course_num'], ['class.subject', 'class.num'], ),
    sa.PrimaryKeyConstraint('subject', 'course_num', 'type', 'id')
    )
    op.create_table('corequisite',
    sa.Column('main_subject', sa.String(length=32), nullable=False),
    sa.Column('main_num', sa.Integer(), nullable=False),
    sa.Column('main_type', sa.String(length=32), nullable=False),
    sa.Column('sup_subject', sa.String(length=32), nullable=False),
    sa.Column('sup_num', sa.Integer(), nullable=False),
    sa.Column('sup_type', sa.String(length=32), nullable=False),
    sa.ForeignKeyConstraint(['main_subject', 'main_num', 'main_type'], ['courseoff.subject', 'courseoff.course_num', 'courseoff.type'], ),
    sa.ForeignKeyConstraint(['sup_subject', 'sup_num', 'sup_type'], ['courseoff.subject', 'courseoff.course_num', 'courseoff.type'], ),
    # NOTE(review): the PK covers only the sup_* columns, so a supplementary
    # course can appear in at most one corequisite pair — confirm intended.
    sa.PrimaryKeyConstraint('sup_subject', 'sup_num', 'sup_type', name='_sup_uc')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), children before parents."""
    # Reverse dependency order: corequisite references courseoff, which
    # references class, which references department.
    for table_name in ('corequisite', 'courseoff', 'class', 'department'):
        op.drop_table(table_name)
|
import torch
import torch.nn as nn
import torchvision.models as models
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from dataset import TextDataset
from utils import get_val_augmentations, preprocess_data
def main():
    """Evaluate a saved ResNeXt-50 checkpoint on the validation split and
    print mean cross-entropy loss and mean per-batch accuracy."""
    BATCH_SIZE = 64
    NUM_WORKERS = 8
    IMAGE_SIZE = 256
    # Requires a CUDA device; no CPU fallback is provided here.
    device = torch.device("cuda:0")
    #device_ids = [0, 1]
    albumentations_transform_validate = get_val_augmentations(IMAGE_SIZE)
    # Only the validation split is used; the train split is discarded.
    train_df, val_df, train_labels, val_labels = preprocess_data('input/noisy_imagewoof.csv')
    validate_data = TextDataset(dataframe=val_df,
                                labels=val_labels,
                                path='input',
                                transform=albumentations_transform_validate)
    validate_loader = DataLoader(dataset=validate_data,
                                 batch_size=BATCH_SIZE,
                                 num_workers=NUM_WORKERS,
                                 shuffle=False,
                                 drop_last=False)
    model = models.resnext50_32x4d(pretrained=False)
    # Head replaced for 2 classes; weights come entirely from the checkpoint.
    # NOTE(review): 2 outputs vs. the "imagewoof" CSV name (10-class set) —
    # confirm the checkpoint really is a binary classifier.
    model.fc = nn.Linear(2048, 2)
    checkpoint = torch.load('model_saved/weight_best.pth')
    model.load_state_dict(checkpoint)
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    model.eval()
    val_loss = 0
    acc_val = 0
    val_len = len(validate_loader)
    for i, (imgs, labels) in tqdm(enumerate(validate_loader), total=val_len):
        with torch.no_grad():
            imgs_vaild = imgs.to(device)
            labels_vaild = labels.to(device)
            output_test = model(imgs_vaild)
            val_loss += criterion(output_test, labels_vaild).item()
            # softmax does not change the argmax; kept as written.
            pred = torch.argmax(torch.softmax(output_test, 1), 1).cpu().detach().numpy()
            true = labels.cpu().numpy()
            acc_val += accuracy_score(true, pred)
    # Mean of per-batch accuracies (slightly off exact accuracy when the
    # final batch is smaller, since drop_last=False).
    avg_val_acc = acc_val / val_len
    print(f'val_loss {val_loss / val_len} val_acc {avg_val_acc}')
if __name__ == '__main__':
    # Script entry point: run validation once.
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-15 22:48
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add CourseClass.ranking_size (non-negative, default
    10) and give Grade.is_canceled a human-readable verbose name."""
    dependencies = [
        ('course', '0003_grade_is_canceled'),
    ]
    operations = [
        migrations.AddField(
            model_name='courseclass',
            name='ranking_size',
            # Validator rejects negative sizes at the form/model level.
            field=models.IntegerField(default=10, validators=[django.core.validators.MinValueValidator(0)]),
        ),
        migrations.AlterField(
            model_name='grade',
            name='is_canceled',
            field=models.BooleanField(default=False, verbose_name='Canceled'),
        ),
    ]
|
import os,re,math
from time import gmtime, strftime
from flask import Flask,render_template,request,session,g,redirect, url_for,abort, flash
app = Flask(__name__)
# Landing page.
# NOTE(review): a second `def index()` on the same '/' route appears later
# in this module; Flask rejects duplicate endpoint names at import time.
@app.route('/')
def index():
    return render_template('index.html')
# Static routing
# Please check the files ending in .html in the templates folder to understand about rendering template.
@app.route('/TheEngineer') # Replace TheEngineer with your nickname
def routeStaticTheEngineer():
    """Static page for TheEngineer with a placeholder image URL."""
    placeholder = 'http://placehold.it/350x150&text=imageOne'
    return render_template('TheEngineer.html', imageURL=placeholder)
"""
HitmanFoo # Static routing, static files and return render_template
"""
# NOTE(review): duplicate of the `index` view defined near the top of this
# module.  Registering a second view function named 'index' makes Flask
# raise "View function mapping is overwriting an existing endpoint
# function" at import time — one of the two definitions must be removed.
@app.route('/')
def index():
    return render_template('index.html')
@app.route('/biscuit')
def routeStaticbiscuit():
    """Static page for biscuit with a fixed external image URL."""
    return render_template(
        'biscuit.html',
        imageURL='http://www.rides-mag.com/wp-content/uploads/2013/01/Lamborghini-Sesto-Elemento-2.jpg',
    )
"""
aronLim # Static routing, static files and return render_template
"""
# Dynamic routing
@app.route('/TheEngineer/<int:visitor>')
def routeDynamicTheEngineer(visitor):
    """Render the dynamic page with the visitor count taken from the URL."""
    return render_template('DynamicTheEngineer.html', numOfVisitor=visitor)
"""
HitmanFoo # Dynamic routing
"""
# biscuit # Dynamic routing
# biscuit # Dynamic routing
@app.route('/biscuit/<int:visitor>')
def routeDynamicbiscuit(visitor):
    """Render the dynamic biscuit page with the URL-supplied visitor count."""
    return render_template('Dynamicbiscuit.html', numOfVisitor=visitor)
"""
aronLim # Dynamic routing
"""
# HTTP methods
# N.B: The default method is GET. If no method is defined, Flask will think that it should execute GET.
@app.route('/TheEngineer/HTTPmethods',methods=['GET', 'POST'])
def httpMethodsTheEngineer():
    """Method-dispatch demo: the template receives 3 for POST, 2 for GET."""
    if request.method == 'POST':
        computed = 1 + 2
    elif request.method == 'GET':
        computed = 1 + 1
    else:
        # Mirrors the original's implicit None for any other method.
        return None
    return render_template('HTTPmethodsTheEngineer.html', varTheEngineer=computed)
"""
HitmanFoo # Dynamic routing
"""
# biscuit # HTTP methods
# biscuit # HTTP methods
@app.route('/biscuit/HTTPmethods',methods=['GET', 'POST'])
def httpMethodsbiscuit():
    """Method-dispatch demo: the template receives 3 for POST, 2 for GET."""
    if request.method == 'POST':
        computed = 1 + 2
    elif request.method == 'GET':
        computed = 1 + 1
    else:
        # Mirrors the original's implicit None for any other method.
        return None
    return render_template('HTTPmethodsbiscuit.html', varbiscuit=computed)
"""
aronLim # Dynamic routing
"""
# RequestData
# RequestData
@app.route('/TheEngineer/requestData',methods=['GET', 'POST'])
def requestDataTheEngineer():
    """Echo posted form fields back into the template; plain form on GET.

    Fix: pass template variables explicitly instead of the fragile
    ``**locals()`` idiom, which leaks every local into the template.
    The rendered context is unchanged (name and location were the only
    locals).
    """
    if request.method == 'POST':
        name = request.form['name']
        location = request.form['location']
        return render_template('requestDataTheEngineer.html', name=name, location=location)
    return render_template('requestDataTheEngineer.html')
"""
HitmanFoo # Request Data
"""
# biscuit # Request Data
# biscuit # Request Data
@app.route('/biscuit/requestData', methods=['GET', 'POST'])
def requestDatabiscuit():
    """Echo posted form fields back into the template; plain form on GET.

    Fix: explicit template kwargs replace ``**locals()`` (same rendered
    context — name and location were the only locals).
    """
    if request.method == 'POST':
        name = request.form['name']
        location = request.form['location']
        return render_template('requestDatabiscuit.html', name=name, location=location)
    return render_template('requestDatabiscuit.html')
"""
aronLim # Request Data
"""
# Session & url_for & flash
# NOTE(review): the secret key is hard-coded in source; it should be loaded
# from configuration/environment and never committed to version control.
app.secret_key = 'ultimate/123Aron/345Killed/456Hitman/987Foo/432By/543Eating/435Biscuit'
@app.route('/TheEngineer/storeSession')
def storeSessionTheEngineer():
    """Record the current UTC time in the session, flash, redirect home."""
    entered_at = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    session['timeEntered'] = entered_at
    flash('Data stored in session & you have been redirected to index page')
    return redirect(url_for('index'))
@app.route('/TheEngineer/checkSession')
def checkSessionTheEngineer():
    """Render the stored timestamp (KeyError -> HTTP 500 when absent)."""
    return render_template('checkSessionTheEngineer.html',
                           checkSession=session['timeEntered'])
@app.route('/TheEngineer/popSession')
def popSessionTheEngineer():
    """Drop the stored timestamp (no-op when absent), flash, go home."""
    session.pop('timeEntered', None)
    flash('Data removed from session & you have been redirected to index page')
    return redirect(url_for('index'))
"""
HitmanFoo # Session
"""
# biscuit # Session
# NOTE(review): re-assigns the identical hard-coded secret already set
# earlier in this module — redundant, and the same "keep secrets out of
# source" concern applies.
app.secret_key = 'ultimate/123Aron/345Killed/456Hitman/987Foo/432By/543Eating/435Biscuit'
@app.route('/biscuit/storeSession')
def storeSessionbiscuit():
    """Record the current UTC time in the session, flash, redirect home.

    Fixes two syntax errors in the original: a stray ':' after the
    decorator call and a ';' instead of ':' ending the def line — either
    one made the whole module fail to import.
    """
    session['timeEntered'] = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    flash('Data stored in session & you have been redirected to index page')
    return redirect(url_for('index'))
@app.route('/biscuit/checkSession')
def checkSessionbiscuit():
    """Render the stored timestamp (KeyError -> HTTP 500 when absent)."""
    return render_template('checkSessionbiscuit.html',
                           checkSession=session['timeEntered'])
@app.route('/biscuit/popSession')
def popSessionbiscuit():
    """Drop the stored timestamp (no-op when absent), flash, go home."""
    session.pop('timeEntered', None)
    flash('Data removed from session & you have been redirected to index page')
    return redirect(url_for('index'))
"""
aronLim # Session
"""
if __name__ == '__main__':
    # NOTE(review): debug mode must never be enabled in production — it
    # exposes the interactive debugger.
    app.debug = True
    # Port comes from the environment (PORT) with a local default of 5000.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='127.0.0.1', port=port)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
# NOTE(review): the two triple-quoted blocks below are dead code kept as
# string literals (earlier plotting experiments); consider deleting them.
"""dir = "cleaned_df/"
file_list = os.listdir(dir)
for i,element in enumerate(file_list) :
    print(i)
    df=pd.read_csv(dir+str(element))
    plt.plot(df["engergy"])
    plt.title(str(element))
    plt.savefig("./img/"+element.replace(".csv",".png"))
    plt.close()
"""
"""for i in range(1,26):
    series = pd.read_csv("centroid_class_"+str(i)+".csv")
    plt.plot(series)
    plt.show()"""
# res.csv column "x": presumably the predicted cluster (1-based) for each
# sample index — TODO confirm against the producer of res.csv.
list_R = np.array(pd.read_csv("res.csv")["x"])
elements = os.listdir("../classes")
Mat = {}
# Map each ground-truth class directory to the sample numbers of the .png
# files it contains ("../classes/class_<k>/<num>.png").
for element in elements :
    new_mat = []
    dirs = os.listdir("../classes/"+element)
    for dir in dirs :
        if dir[-3 :] == "png" :
            new_mat.append(int(dir.replace(".png","")))
    Mat[int(element.replace("class_",""))] = new_mat
# Confusion matrix: row = true class - 1, column = predicted class - 1.
matrix= np.zeros((18,18))
for key, value in Mat.items():
    for num in value :
        print(key,num,list_R[num-1]-1)
        matrix[key-1,list_R[num-1]-1] = matrix[key-1,list_R[num-1]-1] + 1
print(matrix)
import itertools
import sys
import munkres
import numpy as np
import seaborn as sn
def permute_cols(a, inds):
    """Return a copy of *a* with its columns permuted.

    *inds* is a list of (row, col) index pairs; each pair marks a 1 in the
    permutation matrix that right-multiplies *a*.
    """
    perm = np.zeros_like(a)
    for pair in inds:
        perm[pair] = 1
    return np.dot(a, perm)
def maximize_trace(a):
    """
    Maximize trace by minimizing the Frobenius norm of
    `np.dot(p, a)-np.eye(a.shape[0])`, where `a` is square and
    `p` is a permutation matrix. Returns permuted version of `a` with
    maximal trace.
    """
    assert a.shape[0] == a.shape[1]
    d = np.zeros_like(a)
    n = a.shape[0]
    b = np.eye(n, dtype=int)
    # Cost matrix: squared distance between each row of a and each basis
    # row, so the optimal assignment aligns mass onto the diagonal.
    for i, j in itertools.product(range(n), range(n)):
        d[j, i] = sum((b[j, :]-a[i, :])**2)
    # Hungarian algorithm gives the minimum-cost (row, col) assignment,
    # which is passed straight to permute_cols as the permutation.
    m = munkres.Munkres()
    inds = m.compute(d)
    return permute_cols(a, inds)
# Reorder the confusion matrix so matched clusters sit on the diagonal,
# then show it as an annotated heatmap (blocks until the window closes).
new_m = maximize_trace(matrix)
print(new_m)
plt.figure(figsize = (10,7))
sn.heatmap(new_m, annot=True)
plt.show()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import numpy as np
import torch
import rasterio
import rasterio.mask
from rasterio.windows import Window
from rasterio.plot import show
# Inference
def predictor_arg(tensor,model):
    """Run *model* on *tensor* and take the argmax over dimension 1.

    Args:
        tensor: input batch of size N,C,W,H
        model: callable model
    Returns:
        tensor of per-sample argmax indices along the channel axis
    """
    logits = model(tensor)
    return logits.argmax(1)
def inference_roi(path_image,roi_size,predictor,output_dir,model):
    """Run tiled inference over a raster image and save the stitched mask.

    path_image : path for the input image (opened with rasterio)
    roi_size : inference tile size (roi_size[0]=cols/width, roi_size[1]=rows/height)
    predictor : callable(tensor, model) -> predicted class map per tile
    output_dir : output directory for the prediction GeoTIFF
    model : Model passed through to `predictor`
    Displays each tile and its prediction, then writes the assembled
    single-band uint8 mask with LZW compression.
    """
    # open image with rasterio
    img = rasterio.open(os.path.join(path_image))
    height = img.height
    width = img.width
    nb_col = width // roi_size[0]   # number of whole tiles across
    nb_row = height // roi_size[1]  # number of whole tiles down
    base=os.path.basename(path_image)
    base_without_ex = os.path.splitext(base)[0]
    profile = img.profile.copy()
    # And then change the band count to 1, set the
    # dtype to uint8, and specify LZW compression.
    profile.update(
        dtype=rasterio.uint8,
        count=1,
        driver = "GTiff",
        height = height,
        width = width,
        compress='lzw')
    img_transform = img.transform
    # Initialisation
    # NOTE(review): mask is allocated as (1, width, height) but the writes
    # below index it as [:, row-range, col-range] (height-major); this only
    # lines up when the raster is square -- confirm for non-square inputs.
    mask = np.zeros((1,width, height))
    #print('mask shape',np.shape(mask))
    shp_width = np.shape(mask)[1]
    shp_height = np.shape(mask)[2]
    with torch.no_grad():
        for col in range(0,nb_col):
            for row in range(0,nb_row):
                # Window(col_off, row_off, width, height) -- read one tile
                tile = img.read(window=Window(col*roi_size[0],row*roi_size[1],roi_size[0],roi_size[1]))
                tile_tensor = torch.from_numpy(tile).float()
                pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                pred_cm = pred.cpu().detach().numpy()
                # Display the RGB tile and the predicted mask
                show(tile)
                show(pred_cm)
                mask[:,row*roi_size[1]:(row+1)*roi_size[1],col*roi_size[0]:(col+1)*roi_size[0]] = pred_cm.astype(np.uint8)
                # Single case: last tile on the diagonal (bottom-right corner)
                if (col == nb_col -1) and (row == nb_row -1):
                    # NOTE(review): this branch and the two below mix
                    # shp_width/shp_height with roi_size[0]/roi_size[1]
                    # inconsistently -- verify edge tiles on non-square rasters.
                    tile = img.read(window=Window( shp_width - roi_size[0], shp_height - roi_size[1],roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    mask[:,shp_height - roi_size[0] :,shp_width - roi_size[1]:] = pred_cm.astype(np.uint8)
                # Last row -> re-read a window flush with the bottom edge (overlap)
                if row == nb_row -1:
                    # window argument : taille height, width image
                    tile = img.read(window=Window(col*roi_size[0],shp_height - roi_size[0],roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    mask[:,shp_height - roi_size[1]:,col*roi_size[0]:(col+1)*roi_size[0]] = pred_cm.astype(np.uint8)
                # Last column -> re-read a window flush with the right edge (overlap)
                if col == nb_col -1:
                    # window argument : taille height, width image
                    tile = img.read(window=Window( shp_width - roi_size[0], row*roi_size[1] ,roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    mask[:,row*roi_size[1]:(row+1)*roi_size[1],shp_height - roi_size[0]:] = pred_cm.astype(np.uint8)
    # Profile update (transformation)
    # NOTE(review): rasterio.transform.from_origin takes pixel SIZES as its
    # 3rd/4th arguments; passing the full extent (nb_col*roi_size[0]) looks
    # wrong -- confirm the georeferencing of the output.
    x,y = rasterio.transform.xy(img_transform, nb_col*roi_size[0],nb_row*roi_size[1])
    out_transform = rasterio.transform.from_origin(x,y,nb_col*roi_size[0],nb_row *roi_size[1])
    out_tile_name = os.path.join(output_dir,f'{base_without_ex}_{nb_col:02}_{nb_row:02}_predfinal.tif')
    profile.update(transform = out_transform)
    # Plot mask
    mask = mask.astype(np.uint8)
    show(mask)
    with rasterio.open(out_tile_name,"w",**profile) as dst :
        dst.write(mask)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 17:45:23 2017
@author: modellav
"""
#The goal of this project is to scrape data from Google Finance
#To determine the top gainers and top losers of the market, with corresponding % change
#IMPORT PACKAGES
import urllib.request as ul
from bs4 import BeautifulSoup
import re
import datetime
#OPEN URL
url = "http://www.google.com/finance"
url_response = ul.urlopen(url, timeout=5)
#CREATE SOUP AND FIND SECTOR TABLE
finance_soup = BeautifulSoup(url_response, "lxml")
sector_table = finance_soup.find('div', class_='id-secperf sfe-section-major')
#USE REG. EX. TO FIND OUT WHICH SECTOR MOVED THE MOST AND EXTRACT ITS NAME, THE PCT CHANGE AND LINK TO NEXT PAGE
# raw strings so regex backslashes are not mangled by Python string escaping
regex_change = re.compile(r'[+-](.\...)%')
regex_link = re.compile(r'href="(.+)">')
regex_name = re.compile('>(.+)<')
maxchange = 0
for row in sector_table.find_all('tr'):
    changerow = str(row.find('span', class_='chg'))
    changevalue = regex_change.findall(changerow)
    if changevalue:
        change = float(changevalue[0])
        if change > maxchange:
            maxchange = change
            biggest_mover = regex_name.findall(str(row.a))
            nextpage_link = regex_link.findall(str(row.a))
# NOTE(review): if no sector shows a positive change, biggest_mover and
# nextpage_link are never bound and the code below raises NameError.
#OPEN NEXT PAGE (SECTOR URL) AND EXTRACT TOP MOVERS TABLE
url2 = "http://www.google.com" + nextpage_link[0]
url_response2 = ul.urlopen(url2, timeout=5)
sector_soup = BeautifulSoup(url_response2, "lxml")
top_movers = sector_soup.find('table', class_="topmovers")
#SINCE THEY ARE ORDERED IT IS EASY TO FIND TOP GAINER AND TOP LOSER
mover_rows = top_movers.find_all('tr')
top_gainer = mover_rows[1]
top_loser = mover_rows[7]
#USE REGEX TO FIND TOP GAINER/LOSER NAMES AND CORRESPONDING PCT CHANGE
regex_change2 = re.compile(r'<span class="chg">\((.+\...%)\)')
regex_change3 = re.compile(r'<span class="chr">\(\-(.+\...%)\)')
topgainer_name = regex_name.findall(str(top_gainer.a))
toploser_name = regex_name.findall(str(top_loser.a))
topgainer_gain = regex_change2.findall(str(top_gainer))
toploser_loss = regex_change3.findall(str(top_loser))
#find today's date
today = datetime.date.today()
#PRINT FINAL RECAP STATEMENT
# BUG FIX: `today` is a datetime.date; concatenating it directly to str
# raised TypeError -- convert with str() first.
print('The sector that has moved the most today, ' + str(today) + " is " + biggest_mover[0] + ' +' + str(maxchange) + '%. ' + topgainer_name[0] + ' gained the most (' + topgainer_gain[0] + ') while ' + toploser_name[0] + ', the biggest loser, lost ' + toploser_loss[0] + '.')
|
'''
Created on Jul 10, 2013
@author: emma
'''
from UnitTesting.page_objects.base_page_object import base_page_object
from selenium.webdriver.common.action_chains import ActionChains
import time
class booksellers(base_page_object):
    """Page object for the Zola Books booksellers listing page."""

    def __init__(self, webd_wrap):
        base_page_object.__init__(self, webd_wrap)

    def get_page(self, category):
        """No navigation required; return this page object unchanged."""
        return self

    def confirm_page(self):
        ''' raises AssertionError if page is incorrect '''
        driver = self._webd_wrap._driver
        actual_url = driver.current_url
        actual_title = driver.title
        expected_url = self._webd_wrap._baseURL + '/people/booksellers'
        expected_title = 'Zola Books | ebook |'  # trailing "Booksellers" not expected
        on_page = (actual_url == expected_url) and (actual_title == expected_title)
        if not on_page:
            raise AssertionError("Not on the Booksellers list page.")

    def click_my_zola(self):
        """Open the logged-in user's personal toolbar link ("My Zola")."""
        self.confirm_page()
        time.sleep(2)
        toolbar = self._webd_wrap._driver.find_element_by_id('h-user-personalized-toolbar')
        toolbar.find_element_by_xpath('div/a').click()
    ########################################################################
    ########################################################################
    def click_first_bookseller(self):
        ''' clicks the first acp in the main list '''
        self.confirm_page()
        main_panel = self._webd_wrap._driver.find_element_by_class_name('l-main-primary')
        main_panel.find_element_by_xpath('div/section[1]/div/div/div[1]/h5/a').click()
|
#!/usr/bin/python
# Iskandar Setiadi 13511073@std.stei.itb.ac.id
# Institut Teknologi Bandung (ITB) - Indonesia
# Final Project (c) 2015
# mongodb_testcase2.py
__author__ = 'freedomofkeima'
import sys
import time
from pymongo import MongoClient
def main(args):
    # Python 2 benchmark: measures MongoDB update/read/delete latency against
    # a remote server, subtracting an "empty timer" baseline.
    client = MongoClient('52.74.132.58', 27017) # Nearest Server location
    db = client['tests_database']
    tests = db['tests_collection']
    max_iteration = 2000
    key_size = 10
    value_size = 100 * 1024  # 100 KiB payload per document
    print '** Starting benchmarking **'
    print '** Length key + value: %d byte(s)**' % (key_size + value_size)
    print '--EMPTY TIMER--'
    # Baseline: loop that only samples the clock, to estimate timing overhead.
    tx = 0 # time counter
    counter = 0
    while counter < max_iteration:
        t0 = time.time()
        tx = tx + (time.time() - t0)
        counter = counter + 1
    print 'Number of iteration: %d' % (max_iteration)
    empty_timer = tx / max_iteration * 1000000  # microseconds per iteration
    print 'Average elapsed time: %.10f us' % (empty_timer)
    # NOTE(review): duplicated assignment `item_id = item_id =` is harmless
    # but almost certainly a typo.
    item_id = item_id = tests.distinct('_id')
    print '--UPDATE--'
    tx = 0 # time counter
    # NOTE(review): counter starts at 1 and is incremented once per item, so
    # the averages below divide by N+1 instead of N (slight underestimate).
    counter = 1
    for item in item_id:
        value = "a" * value_size
        t0 = time.time()
        tests.update_one({"_id": item}, {'$set': {'mongodbkey' : value}})
        tx = tx + (time.time() - t0)
        counter = counter + 1
    print 'Number of iteration: %d' % (counter)
    print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
    print '--READ--'
    tx = 0 # time counter
    counter = 1
    for item in item_id:
        t0 = time.time()
        res = tests.find_one({"_id": item})
        tx = tx + (time.time() - t0)
        counter = counter + 1
    print 'Number of iteration: %d' % (counter)
    print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
    print '--DELETE--'
    tx = 0 # time counter
    counter = 1
    for item in item_id:
        t0 = time.time()
        tests.delete_one({"_id": item})
        tx = tx + (time.time() - t0)
        counter = counter + 1
    print 'Number of iteration: %d' % (counter)
    print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
    client.close()
if __name__ == '__main__':
    main(sys.argv[1:])
|
# Logarithm demo: plot the natural log and log base 0.5 over (0, 10).
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.01,10,0.01)  # start just above 0: log(0) is undefined
y1 = np.log(x)  # np.log uses base e (natural logarithm)
y2 = np.log(x)/np.log(0.5)  # change-of-base formula: log base 0.5
plt.plot(x,y1,c='red')
plt.plot(x,y2,c='yellow')
plt.show()
|
class Call(object):
    # A single phone-call record (Python 2).  The constructor prints every
    # field immediately via display_all() as a side effect.
    def __init__(self,unique_id,class_name,caller_phone_num,timeofcall,reason_for_call):
        self.unique_id = unique_id
        self.class_name = class_name
        self.caller_phone_num = caller_phone_num
        self.timeofcall = timeofcall
        self.reason_for_call = reason_for_call
        # Side effect: each new Call prints itself on construction.
        self.display_all()
    def display_all(self):
        # Print each field on its own line (Python 2 print statements).
        print self.unique_id
        print self.class_name
        print self.caller_phone_num
        print self.timeofcall
        print self.reason_for_call
    def __str__(self):
        # NOTE(review): ' '.join(self.unique_id) space-separates the CHARACTERS
        # of the id string ("12" -> "1 2"); plain self.unique_id was probably
        # intended -- confirm before changing.
        return "unique_id ( {} ) class_name ( {} ) caller_phone_num ( {} ) timeofcall ( {} ) reason_for_call ( {} ) ".format(' '.join(self.unique_id), self.class_name, self.caller_phone_num, self.timeofcall, self.reason_for_call)
# Sample calls; each constructor prints its fields via display_all().
call1 = Call("12","mat","408-245-1345","3:45","lsfksfal")
call2 = Call("53","ho","408-255-13345","6:45","Jav")
call3 = Call("64","ajot","408-255-1245","1:15","lav")
call4 = Call("42","matt","508-456-1345","10:45","lsfksfal")
call5 = Call("54","hoht","708-3434-1745","6:45","Jasafv")
call6 = Call("84","ahho","408-255-7435","8:45","ladv")
class CallCenter(object):
    # FIFO queue of Call objects (Python 2).
    # NOTE(review): queue_size is initialised to 0 but never updated by
    # add/remove -- it does not reflect len(self.calls).
    def __init__(self):
        self.calls = []
        self.queue_size = 0
    def add(self, newcall):
        # Append a call to the back of the queue; returns self for chaining.
        self.calls.append(newcall)
        # print self.calls
        return self
    def remove(self):
        # Pop the oldest call (front of the queue), if any; returns self.
        if len(self.calls) > 0:
            self.calls.pop(0)
        return self
    def ninjalevel(self, phonenum):
        # Remove the first call whose caller_phone_num matches; returns self.
        for idx, call in enumerate(self.calls):
            if call.caller_phone_num == phonenum:
                self.calls.pop(idx)
        return self
    def hackerlever(self):
        # Sort calls by timeofcall.
        # NOTE(review): timeofcall is a string, so the sort is lexicographic
        # ("10:45" sorts before "3:45") -- confirm this is intended.
        def keyfuc(call):
            return call.timeofcall
        self.calls = sorted(self.calls , key=keyfuc)
    def __str__(self):
        callstring = ''
        for c in self.calls:
            callstring += str(c) + "\n"
        return "calls ( {} ) queue_size ( {} )".format(callstring, self.queue_size)
# Demo: enqueue six calls, drop the oldest, remove one by phone number,
# sort by time-of-call string, then print the queue.
callcenter = CallCenter()
callcenter.add(call1)
callcenter.add(call2)
callcenter.add(call3)
callcenter.add(call4)
callcenter.add(call5)
callcenter.add(call6)
callcenter.remove()
callcenter.ninjalevel("408-255-13345")
callcenter.hackerlever()
print callcenter
|
from django.http import JsonResponse
from index.models import Products
from .models import Message
from user.models import UserProfile
import json
from user.logging_check import logging_check
# Create your views here.
@logging_check('POST')
def message(request):
    """Product message-board endpoint.

    GET  : return product info plus its top-level messages with nested replies.
    POST : create a message or a reply (POST is guarded by logging_check).
    """
    if request.method == 'GET':
        goods_id = request.GET.get('id')
        result = {'code':200}
        try:
            goods = Products.objects.get(id = goods_id)
        except Exception as e:
            result = {'code':20101,'error':'没有找到此商品'}
            return JsonResponse(result)
        # Serialize the product fields for the response payload.
        goods_dict = {}
        goods_dict['id'] = goods.id
        goods_dict['title'] = goods.title
        goods_dict['market_price'] = goods.market_price
        goods_dict['supplier'] = goods.supplier
        goods_dict['repertory'] = goods.repertory
        goods_dict['sell_number'] = goods.sell_number
        goods_dict['info'] = goods.info
        goods_dict['img'] = str(goods.img)
        result['goods'] = goods_dict
        all_messages = Message.objects.filter(topic_id=goods_id).order_by('-created_time')
        m_count = 0
        # container for top-level messages
        msg_list = []
        # container for replies, keyed by parent message
        reply_home = {}
        # NOTE(review): the loop variable shadows the view function name
        # `message` inside this scope.
        for message in all_messages:
            m_count += 1
            if message.parent_message:
                # a reply: group it under its parent message
                reply_home.setdefault(message.parent_message, [])
                reply_home[message.parent_message].append({'msg_id': message.id, 'content': message.content,
                                        'publisher': message.publisher.username,
                                        'publisher_avatar': str(message.publisher.avatar),
                                        'created_time': message.created_time.strftime('%Y-%m-%d %H:%M:%S')})
            else:
                # a top-level message
                dic = {}
                dic['id'] = message.id
                dic['content'] = message.content
                dic['publisher'] = message.publisher.username
                dic['publisher_avatar'] = str(message.publisher.avatar)
                dic['reply'] = []
                dic['created_time'] = message.created_time.strftime('%Y-%m-%d %H:%M:%S')
                msg_list.append(dic)
        # attach replies to their parent messages
        for m in msg_list:
            if m['id'] in reply_home:
                m['reply'] = reply_home[m['id']]
        result['messages'] = msg_list
        result['messages_count'] = m_count
        return JsonResponse(result)
    if request.method == 'POST':
        # create a comment or a reply
        json_str = request.body
        json_obj = json.loads(json_str)
        content = json_obj.get('content')
        username = json_obj.get('user')
        # NOTE(review): `id` shadows the builtin id().
        id = json_obj.get('id')
        parent_id = json_obj.get('parent_id', 0)
        # TODO parameter validation
        # check the product exists
        try:
            goods = Products.objects.get(id=id)
        except Exception as e:
            result = {'code': 20102, 'error': '没有此商品 !'}
            return JsonResponse(result)
        try:
            user = UserProfile.objects.get(username=username)
        except Exception as e:
            result = {'code': 20103, 'error': '没有此用户 !'}
            return JsonResponse(result)
        if request.user != user:
            result = {'code': 20104, 'error': '用户未登陆 !'}
            return JsonResponse(result)
        # First approach: assign the related attribute directly with an object.
        # NOTE(review): parent_message receives a raw id (default 0) rather
        # than a Message instance / parent_message_id -- confirm the model
        # field accepts this for top-level messages.
        Message.objects.create(content=content, parent_message=parent_id, publisher=user, topic=goods)
        return JsonResponse({'code': 200})
|
下雨天 hello
下午学习git
秦岭一日游
wo men dou yi yang
|
# Simple BMI calculator: prompt for weight and height, print BMI to 2 dp.
kilograms = float(input('Enter your weight in kgs: '))
metres = float(input('Enter your height in metres: '))
body_mass_index = kilograms / metres ** 2
print('Your BMI is {:.2f}'.format(body_mass_index))
|
#!/usr/bin/env python3
from termcolor import cprint
from nubia import command, argument, context
@command(name_or_function="gcloud")
class GCLOUD:
    """
    Google Cloud Platform commands set. This is still not implemented.
    """
    def __init__(self) -> None:
        # Nothing to initialise yet; the class only exists to register
        # the "gcloud" command group with nubia.
        pass
    @command
    def info(self):
        """
        Print a placeholder message; no GCP functionality exists yet.
        """
        cprint("This module is not implemented")
|
import os
import re
class LanguageModelContent:
    """One n-gram entry: the token string(s) and its raw count.

    Both fields are kept as strings, exactly as read from the input file;
    str() renders the entry back in tab-separated form.
    """

    def __init__(self, words, count):
        self.words = words
        self.count = count

    def __str__(self):
        return '\t'.join((self.words, self.count))
if __name__ == "__main__":
    dir_list = sorted(os.listdir('/Users/geekye/Documents/Dataset/LM/UniBiGram'))
    # Keep only ngrams-00001 ... ngrams-00029 (per the original note, files
    # 00030-00036 contain no invalid data and later files are out of scope).
    # BUG FIX: the upper bound used to be 'ngrams-0029-of-00394' (missing a
    # zero); lexicographically every 'ngrams-00xxx' filename compares below
    # that string, so the `<=` test accepted ALL files instead of stopping
    # at 00029.
    filtered_list = [dir for dir in dir_list if dir >= 'ngrams-00001-of-00394' and dir <= 'ngrams-00029-of-00394']
    for file_name in filtered_list:
        grams_2 = []
        with open('/Users/geekye/Documents/Dataset/LM/UniBiGram/'+ file_name) as file:
            for line in file:
                # Keep only "<hanzi> <hanzi> <count>" bigram lines.
                if re.match('^[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}\d{1,}', line):
                    segments = line.split('\t')
                    # NOTE(review): if the two words are ALSO tab-separated,
                    # segments[1] is the second word, not the count -- confirm
                    # the input field delimiters.
                    words = segments[0]
                    count = segments[1]
                    model = LanguageModelContent(words, count)
                    grams_2.append(model)
        if len(grams_2) == 0:
            continue
        # NOTE(review): 'gram2' has no trailing '/', so output files are named
        # ".../LM/gram2<file_name>" -- confirm whether a 'gram2/' directory
        # was intended.
        with open('/Users/geekye/Documents/Dataset/LM/gram2'+ file_name, 'a') as file:
            print(file_name+'has been started!')
            for model in grams_2:
                file.write(str(model) + '\n')
            print(file_name+'has been processed!')
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# File Name: Action.py
# By: Daniel Lamothe
#
# Purpose: A simple object representing an Action a Creature can take. Used to house Action information.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Action:
    """Simple value object holding one Action a Creature can take.

    Attributes:
        name:   short action name.
        desc:   human-readable description of the action.
        attack: attack expression/bonus.
        hit:    effect on a hit (e.g. damage expression).
    """
    # Class-level defaults kept for backward compatibility with any code
    # reading these attributes off the class itself.
    name = ''
    desc = ''
    attack = ''
    hit = ''

    # BUG FIX: the class previously defined two __init__ methods; Python only
    # keeps the last definition, so the documented no-argument "default
    # constructor" actually raised TypeError.  A single constructor with
    # default values supports both call styles.
    def __init__(self, name='', desc='', attack='', hit=''):
        self.name = name
        self.desc = desc
        self.attack = attack
        self.hit = hit
|
"""Define test cases for KFLR."""
from test.extensions.secondorder.secondorder_settings import GROUP_CONV_SETTINGS

# Settings shared with other second-order extensions that are expected to be
# unsupported here (group convolutions).
SHARED_NOT_SUPPORTED_SETTINGS = GROUP_CONV_SETTINGS
# KFLR-specific unsupported settings (currently none).
LOCAL_NOT_SUPPORTED_SETTINGS = []
# Full list of settings expected to be unsupported by KFLR.
NOT_SUPPORTED_SETTINGS = SHARED_NOT_SUPPORTED_SETTINGS + LOCAL_NOT_SUPPORTED_SETTINGS
|
import os
import importlib
from gmc.conf import global_settings
ENVIRONMENT_VARIABLE = "GMC_SETTINGS_MODULE"
class Settings:
    """
    Module to load settings to configure gmc.

    Settings are loaded lazily on first attribute access: defaults come from
    gmc.conf.global_settings, optionally overridden by the module named in
    the GMC_SETTINGS_MODULE environment variable.  Only UPPERCASE names are
    treated as settings.
    """
    def __init__(self, *args, **kwargs):
        # None until load_settings() runs; also used to detect reloads.
        self.settings = None
        self.settings_module = None
    def __getattr__(self, name):
        """
        Make settings available as the attributes.
        Like settings.DATASET_DIR
        NOTE(review): unknown names raise KeyError rather than the
        AttributeError expected by hasattr()/getattr(default) -- confirm
        callers rely on this before changing.
        """
        self.load_settings()
        return self.settings[name]
    def __iter__(self):
        # Iterate over setting names (dict iteration), loading lazily.
        self.load_settings()
        return iter(self.settings)
    def load_settings(self):
        # Reload only when never loaded or when GMC_SETTINGS_MODULE changed.
        settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
        if self.settings is not None and settings_module == self.settings_module:
            return
        self.settings = {}
        # Defaults first: every UPPERCASE name from global_settings.
        for setting in dir(global_settings):
            if setting.isupper():
                self.settings[setting] = getattr(global_settings, setting)
        self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)
        # Then user overrides from the module named by the env var, if any.
        if self.settings_module is not None:
            mod = importlib.import_module(self.settings_module)
            for setting in dir(mod):
                if setting.isupper():
                    self.settings[setting] = getattr(mod, setting)
    def modify(self, new_settings):
        # Override already-known settings in place; unknown keys are ignored.
        # NOTE(review): calling modify() before any attribute access crashes
        # (`self.settings` is still None) -- confirm callers always read a
        # setting first, or add a load_settings() call here.
        for name in new_settings:
            if name in self.settings:
                self.settings[name] = new_settings[name]
# Module-level singleton used by the rest of gmc.
settings = Settings()
|
import os
import PIL
from PIL import Image
from PIL import ImageEnhance
from tqdm import tqdm
# Batch contrast-enhancement sweep: for each factor th in 1..10, convert
# frames first_num..last_num-1 to greyscale, boost contrast by th, crop a
# fixed region, and save under enhance_test/<folder>_<th>/.
first_num = 82
last_num = 84
folder_name = "201013-vib"
for th in tqdm((1,2,3,4,5,6,7,8,9,10)):
    TH=str(th)
    # NOTE(review): os.mkdir raises FileExistsError on reruns -- consider
    # os.makedirs(..., exist_ok=True).
    os.mkdir("enhance_test/"+folder_name+"_"+TH)
    for num in tqdm(range (first_num,last_num)):
        im_name=str(num)
        fileA_name=im_name.zfill(6)+".jpg"
        # NOTE(review): enhance_im_name is identical to fileA_name and unused.
        enhance_im_name=im_name.zfill(6)+".jpg"
        im = Image.open(folder_name+"/"+fileA_name)
        im = im.convert('L')  # greyscale
        im = ImageEnhance.Contrast(im)
        im = im.enhance(th)  # contrast factor th (1 = unchanged)
        im = im.crop((410,977,1069,1378))  # fixed region of interest
        im.save("enhance_test/"+folder_name+"_"+TH+"/"+fileA_name)
|
import unittest
import testutil
import hdbfs
class HiguQueryCases( testutil.TestCase ):
    """Query tests for the hdbfs database.

    setUp registers nine colour fixture files, tags them (warm/cool,
    rgb/cmyk, paint) and sets a numeric 'test' parameter on four of them,
    then re-opens the database read-only for the actual assertions.
    Fixture names (self.red, ...) and helpers (_load_data, init_env,
    uninit_env) come from testutil.TestCase.
    """
    def setUp( self ):
        """Populate a fresh database with colour objects, tags and params."""
        self.init_env()
        h = hdbfs.Database()
        h.enable_write_access()
        # Register the nine colour fixture files.
        red_obj = h.register_file( self._load_data( self.red ) )
        yellow_obj = h.register_file( self._load_data( self.yellow ) )
        green_obj = h.register_file( self._load_data( self.green ) )
        cyan_obj = h.register_file( self._load_data( self.cyan ) )
        blue_obj = h.register_file( self._load_data( self.blue ) )
        magenta_obj = h.register_file( self._load_data( self.magenta ) )
        white_obj = h.register_file( self._load_data( self.white ) )
        grey_obj = h.register_file( self._load_data( self.grey ) )
        black_obj = h.register_file( self._load_data( self.black ) )
        # Numeric 'test' parameter used by test_query_by_parameters.
        red_obj['test'] = 1
        yellow_obj['test'] = 2
        green_obj['test'] = 3
        blue_obj['test'] = 4
        warm_tag = h.make_tag( 'warm' )
        cool_tag = h.make_tag( 'cool' )
        rgb_tag = h.make_tag( 'rgb' )
        cmyk_tag = h.make_tag( 'cmyk' )
        paint_tag = h.make_tag( 'paint' )
        # Tag assignments exercised by the query tests below.
        red_obj.assign( warm_tag )
        yellow_obj.assign( warm_tag )
        magenta_obj.assign( warm_tag )
        green_obj.assign( cool_tag )
        cyan_obj.assign( cool_tag )
        blue_obj.assign( cool_tag )
        red_obj.assign( rgb_tag )
        green_obj.assign( rgb_tag )
        blue_obj.assign( rgb_tag )
        cyan_obj.assign( cmyk_tag )
        magenta_obj.assign( cmyk_tag )
        yellow_obj.assign( cmyk_tag )
        black_obj.assign( cmyk_tag )
        red_obj.assign( paint_tag )
        yellow_obj.assign( paint_tag )
        blue_obj.assign( paint_tag )
        # Re-open the database (read-only handle) and re-fetch every object
        # and tag through it, so the tests query a fresh session.
        self.h = hdbfs.Database()
        self.red_obj = self.h.get_object_by_id( red_obj.get_id() )
        self.yellow_obj = self.h.get_object_by_id( yellow_obj.get_id() )
        self.green_obj = self.h.get_object_by_id( green_obj.get_id() )
        self.cyan_obj = self.h.get_object_by_id( cyan_obj.get_id() )
        self.blue_obj = self.h.get_object_by_id( blue_obj.get_id() )
        self.magenta_obj = self.h.get_object_by_id( magenta_obj.get_id() )
        self.white_obj = self.h.get_object_by_id( white_obj.get_id() )
        self.grey_obj = self.h.get_object_by_id( grey_obj.get_id() )
        self.black_obj = self.h.get_object_by_id( black_obj.get_id() )
        self.warm_tag = self.h.get_object_by_id( warm_tag.get_id() )
        self.cool_tag = self.h.get_object_by_id( cool_tag.get_id() )
        self.rgb_tag = self.h.get_object_by_id( rgb_tag.get_id() )
        self.cmyk_tag = self.h.get_object_by_id( cmyk_tag.get_id() )
        self.paint_tag = self.h.get_object_by_id( paint_tag.get_id() )
    def tearDown( self ):
        """Tear down the temporary test environment."""
        self.uninit_env()
    def test_query_all( self ):
        """all_albums_or_free_files() returns every registered object."""
        rs = [ r for r in self.h.all_albums_or_free_files() ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 9, 'Result size mismatch' )
    def test_query_unowned( self ):
        """unowned_files() returns only the untagged objects."""
        rs = [ r for r in self.h.unowned_files() ]
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_require( self ):
        """Two require-constraints intersect (warm AND paint)."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_add( self ):
        """Or-constraints union (warm OR paint)."""
        query = hdbfs.query.Query()
        query.add_or_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( len( rs ) == 4, 'Result size mismatch' )
    def test_query_sub( self ):
        """Not-constraints exclude (NOT warm AND NOT paint)."""
        query = hdbfs.query.Query()
        query.add_not_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 5, 'Result size mismatch' )
    def test_query_add_sub( self ):
        """(rgb OR cmyk) AND NOT cool."""
        query = hdbfs.query.Query()
        query.add_or_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cmyk_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 4, 'Result size mismatch' )
    def test_query_require_add( self ):
        """(warm AND paint) OR cool."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( len( rs ) == 5, 'Result size mismatch' )
    def test_query_require_add_sub( self ):
        """((warm AND paint) OR cool) AND NOT cmyk."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.cmyk_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_order_add( self ):
        """Ordering by 'add' returns objects in registration order."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.set_order( 'add' )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj == rs[0], 'Red not in pos 0' )
        self.assertTrue( self.green_obj == rs[1], 'Green not in pos 1' )
        self.assertTrue( self.blue_obj == rs[2], 'Blue not in pos 2' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_order_radd( self ):
        """Ordering by 'add' reversed returns registration order backwards."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.set_order( 'add', True )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj == rs[2], 'Red not in pos 2' )
        self.assertTrue( self.green_obj == rs[1], 'Green not in pos 1' )
        self.assertTrue( self.blue_obj == rs[0], 'Blue not in pos 0' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_by_name( self ):
        """StringConstraint with a full fixture name matches exactly one."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( self.red ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( len( rs ) == 1, 'Result size mismatch' )
    def test_query_by_name_subset( self ):
        """StringConstraint matches substrings of object names."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( 'e_sq.' ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_by_name_wildcard( self ):
        """StringConstraint supports '*' wildcards."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( 'gr*sq' ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_by_parameters( self ):
        """ParameterConstraint supports numeric range queries on 'test'."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.ParameterConstraint( 'test', '>=', 2 ) )
        query.add_require_constraint( hdbfs.query.ParameterConstraint( 'test', '<=', 3 ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
if( __name__ == '__main__' ):
    unittest.main()
|
# -*- coding: utf-8 -*-
import scrapy
import time
import labsql
import copy
class PanyuspiderSpider(scrapy.Spider):
    """Scrapes per-station rainfall readings from qx.panyu.gov.cn and stores
    them in four SQL Server tables (1h / 3h / daily-am / daily-pm)."""
    name = 'panyuSpider'
    allowed_domains = ['qx.panyu.gov.cn']
    start_urls = ['http://qx.panyu.gov.cn/pyinterface/wap/sk_zd.jsp']
    # create sql server session
    conn = labsql.LabSQL('172.168.1.36', 'panyu', 'sa', 'scucc')
    # Year is not part of the scraped date string, so capture it at start-up.
    year = time.localtime(time.time()).tm_year
    # Per-station base record: {row-index: [location, date, time]}.
    info_dict = {}

    def parse(self, response):
        """Collect station/date/time columns, then persist each rainfall
        series.  Any failure is appended to error.txt instead of aborting."""
        try:
            # Base columns shared by every reading: station name, then the
            # publication date (year prepended) and time.
            for id, get_location in enumerate(response.css('.station-val::text').extract()):
                self.info_dict[id] = [get_location.strip()]
            for id, date_time in enumerate(response.css('.time-val::text').extract()):
                date, pub_time = date_time.strip().split(' ')
                self.info_dict[id].append('%s-%s' % (self.year, date))
                self.info_dict[id].append(pub_time)
            # The four rainfall series differ only in CSS class, target table
            # and column name -- one helper handles all of them.
            self._store(response, '.hourrf-val::text', 'rf1', 'rainfall_of_one_hour')
            self._store(response, '.rf3-val::text', 'rf3', 'rainfall_of_three_hour')
            self._store(response, '.ryl-val::text', 'daily_am', 'rainfall_of_daily_am')
            self._store(response, '.rf20-val::text', 'daily_pm', 'rainfall_of_daily_pm')
        except Exception as e:
            # Was a bare `except:` that silently swallowed everything
            # (including KeyboardInterrupt/SystemExit) and logged no detail.
            with open('error.txt', 'a') as f:
                f.write(time.asctime(time.localtime(time.time())) + ' ' + repr(e) + '\n')

    def _store(self, response, selector, table, column):
        """Append each station's reading (numeric part before the 'mm' unit)
        to a copy of the base records and insert one row per station."""
        rows = copy.deepcopy(self.info_dict)
        for id, reading in enumerate(response.css(selector).extract()):
            rows[id].append(reading.split('m')[0])
        sql = ('insert into %s ([location]'
               ',[date]'
               ',[time]'
               ',[%s]'
               ') values(?,?,?,?)' % (table, column))
        for value in rows.values():
            self.conn.insert(sql, value)
|
class Section:
    """Plain data holder for one course section, optionally loaded from a
    JSON-style dict with keys matching the attribute names."""

    # Class-level defaults mirror the instance attributes.
    section_number = None
    type = None
    days_of_the_week = None
    start_time = None
    end_time = None

    def __init__(self, json_dict=None):
        # With no dict every field stays None; otherwise each key must be
        # present (a missing key raises KeyError, as before).
        for field in ('section_number', 'type', 'days_of_the_week',
                      'start_time', 'end_time'):
            setattr(self, field, None if json_dict is None else json_dict[field])

    def section_to_string(self):
        """Render the section as a human-readable multi-line string."""
        lines = [
            "SectionNumber: " + self.var_to_string(self.section_number),
            "Type: " + self.var_to_string(self.type),
            "Days of the week: " + self.var_to_string(self.days_of_the_week),
            "Start/End: " + self.var_to_string(self.start_time)
            + " to " + self.var_to_string(self.end_time),
        ]
        return "".join(line + "\n" for line in lines)

    def var_to_string(self, variable):
        """Render a field value, substituting "NONE" for missing values."""
        return "NONE" if variable is None else str(variable)

    def reprJSON(self):
        """Return a JSON-serializable dict of all section fields."""
        return dict(section_number=self.section_number, type=self.type,
                    days_of_the_week=self.days_of_the_week,
                    start_time=self.start_time, end_time=self.end_time)
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'downloaderFNzIGb.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Main(object):
    """Auto-generated UI definition for the downloader main window.

    NOTE: generated by the Qt UI compiler from 'downloaderFNzIGb.ui' (see the
    file header); regenerate from the .ui file instead of hand-editing.
    """

    def setupUi(self, Main):
        """Build the widget tree and static properties for `Main`."""
        # Window identity and initial size.
        if not Main.objectName():
            Main.setObjectName(u"Main")
        Main.setEnabled(True)
        Main.resize(640, 423)
        # Central widget with a single zero-spacing vertical layout.
        self.centralwidget = QWidget(Main)
        self.centralwidget.setObjectName(u"centralwidget")
        self.verticalLayout = QVBoxLayout(self.centralwidget)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.verticalLayout.setContentsMargins(10, 10, 10, 10)
        # Rounded dark frame that hosts all other widgets.
        self.drop = QFrame(self.centralwidget)
        self.drop.setObjectName(u"drop")
        self.drop.setStyleSheet(u"QFrame{\n"
" background-color: rgb(56,58,89);\n"
" color: rgb(220, 220, 220);\n"
" border-radius: 10px\n"
"}")
        self.drop.setFrameShape(QFrame.StyledPanel)
        self.drop.setFrameShadow(QFrame.Raised)
        # Large title label across the top of the frame.
        self.Downloader = QLabel(self.drop)
        self.Downloader.setObjectName(u"Downloader")
        self.Downloader.setGeometry(QRect(0, 0, 611, 101))
        font = QFont()
        font.setFamily(u"Segoe UI")
        font.setPointSize(40)
        self.Downloader.setFont(font)
        self.Downloader.setStyleSheet(u"color: rgb(254,121,199);")
        self.Downloader.setAlignment(Qt.AlignCenter)
        # Download progress bar with a gradient chunk style.
        self.progressBar = QProgressBar(self.drop)
        self.progressBar.setObjectName(u"progressBar")
        self.progressBar.setGeometry(QRect(10, 270, 601, 23))
        self.progressBar.setAutoFillBackground(False)
        self.progressBar.setStyleSheet(u"QProgressBar {\n"
" background-color: rgb(98,114,164);\n"
" color: rgb(200,200,200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
"}\n"
"QProgressBar::chunk{ \n"
"border-radius:10px;\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0.42, x2:1, y2:0.443182, stop:0 rgba(254, 121, 199, 255), stop:1 rgba(170, 85, 255, 255));\n"
"}")
        self.progressBar.setValue(0)
        # Status line below the progress bar (speed / ETA text).
        self.Info_label = QLabel(self.drop)
        self.Info_label.setObjectName(u"Info_label")
        self.Info_label.setGeometry(QRect(-10, 300, 631, 31))
        font1 = QFont()
        font1.setFamily(u"Segoe UI")
        font1.setPointSize(14)
        self.Info_label.setFont(font1)
        self.Info_label.setStyleSheet(u"color: rgb(98,114,250);")
        self.Info_label.setAlignment(Qt.AlignCenter)
        # List of files queued/downloaded.
        self.fielist = QListWidget(self.drop)
        self.fielist.setObjectName(u"fielist")
        self.fielist.setGeometry(QRect(20, 81, 571, 181))
        self.fielist.setStyleSheet(u"hover{\n"
"color: rgb(98,114,250);\n"
"}")
        # Flat close button in the top-right corner.
        self.exit_button = QPushButton(self.drop)
        self.exit_button.setObjectName(u"exit_button")
        self.exit_button.setGeometry(QRect(590, 0, 31, 31))
        font2 = QFont()
        font2.setFamily(u"Segoe UI")
        font2.setPointSize(18)
        font2.setKerning(True)
        self.exit_button.setFont(font2)
        self.exit_button.setAcceptDrops(False)
        self.exit_button.setAutoFillBackground(False)
        self.exit_button.setStyleSheet(u"")
        self.exit_button.setAutoDefault(False)
        self.exit_button.setFlat(True)
        # Small label in the top-left corner (text set at runtime; empty by default).
        self.ip_label = QLabel(self.drop)
        self.ip_label.setObjectName(u"ip_label")
        self.ip_label.setGeometry(QRect(10, 0, 47, 13))
        self.ip_label.setStyleSheet(u"color: rgb(98,114,250);")
        self.verticalLayout.addWidget(self.drop)
        Main.setCentralWidget(self.centralwidget)
        self.retranslateUi(Main)
        self.exit_button.setDefault(False)
        QMetaObject.connectSlotsByName(Main)
    # setupUi

    def retranslateUi(self, Main):
        """Set all user-visible strings (translation hook called by setupUi)."""
        Main.setWindowTitle(QCoreApplication.translate("Main", u"MainWindow", None))
        self.Downloader.setText(QCoreApplication.translate("Main", u"<html><head/><body><p>PyDownloader</p><p><br/></p></body></html>", None))
        self.Info_label.setText(QCoreApplication.translate("Main", u"<html><head/><body><p>NA/NA NA Mb/s ETA: NA TM: NA</p></body></html>", None))
        self.exit_button.setText(QCoreApplication.translate("Main", u"\u2715", None))
        self.ip_label.setText("")
    # retranslateUi
|
import board
import neopixel
import time

# Drive a 20-pixel NeoPixel strip attached to GPIO pin D18.
pixels = neopixel.NeoPixel(board.D18, 20)

# Light every pixel solid blue (R, G, B).
for index in range(20):
    pixels[index] = (0, 0, 255)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import functools
import itertools
import json
import logging
import os.path
from dataclasses import dataclass
from pathlib import PurePath
from typing import Any, Iterable, Iterator, NamedTuple, Sequence, Type, cast
from pants.base.deprecated import warn_or_error
from pants.base.specs import AncestorGlobSpec, RawSpecsWithoutFileOwners, RecursiveGlobSpec
from pants.build_graph.address import BuildFileAddressRequest, MaybeAddress, ResolveError
from pants.engine.addresses import (
Address,
Addresses,
AddressInput,
BuildFileAddress,
UnparsedAddressInputs,
)
from pants.engine.collection import Collection
from pants.engine.environment import ChosenLocalEnvironmentName, EnvironmentName
from pants.engine.fs import EMPTY_SNAPSHOT, GlobMatchErrorBehavior, PathGlobs, Paths, Snapshot
from pants.engine.internals import native_engine
from pants.engine.internals.mapper import AddressFamilies
from pants.engine.internals.native_engine import AddressParseException
from pants.engine.internals.parametrize import Parametrize, _TargetParametrization
from pants.engine.internals.parametrize import ( # noqa: F401
_TargetParametrizations as _TargetParametrizations,
)
from pants.engine.internals.parametrize import ( # noqa: F401
_TargetParametrizationsRequest as _TargetParametrizationsRequest,
)
from pants.engine.internals.target_adaptor import TargetAdaptor, TargetAdaptorRequest
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
AllTargets,
AllUnexpandedTargets,
CoarsenedTarget,
CoarsenedTargets,
CoarsenedTargetsRequest,
Dependencies,
DependenciesRequest,
DepsTraversalBehavior,
ExplicitlyProvidedDependencies,
ExplicitlyProvidedDependenciesRequest,
Field,
FieldDefaultFactoryRequest,
FieldDefaultFactoryResult,
FieldDefaults,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
FilteredTargets,
GeneratedSources,
GeneratedTargets,
GenerateSourcesRequest,
GenerateTargetsRequest,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
InvalidFieldException,
MultipleSourcesField,
OverridesField,
RegisteredTargetTypes,
SourcesField,
SourcesPaths,
SourcesPathsRequest,
SpecialCasedDependencies,
Target,
TargetFilesGenerator,
TargetFilesGeneratorSettings,
TargetFilesGeneratorSettingsRequest,
TargetGenerator,
Targets,
TargetTypesToGenerateTargetsRequests,
TransitiveTargets,
TransitiveTargetsRequest,
UnexpandedTargets,
UnrecognizedTargetTypeException,
ValidatedDependencies,
ValidateDependenciesRequest,
WrappedTarget,
WrappedTargetRequest,
_generate_file_level_targets,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.option.global_options import GlobalOptions, UnmatchedBuildFileGlobs
from pants.util.docutil import bin_name, doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.memo import memoized
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import bullet_list, pluralize, softwrap
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------------------------
# Address -> Target(s)
# -----------------------------------------------------------------------------------------------
@rule(_masked_types=[EnvironmentName])
async def resolve_unexpanded_targets(addresses: Addresses) -> UnexpandedTargets:
    """Resolve each given address to a target, without expanding target generators."""
    wrapped_targets = await MultiGet(
        Get(
            WrappedTarget,
            WrappedTargetRequest(
                a,
                # Idiomatic rules should not be manually constructing `Addresses`. Instead, they
                # should use `UnparsedAddressInputs` or `Specs` rules.
                #
                # It is technically more correct for us to require callers of
                # `Addresses -> UnexpandedTargets` to specify a `description_of_origin`. But in
                # practice, this dramatically increases boilerplate, and it should never be
                # necessary.
                #
                # Note that this contrasts with an individual `Address`, which often is unverified
                # because it can come from the rule `AddressInput -> Address`, which only verifies
                # that it has legal syntax and does not check the address exists.
                description_of_origin="<infallible>",
            ),
        )
        for a in addresses
    )
    return UnexpandedTargets(wrapped_target.target for wrapped_target in wrapped_targets)
@rule
def target_types_to_generate_targets_requests(
    union_membership: UnionMembership,
) -> TargetTypesToGenerateTargetsRequests:
    """Map each target-generator type to the `GenerateTargetsRequest` registered for it."""
    return TargetTypesToGenerateTargetsRequests(
        {
            request_cls.generate_from: request_cls  # type: ignore[misc]
            for request_cls in union_membership.get(GenerateTargetsRequest)
        }
    )
@memoized
def warn_deprecated_target_type(tgt_type: type[Target]) -> None:
    """Emit (at most once per type, via `@memoized`) a deprecation warning for a target alias."""
    removal_version = tgt_type.deprecated_alias_removal_version
    assert removal_version is not None
    entity = f"the target name {tgt_type.deprecated_alias}"
    hint = (
        f"Instead, use `{tgt_type.alias}`, which behaves the same. Run `{bin_name()} "
        "update-build-files` to automatically fix your BUILD files."
    )
    warn_or_error(removal_version=removal_version, entity=entity, hint=hint)
@memoized
def warn_deprecated_field_type(field_type: type[Field]) -> None:
    """Emit (at most once per type, via `@memoized`) a deprecation warning for a field alias."""
    removal_version = field_type.deprecated_alias_removal_version
    assert removal_version is not None
    entity = f"the field name {field_type.deprecated_alias}"
    hint = (
        f"Instead, use `{field_type.alias}`, which behaves the same. Run `{bin_name()} "
        "update-build-files` to automatically fix your BUILD files."
    )
    warn_or_error(removal_version=removal_version, entity=entity, hint=hint)
@dataclass(frozen=True)
class _AdaptorAndType:
    """A parsed BUILD-file target declaration paired with its registered target type."""

    adaptor: TargetAdaptor
    target_type: type[Target]
@dataclass(frozen=True)
class _RequestAdaptorAndType:
    """Request to look up the `TargetAdaptor` and registered target type for an address."""

    address: Address
    description_of_origin: str
@rule
async def _determine_target_adaptor_and_type(
    req: _RequestAdaptorAndType, registered_target_types: RegisteredTargetTypes
) -> _AdaptorAndType:
    """Parse the BUILD-file declaration at `req.address` and look up its target type.

    Raises `UnrecognizedTargetTypeException` if the declaration's alias is not a
    registered target type.
    """
    target_adaptor = await Get(
        TargetAdaptor,
        TargetAdaptorRequest(req.address, description_of_origin=req.description_of_origin),
    )
    target_type = registered_target_types.aliases_to_types.get(target_adaptor.type_alias, None)
    if target_type is None:
        raise UnrecognizedTargetTypeException(
            target_adaptor.type_alias,
            registered_target_types,
            req.address,
            target_adaptor.description_of_origin,
        )
    # Warn when the BUILD file used a deprecated alias. Generated targets are skipped so
    # the warning fires only for the explicit declaration.
    if (
        target_type.deprecated_alias is not None
        and target_type.deprecated_alias == target_adaptor.type_alias
        and not req.address.is_generated_target
    ):
        warn_deprecated_target_type(target_type)
    return _AdaptorAndType(target_adaptor, target_type)
@dataclass(frozen=True)
class _TargetGeneratorOverridesRequest:
    """Wrapper around a `TargetGenerator` for resolving its `overrides` field."""

    target_generator: TargetGenerator
@dataclass(frozen=True)
class ResolvedTargetGeneratorRequests:
    """The `GenerateTargetsRequest`s produced for a target generator (empty if none apply)."""

    requests: tuple[GenerateTargetsRequest, ...] = tuple()
@dataclass(frozen=True)
class ResolveTargetGeneratorRequests:
    """Request the `GenerateTargetsRequest`s for the target generator at `address`."""

    address: Address
    # Excluded from equality/hash: informational only, does not affect identity.
    description_of_origin: str = dataclasses.field(hash=False, compare=False)
@dataclass(frozen=True)
class ResolveAllTargetGeneratorRequests:
    """Request generation requests for every target generator, optionally of one type."""

    # Excluded from equality/hash: informational only, does not affect identity.
    description_of_origin: str = dataclasses.field(hash=False, compare=False)
    # When set, only generators whose alias matches this type are considered.
    of_type: type[TargetGenerator] | None = None
@rule
async def resolve_all_generator_target_requests(
    req: ResolveAllTargetGeneratorRequests,
) -> ResolvedTargetGeneratorRequests:
    """Collect `GenerateTargetsRequest`s for every target generator in the repository.

    Scans all BUILD files via a recursive glob from the build root, optionally filtered
    to a single generator type via `req.of_type`.
    """
    address_families = await Get(
        AddressFamilies,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),),
            description_of_origin="the `ResolveAllTargetGeneratorRequests` rule",
        ),
    )
    results = await MultiGet(
        Get(
            ResolvedTargetGeneratorRequests,
            ResolveTargetGeneratorRequests(address, req.description_of_origin),
        )
        for family in address_families
        for address, target_adaptor in family.addresses_to_target_adaptors.items()
        if not req.of_type or target_adaptor.type_alias == req.of_type.alias
    )
    return ResolvedTargetGeneratorRequests(
        tuple(itertools.chain.from_iterable(result.requests for result in results))
    )
async def _target_generator_overrides(
    target_generator: TargetGenerator, unmatched_build_file_globs: UnmatchedBuildFileGlobs
) -> dict[str, dict[str, Any]]:
    """Flatten the generator's `overrides` field into a name/path -> field-values mapping.

    For `TargetFilesGenerator`s the override keys are path globs, which are expanded to
    concrete file paths here; for other generators the flattened field is returned as-is.
    """
    address = target_generator.address
    if target_generator.has_field(OverridesField):
        overrides_field = target_generator[OverridesField]
        overrides_flattened = overrides_field.flatten()
    else:
        overrides_flattened = {}
    if isinstance(target_generator, TargetFilesGenerator):
        override_globs = OverridesField.to_path_globs(
            address, overrides_flattened, unmatched_build_file_globs
        )
        override_paths = await MultiGet(
            Get(Paths, PathGlobs, path_globs) for path_globs in override_globs
        )
        return OverridesField.flatten_paths(
            address, zip(override_paths, override_globs, overrides_flattened.values())
        )
    return overrides_flattened
@rule
async def resolve_generator_target_requests(
    req: ResolveTargetGeneratorRequests,
    union_membership: UnionMembership,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    unmatched_build_file_globs: UnmatchedBuildFileGlobs,
) -> ResolvedTargetGeneratorRequests:
    """Build `GenerateTargetsRequest`s for the (possibly parametrized) generator at `req.address`.

    Returns an empty result if the declared target is not a target generator, or has no
    registered generation request.
    """
    adaptor_and_type = await Get(
        _AdaptorAndType, _RequestAdaptorAndType(req.address, req.description_of_origin)
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    if not issubclass(target_type, TargetGenerator):
        return ResolvedTargetGeneratorRequests()
    generate_request = target_types_to_generate_requests.request_for(target_type)
    if not generate_request:
        return ResolvedTargetGeneratorRequests()
    generator_fields = dict(target_adaptor.kwargs)
    # One (generator instance, template fields) pair per `parametrize` combination.
    # NOTE: this call mutates `generator_fields` in place (moved fields are popped), so
    # `base_generator` below is constructed without the moved fields.
    generators = _parametrized_target_generators_with_templates(
        req.address,
        target_adaptor,
        target_type,
        generator_fields,
        union_membership,
    )
    base_generator = target_type(
        generator_fields,
        req.address,
        name_explicitly_set=target_adaptor.name_explicitly_set,
        union_membership=union_membership,
        description_of_origin=target_adaptor.description_of_origin,
    )
    overrides = await _target_generator_overrides(base_generator, unmatched_build_file_globs)
    return ResolvedTargetGeneratorRequests(
        requests=tuple(
            generate_request(
                generator,
                template_address=generator.address,
                template=template,
                overrides={
                    name: dict(Parametrize.expand(generator.address, override))
                    for name, override in overrides.items()
                },
            )
            for generator, template in generators
        )
    )
@rule
async def resolve_target_parametrizations(
    request: _TargetParametrizationsRequest, union_membership: UnionMembership
) -> _TargetParametrizations:
    """Compute all parametrizations/generated targets for the declaration at `request.address`."""
    address = request.address
    adaptor_and_type = await Get(
        _AdaptorAndType, _RequestAdaptorAndType(request.address, request.description_of_origin)
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    parametrizations: list[_TargetParametrization] = []
    requests: ResolvedTargetGeneratorRequests | None = None
    if issubclass(target_type, TargetGenerator):
        requests = await Get(
            ResolvedTargetGeneratorRequests,
            ResolveTargetGeneratorRequests(address, request.description_of_origin),
        )
    if requests and requests.requests:
        # Target generator: run every generation request, pairing each generated batch
        # with the generator instance that produced it.
        all_generated = await MultiGet(
            Get(GeneratedTargets, GenerateTargetsRequest, generate_request)
            for generate_request in requests.requests
        )
        parametrizations.extend(
            _TargetParametrization(generate_request.generator, generated_batch)
            for generated_batch, generate_request in zip(all_generated, requests.requests)
        )
    else:
        # Plain target (or a generator with nothing to generate): expand directly.
        parametrizations.append(
            _target_parametrizations(address, target_adaptor, target_type, union_membership)
        )
    return _TargetParametrizations(parametrizations)
def _target_parametrizations(
    address: Address,
    target_adaptor: TargetAdaptor,
    target_type: type[Target],
    union_membership: UnionMembership,
) -> _TargetParametrization:
    """Expand any `parametrize` values at `address` into concrete target instances."""
    first, *rest = Parametrize.expand(address, target_adaptor.kwargs)
    if not rest:
        # A single expansion: the target was not parametrized, so instantiate it directly.
        target = target_type(
            target_adaptor.kwargs,
            address,
            name_explicitly_set=target_adaptor.name_explicitly_set,
            union_membership=union_membership,
            description_of_origin=target_adaptor.description_of_origin,
        )
        # Surface deprecation warnings for deprecated field aliases used in the BUILD file.
        for field_type in target.field_types:
            if (
                field_type.deprecated_alias is not None
                and field_type.deprecated_alias in target_adaptor.kwargs
            ):
                warn_deprecated_field_type(field_type)
        return _TargetParametrization(target, FrozenDict())

    # Parametrized: the original address does not correspond to a single target. One
    # target is instantiated per expanded parameter combination.
    entries = []
    for parameterized_address, parameterized_fields in (first, *rest):
        entries.append(
            (
                parameterized_address,
                target_type(
                    parameterized_fields,
                    parameterized_address,
                    name_explicitly_set=target_adaptor.name_explicitly_set,
                    union_membership=union_membership,
                    description_of_origin=target_adaptor.description_of_origin,
                ),
            )
        )
    return _TargetParametrization(None, FrozenDict(entries))
def _parametrized_target_generators_with_templates(
    address: Address,
    target_adaptor: TargetAdaptor,
    target_type: type[TargetGenerator],
    generator_fields: dict[str, Any],
    union_membership: UnionMembership,
) -> list[tuple[TargetGenerator, dict[str, Any]]]:
    """Expand `parametrize` over a generator's template fields.

    Returns one (generator instance, template fields) pair per parameter combination.
    NOTE: mutates `generator_fields` in place — `moved_fields` are popped out of it.

    Raises `InvalidFieldException` if a field other than a moved field is parametrized.
    """
    # Split out the `propagated_fields` before construction.
    template_fields = {}
    copied_fields = (
        *target_type.copied_fields,
        *target_type._find_plugin_fields(union_membership),
    )
    # Copied fields stay in `generator_fields` (via `get`) and are also templated.
    for field_type in copied_fields:
        field_value = generator_fields.get(field_type.alias, None)
        if field_value is not None:
            template_fields[field_type.alias] = field_value
    # Moved fields are removed from `generator_fields` entirely (via `pop`).
    for field_type in target_type.moved_fields:
        field_value = generator_fields.pop(field_type.alias, None)
        if field_value is not None:
            template_fields[field_type.alias] = field_value
    field_type_aliases = target_type._get_field_aliases_to_field_types(
        target_type.class_field_types(union_membership)
    ).keys()
    # Any remaining (non-moved) recognized field that is parametrized is an error.
    generator_fields_parametrized = {
        name
        for name, field in generator_fields.items()
        if isinstance(field, Parametrize) and name in field_type_aliases
    }
    if generator_fields_parametrized:
        noun = pluralize(len(generator_fields_parametrized), "field", include_count=False)
        generator_fields_parametrized_text = ", ".join(
            repr(f) for f in generator_fields_parametrized
        )
        raise InvalidFieldException(
            f"Only fields which will be moved to generated targets may be parametrized, "
            f"so target generator {address} (with type {target_type.alias}) cannot "
            f"parametrize the {generator_fields_parametrized_text} {noun}."
        )
    return [
        (
            target_type(
                generator_fields,
                address,
                name_explicitly_set=target_adaptor.name is not None,
                union_membership=union_membership,
                description_of_origin=target_adaptor.description_of_origin,
            ),
            template,
        )
        for address, template in Parametrize.expand(address, template_fields)
    ]
@rule(_masked_types=[EnvironmentName])
async def resolve_target(
    request: WrappedTargetRequest,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    local_environment_name: ChosenLocalEnvironmentName,
) -> WrappedTarget:
    """Resolve an address — possibly of a generated or parametrized target — to its target.

    Raises `ResolveError` when the address was not actually generated by its generator.
    """
    address = request.address
    base_address = address.maybe_convert_to_target_generator()
    parametrizations = await Get(
        _TargetParametrizations,
        {
            _TargetParametrizationsRequest(
                base_address, description_of_origin=request.description_of_origin
            ): _TargetParametrizationsRequest,
            local_environment_name.val: EnvironmentName,
        },
    )
    target = parametrizations.get(address, target_types_to_generate_requests)
    if target is None:
        raise ResolveError(
            softwrap(
                f"""
                The address `{address}` from {request.description_of_origin} was not generated by
                the target `{base_address}`. Did you mean one of these addresses?
                {bullet_list(str(t.address) for t in parametrizations.all)}
                """
            )
        )
    return WrappedTarget(target)
@dataclass(frozen=True)
class WrappedTargetForBootstrap:
    """Used to avoid a rule graph cycle when evaluating bootstrap targets.

    This does not work with target generation and parametrization. It also ignores any unrecognized
    fields in the target, to accommodate plugin fields which are not yet registered during
    bootstrapping.

    This should only be used by bootstrapping code.
    """

    # The resolved target (constructed with `ignore_unrecognized_fields=True`).
    val: Target
@rule
async def resolve_target_for_bootstrapping(
    request: WrappedTargetRequest,
    union_membership: UnionMembership,
) -> WrappedTargetForBootstrap:
    """Resolve a target directly from its BUILD declaration, bypassing target generation.

    Unrecognized fields are ignored (see `WrappedTargetForBootstrap`).
    """
    adaptor_and_type = await Get(
        _AdaptorAndType,
        _RequestAdaptorAndType(
            request.address,
            description_of_origin=request.description_of_origin,
        ),
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    target = target_type(
        target_adaptor.kwargs,
        request.address,
        name_explicitly_set=target_adaptor.name_explicitly_set,
        union_membership=union_membership,
        ignore_unrecognized_fields=True,
        description_of_origin=target_adaptor.description_of_origin,
    )
    return WrappedTargetForBootstrap(target)
@rule(_masked_types=[EnvironmentName])
async def resolve_targets(
    targets: UnexpandedTargets,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Targets:
    """Expand each target generator in `targets` into the targets it generates."""
    # Replace all generating targets with what they generate. Otherwise, keep them. If a target
    # generator does not generate any targets, keep the target generator.
    # TODO: This method does not preserve the order of inputs.
    expanded_targets: OrderedSet[Target] = OrderedSet()
    generator_targets = []
    parametrizations_gets = []
    for tgt in targets:
        if (
            target_types_to_generate_requests.is_generator(tgt)
            and not tgt.address.is_generated_target
        ):
            generator_targets.append(tgt)
            parametrizations_gets.append(
                Get(
                    _TargetParametrizations,
                    {
                        _TargetParametrizationsRequest(
                            tgt.address.maybe_convert_to_target_generator(),
                            # Idiomatic rules should not be manually creating `UnexpandedTargets`, so
                            # we can be confident that the targets actually exist and the addresses
                            # are already legitimate.
                            description_of_origin="<infallible>",
                        ): _TargetParametrizationsRequest,
                        local_environment_name.val: EnvironmentName,
                    },
                )
            )
        else:
            expanded_targets.add(tgt)
    all_generated_targets = await MultiGet(parametrizations_gets)
    expanded_targets.update(
        tgt
        for generator, parametrizations in zip(generator_targets, all_generated_targets)
        for tgt in parametrizations.generated_or_generator(generator.address)
    )
    return Targets(expanded_targets)
@rule(desc="Find all targets in the project", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def find_all_targets() -> AllTargets:
    """Find every (expanded) target in the project via a recursive glob of all BUILD files."""
    tgts = await Get(
        Targets,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),), description_of_origin="the `AllTargets` rule"
        ),
    )
    return AllTargets(tgts)
@rule(
    desc="Find all (unexpanded) targets in the project",
    level=LogLevel.DEBUG,
    _masked_types=[EnvironmentName],
)
async def find_all_unexpanded_targets() -> AllUnexpandedTargets:
    """Find every target in the project without expanding target generators."""
    tgts = await Get(
        UnexpandedTargets,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),),
            # Fixed copy-paste: this rule backs `AllUnexpandedTargets`, not `AllTargets`.
            description_of_origin="the `AllUnexpandedTargets` rule",
        ),
    )
    return AllUnexpandedTargets(tgts)
# -----------------------------------------------------------------------------------------------
# TransitiveTargets
# -----------------------------------------------------------------------------------------------
class CycleException(Exception):
    """Raised when the dependency graph contains a cycle among the given addresses."""

    def __init__(self, subject: Address, path: tuple[Address, ...]) -> None:
        # Render the path one address per line, pointing an arrow at the cycle's subject.
        rendered_lines = []
        for a in path:
            rendered_lines.append(f"-> {a}" if a == subject else f"   {a}")
        path_string = "\n".join(rendered_lines)
        super().__init__(
            f"The dependency graph contained a cycle:\n{path_string}\n\nTo fix this, first verify "
            "if your code has an actual import cycle. If it does, you likely need to re-architect "
            "your code to avoid the cycle.\n\nIf there is no cycle in your code, then you may need "
            "to use more granular targets. Split up the problematic targets into smaller targets "
            "with more granular `sources` fields so that you can adjust the `dependencies` fields "
            "to avoid introducing a cycle.\n\nAlternatively, use Python dependency inference "
            "(`--python-infer-imports`), rather than explicit `dependencies`. Pants will infer "
            "dependencies on specific files, rather than entire targets. This extra precision "
            "means that you will only have cycles if your code actually does have cycles in it."
        )
        self.subject = subject
        self.path = path
def _detect_cycles(
    roots: tuple[Address, ...], dependency_mapping: dict[Address, tuple[Address, ...]]
) -> None:
    """Raise `CycleException` if the graph reachable from `roots` contains a cycle.

    Cycles that pass through a file-level address are tolerated (see
    `maybe_report_cycle`); all other cycles are reported with the offending path.
    """
    # DFS state: the current path (ordered, for cycle reporting) and all seen nodes.
    path_stack: OrderedSet[Address] = OrderedSet()
    visited: set[Address] = set()
    def maybe_report_cycle(address: Address) -> None:
        # NB: File-level dependencies are cycle tolerant.
        if address.is_file_target or address not in path_stack:
            return
        # The path of the cycle is shorter than the entire path to the cycle: if the suffix of
        # the path representing the cycle contains a file dep, it is ignored.
        in_cycle = False
        for path_address in path_stack:
            if in_cycle and path_address.is_file_target:
                # There is a file address inside the cycle: do not report it.
                return
            elif in_cycle:
                # Not a file address.
                continue
            else:
                # We're entering the suffix of the path that contains the cycle if we've reached
                # the address in question.
                in_cycle = path_address == address
        # If we did not break out early, it's because there were no file addresses in the cycle.
        raise CycleException(address, (*path_stack, address))
    def visit(address: Address):
        # Revisiting a node on the current path is a cycle; otherwise it's already explored.
        if address in visited:
            maybe_report_cycle(address)
            return
        path_stack.add(address)
        visited.add(address)
        for dep_address in dependency_mapping[address]:
            visit(dep_address)
        path_stack.remove(address)
    for root in roots:
        visit(root)
    if path_stack:
        raise AssertionError(
            f"The stack of visited nodes should have been empty at the end of recursion, "
            f"but it still contained: {path_stack}"
        )
@dataclass(frozen=True)
class _DependencyMappingRequest:
    """Request the transitive dependency mapping for the roots of `tt_request`."""

    tt_request: TransitiveTargetsRequest
    # If true, traverse `Targets` (generated targets); otherwise `UnexpandedTargets`.
    expanded_targets: bool
@dataclass(frozen=True)
class _DependencyMapping:
    """The result of a transitive dependency walk."""

    # Each walked address mapped to the addresses of its direct dependencies.
    mapping: FrozenDict[Address, tuple[Address, ...]]
    # Targets discovered as dependencies during the walk (roots are tracked separately).
    visited: FrozenOrderedSet[Target]
    # The root addresses resolved to targets (post generator/parametrization expansion).
    roots_as_targets: Collection[Target]
@rule
async def transitive_dependency_mapping(request: _DependencyMappingRequest) -> _DependencyMapping:
    """This uses iteration, rather than recursion, so that we can tolerate dependency cycles.
    Unlike a traditional BFS algorithm, we batch each round of traversals via `MultiGet` for
    improved performance / concurrency.
    """
    roots_as_targets = await Get(UnexpandedTargets, Addresses(request.tt_request.roots))
    visited: OrderedSet[Target] = OrderedSet()
    queued = FrozenOrderedSet(roots_as_targets)
    dependency_mapping: dict[Address, tuple[Address, ...]] = {}
    while queued:
        # Resolve the direct dependencies of the whole frontier in one parallel batch,
        # as expanded or unexpanded targets depending on `request.expanded_targets`.
        direct_dependencies: tuple[Collection[Target], ...]
        if request.expanded_targets:
            direct_dependencies = await MultiGet(  # noqa: PNT30: this is inherently sequential
                Get(
                    Targets,
                    DependenciesRequest(
                        tgt.get(Dependencies),
                        should_traverse_deps_predicate=request.tt_request.should_traverse_deps_predicate,
                    ),
                )
                for tgt in queued
            )
        else:
            direct_dependencies = await MultiGet(  # noqa: PNT30: this is inherently sequential
                Get(
                    UnexpandedTargets,
                    DependenciesRequest(
                        tgt.get(Dependencies),
                        should_traverse_deps_predicate=request.tt_request.should_traverse_deps_predicate,
                    ),
                )
                for tgt in queued
            )
        dependency_mapping.update(
            zip(
                (t.address for t in queued),
                (tuple(t.address for t in deps) for deps in direct_dependencies),
            )
        )
        # Next frontier: newly discovered dependencies that have not yet been expanded.
        queued = FrozenOrderedSet(itertools.chain.from_iterable(direct_dependencies)).difference(
            visited
        )
        visited.update(queued)
    # NB: We use `roots_as_targets` to get the root addresses, rather than `request.roots`. This
    # is because expanding from the `Addresses` -> `Targets` may have resulted in generated
    # targets being used, so we need to use `roots_as_targets` to have this expansion.
    # TODO(#12871): Fix this to not be based on generated targets.
    _detect_cycles(tuple(t.address for t in roots_as_targets), dependency_mapping)
    return _DependencyMapping(
        FrozenDict(dependency_mapping), FrozenOrderedSet(visited), roots_as_targets
    )
@rule(desc="Resolve transitive targets", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def transitive_targets(
    request: TransitiveTargetsRequest, local_environment_name: ChosenLocalEnvironmentName
) -> TransitiveTargets:
    """Find all the targets transitively depended upon by the target roots."""
    dependency_mapping = await Get(_DependencyMapping, _DependencyMappingRequest(request, True))
    # Apply any transitive excludes (`!!` ignores).
    transitive_excludes: FrozenOrderedSet[Target] = FrozenOrderedSet()
    unevaluated_transitive_excludes = []
    for t in (*dependency_mapping.roots_as_targets, *dependency_mapping.visited):
        unparsed = t.get(Dependencies).unevaluated_transitive_excludes
        if unparsed.values:
            unevaluated_transitive_excludes.append(unparsed)
    if unevaluated_transitive_excludes:
        nested_transitive_excludes = await MultiGet(
            Get(Targets, UnparsedAddressInputs, unparsed)
            for unparsed in unevaluated_transitive_excludes
        )
        # Flatten the per-request exclude collections directly. (The previous
        # `excludes for excludes in ...` generator expression was a no-op wrapper.)
        transitive_excludes = FrozenOrderedSet(
            itertools.chain.from_iterable(nested_transitive_excludes)
        )
    return TransitiveTargets(
        tuple(dependency_mapping.roots_as_targets),
        FrozenOrderedSet(dependency_mapping.visited.difference(transitive_excludes)),
    )
# -----------------------------------------------------------------------------------------------
# CoarsenedTargets
# -----------------------------------------------------------------------------------------------
@rule(_masked_types=[EnvironmentName])
def coarsened_targets_request(addresses: Addresses) -> CoarsenedTargetsRequest:
    """Convert `Addresses` into a default `CoarsenedTargetsRequest`."""
    return CoarsenedTargetsRequest(addresses)
@rule(desc="Resolve coarsened targets", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def coarsened_targets(
    request: CoarsenedTargetsRequest, local_environment_name: ChosenLocalEnvironmentName
) -> CoarsenedTargets:
    """Group the transitive closure of the request's roots into `CoarsenedTarget`s, one
    per strongly connected component of the dependency graph."""
    dependency_mapping = await Get(
        _DependencyMapping,
        _DependencyMappingRequest(
            TransitiveTargetsRequest(
                request.roots,
                should_traverse_deps_predicate=request.should_traverse_deps_predicate,
            ),
            expanded_targets=request.expanded_targets,
        ),
    )
    addresses_to_targets = {
        t.address: t for t in [*dependency_mapping.visited, *dependency_mapping.roots_as_targets]
    }
    # Because this is Tarjan's SCC (TODO: update signature to guarantee), components are returned
    # in reverse topological order. We can thus assume when building the structure shared
    # `CoarsenedTarget` instances that each instance will already have had its dependencies
    # constructed.
    components = native_engine.strongly_connected_components(
        list(dependency_mapping.mapping.items())
    )
    coarsened_targets: dict[Address, CoarsenedTarget] = {}
    root_coarsened_targets = []
    root_addresses_set = set(request.roots)
    try:
        for component in components:
            component = sorted(component)
            component_set = set(component)
            # For each member of the component, include the CoarsenedTarget for each of its external
            # dependencies.
            coarsened_target = CoarsenedTarget(
                (addresses_to_targets[a] for a in component),
                (
                    coarsened_targets[d]
                    for a in component
                    for d in dependency_mapping.mapping[a]
                    if d not in component_set
                ),
            )
            # Add to the coarsened_targets mapping under each of the component's Addresses.
            for address in component:
                coarsened_targets[address] = coarsened_target
            # If any of the input Addresses was a member of this component, it is a root.
            if component_set & root_addresses_set:
                root_coarsened_targets.append(coarsened_target)
    except KeyError:
        # TODO: This output is intended to help uncover a non-deterministic error reported in
        # https://github.com/pantsbuild/pants/issues/17047.
        mapping_str = json.dumps(
            {str(a): [str(d) for d in deps] for a, deps in dependency_mapping.mapping.items()}
        )
        components_str = json.dumps([[str(a) for a in component] for component in components])
        logger.warning(f"For {request}:\nMapping:\n{mapping_str}\nComponents:\n{components_str}")
        raise
    return CoarsenedTargets(tuple(root_coarsened_targets))
# -----------------------------------------------------------------------------------------------
# Find the owners of a file
# -----------------------------------------------------------------------------------------------
def _log_or_raise_unmatched_owners(
    file_paths: Sequence[PurePath],
    owners_not_found_behavior: GlobMatchErrorBehavior,
    ignore_option: str | None = None,
) -> None:
    """Report that no targets own `file_paths`, per `owners_not_found_behavior`.

    Logs a warning when the behavior is `warn`; otherwise raises `ResolveError`.
    `ignore_option`, when given, is mentioned in the message as the flag that
    silences the error.
    """
    option_msg = (
        f"\n\nIf you would like to ignore un-owned files, please pass `{ignore_option}`."
        if ignore_option
        else ""
    )
    if len(file_paths) == 1:
        prefix = (
            f"No owning targets could be found for the file `{file_paths[0]}`.\n\n"
            f"Please check that there is a BUILD file in the parent directory "
            f"{file_paths[0].parent} with a target whose `sources` field includes the file."
        )
    else:
        # Fixed: the original message had a stray trailing backtick after the file list.
        prefix = (
            f"No owning targets could be found for the files {sorted(map(str, file_paths))}.\n\n"
            f"Please check that there are BUILD files in each file's parent directory with a "
            f"target whose `sources` field includes the file."
        )
    msg = (
        f"{prefix} See {doc_url('targets')} for more information on target definitions."
        f"\n\nYou may want to run `{bin_name()} tailor` to autogenerate your BUILD files. See "
        f"{doc_url('create-initial-build-files')}.{option_msg}"
    )
    if owners_not_found_behavior == GlobMatchErrorBehavior.warn:
        logger.warning(msg)
    else:
        raise ResolveError(msg)
@dataclass(frozen=True)
class OwnersRequest:
    """A request for the owners of a set of file paths.
    TODO: This is widely used as an effectively-public API. It should probably move to
    `pants.engine.target`.
    """
    # File paths (live or deleted) whose owning targets should be found.
    sources: tuple[str, ...]
    # What to do for sources that end up with no owner: ignore, warn, or error.
    owners_not_found_behavior: GlobMatchErrorBehavior = GlobMatchErrorBehavior.ignore
    # If True, candidate targets are filtered by global options (via FilteredTargets).
    filter_by_global_options: bool = False
    # If True, a target also "owns" a source when that source is the target's own BUILD file.
    match_if_owning_build_file_included_in_sources: bool = False
class Owners(FrozenOrderedSet[Address]):
    """The addresses of the targets that own the requested sources."""
    pass
@rule(desc="Find which targets own certain files", _masked_types=[EnvironmentName])
async def find_owners(
    owners_request: OwnersRequest,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Owners:
    """Map the requested source paths to the addresses of the targets that own them.

    Handles both files that still exist on disk ("live") and files that no longer exist
    ("deleted"): deleted files can only be matched against the original, unexpanded
    `sources` globs of target generators, since their per-file generated targets are gone.
    """
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))
    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)
    def create_live_and_deleted_gets(
        *, filter_by_global_options: bool
    ) -> tuple[Get[FilteredTargets | Targets], Get[UnexpandedTargets],]:
        """Walk up the buildroot looking for targets that would conceivably claim changed sources.
        For live files, we use Targets, which causes generated targets to be used rather than their
        target generators. For deleted files we use UnexpandedTargets, which have the original
        declared `sources` globs from target generators.
        We ignore unrecognized files, which can happen e.g. when finding owners for deleted files.
        """
        live_raw_specs = RawSpecsWithoutFileOwners(
            ancestor_globs=tuple(AncestorGlobSpec(directory=d) for d in live_dirs),
            filter_by_global_options=filter_by_global_options,
            description_of_origin="<owners rule - unused>",
            unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
        )
        live_get: Get[FilteredTargets | Targets] = (
            Get(FilteredTargets, RawSpecsWithoutFileOwners, live_raw_specs)
            if filter_by_global_options
            else Get(Targets, RawSpecsWithoutFileOwners, live_raw_specs)
        )
        deleted_get = Get(
            UnexpandedTargets,
            RawSpecsWithoutFileOwners(
                ancestor_globs=tuple(AncestorGlobSpec(directory=d) for d in deleted_dirs),
                filter_by_global_options=filter_by_global_options,
                description_of_origin="<owners rule - unused>",
                unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
            ),
        )
        return live_get, deleted_get
    live_get, deleted_get = create_live_and_deleted_gets(
        filter_by_global_options=owners_request.filter_by_global_options
    )
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(live_get, deleted_get)
    result = set()
    unmatched_sources = set(owners_request.sources)
    # Two passes: first match live candidates against live files, then deleted against deleted.
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files
        build_file_addresses = await MultiGet(  # noqa: PNT30: requires triage
            Get(
                BuildFileAddress,
                BuildFileAddressRequest(
                    tgt.address, description_of_origin="<owners rule - cannot trigger>"
                ),
            )
            for tgt in candidate_tgts
        )
        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            matching_files = set(
                candidate_tgt.get(SourcesField).filespec_matcher.matches(list(sources_set))
            )
            # A target owns a source when its `sources` matches it, or (if enabled) when the
            # target's own BUILD file is among the requested sources.
            if not matching_files and not (
                owners_request.match_if_owning_build_file_included_in_sources
                and bfa.rel_path in sources_set
            ):
                continue
            unmatched_sources -= matching_files
            result.add(candidate_tgt.address)
    if (
        unmatched_sources
        and owners_request.owners_not_found_behavior != GlobMatchErrorBehavior.ignore
    ):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources], owners_request.owners_not_found_behavior
        )
    return Owners(result)
# -----------------------------------------------------------------------------------------------
# Resolve SourcesField
# -----------------------------------------------------------------------------------------------
@rule
def extract_unmatched_build_file_globs(
    global_options: GlobalOptions,
) -> UnmatchedBuildFileGlobs:
    """Expose the global `unmatched_build_file_globs` option as its own rule output."""
    globs = global_options.unmatched_build_file_globs
    return UnmatchedBuildFileGlobs(globs)
class AmbiguousCodegenImplementationsException(Exception):
    """Exception for when there are multiple codegen implementations and it is ambiguous which to
    use."""

    @classmethod
    def create(
        cls,
        generators: Iterable[type[GenerateSourcesRequest]],
        *,
        for_sources_types: Iterable[type[SourcesField]],
    ) -> AmbiguousCodegenImplementationsException:
        """Build the exception describing an ambiguous set of codegen `generators`.

        Both iterables are materialized up front: the original code iterated `generators`
        four separate times (and `for_sources_types` twice), which silently misbehaves
        when passed a one-shot iterator.
        """
        generators = list(generators)
        for_sources_types = tuple(for_sources_types)
        all_same_generator_paths = (
            len({(generator.input, generator.output) for generator in generators}) == 1
        )
        example_generator = generators[0]
        input = example_generator.input.__name__
        if all_same_generator_paths:
            # Every candidate maps the same input type to the same output type.
            output = example_generator.output.__name__
            return cls(
                f"Multiple registered code generators can generate {output} from {input}. "
                "It is ambiguous which implementation to use.\n\nPossible implementations:\n\n"
                f"{bullet_list(sorted(generator.__name__ for generator in generators))}"
            )
        # Candidates differ in output type: list each with the type it produces.
        possible_output_types = sorted(
            generator.output.__name__
            for generator in generators
            if issubclass(generator.output, for_sources_types)
        )
        possible_generators_with_output = [
            f"{generator.__name__} -> {generator.output.__name__}"
            for generator in sorted(generators, key=lambda generator: generator.output.__name__)
        ]
        return cls(
            f"Multiple registered code generators can generate one of "
            f"{possible_output_types} from {input}. It is ambiguous which implementation to "
            f"use. This can happen when the call site requests too many different output types "
            f"from the same original protocol sources.\n\nPossible implementations with their "
            f"output type:\n\n"
            f"{bullet_list(possible_generators_with_output)}"
        )
@rule(desc="Hydrate the `sources` field")
async def hydrate_sources(
    request: HydrateSourcesRequest,
    unmatched_build_file_globs: UnmatchedBuildFileGlobs,
    union_membership: UnionMembership,
) -> HydratedSources:
    """Expand a `SourcesField` into a snapshot of files, optionally running codegen.

    Returns an empty snapshot with `sources_type=None` when the field is neither an
    instance of any of `request.for_sources_types` nor (when codegen is enabled)
    generable into one of them.
    """
    sources_field = request.field
    # First, find if there are any code generators for the input `sources_field`. This will be used
    # to determine if the sources_field is valid or not.
    # We could alternatively use `sources_field.can_generate()`, but we want to error if there are
    # 2+ generators due to ambiguity.
    generate_request_types = union_membership.get(GenerateSourcesRequest)
    relevant_generate_request_types = [
        generate_request_type
        for generate_request_type in generate_request_types
        if isinstance(sources_field, generate_request_type.input)
        and issubclass(generate_request_type.output, request.for_sources_types)
    ]
    if request.enable_codegen and len(relevant_generate_request_types) > 1:
        raise AmbiguousCodegenImplementationsException.create(
            relevant_generate_request_types, for_sources_types=request.for_sources_types
        )
    # At most one relevant generator remains; None if codegen does not apply.
    generate_request_type = next(iter(relevant_generate_request_types), None)
    # Now, determine if any of the `for_sources_types` may be used, either because the
    # sources_field is a direct subclass or can be generated into one of the valid types.
    def compatible_with_sources_field(valid_type: type[SourcesField]) -> bool:
        is_instance = isinstance(sources_field, valid_type)
        can_be_generated = (
            request.enable_codegen
            and generate_request_type is not None
            and issubclass(generate_request_type.output, valid_type)
        )
        return is_instance or can_be_generated
    sources_type = next(
        (
            valid_type
            for valid_type in request.for_sources_types
            if compatible_with_sources_field(valid_type)
        ),
        None,
    )
    if sources_type is None:
        return HydratedSources(EMPTY_SNAPSHOT, sources_field.filespec, sources_type=None)
    # Now, hydrate the `globs`. Even if we are going to use codegen, we will need the original
    # protocol sources to be hydrated.
    path_globs = sources_field.path_globs(unmatched_build_file_globs)
    snapshot = await Get(Snapshot, PathGlobs, path_globs)
    sources_field.validate_resolved_files(snapshot.files)
    # Finally, return if codegen is not in use; otherwise, run the relevant code generator.
    if not request.enable_codegen or generate_request_type is None:
        return HydratedSources(snapshot, sources_field.filespec, sources_type=sources_type)
    wrapped_protocol_target = await Get(
        WrappedTarget,
        WrappedTargetRequest(
            sources_field.address,
            # It's only possible to hydrate sources on a target that we already know exists.
            description_of_origin="<infallible>",
        ),
    )
    generated_sources = await Get(
        GeneratedSources,
        GenerateSourcesRequest,
        generate_request_type(snapshot, wrapped_protocol_target.target),
    )
    return HydratedSources(
        generated_sources.snapshot, sources_field.filespec, sources_type=sources_type
    )
@rule(desc="Resolve `sources` field file names")
async def resolve_source_paths(
    request: SourcesPathsRequest, unmatched_build_file_globs: UnmatchedBuildFileGlobs
) -> SourcesPaths:
    """Expand a `SourcesField` into the concrete file and directory paths it matches."""
    field = request.field
    globs = field.path_globs(unmatched_build_file_globs)
    resolved = await Get(Paths, PathGlobs, globs)
    field.validate_resolved_files(resolved.files)
    return SourcesPaths(files=resolved.files, dirs=resolved.dirs)
# -----------------------------------------------------------------------------------------------
# Resolve addresses, including the Dependencies field
# -----------------------------------------------------------------------------------------------
class SubprojectRoots(Collection[str]):
    """The `--subproject-roots` global option, exposed as a rule-graph type."""
    pass
@rule
def extract_subproject_roots(global_options: GlobalOptions) -> SubprojectRoots:
    """Expose the global `subproject_roots` option as its own rule output."""
    roots = global_options.subproject_roots
    return SubprojectRoots(roots)
class ParsedDependencies(NamedTuple):
    """A `dependencies` field split into included and `!`-ignored address inputs."""
    addresses: list[AddressInput]
    ignored_addresses: list[AddressInput]
class TransitiveExcludesNotSupportedError(ValueError):
    """Raised when a `!!` transitive exclude is used on a target type that lacks support."""

    def __init__(
        self,
        *,
        bad_value: str,
        address: Address,
        registered_target_types: Iterable[type[Target]],
        union_membership: UnionMembership,
    ) -> None:
        def _supports_transitive_excludes(tgt_type: type[Target]) -> bool:
            # A type qualifies only if it has a `Dependencies` field that opts in.
            if not tgt_type.class_has_field(Dependencies, union_membership=union_membership):
                return False
            dep_field = tgt_type.class_get_field(Dependencies, union_membership=union_membership)
            return dep_field.supports_transitive_excludes

        applicable_target_types = sorted(
            tgt_type.alias
            for tgt_type in registered_target_types
            if _supports_transitive_excludes(tgt_type)
        )
        super().__init__(
            f"Bad value '{bad_value}' in the `dependencies` field for {address}. "
            "Transitive excludes with `!!` are not supported for this target type. Did you mean "
            "to use a single `!` for a direct exclude?\n\nTransitive excludes work with these "
            f"target types: {applicable_target_types}"
        )
@rule
async def convert_dependencies_request_to_explicitly_provided_dependencies_request(
    request: DependenciesRequest,
) -> ExplicitlyProvidedDependenciesRequest:
    """This rule discards any deps predicate from DependenciesRequest.
    Calculating ExplicitlyProvidedDependencies does not use any deps traversal predicates as it is
    meant to list all explicit deps from the given field. By stripping the predicate from the
    request, we ensure that the cache key for ExplicitlyProvidedDependencies calculation does not
    include the predicate increasing the cache-hit rate.
    """
    # TODO: Maybe require Get(ExplicitlyProvidedDependencies, ExplicitlyProvidedDependenciesRequest)
    # and deprecate Get(ExplicitlyProvidedDependencies, DependenciesRequest) via this rule.
    # Only the field itself is carried over; everything else on the request is dropped.
    return ExplicitlyProvidedDependenciesRequest(request.field)
@rule
async def determine_explicitly_provided_dependencies(
    request: ExplicitlyProvidedDependenciesRequest,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
    subproject_roots: SubprojectRoots,
) -> ExplicitlyProvidedDependencies:
    """Parse the raw `dependencies` field into resolved include and ignore addresses.

    Entries prefixed with `!` are direct ignores; `!!` marks a transitive exclude, which
    is only legal on target types whose `Dependencies` field supports it.
    """
    parse = functools.partial(
        AddressInput.parse,
        relative_to=request.field.address.spec_path,
        subproject_roots=subproject_roots,
        description_of_origin=(
            f"the `{request.field.alias}` field from the target {request.field.address}"
        ),
    )
    addresses: list[AddressInput] = []
    ignored_addresses: list[AddressInput] = []
    for v in request.field.value or ():
        is_ignore = v.startswith("!")
        if is_ignore:
            # Check if it's a transitive exclude, rather than a direct exclude.
            if v.startswith("!!"):
                if not request.field.supports_transitive_excludes:
                    raise TransitiveExcludesNotSupportedError(
                        bad_value=v,
                        address=request.field.address,
                        registered_target_types=registered_target_types.types,
                        union_membership=union_membership,
                    )
                v = v[2:]
            else:
                v = v[1:]
        result = parse(v)
        if is_ignore:
            ignored_addresses.append(result)
        else:
            addresses.append(result)
    # Resolve all parsed inputs to concrete Addresses in parallel.
    parsed_includes = await MultiGet(Get(Address, AddressInput, ai) for ai in addresses)
    parsed_ignores = await MultiGet(Get(Address, AddressInput, ai) for ai in ignored_addresses)
    return ExplicitlyProvidedDependencies(
        request.field.address,
        FrozenOrderedSet(sorted(parsed_includes)),
        FrozenOrderedSet(sorted(parsed_ignores)),
    )
async def _fill_parameters(
    field_alias: str,
    consumer_tgt: Target,
    addresses: Iterable[Address],
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    field_defaults: FieldDefaults,
    local_environment_name: ChosenLocalEnvironmentName,
) -> tuple[Address, ...]:
    """Fill in missing parametrization parameters on `addresses`.

    For each address, looks up the parametrizations of its target generator and selects the
    subset that matches the consuming target (`consumer_tgt`) and the field defaults.
    `field_alias` is used only in error/origin messages.
    """
    # `addresses` is iterated twice below, so it must not be a one-shot iterator.
    assert not isinstance(addresses, Iterator)
    parametrizations = await MultiGet(
        Get(
            _TargetParametrizations,
            {
                _TargetParametrizationsRequest(
                    address.maybe_convert_to_target_generator(),
                    description_of_origin=f"the `{field_alias}` field of the target {consumer_tgt.address}",
                ): _TargetParametrizationsRequest,
                local_environment_name.val: EnvironmentName,
            },
        )
        for address in addresses
    )
    return tuple(
        parametrizations.get_subset(
            address, consumer_tgt, field_defaults, target_types_to_generate_requests
        ).address
        for address, parametrizations in zip(addresses, parametrizations)
    )
@rule(desc="Resolve direct dependencies of target", _masked_types=[EnvironmentName])
async def resolve_dependencies(
    request: DependenciesRequest,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    union_membership: UnionMembership,
    subproject_roots: SubprojectRoots,
    field_defaults: FieldDefaults,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Addresses:
    """Compute the direct dependency addresses of one target.

    Combines, in order: addresses generated by the target (if it is a generator),
    explicitly provided includes, inferred dependencies, and special-cased dependency
    fields - minus explicit `!` ignores and inference-provided excludes. The final set
    is validated via any registered `ValidateDependenciesRequest` implementations.
    """
    environment_name = local_environment_name.val
    wrapped_tgt = await Get(
        WrappedTarget,
        # It's only possible to find dependencies for a target that we already know exists.
        WrappedTargetRequest(request.field.address, description_of_origin="<infallible>"),
    )
    tgt = wrapped_tgt.target
    # This predicate allows the dep graph to ignore dependencies of selected targets
    # including any explicit deps and any inferred deps.
    # For example, to avoid traversing the deps of package targets.
    if request.should_traverse_deps_predicate(tgt, request.field) == DepsTraversalBehavior.EXCLUDE:
        return Addresses([])
    try:
        explicitly_provided = await Get(
            ExplicitlyProvidedDependencies, DependenciesRequest, request
        )
    except Exception as e:
        raise InvalidFieldException(
            f"{tgt.description_of_origin}: Failed to get dependencies for {tgt.address}: {e}"
        )
    # Infer any dependencies (based on `SourcesField` field).
    inference_request_types = cast(
        "Sequence[Type[InferDependenciesRequest]]", union_membership.get(InferDependenciesRequest)
    )
    inferred: tuple[InferredDependencies, ...] = ()
    if inference_request_types:
        # Only run inference implementations whose `infer_from` field set applies to this target.
        relevant_inference_request_types = [
            inference_request_type
            for inference_request_type in inference_request_types
            if inference_request_type.infer_from.is_applicable(tgt)
        ]
        inferred = await MultiGet(
            Get(
                InferredDependencies,
                {
                    inference_request_type(
                        inference_request_type.infer_from.create(tgt)
                    ): InferDependenciesRequest,
                    environment_name: EnvironmentName,
                },
            )
            for inference_request_type in relevant_inference_request_types
        )
    # If it's a target generator, inject dependencies on all of its generated targets.
    generated_addresses: tuple[Address, ...] = ()
    if target_types_to_generate_requests.is_generator(tgt) and not tgt.address.is_generated_target:
        parametrizations = await Get(
            _TargetParametrizations,
            {
                _TargetParametrizationsRequest(
                    tgt.address.maybe_convert_to_target_generator(),
                    description_of_origin=(
                        f"the target generator {tgt.address.maybe_convert_to_target_generator()}"
                    ),
                ): _TargetParametrizationsRequest,
                environment_name: EnvironmentName,
            },
        )
        generated_addresses = tuple(parametrizations.generated_for(tgt.address).keys())
    # See whether any explicitly provided dependencies are parametrized, but with partial/no
    # parameters. If so, fill them in.
    explicitly_provided_includes: Iterable[Address] = explicitly_provided.includes
    if explicitly_provided_includes:
        explicitly_provided_includes = await _fill_parameters(
            request.field.alias,
            tgt,
            explicitly_provided_includes,
            target_types_to_generate_requests,
            field_defaults,
            local_environment_name,
        )
    explicitly_provided_ignores: FrozenOrderedSet[Address] = explicitly_provided.ignores
    if explicitly_provided_ignores:
        explicitly_provided_ignores = FrozenOrderedSet(
            await _fill_parameters(
                request.field.alias,
                tgt,
                tuple(explicitly_provided_ignores),
                target_types_to_generate_requests,
                field_defaults,
                local_environment_name,
            )
        )
    # If the target has `SpecialCasedDependencies`, such as the `archive` target having
    # `files` and `packages` fields, then we possibly include those too. We don't want to always
    # include those dependencies because they should often be excluded from the result due to
    # being handled elsewhere in the calling code. So, we only include fields based on
    # the should_traverse_deps_predicate.
    # Unlike normal, we don't use `tgt.get()` because there may be >1 subclass of
    # SpecialCasedDependencies.
    special_cased_fields = tuple(
        field
        for field in tgt.field_values.values()
        if isinstance(field, SpecialCasedDependencies)
        and request.should_traverse_deps_predicate(tgt, field) == DepsTraversalBehavior.INCLUDE
    )
    # We can't use the normal `Get(Addresses, UnparsedAddressInputs)` due to a graph cycle.
    special_cased = await MultiGet(
        Get(
            Address,
            AddressInput,
            AddressInput.parse(
                addr,
                relative_to=tgt.address.spec_path,
                subproject_roots=subproject_roots,
                description_of_origin=(
                    f"the `{special_cased_field.alias}` field from the target {tgt.address}"
                ),
            ),
        )
        for special_cased_field in special_cased_fields
        for addr in special_cased_field.to_unparsed_address_inputs().values
    )
    # Excludes come from explicit `!` ignores plus any excludes reported by inference.
    excluded = explicitly_provided_ignores.union(
        *itertools.chain(deps.exclude for deps in inferred)
    )
    result = Addresses(
        sorted(
            {
                addr
                for addr in (
                    *generated_addresses,
                    *explicitly_provided_includes,
                    *itertools.chain.from_iterable(deps.include for deps in inferred),
                    *special_cased,
                )
                if addr not in excluded
            }
        )
    )
    # Validate dependencies.
    _ = await MultiGet(
        Get(
            ValidatedDependencies,
            {
                vd_request_type(vd_request_type.field_set_type.create(tgt), result): ValidateDependenciesRequest,  # type: ignore[misc]
                environment_name: EnvironmentName,
            },
        )
        for vd_request_type in union_membership.get(ValidateDependenciesRequest)
        if vd_request_type.field_set_type.is_applicable(tgt)  # type: ignore[misc]
    )
    return result
@rule(desc="Resolve addresses")
async def resolve_unparsed_address_inputs(
    request: UnparsedAddressInputs, subproject_roots: SubprojectRoots
) -> Addresses:
    """Parse and resolve a batch of raw address strings.

    With `request.skip_invalid_addresses`, unparseable or unresolvable entries are dropped
    (logged at debug level); otherwise parse/resolution errors propagate to the caller.
    """
    address_inputs = []
    invalid_addresses = []
    for v in request.values:
        try:
            address_inputs.append(
                AddressInput.parse(
                    v,
                    relative_to=request.relative_to,
                    subproject_roots=subproject_roots,
                    description_of_origin=request.description_of_origin,
                )
            )
        except AddressParseException:
            if not request.skip_invalid_addresses:
                raise
            invalid_addresses.append(v)
    if request.skip_invalid_addresses:
        # Lenient path: resolve what we can, collect the rest as invalid.
        maybe_addresses = await MultiGet(
            Get(MaybeAddress, AddressInput, ai) for ai in address_inputs
        )
        valid_addresses = []
        for maybe_address, address_input in zip(maybe_addresses, address_inputs):
            if isinstance(maybe_address.val, Address):
                valid_addresses.append(maybe_address.val)
            else:
                invalid_addresses.append(address_input.spec)
        if invalid_addresses:
            logger.debug(
                softwrap(
                    f"""
                    Invalid addresses from {request.description_of_origin}:
                    {sorted(invalid_addresses)}. Skipping them.
                    """
                )
            )
        return Addresses(valid_addresses)
    addresses = await MultiGet(Get(Address, AddressInput, ai) for ai in address_inputs)
    # Validate that the addresses exist. We do this eagerly here because
    # `Addresses -> UnexpandedTargets` does not preserve the `description_of_origin`, so it would
    # be too late, per https://github.com/pantsbuild/pants/issues/15858.
    await MultiGet(
        Get(
            WrappedTarget,
            WrappedTargetRequest(addr, description_of_origin=request.description_of_origin),
        )
        for addr in addresses
    )
    return Addresses(addresses)
# -----------------------------------------------------------------------------------------------
# Dynamic Field defaults
# -----------------------------------------------------------------------------------------------
@rule
async def field_defaults(union_membership: UnionMembership) -> FieldDefaults:
    """Collect dynamically registered field-default factories into one lookup table."""
    requests = list(union_membership.get(FieldDefaultFactoryRequest))
    factories = await MultiGet(
        Get(FieldDefaultFactoryResult, FieldDefaultFactoryRequest, impl()) for impl in requests
    )
    return FieldDefaults(
        FrozenDict(
            # Map each registered field type to its default factory.
            (request.field_type, factory.default_factory)
            for request, factory in zip(requests, factories)
        )
    )
# -----------------------------------------------------------------------------------------------
# Find applicable field sets
# -----------------------------------------------------------------------------------------------
@rule
def find_valid_field_sets(
    request: FieldSetsPerTargetRequest, union_membership: UnionMembership
) -> FieldSetsPerTarget:
    """For each requested target, collect every applicable field set of the requested type."""
    candidate_types = union_membership.get(request.field_set_superclass)

    def applicable_field_sets(target):
        # Instantiate only the field-set types whose required fields this target has.
        return (
            fs_type.create(target)
            for fs_type in candidate_types
            if fs_type.is_applicable(target)
        )

    return FieldSetsPerTarget(applicable_field_sets(tgt) for tgt in request.targets)
class GenerateFileTargets(GenerateTargetsRequest):
    """Request to expand any `TargetFilesGenerator` into per-file generated targets."""
    generate_from = TargetFilesGenerator
@rule
async def generate_file_targets(
    request: GenerateFileTargets,
    union_membership: UnionMembership,
) -> GeneratedTargets:
    """Expand a `TargetFilesGenerator` into one generated target per matched source file."""
    try:
        sources_paths = await Get(
            SourcesPaths, SourcesPathsRequest(request.generator[MultipleSourcesField])
        )
    except Exception as e:
        # Re-raise with the generator's origin and address so users can locate the bad field.
        tgt = request.generator
        fld = tgt[MultipleSourcesField]
        raise InvalidFieldException(
            softwrap(
                f"""
                {tgt.description_of_origin}: Invalid field value for {fld.alias!r} in target {tgt.address}:
                {e}
                """
            )
        ) from e
    add_dependencies_on_all_siblings = False
    if request.generator.settings_request_cls:
        # The generator type can customize behavior via its settings request.
        generator_settings = await Get(
            TargetFilesGeneratorSettings,
            TargetFilesGeneratorSettingsRequest,
            request.generator.settings_request_cls(),
        )
        add_dependencies_on_all_siblings = generator_settings.add_dependencies_on_all_siblings
    return _generate_file_level_targets(
        type(request.generator).generated_target_cls,
        request.generator,
        sources_paths.files,
        request.template_address,
        request.template,
        request.overrides,
        union_membership,
        add_dependencies_on_all_siblings=add_dependencies_on_all_siblings,
    )
def rules():
    """Register this module's rules plus the file-target generation union rule."""
    return [
        *collect_rules(),
        UnionRule(GenerateTargetsRequest, GenerateFileTargets),
    ]
|
from django.db import models
# Allowed values for Artist.genre: (stored value, human-readable label).
GENRE_CHOICES = (
    ("rock", "Rock"),
    ("blues", "Blues"),
)
class Artist(models.Model):
    """A music artist with a stage name, picture, popularity score, and genre."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # Stage name, distinct from the artist's legal first/last name.
    artistic_name = models.CharField(max_length=255)
    picture_url = models.URLField()
    popularity = models.IntegerField()
    # Restricted to GENRE_CHOICES at the form/validation level.
    genre = models.CharField(choices=GENRE_CHOICES, max_length=255)
class Song(models.Model):
    """A song belonging to an Artist; deleted along with its artist (CASCADE)."""
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    # Optional: singles may have no album.
    album_name = models.CharField(max_length=255, blank=True)
|
#!/usr/bin/python
#
# Sample python code using the standard http lib only
#
import httplib
## Your Infinispan WAR server host
hostname = "localhost:8080"
webapp_name = "infinispan-server-rest"
cache_name = "___defaultcache"
key = "my_key"
#putting data in
print "Storing data on server %s under key [%s] over REST" % (hostname, key)
try:
conn = httplib.HTTPConnection(hostname)
data = "This is some test data." #could be string, or a file...
conn.request("POST", "/%s/rest/%s/%s" % (webapp_name, cache_name, key), data, {"Content-Type": "text/plain"})
response = conn.getresponse()
print "HTTP status: %s" % response.status
except:
print "Unable to connect to the REST server on %s. Is it running?" % hostname
#getting data out
print "Retrieving data from server %s under key [%s]" % (hostname, key)
try:
conn = httplib.HTTPConnection(hostname)
conn.request("GET", "/%s/rest/%s/%s" % (webapp_name, cache_name, key))
response = conn.getresponse()
print "HTTP status: %s" % response.status
print "Value retrieved: %s" % response.read()
except:
print "Unable to connect to the REST server on %s. Is it running?" % hostname
## For more information on usage see http://community.jboss.org/wiki/InfinispanRESTserver
|
# Generated by Django 3.2.4 on 2021-07-07 13:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter `Produto.nome`: add a unique constraint and the verbose name 'Nome'."""

    dependencies = [
        ('churrasco', '0002_auto_20210706_1629'),
    ]
    operations = [
        migrations.AlterField(
            model_name='produto',
            name='nome',
            field=models.CharField(max_length=100, unique=True, verbose_name='Nome'),
        ),
    ]
|
import numpy as np
import cv2
import math
import struct
import time
import serial
# Serial link to the robot controller (port name is Windows-style; adjust as needed).
ser1=serial.Serial('com7',9600)
time.sleep(2)  # give the serial device time to settle after opening the port
# Haar cascades: one trained on the "A" marker, one on the star marker.
detect_cascade=cv2.CascadeClassifier('/home/jayesh/Desktop/cv2_test/haarcascade_a.xml')
detectc_cascade=cv2.CascadeClassifier('/home/jayesh/Desktop/cv2_test/haarcascade_star.xml')
# Right and left cameras of the stereo pair, both forced to 720x405.
camR=cv2.VideoCapture(4)
camR.set(cv2.CAP_PROP_FRAME_WIDTH,720)
camR.set(cv2.CAP_PROP_FRAME_HEIGHT,405)
camL=cv2.VideoCapture(2)
camL.set(cv2.CAP_PROP_FRAME_WIDTH,720)
camL.set(cv2.CAP_PROP_FRAME_HEIGHT,405)
# Last computed coordinates for the "A" marker (X/Y/Z camera frame,
# XX/YY/ZZ shifted frame sent over serial) and, with the *c suffix,
# the same for the star marker. Initialized so the overlay text can render
# before the first detection.
X=0
Y=0
Z=0
Xc=0
Yc=0
Zc=0
XX=0
YY=0
ZZ=0
XXc=0
YYc=0
ZZc=0
while(True):
    # Sentinel offsets: only overwritten when the "A" marker is detected, so
    # `a1 < 1000 and a2 < 1000` below means "marker seen in both cameras".
    a1=2000 #right
    a2=2000 #left
    tfR,frameR=camR.read()
    gray = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)
    detect = detect_cascade.detectMultiScale(gray, 1.3, 5)
    for (xrr,yrr,wr,hr) in detect:
        # Draw the detection box and last computed coordinates on the right frame.
        cv2.rectangle(frameR,(xrr,yrr),(xrr+wr,yrr+hr),(255,0,0),2)
        cv2.putText(frameR, "A",(xrr,yrr-50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,0,0),2);
        cv2.putText(frameR, "X=" + str(X),(xrr,yrr-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameR, "Y=" + str(Y),(xrr,yrr-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameR, "Z=" + str(Z),(xrr,yrr), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameR, "XX=" + str(XX),(xrr,(yrr+hr)+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameR, "YY=" + str(YY),(xrr,(yrr+hr)+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameR, "ZZ=" + str(ZZ),(xrr,(yrr+hr)+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        # Box centre in pixels.
        xr=xrr+(wr/2)
        yr=yrr+(hr/2)
        # Horizontal/vertical offsets from the image centre.
        # NOTE(review): 32.5/17 look like half field-of-view angles and 3.4 a half-baseline
        # offset, with 3.14 approximating pi - confirm against the rig's calibration.
        a1=(50*math.tan(32.5*3.14/180)*(xr-360)/360)-3.4
        # NOTE(review): this overwrites the detection-box height `hr` with the vertical
        # offset used later for Z - intentional reuse, but worth renaming.
        hr=(50*math.tan(17*3.14/180)*(yr-202.5)/202.5)
    cv2.imshow('frame_Right',frameR)
    tfL,frameL=camL.read()
    gray = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
    detect = detect_cascade.detectMultiScale(gray, 1.3, 5)
    for (xll,yll,wl,hl) in detect:
        # Same overlay and offset computation for the left camera.
        cv2.rectangle(frameL,(xll,yll),(xll+wl,yll+hl),(255,0,0),2)
        cv2.putText(frameL, "A",(xll,yll-50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(255,0,0),2);
        cv2.putText(frameL, "X=" + str(X),(xll,yll-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameL, "Y=" + str(Y),(xll,yll-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameL, "Z=" + str(Z),(xll,yll), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameL, "XX=" + str(XX),(xll,(yll+hl)+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameL, "YY=" + str(YY),(xll,(yll+hl)+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        cv2.putText(frameL, "ZZ=" + str(ZZ),(xll,(yll+hl)+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,0,0),2);
        xl=xll+(wl/2)
        yl=yll+(hl/2)
        a2=(50*math.tan(32.5*3.14/180)*(xl-360)/360)+3.4
        hl=(50*math.tan(17*3.14/180)*(yl-202.5)/202.5)
    cv2.imshow('frame_Left',frameL)
    if (a1<1000):
        if (a2<1000):
            # Marker seen in both cameras: triangulate its position.
            x=((3.4*((-a2)-a1))/((-6.8)+a2-a1))
            y=(50-(340/(6.8-a2+a1)))
            X=-x
            Y=-(50-y)
            z=-(Y*hr)/50
            Z=z
            TT=1  # marker type tag sent over serial: 1 = "A" marker
            # Shift into the arm frame, then split each coordinate into magnitude plus
            # a sign flag (Xm/Ym/Zm) because struct '>BBBBBBB' packs unsigned bytes.
            ZZ=Z+13.5
            XX=38-(Y+1)
            YY=X+27.5
            Xm=0
            Ym=0
            Zm=0
            if XX<0:
                XX=-XX
                Xm=1
            if YY<0:
                YY=-YY
                Ym=1
            if ZZ<0:
                ZZ=-ZZ
                Zm=1
            print ('Xa=')
            print (X)
            print ('Ya=')
            print (Y)
            print ('Za=')
            print (Z)
    # Same pipeline for the star marker (suffix "c"), reusing the frames grabbed above.
    ac1=2000 #right
    ac2=2000 #left
    gray = cv2.cvtColor(frameR, cv2.COLOR_BGR2GRAY)
    detectc = detectc_cascade.detectMultiScale(gray, 1.3, 5)
    for (xrrc,yrrc,wrc,hrc) in detectc:
        cv2.rectangle(frameR,(xrrc,yrrc),(xrrc+wrc,yrrc+hrc),(0,0,255),2)
        cv2.putText(frameR, "Star",(xrrc,yrrc-50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2);
        cv2.putText(frameR, "X=" + str(Xc),(xrrc,yrrc-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameR, "Y=" + str(Yc),(xrrc,yrrc-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameR, "Z=" + str(Zc),(xrrc,yrrc), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameR, "XX=" + str(XXc),(xrrc,(yrrc+hrc)+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameR, "YY=" + str(YYc),(xrrc,(yrrc+hrc)+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameR, "ZZ=" + str(ZZc),(xrrc,(yrrc+hrc)+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        xrc=xrrc+(wrc/2)
        yrc=yrrc+(hrc/2)
        ac1=(50*math.tan(32.5*3.14/180)*(xrc-360)/360)-3.4
        hrc=(50*math.tan(17*3.14/180)*(yrc-202.5)/202.5)
    cv2.imshow('frame_Right',frameR)
    gray = cv2.cvtColor(frameL, cv2.COLOR_BGR2GRAY)
    detectc = detectc_cascade.detectMultiScale(gray, 1.3, 5)
    for (xllc,yllc,wlc,hlc) in detectc:
        cv2.rectangle(frameL,(xllc,yllc),(xllc+wlc,yllc+hlc),(0,0,255),2)
        cv2.putText(frameL, "Star",(xllc,yllc-50), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2);
        cv2.putText(frameL, "X=" + str(Xc),(xllc,yllc-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameL, "Y=" + str(Yc),(xllc,yllc-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameL, "Z=" + str(Zc),(xllc,yllc), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameL, "XX=" + str(XXc),(xllc,(yllc+hlc)+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameL, "YY=" + str(YYc),(xllc,(yllc+hlc)+30), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        cv2.putText(frameL, "ZZ=" + str(ZZc),(xllc,(yllc+hlc)+45), cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0,0,255),2);
        xlc=xllc+(wlc/2)
        ylc=yllc+(hlc/2)
        ac2=(50*math.tan(32.5*3.14/180)*(xlc-360)/360)+3.4
        hlc=(50*math.tan(17*3.14/180)*(ylc-202.5)/202.5)
    cv2.imshow('frame_Left',frameL)
    if (ac1<1000):
        if (ac2<1000):
            xc=((3.4*((-ac2)-ac1))/((-6.8)+ac2-ac1))
            yc=(50-(340/(6.8-ac2+ac1)))
            Xc=-xc
            Yc=-(50-yc)
            zc=-(Yc*hrc)/50
            Zc=zc
            TTc=2  # marker type tag: 2 = star marker
            ZZc=Zc+13.5
            XXc=38-(Yc+1)
            YYc=Xc+27.5
            Xmc=0
            Ymc=0
            Zmc=0
            if XXc<0:
                XXc=-XXc
                Xmc=1
            if YYc<0:
                YYc=-YYc
                Ymc=1
            if ZZc<0:
                ZZc=-ZZc
                Zmc=1
            print ('Xc=')
            print (Xc)
            print ('Yc=')
            print (Yc)
            print ('Zc=')
            print (Zc)
    # Keyboard control: 'a' sends the A-marker pose, 's' the star pose, 'k' quits.
    # NOTE(review): TT/Xm/Ym/Zm (and the *c variants) are only assigned after the first
    # successful detection, so pressing 'a'/'s' before that raises NameError; struct
    # '>BBBBBBB' also requires integer values in 0-255 - confirm upstream guarantees.
    key=cv2.waitKey(1)
    if key==ord('a'):
        ser1.write(struct.pack('>BBBBBBB',XX,YY,ZZ,TT,Xm,Ym,Zm))
    if key==ord('s'):
        ser1.write(struct.pack('>BBBBBBB',XXc,YYc,ZZc,TTc,Xmc,Ymc,Zmc))
    if key==ord('k'):
        break
# Release cameras and close all preview windows on exit.
camR.release()
camL.release()
cv2.destroyAllWindows()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# 实现一个函数,根据标题序列生成相应的标题序号。
# 输入参数,一个 Array,每个元素都是 # 为前缀的标题,保证层级连续,然后返回解析好的数据结构。
# 输入
# ["# a", "## b", "## c", "### d", "# e"]
#
#
# 输出
# [{"hn": "1", "title": "a"},
# {"hn": "1.1", "title": "b"},
# {"hn": "1.2", "title": "c"},
# {"hn": "1.2.1", "title": "d"},
# {"hn": "2", "title": "e"}]
class Solution(object):
    def markdown_parser(self, titles):
        """Assign hierarchical section numbers to markdown headings.

        :type titles: List -- headings like "# a", "## b"; levels are guaranteed to be
            contiguous (a level can only increase by one at a time).
        :rtype: List -- dicts {"hn": "1.2", "title": "..."} in input order.

        Fixes two bugs in the original:
        - `title[idx + 1]` returned only the first character of the title; now the
          full text after the space is kept.
        - counters for deeper levels were not reset when returning to a shallower
          level, producing e.g. "2.2" where "2.1" was expected.
        """
        nums = []  # nums[i] is the current counter at heading level i+1
        res = []
        for title in titles:
            level = title.index(' ')  # number of leading '#' characters
            if level > len(nums):
                # One level deeper: start a new counter at 1.
                nums.append(1)
            else:
                # Same or shallower level: drop deeper counters, bump this level.
                nums = nums[:level]
                nums[level - 1] += 1
            hn = '.'.join(str(n) for n in nums)
            res.append({"hn": hn, "title": title[level + 1:]})
        return res
if __name__ == '__main__':
    # Demo runs: nested headings, flat headings, and returning to a shallower level.
    print(Solution().markdown_parser(["# a", "## b", "## c", "### d", "# e"]))
    print(Solution().markdown_parser(["# a", "# b", "# c", "# d", "# e"]))
    print(Solution().markdown_parser(["# a", "## b", "## c", "### d", "## e"]))
# Sanity checks with single-character titles.
assert Solution().markdown_parser(["# a", "## b", "## c", "### d", "# e"]) == [{"hn": "1", "title": "a"}, {"hn": "1.1", "title": "b"}, {"hn": "1.2", "title": "c"}, {"hn": "1.2.1", "title": "d"}, {"hn": "2", "title": "e"}]
assert Solution().markdown_parser(["# a", "# b", "# c", "# d", "# e"]) == [{'hn': '1', 'title': 'a'}, {'hn': '2', 'title': 'b'}, {'hn': '3', 'title': 'c'}, {'hn': '4', 'title': 'd'}, {'hn': '5', 'title': 'e'}]
|
#!/usr/bin/python
def isAGirl(nome):
    """Guess whether a roster entry refers to a girl.

    `nome` looks like ">NAME<class-digit>..." (e.g. ">ELIA2AINF_BERNACCIA<"): the first
    name sits between a one-character prefix and the class-year digit. The name is
    considered female when it ends with "A" and is not in the known-male list.

    Fixes the original, which raised UnboundLocalError (`conf` never assigned) when no
    class digit was present; now returns False in that case.
    """
    nomiMaschili = ["LUCA","GIANLUCA","MATTIA","NICOLA","ANDREA","ELIA","ENEA"]
    numeri = ["1","2","3","4","5"] #il numero della classe es 1R
    for digit in numeri:
        pos = nome.find(digit)
        if pos > 0:
            # Drop the leading marker character and everything from the digit onwards.
            estratto = nome[1:pos]
            return estratto not in nomiMaschili and estratto.endswith("A")
    # No class digit found: cannot extract a name.
    return False
def isAGirlOnlyN(nome):
    """Return True when *nome* ends in 'A' and is not a known male name."""
    # Italian male names ending in 'A' that would otherwise be misclassified.
    nomiMaschili = ["LUCA","GIANLUCA","MATTIA","NICOLA","ANDREA","ELIA","ENEA"]
    is_male = False
    for candidate in nomiMaschili:
        if nome == candidate:
            is_male = True
    if is_male:
        return False
    if nome[-1] == "A":
        return True
    return False
"""
a = ">ELIA2AINF_BERNACCIA<"
if isAGirl(a):
print "G"
else:
print "B"
"""
|
# -*- coding: utf-8 -*-
from numpy import zeros
from numpy import int16
import scipy.io.wavfile
from constantes import *
from operator import add
import math as m
""" coeff_lissage est un entier paramétrant l'intensité du lissage indispensable pour éviter de commencer trop tôt à cause du bruit (à déterminer)
t_min est l'intervalle de temps de sécurité (à déterminer)
coeff_coupe est l'intensité de la coupe (à déterminer expérimentalement)
"""
def synchro(amplitudes,coeff_lissage,t_min,coeff_coupe):
    # Trim leading/trailing silence from a recording.
    # NOTE: Python 2 code (print statements, builtin `reduce`); RATE comes
    # from `constantes` via the star import at the top of the file.
    #   amplitudes    : raw sample array
    #   coeff_lissage : smoothing window size, in samples
    #   t_min         : safety time interval in ms
    #   coeff_coupe   : cut threshold as a fraction of the smoothed dynamic range
    # Returns the samples between the first and last window above threshold.
    N=len(amplitudes)
    N_lissage = (int(N/coeff_lissage))
    amplitude_lisse = zeros(N_lissage)
    maxi = 0
    mini = 0
    # COEFF = t_min expressed in smoothed-window units.
    COEFF = t_min*RATE/coeff_lissage/1000
    # Smooth: per-window mean of exp(|amplitude|/100), tracking min/max.
    for i in range(N_lissage):
        amplitude_lisse[i] = reduce(add, [m.exp(abs(amplitudes[i * coeff_lissage + j]/100)) for j in range(coeff_lissage)], 0)/coeff_lissage
        if(i == 0):
            maxi = amplitude_lisse[i]
            mini = maxi
        elif(amplitude_lisse[i] > maxi):
            maxi = amplitude_lisse[i]
        elif(amplitude_lisse[i] < mini):
            mini = amplitude_lisse[i]
    print "maxi", maxi
    print "mini", mini
    # Threshold relative to the observed dynamic range.
    valeur_seuil = coeff_coupe*(maxi-mini)
    print "valeur_seuil", valeur_seuil
    # Diagnostic only: how many windows are above threshold.
    compt = 0
    for i in range(N_lissage):
        if(amplitude_lisse[i] > valeur_seuil):
            compt += 1
    print "compt ", compt*coeff_lissage
    # Scan from both ends for the first/last window above threshold.
    i_min = 0
    i_max = N_lissage - 1
    i_minTrouve = False
    i_maxTrouve = False
    for i in range(N_lissage):
        if((not i_minTrouve) and amplitude_lisse[i] > valeur_seuil):
            i_minTrouve = True
            i_min = i
            print "i_min", i_min
        if((not i_maxTrouve) and amplitude_lisse[N_lissage - i - 1] > valeur_seuil):
            i_max = N_lissage - i - 1
            i_maxTrouve = True
            print "i_max", i_max
        if(i_minTrouve and i_maxTrouve):
            print "fin pour i = ", i
            break
    # Warn when the signal starts/ends inside the safety margin.
    if(i_min < COEFF):
        print "L'enregistrement a commence trop tard"
    if(i_max > N_lissage - COEFF):
        print "L'enregistrement a fini trop tot"
    print i_min*coeff_lissage
    print i_max*coeff_lissage
    print N
    # Copy out the retained span, converted back to raw-sample indices.
    taille = (i_max-i_min)*coeff_lissage
    print "taille = ", taille
    amplitudes_coupe = zeros(taille)
    for i in range(taille):
        amplitudes_coupe[i] = amplitudes[i+i_min*coeff_lissage]
    return amplitudes_coupe
#ampli = scipy.io.wavfile.read("0.wav")
#ampli2 = synchro(ampli[1],COEFF_LISSAGE,T_MIN,COEFF_COUPE)
#scipy.io.wavfile.write("0e.wav", ampli[0], int16(ampli2))
|
"""
切片(slice)
切片是取出序列中一个范围对应的元素
"""
a = list(range(10))
print(a)
print(a[2:3]) # 2
print(a[5:9]) # [5, 6, 7, 8]
print(a[5:-1]) # [5, 6, 7, 8]
print(a[-5:9]) # [5, 6, 7, 8]
print(a[-5:-1]) # [5, 6, 7, 8]
# 缺省
print(a[5:]) # [5, 6, 7, 8, 9]
print(a[:5]) # [0, 1, 2, 3, 4]
print(a[100:]) # []
print(a[:100]) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# step为正数
print(a[0:6:2]) # [0, 2, 4]
print(a[::2]) # [0, 2, 4, 6, 8]
print(a[:-2:2]) # [0, 2, 4, 6]
print(a[4::2]) # [4, 6, 8]
# step为负数
print(a[::-1]) # 实现字符串反转 [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
print(a[5::-1]) # [5, 4, 3, 2, 1, 0]
print(a[:4:-2]) # [9, 7, 5]
a1 = str(-321)
print(a1)
print(a1[:0:-1])
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
MATRIX_INT = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
MATRIX_STR = [['a', 'b', 'c'],
['d', 'e', 'f'],
['g', 'h', 'i']]
# 示例一、获取矩阵第二列的内容-------------------------------------------------
# 列表解析
res = [row[1] for row in MATRIX_INT]
print(res)
# 等效 for 循环
res = []
for row in MATRIX_INT:
res.append(row[1])
print(res)
# 示例二、获取矩阵对应位置元素的乘积-------------------------------------------
# 列表解析
res = [MATRIX_INT[row][col] * MATRIX_STR[row][col]
for row in range(3) for col in range(3)]
print(res)
# 等效 for 循环
res = []
for row in range(3):
for col in range(3):
res.append(MATRIX_INT[row][col] * MATRIX_STR[row][col])
print(res)
# 列表解析 - 生成矩阵
res = [[MATRIX_INT[row][col] * MATRIX_STR[row][col]
for col in range(3)] for row in range(3)]
"""
res = [[MATRIX_INT[row][col] * MATRIX_STR[row][col]
for row in range(3)] for col in range(3)]
# 这个输出的结果会与上面的有所不同,在于是按照123顺序还是按照147顺序
"""
print(res)
# 等效 for 循环
res = []
for row in range(3):
temp = []
for col in range(3):
temp.append(MATRIX_INT[row][col] * MATRIX_STR[row][col])
res.append(temp)
print(res)
"""
res = []
for col in range(3):
temp = []
for row in range(3):
temp.append(MATRIX_INT[row][col] * MATRIX_STR[row][col])
res.append(temp)
print(res)
"""
|
[gtfsrt://<name>]
feed = <string>
auth = <string>
|
import valkyrie
class UniqueList:
    """A list that can optionally deduplicate items via their `equals` method."""

    def __init__(self, unique):
        # `unique` decides whether add() rejects duplicates.
        self.list = []
        self.unique = unique

    def add(self, item):
        """Append *item* and return its index.

        In unique mode, a duplicate (per item.equals) is not appended and the
        index of the existing entry is returned instead.
        """
        if self.unique:
            matches = item.equals  # bind once; called against each entry
            for position, existing in enumerate(self.list):
                if matches(existing):
                    return position
        self.list.append(item)
        return len(self.list) - 1
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
seqmat.py
=============================================
Debugging seqmat mismatch, zeros.
::
tconcentric.py --cmx 5
[2016-11-06 17:30:15,759] p43702 {/Users/blyth/opticks/ana/seq.py:404} INFO - compare dbgseq 0 dbgmsk 0
. seqmat_ana 1:concentric -1:concentric c2 ab ba
. 1000000 1000000 2325.00/233 = 9.98
12 4443231 3040 3272 8.53 0.929 +- 0.017 1.076 +- 0.019 [7 ] Gd Ac LS Ac MO MO MO
40 3443231323443231 194 483 123.37 0.402 +- 0.029 2.490 +- 0.113 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac MO MO Ac
50 4443231323443231 299 57 164.51 5.246 +- 0.303 0.191 +- 0.025 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac MO MO MO
62 3323111323443231 181 1 178.02 181.000 +- 13.454 0.006 +- 0.006 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Gd Ac LS Ac Ac
68 4323111323443231 0 147 147.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Gd Ac LS Ac MO
70 344323132231 147 111 5.02 1.324 +- 0.109 0.755 +- 0.072 [12] Gd Ac LS LS Ac Gd Ac LS Ac MO MO Ac
76 4323132344323111 0 132 132.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Gd Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac MO
79 3323132344323111 126 1 123.03 126.000 +- 11.225 0.008 +- 0.008 [16] Gd Gd Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac Ac
84 3323113234432311 118 0 118.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Ac LS Ac Ac
86 1132231323443231 114 32 46.05 3.562 +- 0.334 0.281 +- 0.050 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS LS Ac Gd Gd
91 1132344323443231 108 16 68.26 6.750 +- 0.650 0.148 +- 0.037 [16] Gd Ac LS Ac MO MO Ac LS Ac MO MO Ac LS Ac Gd Gd
93 4323113234432311 0 107 107.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Ac LS Ac MO
106 1132344323132231 84 23 34.78 3.652 +- 0.398 0.274 +- 0.057 [16] Gd Ac LS LS Ac Gd Ac LS Ac MO MO Ac LS Ac Gd Gd
107 3132344323443231 0 83 83.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Ac LS Ac MO MO Ac LS Ac MO MO Ac LS Ac Gd Ac
110 2223111 79 52 5.56 1.519 +- 0.171 0.658 +- 0.091 [7 ] Gd Gd Gd Ac LS LS LS
111 3132231323443231 0 79 79.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS LS Ac Gd Ac
125 2332332332332231 0 64 64.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Ac LS LS Ac Ac LS Ac Ac LS Ac Ac LS Ac Ac LS
127 3322311323443231 60 0 60.00 0.000 +- 0.000 0.000 +- 0.000 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Ac LS LS Ac Ac
129 3332332332332231 56 4 45.07 14.000 +- 1.871 0.071 +- 0.036 [16] Gd Ac LS LS Ac Ac LS Ac Ac LS Ac Ac LS Ac Ac Ac
135 2231111323443231 51 6 35.53 8.500 +- 1.190 0.118 +- 0.048 [16] Gd Ac LS Ac MO MO Ac LS Ac Gd Gd Gd Gd Ac LS LS
. 1000000 1000000 2325.00/233 = 9.98
"""
import os, sys, logging, numpy as np
log = logging.getLogger(__name__)
from opticks.ana.base import opticks_main
from opticks.ana.evt import Evt
from opticks.ana.nbase import count_unique_sorted
if __name__ == '__main__':
    ok = opticks_main(det="concentric", src="torch", tag="1")

    # Alternative sequences kept for quick switching while debugging:
    #seq = "Gd Ac LS Ac MO MO MO"
    #seq = "TO BT BT BT BT DR AB"
    #seq = "TO BT BT BT BT SC AB"
    #seq = "Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac MO MO MO"
    seq = "Gd Gd Gd Ac LS Ac MO MO Ac LS Ac Gd Ac LS Ac Ac"

    # Paired events: tag "N" vs "-N" are the two sides being compared.
    a, b = (Evt(tag=fmt % ok.utag, src=ok.src, det=ok.det, args=ok, seqs=[seq])
            for fmt in ("%s", "-%s"))

    for evt in (a, b):
        evt.history_table(slice(0, 20))

    acu = count_unique_sorted(a.seqhis[a.psel])
    bcu = count_unique_sorted(b.seqhis[b.psel])
|
import matchAndSend
import form_extraction
import config
import sys
def main():
    """Entry point: configure from argv, optionally pull fresh form data, then match and send."""
    config.config(sys.argv)
    if not config.DISABLE_EXTRACT:
        # Fetch and organize new Bell Ringer responses before matching.
        form_extraction.extract(is_listener=False)
        form_extraction.organize_form()
    matchAndSend.matchAndSend()


if __name__ == '__main__':
    main()
|
import json
import requests
youtube_url = "https://www.youtube.com/watch?v=y6XX39DaEL4"

# Earlier iterations fetched the transcript JSON from Google Cloud Storage
# with requests + json.loads; it is now read from the local copy below.
with open('examples/freakonomics/280-why-is-my-life-so-hard/280-why-is-my-life-so-hard-times-original.json') as json_file:
    output = json.load(json_file)

# Collect the transcript fragments and per-word timings from the
# speech-to-text response structure.
transcript_list = []
word_times = []
for root_key in output:
    if root_key == 'response':
        for results in output[root_key]['results']:
            for alt in results['alternatives']:
                transcript_list.append(alt['transcript'])
                word_times.extend(
                    {'word': w['word'],
                     'startTime': w['startTime'],
                     'endTime': w['endTime']}
                    for w in alt['words']
                )

# Write the plain transcript to disk.
transcript = ''.join(transcript_list)
with open("transcript.md", "w") as text_file:
    text_file.write(transcript)

for word in word_times:
    print(word['word'], ': ', word['startTime'])

# Bundle metadata plus word timings into the output JSON document.
json_output = {
    'meta': {
        'title': '| FreaKonomics',
        'web_story_uri': 'https://medium.com/conversations-with-tyler/malcolm-gladwell-podcast-outliers-tyler-cowen-3abdf99068ee',
        'web_transcript_uri': 'https://medium.com/conversations-with-tyler/malcolm-gladwell-podcast-outliers-tyler-cowen-3abdf99068ee',
        'transcript_json_uri': 'https://storage.googleapis.com/citeit_speech_text/malcolm-gladwell-transcript.json',
        'transcript_json_times_uri': 'https://storage.googleapis.com/citeit_speech_text/malcolm-gladwell-transcript-times.json',
        'audio_story_uri': 'https://storage.googleapis.com/citeit_speech_text/malcolm-gladwell.mp3',
        'web_series_uri': 'https://medium.com/conversations-with-tyler',
        'video_uri': 'https://www.youtube.com/watch?v=ehlhrqSWPbo',
        'video_channel_name': 'Mercatus Center',
        'video_channel_uri': 'https://www.youtube.com/channel/UCKtFwcQCsl1ttW2CgOqFMUQ',
    },
    'word_times': word_times
}
with open('why-is-my-life-so-hard-times.json', 'w') as transcript_times:
    json.dump(json_output, transcript_times)
|
a = 11
b = 22
c = a >= b # c will be False
d = a <= b # d will be True
e = c == d # e will be False
f = a != b # f will be True
# File:   exp_demo06.py
# Author: Kaiching Chang
# Date:   July, 2014
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
1. Why write functions?
   Less duplicated code, better reuse, and easier modification.
2. When does Python create a function?
   When execution reaches the def statement.
3. What does a function return when it has no return statement?
   None.
4. When do the statements inside a function definition run?
   When the function is called.
5. What is wrong with checking the types of objects passed into a function?
   It breaks the function's flexibility by restricting it to specific types;
   without type checks the function can handle any object type that supports
   the operations it performs.
"""
|
from django.contrib.auth import login, logout
from django.http import JsonResponse, HttpResponse, Http404
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt

from rest_framework import generics, mixins, viewsets
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView

from employee.serializers import EmployeeSerializers, StudnetSerializers, LoginSerializers

from .models import Employee, Student
# Create your views here.
def employee(request):
    # NOTE(review): debug stub -- fetches all employees and prints them but
    # returns None, which Django rejects as a view response. Confirm whether
    # this view is still wired to a URL or can be removed.
    user = Employee.objects.all()
    print(user)
class EmployeeViewSet(viewsets.ModelViewSet):
    # Standard ModelViewSet: list/retrieve/create/update/destroy for Employee.
    # No authentication/permission classes declared, so DRF defaults apply.
    queryset = Employee.objects.all()
    serializer_class = EmployeeSerializers
# function based api
@csrf_exempt
def student(request):
    """Function-based endpoint: list all students (GET) or create one (POST)."""
    if request.method == 'GET':
        serializer = StudnetSerializers(Student.objects.all(), many=True)
        return JsonResponse(serializer.data, safe=False)
    if request.method == 'POST':
        serializer = StudnetSerializers(data=JSONParser().parse(request))
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=401)
        serializer.save()
        return JsonResponse(serializer.data, status=201)
@csrf_exempt
def student_details(request, id):
    """Retrieve (GET), update (PUT) or delete (DELETE) a student by id."""
    try:
        instance = Student.objects.get(id=id)
    except Student.DoesNotExist:
        # BUG FIX: the kwarg was capitalized ('Status=404'), which raises
        # TypeError at runtime; HttpResponse expects 'status'.
        return HttpResponse(status=404)
    if request.method == 'GET':
        # NOTE(review): this returns the full student list rather than the
        # requested instance -- presumably unintended; confirm with API
        # consumers before changing the response shape.
        student = Student.objects.all()
        serializer = StudnetSerializers(student, many=True)
        return JsonResponse(serializer.data, safe=False)
    elif request.method == 'PUT':
        data = JSONParser().parse(request)
        serializer = StudnetSerializers(instance, data=data)
        if serializer.is_valid():
            serializer.save()
            return JsonResponse(serializer.data, status=200)
        return JsonResponse(serializer.errors, status=400)
    elif request.method == 'DELETE':
        instance.delete()
        return HttpResponse(status=204)
# class based api
class StudentAPI(APIView):
    """Class-based endpoint: list all students or create a new one."""

    def get(self, request):
        serializer = StudnetSerializers(Student.objects.all(), many=True)
        return Response(serializer.data, status=200)

    def post(self, request):
        serializer = StudnetSerializers(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=401)
        serializer.save()
        return Response(serializer.data, status=201)
class Student_Details(APIView):
    """Retrieve, update or delete a single Student by id."""

    def get_object(self, id):
        """Return the Student with the given id, or raise Http404.

        BUG FIX: this used to *return* Response(status=404) on a miss, which
        callers then handed to the serializer (AttributeError -> HTTP 500).
        Raising Http404 lets DRF's exception handler emit a proper 404.
        """
        try:
            return Student.objects.get(id=id)
        except Student.DoesNotExist:
            raise Http404

    def get(self, request, id):
        instance = self.get_object(id=id)
        serializer = StudnetSerializers(instance)
        return Response(serializer.data, status=200)

    def put(self, request, id=None, format=None):
        instance = self.get_object(id)
        serializer = StudnetSerializers(instance, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=400)

    def delete(self, request, id):
        instance = self.get_object(id)
        instance.delete()
        return Response(status=204)
class ApiGeneric(generics.GenericAPIView, mixins.ListModelMixin, mixins.CreateModelMixin, mixins.UpdateModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin):
    """Generic view wiring the standard DRF mixin actions to HTTP verbs."""

    serializer_class = StudnetSerializers
    queryset = Student.objects.all()
    lookup_field = 'id'
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, id):
        # With an id this is a detail fetch, otherwise a collection listing.
        return self.retrieve(request, id) if id else self.list(request)

    def post(self, request):
        return self.create(request)

    def put(self, request, id):
        return self.update(request, id)

    def delete(self, request, id):
        return self.destroy(request, id)
class LoginView(APIView):
    """Authenticate a user and hand back a DRF auth token."""

    def post(self, request, *args, **kwargs):
        serializer = LoginSerializers(data=request.data)
        serializer.is_valid(raise_exception=True)
        user = serializer.validated_data["user"]
        print(user)
        # Establish the session, then issue (or reuse) the user's token.
        login(request, user)
        token, _created = Token.objects.get_or_create(user=user)
        return Response({"token": token.key}, status=200)
class LogoutView(APIView):
    """End the authenticated user's session."""

    # BUG FIX: '(TokenAuthentication)' is just the class, not a tuple --
    # DRF iterates authentication_classes, so it must be a sequence.
    authentication_classes = (TokenAuthentication,)

    def post(self, request):
        logout(request)
        return Response(status=204)
class Studentviewset(viewsets.ViewSet):
    """Hand-rolled ViewSet exposing CRUD actions for Student records."""

    model = Student
    queryset = Student.objects.all()
    serializer_class = StudnetSerializers
    lookup_field = 'id'
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)

    def list(self, request):
        serializer = self.serializer_class(self.queryset, many=True)
        return Response(serializer.data, status=200)

    def create(self, request):
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response(data={"msg":"Unable to create the data"},status=403)
        serializer.save()
        return Response(data={"msg":"Data has been created"})

    def retrieve(self, request, id):
        record = self.model.objects.get(id=id)
        return Response(self.serializer_class(record).data)

    def update(self, request, id=None):
        record = self.model.objects.get(id=id)
        serializer = self.serializer_class(record, data=request.data)
        if not serializer.is_valid():
            return Response(data={"msg":"Unable to update the data"},status=403)
        serializer.save()
        return Response(data={"msg":"Data has been created"})

    def partial_update(self, request, id=None):
        # PATCH deliberately disabled.
        return Response(status=403, data={"msg": "API not allowed."})

    def destroy(self, request, id=None):
        record = self.model.objects.get(id=id)
        record.delete()
        return Response(data={"msg":"data is deleted "})
class StudentModelViewSet(viewsets.ModelViewSet):
    """Full CRUD for Student via ModelViewSet; token-authenticated."""

    queryset = Student.objects.all()
    serializer_class = StudnetSerializers
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
|
# coding: utf-8
"""
Telstra SMS Messaging API
The Telstra SMS Messaging API allows your applications to send and receive SMS text messages from Australia's leading network operator. It also allows your application to track the delivery status of both sent and received SMS messages.
OpenAPI spec version: 2.1.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class InboundPollResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, to=None, _from=None, body=None, received_timestamp=None, more_messages=None, message_id=None):
        """
        InboundPollResponse - a model defined in Swagger
        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        self.swagger_types = {
            'to': 'str',
            '_from': 'str',
            'body': 'str',
            'received_timestamp': 'str',
            'more_messages': 'int',
            'message_id': 'str'
        }
        self.attribute_map = {
            'to': 'to',
            '_from': 'from',
            'body': 'body',
            'received_timestamp': 'receivedTimestamp',
            'more_messages': 'moreMessages',
            'message_id': 'messageId'
        }
        # Backing fields for the properties below. Note the double underscore
        # on __from: the attribute is named-mangled so it cannot collide with
        # the public `_from` property ('from' is a Python keyword, hence the
        # leading-underscore property name).
        self._to = to
        self.__from = _from
        self._body = body
        self._received_timestamp = received_timestamp
        self._more_messages = more_messages
        self._message_id = message_id
    @property
    def to(self):
        """
        Gets the to of this InboundPollResponse.
        The phone number (recipient) that the message was sent to(in E.164 format).
        :return: The to of this InboundPollResponse.
        :rtype: str
        """
        return self._to
    @to.setter
    def to(self, to):
        """
        Sets the to of this InboundPollResponse.
        The phone number (recipient) that the message was sent to(in E.164 format).
        :param to: The to of this InboundPollResponse.
        :type: str
        """
        self._to = to
    @property
    def _from(self):
        """
        Gets the _from of this InboundPollResponse.
        The phone number (sender) that the message was sent from (in E.164 format).
        :return: The _from of this InboundPollResponse.
        :rtype: str
        """
        return self.__from
    @_from.setter
    def _from(self, _from):
        """
        Sets the _from of this InboundPollResponse.
        The phone number (sender) that the message was sent from (in E.164 format).
        :param _from: The _from of this InboundPollResponse.
        :type: str
        """
        self.__from = _from
    @property
    def body(self):
        """
        Gets the body of this InboundPollResponse.
        Text body of the message that was sent
        :return: The body of this InboundPollResponse.
        :rtype: str
        """
        return self._body
    @body.setter
    def body(self, body):
        """
        Sets the body of this InboundPollResponse.
        Text body of the message that was sent
        :param body: The body of this InboundPollResponse.
        :type: str
        """
        self._body = body
    @property
    def received_timestamp(self):
        """
        Gets the received_timestamp of this InboundPollResponse.
        The date and time when the message was recieved by recipient.
        :return: The received_timestamp of this InboundPollResponse.
        :rtype: str
        """
        return self._received_timestamp
    @received_timestamp.setter
    def received_timestamp(self, received_timestamp):
        """
        Sets the received_timestamp of this InboundPollResponse.
        The date and time when the message was recieved by recipient.
        :param received_timestamp: The received_timestamp of this InboundPollResponse.
        :type: str
        """
        self._received_timestamp = received_timestamp
    @property
    def more_messages(self):
        """
        Gets the more_messages of this InboundPollResponse.
        Indicates if there are more messages that can be polled from the server. 0=No more messages available. Anything else indicates there are more messages on the server.
        :return: The more_messages of this InboundPollResponse.
        :rtype: int
        """
        return self._more_messages
    @more_messages.setter
    def more_messages(self, more_messages):
        """
        Sets the more_messages of this InboundPollResponse.
        Indicates if there are more messages that can be polled from the server. 0=No more messages available. Anything else indicates there are more messages on the server.
        :param more_messages: The more_messages of this InboundPollResponse.
        :type: int
        """
        self._more_messages = more_messages
    @property
    def message_id(self):
        """
        Gets the message_id of this InboundPollResponse.
        Optional message ID of the SMS you sent. Use this ID to view the message status or get responses.
        :return: The message_id of this InboundPollResponse.
        :rtype: str
        """
        return self._message_id
    @message_id.setter
    def message_id(self, message_id):
        """
        Sets the message_id of this InboundPollResponse.
        Optional message ID of the SMS you sent. Use this ID to view the message status or get responses.
        :param message_id: The message_id of this InboundPollResponse.
        :type: str
        """
        self._message_id = message_id
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # getattr(self, '_from') resolves through the property above, so the
        # mangled backing field is still reached correctly.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, InboundPollResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
from datetime import datetime
from tensorboardX import SummaryWriter
import torch
import torch.distributed as dist
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F
import os
import numpy as np
import time
import parameters as params
from dataset import Dataset
from model import CNN
from loss import loss_class_mean
def adjust_learning_rate(optimizer, epoch, alpha_plan, beta1_plan):
    """Apply the scheduled learning rate and Adam beta1 for *epoch* to every param group."""
    lr = alpha_plan[epoch]
    betas = (beta1_plan[epoch], 0.999)
    for group in optimizer.param_groups:
        group['lr'] = lr
        group['betas'] = betas
def accuracy(logit, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        logit: raw class scores, shape (batch, num_classes).
        target: ground-truth class indices, shape (batch,).
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors: precision@k in percent, one per k.
    """
    output = F.softmax(logit, dim=1)
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: correct[:k] is non-contiguous after t(), so .view(-1)
        # raises on modern PyTorch; reshape(-1) copies only when needed.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def train(run_id, use_cuda, epoch, rate_schedule, noise_or_not, writer, train_loader, model1, optimizer1):
    """Run one epoch of noisy-label training, log metrics, and checkpoint.

    run_id        -- experiment identifier, used for the checkpoint directory
    rate_schedule -- per-epoch forget rate passed to the loss
    noise_or_not  -- per-sample noise flags used to compute the pure ratio
    Returns (mean accuracy, list of per-batch pure ratios, the model).
    """
    pure_ratio_1_list=[]
    loss_net_1 = []
    accuracy_net_1 = []
    train_total=0
    train_correct=0
    start_time = time.time()
    for i, (images, labels, indexes) in enumerate(train_loader):
        # Dataset indexes are needed so the loss can look up noise flags.
        ind=indexes.cpu().numpy().transpose()
        if use_cuda:
            images = images.float().cuda()
            labels = labels.cuda()
        logits1, embeddings1 = model1(images)
        prec1,_ = accuracy(logits1, labels, topk=(1, 5))
        accuracy_net_1.append(prec1.item())
        train_total+=1
        train_correct+=prec1
        # loss_class_mean selects samples according to the forget rate and
        # reports what fraction of the kept samples were actually clean.
        loss_1, pure_ratio_1 = loss_class_mean(logits1, labels, rate_schedule[epoch], ind, noise_or_not, embeddings1, epoch)
        pure_ratio_1_list.append(100*pure_ratio_1)
        optimizer1.zero_grad()
        loss_1.backward()
        optimizer1.step()
        loss_net_1.append(loss_1.item())
        if i % params.print_freq == 0:
            print ('Epoch [%d/%d], Batch [%d] Training Accuracy1: %.4F, Loss1: %.4f, Pure Ratio: %.4f'
                    %(epoch, params.n_epoch, i, np.mean(accuracy_net_1), np.mean(loss_net_1), \
                        np.mean(pure_ratio_1_list)))
    time_taken = time.time() - start_time
    print('epoch ',epoch,' time taken: ', time_taken, ' Acc1: ',np.mean(accuracy_net_1),' Loss1: ',np.mean(loss_net_1), \
        ' pure ratio1: ',np.mean(pure_ratio_1_list))
    # Periodic checkpoint of model + optimizer state.
    save_dir = os.path.join(params.saved_models_dir, run_id)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    if epoch % params.save_frequency == 0:
        save_file_path = os.path.join(save_dir, 'model_{}.pth'.format(epoch))
        states = {
            'epoch': epoch,
            'state_dict1': model1.state_dict(),
            'optimizer1': optimizer1.state_dict(),
        }
        torch.save(states, save_file_path)
    writer.add_scalar('Training Loss 1', np.mean(loss_net_1), epoch)
    writer.add_scalar('Training Accuracy 1', np.mean(accuracy_net_1), epoch)
    return np.mean(accuracy_net_1), pure_ratio_1_list, model1
def evaluate(run_id, use_cuda, epoch, writer, test_loader, model):
    """Run the model over test_loader and log top-1 accuracy.

    run_id      -- experiment identifier (unused here; kept for API symmetry)
    use_cuda    -- move images to GPU when True; labels stay on CPU because
                   predictions are brought back to CPU before comparison
    epoch       -- current epoch (for logging only)
    writer      -- SummaryWriter-like object with add_scalar
    test_loader -- iterable of (images, labels, indexes) batches
    model       -- network returning (logits, embeddings)
    """
    model.eval()
    correct = 0
    total = 0
    start_time = time.time()
    # IMPROVEMENT: inference does not need autograd; no_grad saves memory
    # and time without changing any computed value.
    with torch.no_grad():
        for i, (images, labels, indexes) in enumerate(test_loader):
            if use_cuda:
                images = images.float().cuda()
                # labels intentionally stay on CPU; pred is moved back below.
            logits, _ = model(images)
            outputs = F.softmax(logits, dim=1)
            _, pred = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (pred.cpu() == labels).sum()
    acc = float(correct)/float(total)
    time_taken = time.time() - start_time
    print('epoch ',epoch,' time taken: ',time_taken, ' Acc: ',acc)
    writer.add_scalar('Validation Accuracy ', acc, epoch)
def train_classifier(run_id, use_cuda):
    """Full training driver: build data/model/optimizer, then train and
    evaluate for params.n_epoch epochs, logging to tensorboard under run_id."""
    writer = SummaryWriter(os.path.join(params.logs_dir, str(run_id)))
    dataset_info = Dataset(run_id, params.dataset)
    train_dataset = dataset_info.train_dataset
    test_dataset = dataset_info.test_dataset
    noise_or_not = train_dataset.noise_or_not
    # Fall back to the label-noise rate when no explicit forget rate is set.
    if params.forget_rate is None:
        forget_rate=params.noise_rate
    else:
        forget_rate = params.forget_rate
    # Adjust learning rate and betas for Adam Optimizer: constant until
    # epoch_decay_start, then linear decay of lr and a drop of beta1.
    mom1 = 0.9
    mom2 = 0.1
    alpha_plan = [params.learning_rate] * params.n_epoch
    beta1_plan = [mom1] * params.n_epoch
    for i in range(params.epoch_decay_start, params.n_epoch):
        alpha_plan[i] = float(params.n_epoch - i) / (params.n_epoch - params.epoch_decay_start) * params.learning_rate
        beta1_plan[i] = mom2
    # define drop rate schedule: ramp up to forget_rate over num_gradual epochs.
    rate_schedule = np.ones(params.n_epoch) * forget_rate
    rate_schedule[:params.num_gradual] = np.linspace(0, forget_rate**params.exponent, params.num_gradual)
    #print('rate_schedule: ',rate_schedule)
    # Set saved_model to a checkpoint path to resume training.
    saved_model = None
    cnn1 = CNN(input_channel=dataset_info.input_channel, n_outputs=dataset_info.num_classes)
    if saved_model is not None:
        cnn1.load_state_dict(torch.load(saved_model)['state_dict1'])
        print('model loaded from: ',saved_model)
    if use_cuda:
        cnn1.cuda()
    optimizer1 = torch.optim.Adam(cnn1.parameters(), lr=params.learning_rate)
    for epoch in range(params.n_epoch):
        # Dataloaders are rebuilt each epoch to reshuffle.
        train_dataloader = DataLoader(train_dataset, batch_size = params.batch_size, shuffle=True, num_workers=4)
        print('train dataloader: ',len(train_dataloader),flush=True)
        accuracy_1, pure_ratio_1_list, model1 = train(run_id, use_cuda, \
            epoch, rate_schedule, noise_or_not, writer, train_dataloader, cnn1, optimizer1)
        adjust_learning_rate(optimizer1, epoch, alpha_plan, beta1_plan)
        test_dataloader = DataLoader(test_dataset, batch_size = params.batch_size, shuffle=True, num_workers=4)
        print('valid dataloader: ',len(test_dataloader),flush=True)
        evaluate(run_id, use_cuda, epoch, writer, test_dataloader, model1)
if __name__ == "__main__":
run_started = datetime.today().strftime('%d-%m-%y_%H%M')
use_cuda = torch.cuda.is_available()
print('USE_CUDA: ',use_cuda,flush=True)
print('run id: ',run_started)
train_classifier(run_started, use_cuda)
|
a = True
b = False
c = a and b # c will be False
d = a or b # d will be True
e = not b # e will be True
# File:   exp_demo03.py
# Author: Kaiching Chang
# Date:   July, 2014
|
"""!@mainpage HPTools
@section intro_sec Introduction
Python Tools to manipulate, analyze and plot DNA helical parameter data
TcB @ Louisiana Tech 2018
contributions from Zilong Li, Ran Sun
@page HPTools HPTools
Usage: HPTools.py
@page Run-Me RunMe
Usage: Run-Me.py
"""
import sys
import os
import urllib2
import twobitreader
####################
### Check valid url
####################
def _CHECKURL(url):
    """Return True when *url* can be opened, False on a URL error.

    Python 2 code: relies on the urllib2 module. Exceptions other than
    URLError propagate to the caller, as before.
    """
    try:
        urllib2.urlopen(url)
    except urllib2.URLError:
        return False
    return True
####################
### Check valid file
####################
def _CHECKFILE(fp):
"""Check whether the file is valid"""
if os.path.isfile(fp) == True:
return True
else:
return False
#################################
### Read sequence from 2bit files
#################################
def Read_sequence_2bit(fp,chromatin,start,end):
    """
    Given 2bit file, chromatin e.g. chrIII, start position and end position
    Return sequence(string) with uppercase 'ACGT's
    Tested that url for 2bit will not work, need to be 2bit file.
    """
    if _CHECKFILE(fp)==True:
        tbf = twobitreader.TwoBitFile(str(fp))
        # Slice the requested chromosome range; twobitreader handles the
        # on-disk 2bit indexing.
        seq = tbf[str(chromatin)][int(start):int(end)]
        seq = seq.upper()
    else:
        # Python 2 print statement; the process exits when fp is invalid,
        # so the return below is only reached on success.
        print "Provide a valid file"
        sys.exit(0)
    return seq
#############################
### Read sequence from txt files
#############################
def Read_sequence_txt(fp):
    """
    Given seqin.txt, either with one column of sequence or one/several rows of sequence
    Return sequence(string) with uppercase 'ACGT's
    """
    if _CHECKFILE(fp)==True:
        with open(fp) as f:
            # Concatenate all lines, stripping newlines, into one sequence.
            seq=''.join(line.replace('\n', '') for line in f)
        seq = seq.upper()
    else:
        # Python 2 print statement; the process exits when fp is invalid.
        print "Provide a valid file"
        sys.exit(0)
    return seq
|
#!/usr/bin/env python
# Author: Benjamin Smith
# Date: 6th Feb 2017
# File: bot.py
# Purpose: Retweet tweets associated toward computer science topics
# import libraries
import os
import tweepy
import json
import logging
import warnings
import time
import http.client
from random import randint
from pprint import pprint
from tweepy import Stream
from tweepy import StreamListener
from tweepy import OAuthHandler
from tweepy import API
from secrets import *
from time import gmtime, strftime
warnings.filterwarnings("ignore")
# Individual bot config
# Replace with your bot name
bot_username = 'codingbot1000'
logfile_name = bot_username + ".log"
# Authenticating keys (C_KEY etc. come from the star-import of secrets)
auth_handler = OAuthHandler(C_KEY, C_SECRET)
auth_handler.set_access_token(A_TOKEN, A_TOKEN_SECRET)
# Authenticated API client; waits automatically when rate-limited
twitter_client = API(auth_handler, retry_count=1, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# set logging
logging.getLogger("main").setLevel(logging.INFO)
# Tweets containing any of these keywords are skipped
AVOID = ["java" ]
# Rolling counter used to vary the sleep interval between retweets
count = 0
class PyStreamListener(StreamListener):
    """Stream listener that retweets and favourites matching tweets.

    Relies on module globals: ``twitter_client``, ``count``, ``AVOID``,
    ``stream`` and the ``log`` helper defined later in this file.
    """

    def on_data(self, data):
        """Handle one incoming tweet payload; return True to keep streaming."""
        while True:
            tweet = json.loads(data)
            try:
                try:
                    global count
                    # Vary the post-retweet sleep (minutes) by cycle position.
                    if count == 0:
                        nap = 25
                    elif count == 1:
                        nap = 28
                    elif count == 2:
                        nap = 22
                    elif count == 3:
                        nap = 62
                    elif count == 4:
                        nap = 26
                        count = 0
                    else:
                        # BUG FIX: 'nap' was unbound on this path, which would
                        # raise NameError below; fall back to the default.
                        count = 0
                        nap = 25
                    publish = True
                    # if tweet includes excluded words don't retweet
                    for word in AVOID:
                        if word in tweet['text'].lower():
                            logging.info("SKIPPED FOR {}".format(word))
                            publish = False
                    # if tweet is not in english don't retweet
                    if tweet.get('lang') and tweet.get('lang') != 'en':
                        publish = False
                    if publish:
                        twitter_client.retweet(tweet['id_str'])
                        twitter_client.create_favorite(tweet['id_str'])
                        # BUG FIX: format string was missing its {} placeholder.
                        logging.debug("RT: {}".format(tweet['text']))
                        log("Retweeted: " + tweet['id_str'])
                        log("Retweeted & Favorited --> Sleeping")
                        # BUG FIX: print() does no %-interpolation; format explicitly.
                        print("sleeping for: '{}' minutes".format(nap))
                        print("Count: '{}'".format(count))
                        count += 1
                        # Sleep before posting again to stay under limits.
                        time.sleep(60 * nap)
                # exception handling for failed retweeting (e.g. duplicates)
                except Exception as e:
                    logging.error(e)
                return True
            # BUG FIX: the original caught 'httplib.IncompleteRead', but this
            # file imports http.client -- 'httplib' raised NameError instead
            # of handling the incomplete read.
            except http.client.IncompleteRead:
                print("Incomplete Read occurred --> continuing")
                continue
            except KeyboardInterrupt:
                print("\n\nUser disconnected stream")
                stream.disconnect()
                break
            # BUG FIX: TweepError was never imported by name; reference it
            # through the tweepy module which *is* imported.
            except tweepy.TweepError:
                # NOTE(review): handle_rate_limit_error() is not defined
                # anywhere in this file -- confirm where it should come from.
                handle_rate_limit_error()
                log("Rate limit reached --> Sleeping for 1hr")
                time.sleep(60 * 60)

    def on_error(self, status_code):
        """React to stream error codes; True keeps the stream, False drops it."""
        if status_code == 185:
            # Daily status-update limit reached: back off 15 minutes.
            log("Code 185: User is over daily status update limit --> Sleeping")
            time.sleep(60 * 15)
            return True
        if status_code == 420:
            # Rate limited by the streaming API: back off an hour, then retry.
            log("Code 420: Disconnecting stream")
            time.sleep(60 * 60)
            return True  # return False
        if status_code == 88:
            # Rate limit exceeded: drop the stream.
            log("Code 88: Rate Limit Exceeded")
            return False  # return False
        print(status_code)
#
#def create_tweet():
# """Crease the text of the tweet you want to send"""
#replace with with custom code
# text = "My first tweet"
# return text
#def tweet(text):
# """Send out the text as a tweet"""
# api = tweepy.API(auth)
# send the tweet and log success or failure
# try:
# api.update_status(text)
# except tweepy.error.TweepError as e:
# log(e.message)
# else:
# log("Tweeted: " + text)
# alternate use for message logging
def log(message):
    """Append *message* to the bot logfile with a GMT timestamp."""
    script_dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    stamp = strftime("%d %b %Y %H:%M:%S", gmtime())
    with open(os.path.join(script_dir, logfile_name), 'a+') as logfile:
        logfile.write("\n" + stamp + " " + str(message))
# main execution
# main execution
if __name__ == "__main__":
    # hashtags to track
    #t = ['#develop', '#coding', '#programming', '#software', '#algorithm', '#bigdata', '#developer']
    # set up random tag to retweet about
    #tag = randint(0,len(t))
    #hashtag = t[tag]
    #print hashtag
    # Wire the listener into a streaming connection and start filtering.
    listener = PyStreamListener()
    stream = Stream(auth_handler, listener)
    # which hashtags to track and send to stream
    stream.filter(track=['#datascience', '#learnpython', '#machinelearning', '#python', '#deeplearning'])
    #print track
|
from django.shortcuts import render, Http404, HttpResponseRedirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.db.models import Q
from datetime import datetime
from index.models import *
# Create your views here.
# 判断用户是否登录
def user_auth(func):
    """Decorator: redirect to /login/ unless the session carries a user id."""
    def wrapper(request, *args, **kwargs):
        if not request.session.get('uid'):
            return HttpResponseRedirect('/login/')
        return func(request, *args, **kwargs)
    return wrapper
# 判断客服是否登录
def cc_auth(func):
    """Decorator: redirect to the agent login page unless a 'cid' cookie exists."""
    def wrapper(request, *args, **kwargs):
        if 'cid' not in request.COOKIES:
            return HttpResponseRedirect('/customercarelogin/')
        return func(request, *args, **kwargs)
    return wrapper
@user_auth
# Customer-service page seen by the end user.
def customercareuser_views(request):
    # NOTE: render(..., locals()) is used, so local variable names are part
    # of the template contract -- do not rename them.
    userId = request.session.get('uid')
    user = User.objects.get(id = userId)
    customercares = CustomerCare.objects.all()
    for customercare in customercares:
        # Already paired with this agent: show the last 50 messages.
        if customercare.chatUser == user:
            chats = list(Chat.objects.filter(Q(userSender = user),Q(ccSender = customercare)))[-50:]
            return render(request, 'CustomerCare_User.html', locals())
        # First free, logged-in agent is assigned to this user.
        if customercare.isLogin == True and customercare.isChat == False:
            customercare.isChat = True
            customercare.chatUser = user
            customercare.save()
            chats = list(Chat.objects.filter(Q(userSender = user),Q(ccSender = customercare)))[-50:]
            return render(request, 'CustomerCare_User.html', locals())
    # No agent available right now.
    return render(request, 'NoChat.html')
# Sync user chat messages with the database (AJAX endpoint).
@csrf_exempt
def user_post(request):
    # Two POST actions: 'send_chat' stores a message, 'get_chat' returns
    # messages newer than last_chat_id rendered as an HTML fragment.
    if request.method == 'POST':
        post_type = request.POST.get('post_type')
        if post_type == 'send_chat':
            new_chat = Chat.objects.create(
                userSender = User.objects.get(id = request.POST.get('userId')),
                ccSender = CustomerCare.objects.get(id = request.POST.get('customercareId')),
                content = request.POST.get('content'),
                emoji = request.POST.get('emoji'),
                identity = "user"
            )
            # NOTE(review): objects.create() already saves; this second
            # save() is redundant but harmless.
            new_chat.save()
            return HttpResponse()
        elif post_type == 'get_chat':
            last_chat_id = int(request.POST.get('last_chat_id'))
            chats = Chat.objects.filter(id__gt = last_chat_id)
            user = User.objects.get(id = request.POST.get('userId'))
            customercare = user.customercare
            # locals() carries chats/user/customercare into the template.
            return render(request, 'chat_list_user.html', locals())
    else:
        raise Http404
# User returns to their profile page: release the assigned agent first.
def customercareusermessage_views(request):
    current_user = User.objects.get(id=request.session.get('uid'))
    agent = current_user.customercare
    agent.isChat = False
    agent.chatUser = None
    agent.save()
    return HttpResponseRedirect('/userMessage/')
# User returns to the home page: release the assigned agent first.
def customercareuserindex_views(request):
    current_user = User.objects.get(id=request.session.get('uid'))
    agent = current_user.customercare
    agent.isChat = False
    agent.chatUser = None
    agent.save()
    return HttpResponseRedirect('/index/')
# User logs out: release the assigned agent, then hand off to /loginout/.
def customercareuserexit_views(request):
    current_user = User.objects.get(id=request.session.get('uid'))
    agent = current_user.customercare
    agent.isChat = False
    agent.chatUser = None
    agent.save()
    return HttpResponseRedirect('/loginout/')
# Customer-care agent login.
def customercarelogin_views(request):
    if request.method == 'GET':
        return render(request, 'CustomerCareLogin.html')
    else:
        # NOTE: the failure branches fall through to the final render, which
        # passes locals() so the template can display 'message'.
        ccname = request.POST.get('ccname')
        ccpwd = request.POST.get('ccpwd')
        if CustomerCare.objects.filter(ccName = ccname).count() == 0:
            message = '客服不存在'
        elif ccpwd != CustomerCare.objects.get(ccName = ccname).password:
            # NOTE(review): plaintext password comparison -- passwords appear
            # to be stored unhashed; confirm whether hashing was intended.
            message = '密码错误'
        else:
            # Mark the agent online and set the 'cid' cookie for one day.
            customercare = CustomerCare.objects.get(ccName = ccname)
            customercare.isLogin = True
            customercare.save()
            response = HttpResponseRedirect('/customercare/')
            response.set_cookie('cid', customercare.id, 60*60*24)
            return response
    return render(request, 'CustomerCareLogin.html', locals())
# Customer-care agent's reply page.
@cc_auth
def customercare_views(request):
    # locals() is passed to the template, so variable names matter here.
    ccId = request.COOKIES['cid']
    customercare = CustomerCare.objects.get(id = ccId)
    if customercare.chatUser != None:
        # Agent has an assigned user: include the last 50 chat messages.
        user = customercare.chatUser
        chats = list(Chat.objects.filter(Q(userSender = user),Q(ccSender = customercare)))[-50:]
        return render(request, 'CustomerCare_cc.html', locals())
    else:
        # No assigned user: render the same page without 'user'/'chats'.
        return render(request, 'CustomerCare_cc.html',locals())
# Sync agent chat messages with the database (AJAX endpoint).
@csrf_exempt
def customercare_post(request):
    if request.method == 'POST':
        post_type = request.POST.get('post_type')
        if post_type == 'send_chat':
            new_chat = Chat.objects.create(
                userSender = User.objects.get(id = request.POST.get('userId')),
                ccSender = CustomerCare.objects.get(id = request.POST.get('customercareId')),
                content = request.POST.get('content'),
                emoji = request.POST.get('emoji'),
                identity = "cc"
            )
            # objects.create() already saves; this save() is redundant.
            new_chat.save()
            return HttpResponse()
        elif post_type == 'get_chat':
            customercare = CustomerCare.objects.get(id = request.POST.get('customercareId'))
            if customercare.chatUser != None:
                last_chat_id = int(request.POST.get('last_chat_id'))
                chats = Chat.objects.filter(id__gt = last_chat_id)
                user = customercare.chatUser
                # locals() carries chats/user/customercare into the template.
                return render(request, 'chat_list_cc.html', locals())
            # NOTE(review): when chatUser is None this view returns None,
            # which Django rejects -- confirm whether an empty HttpResponse
            # was intended here.
    else:
        raise Http404
# Log the customer-care agent out and clear the session cookie.
@csrf_exempt
def customercareexit_views(request):
    agent = CustomerCare.objects.get(id=request.COOKIES['cid'])
    agent.isLogin = False
    agent.save()
    redirect_response = HttpResponseRedirect('/customercarelogin/')
    redirect_response.delete_cookie('cid')
    return redirect_response
|
#!/usr/bin/env python
PACKAGE = "drive_ros_custom_behavior_trees"
from dynamic_reconfigure.parameter_generator_catkin import *
gen = ParameterGenerator()
# Parameters: name, type, level, description, default value (min/max omitted).
# The -1000 / -1000.0 defaults below appear to be "unset" sentinels that the
# node overrides at runtime -- TODO confirm.
# NOTE(review): "DFAULT" on the next line looks like a typo for "DEFAULT" --
# confirm against the consuming node before changing the runtime value.
gen.add("mode", str_t, 0, "The driving mode (OBSTACLES or PARKING)", "DFAULT")
gen.add("tick_freq_ms", int_t, 0, "The minimum duration of one cycle in ms", -1000)
gen.add("general_max_speed", double_t, 0, "The fastest speed the car may drive.", -1000.0)
gen.add("general_max_speed_cautious", double_t, 0, "A maximum speed where more accurate sensing is required / breaking may shorty be needed.", -1000.0)
gen.add("max_bridge_speed", double_t, 0, "Max speed on a bridge", -1000.0)
gen.add("parking_spot_search_speed", double_t, 0, "...", -1000.0)
gen.add("max_lane_switch_speed", double_t, 0, "Speed when changing lanes", -1000.0)
gen.add("sharp_turn_speed", double_t, 0, "Max speed in a sharp turn", -1000.0)
gen.add("very_sharp_turn_speed", double_t, 0, "Max speed in a very sharp turn", -1000.0)
gen.add("overtake_distance", double_t, 0, "Maximum distance to object in front where it is allowed to overtake it.", -1000.0)
gen.add("object_following_break_factor", double_t, 0, "Internal calculation factor for adjusting distance to object in front", -1000.0)
gen.add("universal_break_factor", double_t, 0, "...", -1000.0)
gen.add("barred_area_react_distance", double_t, 0, "When a car starts to pass a barred area", -1000.0)
gen.add("oncoming_traffic_clearance", double_t, 0, "Safety distance to any traffic on the left lane when overtaking", -1000.0)
gen.add("max_start_box_distance", double_t, 0, "Used to determine whether the start box is open", -1000.0)
gen.add("intersection_turn_speed", double_t, 0, "...", -1000.0)
gen.add("break_distance_safety_factor", double_t, 0, "A factor multiplied by the (accurate) calculated break distance", -1000.0)
gen.add("intersection_max_obj_distance", double_t, 0, "Used to determine whether an object at an intersection is to be considered", -1000.0)
gen.add("speed_zero_tolerance", double_t, 0, "Any speed smaller than this is considered to be 0", -1000.0)
gen.add("intersection_turn_duration", int_t, 0, "Duration of a turn in an intersection", 800)
# Runtime state flags (updated while driving, not static configuration)
gen.add("overtaking_forbidden_zone", bool_t, 0, "...", False)
gen.add("express_way", bool_t, 0, "...", False)
gen.add("priority_road", bool_t, 0, "...", False)
gen.add("force_stop", bool_t, 0, "Set for stop signs", False)
gen.add("on_bridge", bool_t, 0, "...", False)
gen.add("give_way", bool_t, 0, "...", False)
gen.add("successful_parking_count", int_t, 0, "...", -1000)
gen.add("intersection_turn_indication", int_t, 0, "Turn left/right/don't", -1000)
gen.add("speed_limit", double_t, 0, "...", -1000.0)
# Generate the C++/Python config headers and exit with the generator status.
exit(gen.generate(PACKAGE, "BehaviorTree", "BehaviorTree"))
|
import tweepy,sys, os, newt, argparse, datetime, csv, random,math
import networkx as nx
import newtx as nwx
import urllib, unicodedata
def checkDir(dirpath):
    """Create *dirpath* (and parents) when it does not already exist."""
    if os.path.exists(dirpath):
        return
    os.makedirs(dirpath)
# Command-line interface: exactly one of -list or -users selects the source.
parser = argparse.ArgumentParser(description='Data about list members')
group = parser.add_mutually_exclusive_group()
group.add_argument('-list',help='Grab users from a list. Provide source as: username/listname')
group.add_argument('-users',nargs='*', help="A space separated list of usernames (without the @) for whom you want to do the grab.")
parser.add_argument('-sample',default=197,type=int,metavar='N',help='Sample the friends/followers (user, users); use 0 if you want all (users/users).')
parser.add_argument('-fname',default='',help='Custom folder name')
# 1 selects contiguous batches below instead of random.sample -- see the
# ORDEREDSAMPLE check in the per-user loop.
ORDEREDSAMPLE=1
args=parser.parse_args()
# Authenticated Twitter API handle from the project's newt helper module.
api=newt.getTwitterAPI()
# NOTE(review): exact duplicate of checkDir defined above -- this
# redefinition shadows the earlier one; consider removing one copy.
def checkDir(dirpath):
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
def getUsersFromList(userList):
    """Return the screen names of the members of a Twitter list.

    userList -- "username/listname" string identifying the list.
    """
    # Unpack owner and slug; the local previously named 'list' shadowed the
    # builtin, which this rename fixes.
    parts = userList.split('/')
    owner = parts[0]
    list_slug = parts[1]
    # newt helper returns a mapping of member id -> user object.
    members = newt.listDetailsByScreenName({}, api.list_members, owner, list_slug)
    return [members[key].screen_name for key in members]
sampleSize=args.sample
# NOTE(review): -fname defaults to '' (never None), so this condition is
# always true and the else-branch is dead -- confirm whether the intent
# was `if args.fname:`.
if args.fname!=None: fpath=str(args.fname)+'/'
else:fpath=''
# Timestamp used in every generated report filename.
now = datetime.datetime.now()
def outputter():
    # Write one CSV row of profile details per user object in the module
    # globals: fd (dir), fn (file), twd (user objects), source (label).
    checkDir(fd)
    print 'Writing file...',fn
    writer=csv.writer(open(fn,'wb+'),quoting=csv.QUOTE_ALL)
    writer.writerow([ 'source','screen_name','name','description','location','time_zone','created_at','contributors_enabled','url','listed_count','friends_count','followers_count','statuses_count','favourites_count','id_str','id','verified','utc_offset','profile_image_url','protected'])
    twDetails={}
    for u in twd:
        twDetails[u.screen_name]=u
        ux=[source]
        # Free-text fields: normalise unicode to ASCII, blank out None.
        for x in [u.screen_name,u.name,u.description,u.location,u.time_zone]:
            if x != None:
                ux.append(unicodedata.normalize('NFKD', unicode(x)).encode('ascii','ignore'))
            else: ux.append('')
        # Numeric/boolean fields pass through unmodified.
        for x in [u.created_at,u.contributors_enabled,u.url,u.listed_count,u.friends_count,u.followers_count,u.statuses_count,u.favourites_count,u.id_str,u.id,u.verified,u.utc_offset,u.profile_image_url,u.protected]:
            ux.append(x)
        try:
            writer.writerow(ux)
        # NOTE(review): bare except silently drops rows that fail to encode
        # -- consider logging at least the screen_name.
        except: pass
twd=[]
twn=[]
# Dispatch on CLI source: a named list, explicit usernames, or bail out.
if args.list!=None:
    source=args.list.replace('/','_')
    users=getUsersFromList(args.list)
    fd='reports/'+fpath+args.list.replace('/','_')+'/'
    fn=fd+'listTest_'+now.strftime("_%Y-%m-%d-%H-%M-%S")+'.csv'
    print fn
    # lookup_users accepts at most 100 names per call, so batch in 100s.
    for l in newt.chunks(users,100):
        #print 'partial',l
        tmp=api.lookup_users(screen_names=l)
        for u in tmp:
            twd.append(u)
            twn.append(u.screen_name)
    outputter()
elif args.users!=None:
    for l in newt.chunks(args.users,100):
        #print 'partial',l
        tmp=api.lookup_users(screen_names=l)
        for u in tmp:
            twd.append(u)
            twn.append(u.screen_name)
else: exit(-1)
for user in twn:
currSampleSize=sampleSize
source=user
twd=[]
fd='reports/'+fpath #+user+'/'
fn=fd+user+'_fo_'+str(sampleSize)+'_'+now.strftime("_%Y-%m-%d-%H-%M-%S")+'.csv'
print 'grabbing follower IDs for',user
try:
mi=tweepy.Cursor(api.followers_ids,id=user).items()
except:
continue
users=[]
try:
for m in mi: users.append(m)
except: continue
biglen=str(len(users))
print 'Number of followers:',biglen
#HACK
if str(len(users))>10000: currSampleSize=10000
#this breaks the date recreation on followers - need a run of 10000 users
if currSampleSize>0:
if len(users)>currSampleSize:
if ORDEREDSAMPLE !=1:
users=random.sample(users, currSampleSize)
print 'Using a random sample of '+str(currSampleSize)+' from '+str(biglen)
else:
#tmpsamp=int(len(users)/currSampleSize)
#need some way of getting 100 consecutive samples of 100 or so users?
print 'Using ordered sample of '+str(currSampleSize)+' from '+str(biglen)
ss=[]
offset=math.floor(len(users)/100)
for i in range(100):
randoff=random.randint(0, offset-100)
li=int(randoff+i*offset)
ui=int(li+100-1)
ss=ss+users[li:ui]
users=ss
else:
print 'Fewer members ('+str(len(users))+') than sample size: '+str(currSampleSize)
n=1
print 'Hundred batching'
for l in newt.chunks(users,100):
#print 'partial',l
print str(n)
n=n+1
try:
tmp=api.lookup_users(user_ids=l)
for u in tmp:twd.append(u)
except: continue
print '...done'
outputter()
|
"""Perform operations to find Reference Notes zones."""
import ReferenceOps as ro
import ZoneNeutralOps as zno
import numpy as np
import pandas as pd
import os
pd.options.mode.chained_assignment = None
# pd.set_option('display.max_colwidth', -1)
# pd.set_option('display.max_rows', None)
class reference_overhead(object):
    """
    Container class to construct testing dataframe looking for Reference Notes strings /
    zones. Calls methods from similarly-named Ops module. Filters resulting dummy columns
    in data to define 'Reference Notes' zones with a 1 or 0 score.
    Attributes:
        zones_full: path/handle for the full zones data (kept for reference).
        zones_small: path/handle for the zones data actually loaded.
        working_df: full dataframe loaded from ``zones_small``.
        sub_working_df: trimmed frame with the test/dummy columns added.
        clean_starts_data: returned data from id_clean_starts()
        clean_starts: rows whose text matched the caps-reference test.
        clean_starts_indices: index values of those rows.
        non_clean_start_indices: all remaining row indices.
        on_stocks_clean_indices: clean-start rows containing 'ON STOCKS'.
        on_bonds_clean_indices: clean-start rows containing 'ON BONDS'.
        output_dataframe: original frame plus the two dummy score columns.
    """
    def __init__(self, zones_full, zones_small):
        self.zones_full = zones_full
        self.zones_small = zones_small
        self.working_df = zno.file_to_df(self.zones_small)
        self.sub_working_df = self.test_stock_bond()
        self.clean_starts_data = self.id_clean_starts()
        self.clean_starts = self.clean_starts_data[0]
        self.clean_starts_indices = self.clean_starts_data[1]
        self.non_clean_start_indices = self.clean_starts_data[2]
        self.on_stocks_clean_indices = self.on_stocks()
        self.on_bonds_clean_indices = self.on_bonds()
        self.output_dataframe = self.update_original()
    def test_stock_bond(self):
        """Build the trimmed frame and score each zone's text for caps references."""
        sub_working_df = self.working_df[['file_name', 'text']]
        sub_working_df['zone_next'] = sub_working_df['text']
        sub_working_df['zone_next_next'] = sub_working_df['text']
        sub_working_df.zone_next_next = sub_working_df.zone_next_next.shift(-2)
        sub_working_df = sub_working_df.fillna(value='')
        # sub_working_df['consec_years'] = sub_working_df.apply(zno.test_consec_years, axis=1)
        sub_working_df['caps_reference'] = sub_working_df.text.apply(ro.test_caps_reference)
        return sub_working_df
    def id_clean_starts(self):
        """Identify well-defined income accounts / statements starts."""
        clean_starts = self.sub_working_df.loc[(self.sub_working_df['caps_reference'] == 1)]
        clean_starts_indices = clean_starts.index.values
        non_clean_start_indices = [index for index in self.sub_working_df.index.values if index not in clean_starts_indices]
        return (clean_starts, clean_starts_indices, non_clean_start_indices)
    def on_stocks(self):
        """Search for 'ON STOCKS' string in matched reference note zones."""
        self.sub_working_df['ref_on_stocks'] = self.sub_working_df.text.apply(ro.on_stocks)
        # FIX: DataFrame.set_value was deprecated in pandas 0.21 and removed
        # in 1.0; .at is the supported scalar setter.
        for index in self.non_clean_start_indices:
            self.sub_working_df.at[index, 'ref_on_stocks'] = 0
        on_stocks_clean_starts = self.sub_working_df.loc[(self.sub_working_df['ref_on_stocks'] == 1)]
        on_stocks_clean_indices = on_stocks_clean_starts.index.values
        return on_stocks_clean_indices
    def on_bonds(self):
        """Search for 'ON BONDS' string in matched reference note zones."""
        self.sub_working_df['ref_on_bonds'] = self.sub_working_df.text.apply(ro.on_bonds)
        # FIX: .at replaces the removed set_value (see on_stocks).
        for index in self.non_clean_start_indices:
            self.sub_working_df.at[index, 'ref_on_bonds'] = 0
        on_bonds_clean_starts = self.sub_working_df.loc[(self.sub_working_df['ref_on_bonds'] == 1)]
        on_bonds_clean_indices = on_bonds_clean_starts.index.values
        return on_bonds_clean_indices
    def update_original(self):
        """Add dummy columns flagging reference-note zones to the original frame."""
        self.working_df['ref_on_stocks'] = 0
        for index in self.on_stocks_clean_indices:
            self.working_df.at[index, 'ref_on_stocks'] = 1
        self.working_df['ref_on_bonds'] = 0
        for index in self.on_bonds_clean_indices:
            self.working_df.at[index, 'ref_on_bonds'] = 1
        output_dataframe = self.working_df[['file_name', 'manual', 'manual_yr', 'fiche', 'fiche_num',
                                            'zone_num', 'CoName', 'CoNum', 'Hist', 'Dir', 'ref_on_stocks',
                                            'ref_on_bonds', 'text']]
        return output_dataframe
|
# Small interactive birthday dialogue; validates yes/no answers.
reply = input("Is it your birthday today?")
if reply not in ("yes", "no"):
    print("Please answer only yes or no.")
elif reply == "yes":
    print("Wow! Have a great celebration!")
else:
    followup = input("Is your birthday over?")
    if followup == "yes":
        print("Hope you had a great celebration!")
    elif followup == "no":
        print("I look forward to your celebration!")
    else:
        print("Please answer only yes or no.")
|
# flake8: noqa
from .attention_reporter_callback import EvalAttentionReporter
from .csv_callback import EvalCSVReporter
from .google_sheets_callback import EvalGoogleSheetsReporter
from .handler import EvalCallbackHandler
from .progress_bar_callback import EvalProgressBarCallback
from .simple_logger_callback import EvalSimpleLogger
from .yaml_callback import EvalYAMLReporter
|
from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import get_template
# DRY - > Dont Repeat Yourself
def home_page(request):
    """Render the home page; authenticated users also get a sample list."""
    context = {"title": "Home"}
    if request.user.is_authenticated:
        context["mylist"] = [1, 2, 3, 4, 5]
    return render(request, "home.html", context)
    #return HttpResponse("<h1>Hello World!!</h1>")
def about_page(request):
    """Render the static About page."""
    context = {"title": "About Us"}
    return render(request, "about.html", context)
def contact_page(request):
    """Render the static Contact page."""
    context = {"title": "Contact Us"}
    return render(request, "contact.html", context)
def test_page(request):
    """Demonstrate manual template loading/rendering via get_template."""
    template = get_template("home.html")
    html = template.render({"title": "Testing Page"})
    return HttpResponse(html)
|
import random
healthe = 10  # enemy hit points (mutated via `global` in battle_sequence)
health = 10  # player hit points (read by game_loop's while condition)
class Creature:
    """A named creature with a level."""

    def __init__(self, name, the_level):
        # Parameter name 'the_level' kept for caller compatibility.
        self.name = name
        self.level = the_level

    def __repr__(self):
        return f"{self.name}, Level {self.level}"
def game_loop():
    # Pool of opponents for battle_sequence to pick from.
    creatures = [
        Creature('Random PlaceHolder Name', 1),
        Creature('c2', 1),
        Creature('c3', 1),
        Creature('c4', 1),
        Creature('c5', 1),
        Creature('c6', 1),
    ]
    # NOTE(review): `health` never changes, so this loop prints blank lines
    # forever and battle_sequence below is never even defined -- confirm
    # the intended control flow.
    while health >= 0:
        print()
    def battle_sequence():
        global healthe
        global action
        Random_Creature = random.choice(creatures)
        print("You Are Fighting {}".format(Random_Creature))
        print("Battle Has Started")
        # 'a' attacks for 1 damage; reaching 0 resets enemy health and wins.
        while healthe >= 0:
            action = input('What Is Your Move? : ')
            print()
            if action == "a":
                healthe = healthe - 1
                print("Enemy Health Is {}".format(healthe))
                if healthe == 0:
                    healthe = 10
                    print("You Have Won!")
                    print()
                    break
# NOTE(review): battle_sequence is local to game_loop, so this module-level
# call would raise NameError if reached -- confirm intended structure.
action = input(": ")
if action == "p":
    battle_sequence()
game_loop()
|
"""
LICENCE
-------
Copyright 2013-2016 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
"""
import copy
import functools
import logging
import operator as op
# noinspection PyUnresolvedReferences
from six.moves import range
class SmqtkObject (object):
    """
    Highest level object interface for classes defined in SMQTK.
    Currently defines logging methods.
    """

    @classmethod
    def get_logger(cls):
        """
        :return: logging object for this class
        :rtype: logging.Logger
        """
        logger_name = "%s.%s" % (cls.__module__, cls.__name__)
        return logging.getLogger(logger_name)

    @property
    def _log(self):
        """
        :return: logging object for this class as a property
        :rtype: logging.Logger
        """
        return self.get_logger()
def ncr(n, r):
    """
    N-choose-r method, returning the number of combinations possible in
    integer form.
    :param n: Selection pool size.
    :type n: int
    :param r: permutation selection size.
    :type r: int
    :return: Number of n-choose-r permutations for the given n and r.
    :rtype: int
    """
    # Use the smaller of r and n-r; the result is symmetric.
    k = min(r, n - r)
    if k == 0:
        return 1
    # Multiply-and-divide incrementally; each step is an exact integer
    # because result equals C(n-k+i, i) after step i.
    result = 1
    for i in range(1, k + 1):
        result = result * (n - k + i) // i
    return result
def merge_dict(a, b, deep_copy=False):
    """
    Merge dictionary b into dictionary a.
    Unlike dict.update, congruent nested dictionaries are merged
    recursively rather than overwritten wholesale. Congruent non-dict
    values are overwritten; new keys in ``b`` are added to ``a``.
    Values are assigned (not copied) by default; set ``deep_copy`` to
    deep-copy values from ``b`` into ``a`` instead.
    :param a: The "base" dictionary that is updated in place.
    :type a: dict
    :param b: The dictionary to merge into ``a`` recursively.
    :type b: dict
    :param deep_copy: Optionally deep-copy values from ``b`` when assigning
        into ``a``.
    :type deep_copy: bool
    :return: ``a`` dictionary after merger (not a copy).
    :rtype: dict
    """
    for key in b:
        value = b[key]
        if key in a and isinstance(a[key], dict) and isinstance(value, dict):
            # Both sides are dicts: recurse instead of bashing the nested dict.
            merge_dict(a[key], value, deep_copy)
            continue
        a[key] = copy.deepcopy(value) if deep_copy else value
    return a
###
# In specific ordering for dependency resolution
#
# No internal util dependencies
from .bin_utils import initialize_logging
from .configurable_interface import Configurable
from .database_info import DatabaseInfo
from .iter_validation import check_empty_iterable
from .read_write_lock import ReaderUpdateException, DummyRWLock, ReadWriteLock
from .safe_config_comment_parser import SafeConfigCommentParser
from .signal_handler import SignalHandler
from .simple_timer import SimpleTimer
|
import turtle
bob = turtle.Turtle()
# NOTE(review): documented turtle speeds are 0-10; out-of-range values are
# treated as 0 (fastest) -- confirm 30 was meant as "fastest".
bob.speed(30)
# Draw 180 repetitions of a zig-zag motif, rotating slightly each pass.
for i in range(180):
    bob.forward(100)
    bob.right(30)
    bob.forward(20)
    bob.left(60)
    bob.forward(50)
    bob.right(30)
    bob.penup()
    bob.forward(30)
    bob.pendown()
    bob.dot()
    bob.penup()
    # Return to the origin and rotate before the next repetition.
    bob.setposition(0,0)
    bob.pendown()
    bob.right(2)
# Keep the window open until closed by the user.
turtle.done()
|
import tensorflow as tf
import numpy as np
class Add:
    """Wraps elementwise addition of two values as TensorFlow constants."""

    def __init__(self):
        pass

    def execute(self, a, b):
        # Build constants and rely on operator overloading for the sum.
        first = tf.constant(a)
        second = tf.constant(b)
        return first + second
class Mean:
    """Demo of reshaping a (3, 2, 3) array and reducing over axis 0."""
    def __init__(self):
        pass
    def execute(self):
        # 18 values shaped (3, 2, 3), flattened to (3, 6) via reshape(-1, 6).
        x_array = np.arange(18).reshape(3, 2, 3)
        x2 = tf.reshape(x_array, shape=(-1, 6))
        # Column-wise sum
        xsum = tf.reduce_sum(x2, axis=0)
        # Column-wise mean
        xmean = tf.reduce_mean(x2, axis=0)
        print('입력 크기: ', x_array.shape)
        print('크기가 변경된 입력 크기: \n', x2.numpy())
        print('열의 합: ', xsum.numpy())
        print('열의 평균: ', xmean.numpy())
if __name__ == '__main__':
    # Demo entry point: run the reduction example (Add demo left disabled).
    # add = Add()
    # print(add.execute(5, 7))
    mean = Mean()
    mean.execute()
|
# -*- encoding: utf-8 -*-
import json
from datetime import datetime
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.utils.datastructures import MultiValueDictKeyError
from django.http import (HttpResponse, HttpResponseRedirect,JsonResponse)
from rest_framework.authentication import (SessionAuthentication, BasicAuthentication,
TokenAuthentication)
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
from django.contrib.auth.hashers import check_password, make_password
from pydub import AudioSegment
from django.conf import settings
from shazam.task import detectar_sonido
from models import (TokensFCM)
class GetDetectarSonido(View):
    """Accept an uploaded audio clip and queue song detection for the user."""
    # authentication_classes = (SessionAuthentication, BasicAuthentication, TokenAuthentication)
    # permission_classes = (IsAuthenticated,)
    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        # CSRF is disabled because mobile clients authenticate via token.
        return super(GetDetectarSonido, self).dispatch(request, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """
        Store the uploaded mp3 under a per-user timestamped name and hand
        detection off to the Celery task; responds with a JSON status.
        """
        try:
            print(request.FILES)
            token = request.POST['token']
            audio = request.FILES['audio']
            # Resolve the token to its owning user (raises if invalid).
            token = Token.objects.get(key=token)
            usuario = token.user
            song = AudioSegment.from_file(audio, format="mp3")
            # Filename: <token>_<timestamp> to avoid collisions.
            nombre_archivo=str(token)+"_"+str(datetime.now())
            song.export(settings.BASE_DIR+"/archivos_reconocer/"+nombre_archivo+".mp3", format="mp3")
            # Queue the recognition task asynchronously via Celery.
            resultado = detectar_sonido.delay(nombre_archivo)
            data = ([{"detail":"Información para procesar almacenada correctamente."}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=200)
            return response
        except MultiValueDictKeyError:
            # Missing 'token' or 'audio' in the request.
            data = ([{"detail":"Por favor complete los campos"}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
        except Token.DoesNotExist:
            # Unknown token: the client is not authenticated.
            data = ([{"detail":"No se encuentra autenticado,por favor inicie sesión"}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
@csrf_exempt
def LoginToken(request):
    """Token-based login: validate credentials, register the FCM device
    token, and return the user's API token as JSON (POST only)."""
    if request.method == 'POST':
        try:
            username = request.POST['usuario']
            password = request.POST['contrasena']
            #password = b64decode(password)
            registrationId = request.POST.get('registrationId', None)
            try:
                user = User.objects.get(username=username)
                check = check_password(password, user.password)
            except User.DoesNotExist:
                user = False
            # 'check' is only bound when the user exists; short-circuit on
            # 'user is not False' keeps this safe.
            if user is not False and check == True:
                if user.is_active == True:
                    # Store/refresh the FCM device token for this user.
                    token = Token.objects.get_or_create(user=user)
                    print(token[0])
                    fcm_user=TokensFCM.objects.update_or_create(
                        token=registrationId,
                        usuario=user
                    )
                    #fcm_user.save()
                    data = ([{"detail":"Has iniciado sesión correctamente.", "token":str(token[0])}])
                    data = json.dumps(data)
                    response = HttpResponse(data, content_type="application/json", status=200)
                    # NOTE(review): 'token' is the (obj, created) tuple from
                    # get_or_create, so this header stringifies the tuple --
                    # str(token[0]) was probably intended, as in the body.
                    response['Authorization'] = str(token)
                    return response
                else:
                    # Account disabled by an administrator.
                    data = ([{"detail":"Su cuenta se encuentra inhabilitada por el administrador por esta razón no puedes iniciar sesión"}])
                    data = json.dumps(data)
                    response = HttpResponse(data, content_type="application/json", status=405)
                    response['WWW-Authenticate'] = 'Token'
                    return response
            else:
                # Bad username or password.
                data = ([{"detail":"Usuario y/o contraseña incorrectas."}])
                data = json.dumps(data)
                response = HttpResponse(data, content_type="application/json" ,status=401)
                response['WWW-Authenticate'] = 'Token'
                return response
        except MultiValueDictKeyError:
            # Required POST fields are missing.
            data = ([{"detail":"Por favor complete los campos"}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
    else:
        data = ([{"detail":"Método no permitido."}])
        data = json.dumps(data)
        response = HttpResponse(data, content_type="application/json", status=405)
        return response
@csrf_exempt
def LogoutToken(request):
    """
    Log a user out: delete the API token and the FCM device token.
    @return json
    @method POST
    """
    if request.method == "POST":
        try:
            token = request.POST['token']
            token = Token.objects.get(key=token)
            # NOTE(review): the token row is deleted before token.user is
            # read below; this works only because the in-memory object
            # retains its fields -- consider looking up the FCM record
            # before deleting the token.
            token.delete()
            fcm_user=TokensFCM.objects.get(usuario=token.user)
            fcm_user.delete()
            data = ([{"detail":"Sesión finalizada correctamente."}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=200)
            return response
        except Token.DoesNotExist:
            data = ([{"detail":"El usuario no existe, por favor verifique la información."}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
        except TokensFCM.DoesNotExist:
            # NOTE(review): if this fires, the API token was already deleted
            # above -- the session ends but reports an error; confirm intent.
            data = ([{"detail":"El usuario no existe, por favor verifique la información."}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
        except MultiValueDictKeyError:
            data = ([{"detail":"Algo ha ocurrido mal, por favor reinicie la aplicación"}])
            data = json.dumps(data)
            response = HttpResponse(data, content_type="application/json", status=400)
            return response
    else:
        data = ([{"detail":"Método no permitido."}])
        data = json.dumps(data)
        response = HttpResponse(data, content_type="application/json", status=405)
        return response
|
from keras.datasets import cifar10
from keras.utils import np_utils
import matplotlib.pyplot as plt
import numpy as np
# Load the CIFAR-10 dataset
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Cut down to ~300 samples (0.6% of the training set, 300 test images)
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size = 0.006, random_state=66)
x_test = x_test[:300]
y_test = y_test[:300]
# Image generator (augmentation helper defined below)
def generate_data(x_train, y_train):
    """Augment the data set: emit 5 randomly transformed copies of every image.

    Args:
        x_train: image array of shape (N, H, W, C).
        y_train: labels aligned with x_train.

    Returns:
        (x_ext, y_ext): numpy arrays holding 5*N augmented images and the
        label of each source image repeated accordingly.
    """
    # Import kept local, matching the original; the unused
    # `keras.preprocessing.image` import was removed.
    from keras.preprocessing.image import ImageDataGenerator
    datagen = ImageDataGenerator(rotation_range=20,
                                 width_shift_range=0.1,
                                 height_shift_range=0.1,
                                 shear_range=0.1,
                                 zoom_range=0.1,
                                 horizontal_flip=True,
                                 fill_mode='nearest')
    x_ext = []
    y_ext = []
    for i in range(x_train.shape[0]):
        # datagen.flow expects a batch axis: (H, W, C) -> (1, H, W, C).
        img = x_train[i].reshape((1,) + x_train[i].shape)
        # Take exactly 5 augmented samples per source image (j == 0..4).
        j = 0
        for batch in datagen.flow(img, batch_size=1):
            x_ext.append(batch[0])
            y_ext.append(y_train[i])
            if j == 4:
                break
            j += 1
    return np.array(x_ext), np.array(y_ext)
# Augment the training data (5 transformed copies per image).
x_ext, y_ext = generate_data(x_train, y_train)
# One-hot encode the labels and scale pixel values to [0, 1].
y_ext = np_utils.to_categorical(y_ext, 10)
y_test = np_utils.to_categorical(y_test, 10)
x_ext = x_ext.astype('float32')
x_test = x_test.astype('float32')
x_ext /= 255
x_test /= 255
# Shuffle so augmented copies of the same image are not adjacent.
s = np.arange(x_ext.shape[0])
np.random.shuffle(s)
x_ext = x_ext[s]
y_ext = y_ext[s]
print("x: ", x_ext.shape)
print("y: ", y_ext.shape)
#모델
from keras.models import Model, Input
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Activation, Flatten
#신경망 정의
def build_network(keep_prob=0.5, optimizer='adam', node1=32, node2=60):
    """Build and compile a minimal CNN classifier for 32x32x3 inputs.

    One same-padded 3x3 convolution with dropout, then a dense hidden layer
    and a 10-way softmax output, compiled with categorical cross-entropy.
    """
    image_in = Input(shape=(32, 32, 3), name='input')
    conv = Conv2D(node1, kernel_size=(3, 3), padding='same',
                  activation='relu', name='hidden1')(image_in)
    dropped = Dropout(keep_prob)(conv)
    flat = Flatten()(dropped)
    hidden = Dense(node2, activation='relu')(flat)
    out = Dense(10, activation='softmax')(hidden)
    net = Model(inputs=image_in, outputs=out)
    net.compile(loss='categorical_crossentropy', optimizer=optimizer,
                metrics=['accuracy'])
    return net
#하이퍼파람
def create_hyperparameter():
    """Return the random-search space as a mapping of parameter name -> candidates."""
    return {
        "batch_size": [32, 64, 128, 256, 500],
        "optimizer": ['rmsprop', 'adam', 'adadelta'],
        "keep_prob": np.linspace(0.1, 0.5, 5),
        "node1": [10, 16, 32, 64],
        "node2": [5, 10, 30, 100, 200],
    }
# Build the model, wrapped for scikit-learn compatibility.
from keras.wrappers.scikit_learn import KerasClassifier
model = KerasClassifier(build_fn=build_network, verbose=1)
# Build the hyper-parameter search space.
hyperparameters = create_hyperparameter()
# Random search with 3-fold cross validation for the best combination.
from sklearn.model_selection import RandomizedSearchCV
rs = RandomizedSearchCV(estimator=model, param_distributions=hyperparameters, cv=3, verbose=1)
rs.fit(x_ext, y_ext)
print("최적: ", rs.best_params_)
print("train: ",rs.score(x_ext, y_ext))
print("test: ",rs.score(x_test, y_test))
|
#!/usr/bin/python
# Project Euler 11: greatest product of `run` adjacent numbers in a grid,
# horizontally, vertically, or along either diagonal.
#
# Fixes vs. the original: the anti-diagonal pass used range(3, 19) and so
# never considered anti-diagonals whose bottom cell lies in the last row;
# the builtin `max` is no longer shadowed; the grid size and run length are
# parameterized instead of hard-coded to 20 and 4.


def largest_product(grid, run=4):
    """Return the largest product of `run` adjacent values in a square grid.

    Adjacency is horizontal, vertical, diagonal (down-right), or
    anti-diagonal (up-right).
    """
    n = len(grid)
    best = 0
    for i in range(n):
        for j in range(n):
            if j + run <= n:  # horizontal, rightwards
                p = 1
                for k in range(run):
                    p *= grid[i][j + k]
                best = max(best, p)
            if i + run <= n:  # vertical, downwards
                p = 1
                for k in range(run):
                    p *= grid[i + k][j]
                best = max(best, p)
            if i + run <= n and j + run <= n:  # diagonal, down-right
                p = 1
                for k in range(run):
                    p *= grid[i + k][j + k]
                best = max(best, p)
            if i - (run - 1) >= 0 and j + run <= n:  # anti-diagonal, up-right
                p = 1
                for k in range(run):
                    p *= grid[i - k][j + k]
                best = max(best, p)
    return best


if __name__ == "__main__":
    # Parse the whitespace-separated grid and print the answer.
    with open('problem_11.in') as handle:
        grid = [[int(tok) for tok in line.split()] for line in handle]
    print(largest_product(grid))
|
# -*- coding:utf-8 -*-
import os
import cv2
import math
import xml.etree.ElementTree as ET
# Base working directory and the sub-folders used by this script.
Base_dir = r"C:\Users\maggie\Desktop\dir_points"
rootdir = './r_xml'  # folder holding the input (rotated-box) xml files
img_path = './JPEGImages'
new_xml_path = './Annotations'
def file_name(file_dir):
    """Recursively collect the full paths of all '.xml' files under *file_dir*."""
    return [
        os.path.join(dirpath, fname)
        for dirpath, _dirnames, fnames in os.walk(file_dir)
        for fname in fnames
        if os.path.splitext(fname)[1] == '.xml'
    ]
def rotatePoint(xc, yc, xp, yp, theta):
    """Rotate the point (xp, yp) about the centre (xc, yc) by *theta* radians.

    The sine term enters negated on the y component, i.e. a clockwise
    rotation in the usual x-right / y-up convention.
    """
    dx, dy = xp - xc, yp - yc
    c, s = math.cos(theta), math.sin(theta)
    return xc + (c * dx + s * dy), yc + (c * dy - s * dx)
# NOTE(review): Base_dir + rootdir is plain string concatenation, so
# os.path.join receives a single, already-joined argument — verify the
# intended path separator between the two parts.
xml_dirs = file_name(os.path.join(Base_dir + rootdir))
def pretty_xml(element, indent, newline, level=0):
    """Indent an ElementTree element in place so it serialises prettily.

    Args:
        element: the Element to reformat (modified in place).
        indent: one indentation unit, e.g. a tab.
        newline: line separator, e.g. a newline character.
        level: current nesting depth (used internally by the recursion).
    """
    # len() instead of truth-testing: the truth value of an Element is
    # deprecated and only reflected the presence of children anyway.
    if len(element):
        if (element.text is None) or element.text.isspace():
            element.text = newline + indent * (level + 1)
        else:
            element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * (level + 1)
        children = list(element)
        # enumerate() replaces the original O(n^2) list.index() lookup.
        for pos, child in enumerate(children):
            if pos < len(children) - 1:
                # The next line starts a sibling: keep the same indentation.
                child.tail = newline + indent * (level + 1)
            else:
                # Last child: the next line closes the parent, one level up.
                child.tail = newline + indent * level
            pretty_xml(child, indent, newline, level=level + 1)
# Main loop: for every xml file, derive the rotated top-centre point of each
# rotated bounding box and store it as new child nodes, then rewrite the file.
for ind, item in enumerate(xml_dirs):
    print(item)
    xml = ET.parse(item)
    root = xml.getroot()
    for obj in root.findall("object"):
        cx = obj.find('robndbox').find('cx').text
        cy = obj.find('robndbox').find('cy').text
        h = obj.find('robndbox').find('h').text
        angle = obj.find('robndbox').find('angle').text
        # Rotate the box's top-centre (cx, cy - h/2) about (cx, cy) by -angle.
        rp = rotatePoint(float(cx), float(cy), float(cx), (float(cy) - 0.5 * float(h)), -float(angle))
        # Create child nodes dx and dy under the robndbox node.
        robndbox = obj.find('robndbox')
        dx = ET.SubElement(robndbox, 'dx')
        dx.text = str(rp[0])
        dy = ET.SubElement(robndbox, 'dy')
        dy.text = str(rp[1])
    # Pretty-print the xml before writing it out.
    pretty_xml(root, '\t', '\n')
    xml.write(os.path.join(Base_dir,new_xml_path,item.split("\\")[-1]), encoding="utf-8")
|
# Generated by Django 2.2 on 2020-01-29 11:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the song app: Song, SongList and SongGroup tables.

    NOTE(review): the creation-time fields below use auto_now=True, which
    updates the value on *every* save; auto_now_add=True is the usual choice
    for a creation timestamp — confirm before relying on these values.
    Generated migrations should normally not be hand-edited.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='歌曲名称')),
                ('create_time', models.DateTimeField(auto_now=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('singer', models.CharField(max_length=50, verbose_name='原唱歌手')),
                ('is_pub', models.BooleanField(default=True, verbose_name='是否发布')),
            ],
            options={
                'verbose_name': '歌曲',
                'verbose_name_plural': '歌曲',
                'db_table': 'singer_song',
            },
        ),
        migrations.CreateModel(
            name='SongList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now=True, verbose_name='点歌时间')),
                ('sang_time', models.DateTimeField(auto_now=True, verbose_name='唱歌时间')),
                ('sponsor', models.CharField(max_length=50, verbose_name='打赏人')),
                ('money', models.DecimalField(decimal_places=2, default=0, max_digits=6, verbose_name='打赏金额')),
                ('is_sang', models.BooleanField(default=False, verbose_name='是否已唱')),
                ('song', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='song.Song')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': '点歌列表',
                'verbose_name_plural': '点歌列表',
                'db_table': 'singer_song_list',
            },
        ),
        migrations.CreateModel(
            name='SongGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='分组名称')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': '歌曲分组',
                'verbose_name_plural': '歌曲分组',
                'db_table': 'singer_song_group',
            },
        ),
        migrations.AddField(
            model_name='song',
            name='group',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='song.SongGroup', verbose_name='分组名称'),
        ),
        migrations.AddField(
            model_name='song',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
"""
Given an array of integers sorted in ascending order, find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
For example,
Given [5, 7, 7, 8, 8, 10] and target value 8,
return [3, 4].
"""
class Solution(object):
    def searchRange(self, nums, target):
        """
        Find the first and last index of *target* in sorted *nums*.

        Runs in O(log n) using two binary searches. The original computed
        midpoints with `/`, which yields float indices and crashes on
        Python 3; bisect performs the same searches correctly.

        :type nums: List[int]
        :type target: int
        :rtype: List[int] -- [first, last], or [-1, -1] when absent
        """
        from bisect import bisect_left, bisect_right
        lo = bisect_left(nums, target)
        # lo is the first index holding a value >= target; if it is out of
        # range or holds a different value, target is not present.
        if lo == len(nums) or nums[lo] != target:
            return [-1, -1]
        return [lo, bisect_right(nums, target) - 1]
|
from django.views.generic import View
from django.http import JsonResponse
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
class CoreView(View):
    """
    Base class for all JSON APIs (所有API基类).

    Dispatches GET/POST requests to ``get_<action>`` / ``post_<action>``
    handler methods, applies optional login, permission and superuser
    checks, and renders a uniform JSON envelope
    (status / data / info + pagination fields).
    """
    # Maps an action method name to the permission codename it requires.
    permission_view_map = {
    }
    # Actions that additionally require request.user.is_superuser.
    superuser_required_action = []
    # App label used when building "<app>.<permission>" strings.
    app_name = ""
    # Actions that require an authenticated user.
    login_required_action = []

    def __init__(self, **kwargs):
        super(CoreView, self).__init__(**kwargs)
        self.status_code = 200
        # Default response envelope; handler methods mutate it in place.
        self.response_data = {
            'status': True,
            'data': [],
            'info': '',
            'has_next': False,
            'has_previous': False,
            'total_page': 0,
            'per_page': 20
        }

    def parameters(self, key):
        """
        Return the request parameter *key* from GET or POST data
        (None when absent).
        :param key: parameter name
        :return: parameter value or None
        """
        if self.request.method == 'GET':
            return self.request.GET.get(key)
        if self.request.method == 'POST':
            return self.request.POST.get(key)

    def get(self, request, *args, **kwargs):
        """
        Handle a GET request: resolve the 'action' kwarg to a get_<action>
        handler, or answer 504 when no action was routed.
        """
        if 'action' not in kwargs:
            self.response_data['status'] = False
            self.response_data['info'] = 'Request action is empty'
            response_obj = JsonResponse(self.response_data)
            response_obj.status_code = 504
            return response_obj
        action = 'get_%s' % kwargs['action'].lower()
        return self.run(action, request)

    def post(self, request, *args, **kwargs):
        """
        Handle a POST request: resolve the 'action' kwarg to a post_<action>
        handler, or answer 504 when no action was routed.
        """
        if 'action' not in kwargs:
            self.response_data['status'] = False
            self.response_data['info'] = 'Request action is empty'
            response_obj = JsonResponse(self.response_data)
            response_obj.status_code = 504
            return response_obj
        action = 'post_%s' % kwargs['action'].lower()
        return self.run(action, request)

    def run(self, action, request):
        """
        Execute the handler named *action*, applying the configured
        login / permission / superuser checks first.
        :param action: handler method name, e.g. 'get_list'
        :param request: the current HttpRequest
        :return: JsonResponse with the envelope and the handler's status code
        """
        self.request = request
        if hasattr(self, action):
            if action in self.login_required_action:
                # NOTE(review): is_authenticated is a property (not callable)
                # on Django >= 1.10 — confirm the framework version in use.
                if self.request.user and self.request.user.is_authenticated():
                    if self.check_permission(action) and self.check_superuser(action):
                        func = getattr(self, action)
                    else:
                        func = getattr(self, "get_not_permission")
                else:
                    func = getattr(self, 'get_invalid_login')
            else:
                func = getattr(self, action)
            try:
                func()
            except Exception as e:
                # str(e): the raw exception object is not JSON serializable,
                # which would itself crash JsonResponse below.
                self.response_data['info'] = str(e)
                self.response_data['status'] = False
                response_obj = JsonResponse(self.response_data)
                response_obj.status_code = 500
                return response_obj
        else:
            self.response_data['status'] = False
            self.response_data['info'] = 'Request action is invalid'
            response_obj = JsonResponse(self.response_data)
            response_obj.status_code = 501
            return response_obj
        response_obj = JsonResponse(self.response_data)
        response_obj.status_code = self.status_code
        return response_obj

    def page_split(self, objs):
        """Paginate *objs*, honouring the 'page' and 'per_page' request params."""
        page = self.parameters('page') if self.parameters('page') else 1
        # Read per_page from the request (the original hard-coded None here,
        # so the client's per_page parameter was silently ignored).
        per_page = self.parameters('per_page')
        try:
            if per_page:
                per_page = int(per_page)
            else:
                per_page = getattr(settings, 'PER_PAGE', 20)
        except ValueError:
            per_page = getattr(settings, 'PER_PAGE', 20)
        paginator = Paginator(objs, per_page=per_page)
        try:
            objs = paginator.page(page)
        except PageNotAnInteger:
            objs = paginator.page(1)
        except EmptyPage:
            objs = paginator.page(paginator.num_pages)
        self.response_data['has_previous'] = objs.has_previous()
        self.response_data['has_next'] = objs.has_next()
        self.response_data['total_page'] = paginator.num_pages
        self.response_data['per_page'] = per_page
        # Keep the historical (misspelled) key for any existing consumers.
        self.response_data['pre_page'] = per_page
        return objs

    def get_invalid_login(self):
        """Populate the envelope for an unauthenticated request (HTTP 401)."""
        self.response_data['info'] = "It's not login"
        self.response_data['status'] = False
        self.response_data['data'] = {"login_url": settings.LOGIN_URL if hasattr(settings, "LOGIN_URL") else "/login"}
        self.status_code = 401

    def check_permission(self, view):
        """Return True when the user holds the permission mapped to *view*
        (or when no permission is configured for it)."""
        permission = self.permission_view_map.get(view, "")
        if permission:
            return self.request.user.has_perm("%s.%s" % (self.app_name, permission))
        return True

    def get_not_permission(self):
        """Populate the envelope for a permission failure (HTTP 403)."""
        self.response_data['info'] = "Permission denied"
        self.response_data['status'] = False
        self.status_code = 403

    def check_superuser(self, view):
        """Return True unless *view* requires a superuser the user is not."""
        if view in self.superuser_required_action:
            return self.request.user.is_superuser
        return True
|
from rich.live import Live
from selenium.webdriver.chrome.webdriver import WebDriver
from pages.base import BasePage
from pages.cancelrequest import CancelRequests
from pages.closerequest import CloseRequests
from pages.createrequest import CreateRequests
from pages.home import HomePage
from pages.login import LoginPage
from prettify.cancel_prettifier import CancelPrettify
from utilites import make_data
from utilites.static_data import StaticData
class Cancel(BasePage):
    """End-to-end flow that cancels Change Requests listed in a text file.

    Wraps the login, navigation, cancellation and progress-display steps;
    results are streamed to a rich Live table with a status code per change:
    CANCELLED, A/C (already cancelled), A/O (already opened),
    S/F/A (scheduled for approval), Closed/Completed.
    """
    def __init__(self, driver: WebDriver):
        """ Cancel NCR E2E Actions """
        super().__init__(driver)
        # Page objects share the same underlying WebDriver instance.
        self.login_page = LoginPage(self._driver)
        self.home_page = HomePage(self.login_page._driver)
        self.closeRequest = CloseRequests(self.home_page._driver)
        self.cancel_requests = CancelRequests(self.closeRequest._driver)
        self.create_requests = CreateRequests(self.closeRequest._driver)
    def cancelRequest(self):
        """ All the functionalities in one function to mimic a user interactions to cancel a Change Request"""
        # Log in to the server
        self.login_page.enter_username_textbox()
        self.login_page.enter_password_textbox()
        self.login_page.click_login_button()
        # Parse all the change numbers from the home page
        all_changes_web = self.home_page.get_all_change_numbers()
        # Parse all the user requested change number from the source
        all_changes_file = make_data.list_of_change(StaticData.CANCEL_CHANGE_TXT_FILE_PATH)
        # Prettify tables
        CancelPrettify.make_layout()
        CancelPrettify.make_table()
        progress = CancelPrettify.progress_bar(len(all_changes_file))
        CancelPrettify.merge_layout(progress, CancelPrettify.get_table())
        with Live(CancelPrettify.show_layout(), refresh_per_second=5, vertical_overflow="visible") as live:
            while not progress.finished:
                for task in progress.tasks:
                    for _task_no, a_change in enumerate(all_changes_file):
                        # find the index of the change number from the list (custom algorithm is used).
                        # Searching an element time complexity is O(1)
                        index = self.closeRequest.get_index_for_change_number(a_change, all_changes_web)
                        if index is not None:
                            # select the change number after found
                            self.closeRequest.find_the_change_request(a_change, index)
                            # Nested guards: only a request that is not closed,
                            # not awaiting approval, not open and not already
                            # cancelled may actually be cancelled here.
                            if not self.closeRequest.is_change_status_closed():
                                if not self.closeRequest.is_status_scheduled_for_approval():
                                    if not self.cancel_requests.is_change_request_opened():
                                        if not self.cancel_requests.is_cancelled():
                                            # Perform the user interactions to cancel
                                            self.cancel_requests.wait_for_loading_icon_disappear()
                                            self.cancel_requests.select_cancel()
                                            self.cancel_requests.save_status()
                                            # // Cancelled //
                                            CancelPrettify.add_row_table(str(_task_no + 1),
                                                                         self.cancel_requests.get_cancelled_cr_number(),
                                                                         "CANCELLED")
                                            live.update(CancelPrettify.show_layout())
                                            self.create_requests.go_back_to_homepage()
                                        else:
                                            # // Already Closed //
                                            CancelPrettify.add_row_table(str(_task_no + 1),
                                                                         self.cancel_requests.get_cancelled_cr_number(),
                                                                         "A/C", style="yellow")
                                            live.update(CancelPrettify.show_layout())
                                            self.create_requests.go_back_to_homepage()
                                    else:
                                        # // Already Opened //
                                        CancelPrettify.add_row_table(str(_task_no + 1),
                                                                     self.cancel_requests.get_cancelled_cr_number(),
                                                                     "A/O", style="red")
                                        live.update(CancelPrettify.show_layout())
                                        self.create_requests.go_back_to_homepage()
                                else:
                                    # // Scheduled for Approval
                                    CancelPrettify.add_row_table(str(_task_no + 1),
                                                                 self.cancel_requests.get_cancelled_cr_number(),
                                                                 "S/F/A")
                                    live.update(CancelPrettify.show_layout())
                                    self.create_requests.go_back_to_homepage()
                            else:
                                # // Already Closed or Completed
                                CancelPrettify.add_row_table(str(_task_no + 1),
                                                             self.cancel_requests.get_cancelled_cr_number(),
                                                             "Closed/Completed")
                                live.update(CancelPrettify.show_layout())
                                self.create_requests.go_back_to_homepage()
                        if not task.finished:
                            progress.advance(task.id)
        self.home_page.click_logout_button()
|
""" test dryrun, that PyGemini can correctly invoke Gemini3D """
import shutil
import pytest
import sys
from pathlib import Path
import importlib.resources
import gemini3d
import gemini3d.run
import gemini3d.job as job
import gemini3d.web
@pytest.mark.skipif(sys.version_info < (3, 8), reason="test requires Python >= 3.8")
@pytest.mark.parametrize("name,bref", [("mini2dew_eq", 1238112), ("mini3d_eq", 2323072)])
def test_memory(name, bref):
    # The memory estimate for a downloaded reference simulation must match
    # the known byte count for that grid exactly.
    with importlib.resources.path("gemini3d.tests.data", "__init__.py") as fn:
        ref = gemini3d.web.download_and_extract(name, fn.parent)
    est = job.memory_estimate(ref)
    assert isinstance(est, int)
    assert est == bref
@pytest.mark.skipif(shutil.which("mpiexec") is None, reason="no Mpiexec available")
def test_mpiexec():
    gemini3d.setup()
    exe = job.get_gemini_exe()
    assert isinstance(exe, Path)
    # It's OK if MPIexec doesn't exist, but make the test assert consistent with that
    # there are numerous possibilities that MPIexec might not work
    # predicting the outcome of this test requires the function we're testing!
    mpiexec = job.check_mpiexec("mpiexec", exe)
    assert isinstance(mpiexec, str) or mpiexec is None
@pytest.mark.parametrize("name", ["mini2dew_eq"])
def test_dryrun(name, tmp_path):
gemini3d.setup()
with importlib.resources.path("gemini3d.tests.data", "__init__.py") as fn:
ref = gemini3d.web.download_and_extract(name, fn.parent)
params = {
"config_file": ref,
"out_dir": tmp_path,
"dryrun": True,
}
job.runner(params)
|
# -*- coding: utf-8 -*-
"""MRI RF excitation pulse design functions,
including SLR and small tip spatial design
"""
import sigpy as sp
from sigpy.mri import rf as rf
from sigpy import backend
__all__ = ['stspa']
def stspa(target, sens, coord, dt, roi=None, alpha=0, b0=None, tseg=None,
          st=None, phase_update_interval=float('inf'), explicit=False,
          max_iter=1000, tol=1E-6):
    """Small tip spatial domain method for multicoil parallel excitation.
    Allows for constrained or unconstrained designs.

    Args:
        target (array): desired magnetization profile. [dim dim]
        sens (array): sensitivity maps. [Nc dim dim]
        coord (array): coordinates for noncartesian trajectories. [Nt 2]
        dt (float): hardware sampling dwell time.
        roi (array): array for error weighting, specify spatial ROI. [dim dim]
        alpha (float): regularization term, if unconstrained.
        b0 (array): B0 inhomogeneity map [dim dim]. For explicit matrix
            building.
        tseg (None or Dictionary): parameters for time-segmented off-resonance
            correction. Parameters are 'b0' (array), 'dt' (float),
            'lseg' (int), and 'n_bins' (int). Lseg is the number of
            time segments used, and n_bins is the number of histogram bins.
        st (None or Dictionary): 'subject to' constraint parameters. Parameters
            are avg power 'cNorm' (float), peak power 'cMax' (float),
            'mu' (float), 'rhoNorm' (float), 'rhoMax' (float), 'cgiter' (int),
            'max_iter' (int), 'L' (list of arrays), 'c' (float), 'rho' (float),
            and 'lam' (float). These parameters are explained in detail in the
            SDMM documentation.
        phase_update_interval (int): number of iters between exclusive phase
            updates. If 0, no phase updates performed.
        explicit (bool): Use explicit matrix.
        max_iter (int): max number of iterations.
        tol (float): allowable error.

    Returns:
        array: pulses out.

    References:
        Grissom, W., Yip, C., Zhang, Z., Stenger, V. A., Fessler, J. A.
        & Noll, D. C.(2006).
        Spatial Domain Method for the Design of RF Pulses in Multicoil
        Parallel Excitation. Magnetic resonance in medicine, 56, 620-629.
    """
    Nc = sens.shape[0]
    Nt = coord.shape[0]
    device = backend.get_device(target)
    xp = device.xp
    with device:
        # xp.complex128 replaces the deprecated/removed `xp.complex` alias
        # (dropped in NumPy 1.24; also absent from current CuPy).
        pulses = xp.zeros((Nc, Nt), xp.complex128)

    # set up the system matrix
    if explicit:
        A = rf.linop.PtxSpatialExplicit(sens, coord, dt,
                                        target.shape, b0)
    else:
        A = sp.mri.linop.Sense(sens, coord, weights=None, tseg=tseg,
                               ishape=target.shape).H

    # handle the Ns * Ns error weighting ROI matrix
    W = sp.linop.Multiply(A.oshape, xp.ones(target.shape))
    if roi is not None:
        W = sp.linop.Multiply(A.oshape, roi)

    # apply ROI
    A = W * A

    # Unconstrained, use conjugate gradient
    if st is None:
        I = sp.linop.Identity((Nc, coord.shape[0]))
        b = A.H * W * target
        alg_method = sp.alg.ConjugateGradient(A.H * A + alpha * I,
                                              b, pulses, P=None,
                                              max_iter=max_iter, tol=tol)
    # Constrained case, use SDMM
    else:
        # vectorize target for SDMM
        target = W * target
        d = xp.expand_dims(target.flatten(), axis=0)
        alg_method = sp.alg.SDMM(A, d, st['lam'], st['L'], st['c'],
                                 st['mu'], st['rho'], st['rhoMax'],
                                 st['rhoNorm'], 10**-5, 10**-2, st['cMax'],
                                 st['cNorm'], st['cgiter'], st['max_iter'])

    # perform the design: apply optimization method to find solution pulse
    while not alg_method.done():
        # phase_update switch
        if (alg_method.iter > 0) and \
                (alg_method.iter % phase_update_interval == 0):
            # exclusive phase update: re-target with the current phase
            target = xp.abs(target) * xp.exp(
                1j * xp.angle(
                    xp.reshape(A * alg_method.x, target.shape)))
            b = A.H * target
            alg_method.b = b
        alg_method.update()

    if st is not None:
        pulses = xp.reshape(alg_method.x, [Nc, Nt])
    return pulses
|
#!/usr/bin/python
i = input( " in")
print i
a = input("out")
print a
|
import curses, time
def main(screen):
    """Draw a simple three-item menu centred on the screen for 5 seconds.

    Args:
        screen: the curses standard window supplied by curses.wrapper().
    """
    curses.curs_set(0)  # hide the cursor
    height, width = screen.getmaxyx()
    # Menu options, drawn top to bottom from the vertical middle.
    options = ['Play.', 'Score.', 'Exit.']
    # init_pair(id, fg, bg) registers a colour pair; pair 1 is
    # yellow-on-black and is retrieved later via color_pair(1).
    curses.init_pair(1, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    # attron ACTIVATES the pair for subsequent writes
    # (the original comments had activate/deactivate swapped).
    screen.attron(curses.color_pair(1))
    for offset, label in enumerate(options):
        # Same centring arithmetic as the original, including the int()
        # truncation of the len/2 float term.
        x = int(width // 2 - len(label) / 2)
        y = height // 2 + offset
        screen.addstr(y, x, label)
    # attroff deactivates the pair again.
    screen.attroff(curses.color_pair(1))
    screen.refresh()
    time.sleep(5)
# Entry point: curses.wrapper initialises the terminal, calls main(screen),
# and restores the terminal state even if main raises.
curses.wrapper(main)
|
import os
import numpy as np
from types import SimpleNamespace
import core.utils as utils
def next_expr_name(path_dir, n_digit_length, id_only=False):
    """Generate the next free experiment name inside *path_dir*.

    Names look like '<login>_<host>_e<NNNN>' (or '<login>_e<NNNN>' when
    id_only is True); the zero-padded numeric suffix is one past the
    largest suffix already present in the directory.
    """
    host = os.uname()[1]
    login = os.getlogin()
    if id_only:
        prefix = login.lower() + '_' + "e"
    else:
        prefix = login.lower() + '_' + host.lower() + '_' + "e"
    lo = len(prefix)
    hi = lo + n_digit_length
    taken = [int(entry[lo:hi]) for entry in os.listdir(path_dir) if entry.startswith(prefix)]
    next_number = np.max([0] + taken) + 1
    return prefix + str(next_number).zfill(n_digit_length)
def dict2str(d, start_n=0):
    """
    Render a dict or SimpleNamespace as an indented multi-line string.
    Primarily used to print settings (from a param file).
    :param d: dict or SimpleNamespace to render
    :param start_n: number of leading spaces on every line of this level
    :return: the formatted string
    """
    if isinstance(d, SimpleNamespace):
        d = d.__dict__
    pad = " " * start_n
    pieces = []
    for key in sorted(d.keys()):
        val = d[key]
        if isinstance(val, (dict, SimpleNamespace)):
            # Nested mapping: key on its own line, children indented 2 more.
            pieces.append(pad + str(key) + ": " + "\n" + dict2str(val, start_n + 2))
        else:
            pieces.append(pad + str(key) + ": " + str(val) + "\n")
    return "".join(pieces)
def init_experiment_settings(params):
    """Prepare the output directory and persist settings for a new experiment.

    Only the main process (in distributed runs) does the work: it picks a
    fresh experiment name when none was given, creates a per-experiment
    output folder, and writes a copy of the parameters into it.

    Args:
        params: namespace with at least `experiment_name` and `output_dir`;
            both may be mutated in place.
    """
    if utils.is_main_process():
        # new experiment name
        if len(params.experiment_name) == 0:
            params.experiment_name = next_expr_name(params.output_dir, 4)
        # create folder for this experiment
        params.output_dir = os.path.join(params.output_dir, params.experiment_name)
        os.mkdir(params.output_dir)
        # copy of param file; 'with' guarantees the handle is closed (the
        # original leaked it via print(..., file=open(...))).
        save_param_filename = os.path.join(params.output_dir, params.experiment_name + "_params.txt")
        with open(save_param_filename, 'w') as param_file:
            print(dict2str(params), file=param_file)
|
#!/usr/bin/python
from __future__ import division
from ete3 import Tree
import sys
import copy
from blosum import *
from Bio.PDB import *
import urllib2
# Command-line arguments: alignment file, newick tree file, and the PDB id.
tree_test = sys.argv[1]
newick_file = sys.argv[2]
name = sys.argv[3]
# Substitution model: BLOSUM62 scores turned into transition probabilities.
matrix = BlosumMatrix('./blosum62.txt')
probability_matrix = ProbabilityMatrix(tree_test,matrix)
class Etree(Tree):
    """Phylogenetic tree with alignment lookup tables and conservation scoring.

    NOTE(review): the tables below are *class* attributes, shared by every
    node/instance of Etree — confirm this sharing is intended.
    """
    _names = []                  # leaf names collected by get_names()
    alignements = dict()         # full sequence id -> aligned sequence
    _identificators = []         # full sequence ids read from the fasta file
    _IDs = dict()                # leaf name -> full sequence id
    _idArray = dict()            # leaf name -> aligned sequence (fast lookup)
    def get_pdb(self,name):
        """Download <name>.pdb from RCSB into the working directory."""
        protein_file = name + ".pdb"
        pdb_output = open(protein_file, "w")
        url_pdb = "https://files.rcsb.org/download/%s.pdb" %name
        try:
            handle = urllib2.urlopen(url_pdb)
        except URLError as error:
            print(error.reason)
            sys.exit(1)
        pdb_output.write(handle.read())
        pdb_output.close()
    def PDB_parse(self,name):
        """Return the residue number of the first residue of the first chain found."""
        p = PDBParser()
        structure = p.get_structure(name,name+".pdb")
        model = structure[0]
        # add a try for the individual chains (A, B, C, I, X in that order)
        try:
            chain = model['A']
        except KeyError as error:
            try:
                chain = model['B']
            except KeyError as error:
                try:
                    chain = model['C']
                except KeyError as error:
                    try:
                        chain = model['I']
                    except KeyError as error:
                        try:
                            chain = model['X']
                        except KeyError as error:
                            print("Cannot find this type of chain.")
                            sys.exit(1)
                        else:
                            pass
                    else:
                        pass
                else:
                    pass
            else:
                pass
        else:
            pass
        #always returns position of first chain which could no be correct
        residue_list = Selection.unfold_entities(chain,'A')
        #print(residue_list[0].get_full_id()[3][1])
        residue_start = residue_list[0].get_full_id()[3][1]
        return residue_start
    def compute_conservation(self,file,residue_start,index,weightsArray,acid1):
        """Weighted conservation of amino acid *acid1* at position *index*.

        Maps the PDB residue number onto the alignment column (skipping
        gap '-' characters), then averages, over all leaves, whether the
        leaf's residue matches acid1, weighted by *weightsArray*.
        NOTE(review): only the first sequence line of the alignment is
        scanned (the loop breaks after one word) — confirm this is intended.
        """
        count_mezera = 0
        count_basic_acid = 0
        count_mutated_acid = 0
        count_else=0
        all_count =0
        count_pos=0
        start_position = 1 # changes
        pos = 0
        handle = open(file,"r")
        lines = iter(handle.readlines())
        for line in lines:
            if(line.startswith('>')):
                continue
            else:
                for word in line.split():
                    #if(word[0] == '-'):
                    #    break
                    #if(word[0] == 'M'):
                    #    count_pos -=1#-residue_start+1
                    print(residue_start)
                    if(residue_start > len(word)):
                        #print(residue_start)
                        #print(index)
                        count_pos = residue_start
                        #print(count_pos)
                        for i in range(0,len(word),1):
                            if(word[i] != '-'):
                                count_pos +=1
                                if(count_pos == residue_start+index):
                                    pos = i
                                    print(word[i])
                                    break
                    else:
                        #print(residue_start)
                        #print(index)
                        count_pos = residue_start
                        # offset correction depends on the sign of the start
                        # residue number; see hedged note in the docstring
                        if(residue_start < 0):
                            chain_res = index#+residue_start + abs(residue_start) + abs(residue_start) -1
                        elif (residue_start == 1):
                            chain_res= index+residue_start
                        else:
                            chain_res= index+residue_start+2
                        print("index:" + str(index))
                        print(chain_res)
                        for i in range(0,len(word),1):
                            if(word[i] != '-'):
                                count_pos +=1
                                if(count_pos == chain_res):
                                    pos = i
                                    print("position:" + str(i))
                                    print(word[i])
                                    break
                    break
        #print("POSITION:"+str(pos))
        conservation_value = 0
        base_acid = 0
        weights = 0
        for name in self._names:
            sequence = self._idArray[name]
            acid = sequence[pos]
            #print(str(acid))
            if(acid == acid1):
                base_acid = 1
            else:
                base_acid= 0
            weights += weightsArray[name]
            conservation_value += weightsArray[name] * base_acid
        accuracy = conservation_value/ weights
        return accuracy
    def create_ID_table(self):
        """create table where key is node name and value is sequence to speed up lookup"""
        for name in self._names:
            key1 = self._IDs.get(name)
            seq1 = self.alignements[key1]
            self._idArray[name] = seq1
    def create_alignement_table(self,file):
        """creates lookup table for sequence names and sequences"""
        with open(file,'r') as f:
            lines = iter(f.readlines())
            for line in lines:
                if(line.startswith('>')):
                    name = line.strip('>').strip('\n')
                    sequence = lines.next().strip('\n')
                    self.alignements[name] = sequence
    def create_names_table(self,file):
        """create lookup table for complete sequence ID according to its abbrevation"""
        with open(file,'r') as f:
            lines = iter(f.readlines())
            for line in lines:
                if(line.startswith('>')):
                    self._identificators.append(line.strip('>').strip('\n'))
            for item in self._identificators:
                for name in self._names:
                    if(name in item):
                        self._IDs[name] = item
    def get_table_value(self,value):
        """get value from alignements table"""
        return self.alignements[value]
    def get_names(self):
        """get all leaf names in the tree and stores them in _names array"""
        for leaf in self:
            if(leaf.is_leaf()):
                self._names.append(leaf.name)
    def print_names(self):
        """function for printing leafs names"""
        for name in self._names:
            print(name)
    def create_array(self):
        """creates array of weights and fills it with value according to its node"""
        self.weightsArray = dict()
        for name in self._names:
            self.weightsArray[name] = 0
        if self.name != '':
            self.weightsArray[self.name] = 1
    def add_node_array(self):
        """adds weights array to every node in the tree"""
        for node in self.traverse('postorder'):
            node.create_array()
    def calculate_weights(self):
        """calculates the values in weights array in each node

        NOTE(review): traverses the module-global tree ``t`` rather than
        ``self`` — confirm this method is only ever called on ``t``.
        """
        #fudge factor constant to prevent 0 in the weights array
        fugde_factor = 0.1
        #traverse the tree and compute values in each node
        for node in t.traverse('postorder'):
            #get children nodes of actual node
            children = node.get_children()
            #if no children found, continue with next node
            if not children:
                continue
            else:
                i = 0
                #array where value of multiplication for each item in array is stored
                vals = [1]*250
                #calculate value for each child
                for child in children:
                    for parentItem in node._names:
                        result = 0
                        seq2 = node._idArray[parentItem]
                        for childItem in child._names:
                            #calculate probability of changing child sequence to parent sequence
                            seq1 = child._idArray[childItem]
                            probability = probability_matrix.find_pair(seq1,seq2)
                            # formula Pi*Li*t
                            result += probability * child.weightsArray[childItem] * (child.dist + fugde_factor)
                        #value from each child needs to be multiplicated
                        vals[i] *= result
                        #store actual value to weightsArray item in parent node
                        node.weightsArray[parentItem] = vals[i]
                        i+=1
                    i = 0
            #print(node.weightsArray.values())
        #print(t.get_tree_root().weightsArray)
        return t.get_tree_root().weightsArray
#t = Tree(newick_file)
#print(t)
# Build the tree, root it at the midpoint, and fill the lookup tables.
t = Etree(newick_file)
t.create_alignement_table(tree_test)
R = t.get_midpoint_outgroup()
t.set_outgroup(R)
t.get_names()
t.add_node_array()
t.create_names_table(tree_test)
t.create_ID_table()
rootWeightsArray = t.calculate_weights()
#for name in names:
# Fetch the structure and locate the first residue number of its chain.
t.get_pdb(name)
start_pos = t.PDB_parse(name)
# Score every "<acid><position>" line and write "<acid> <pos> <score>".
f = open(name+'_NEW.txt','r')
out = open(name+'_conservation_results1.txt','w')
for line in f.readlines():
    original_acid = line[0]
    out.write(original_acid+ " ")
    position = int(line[1:])
    out.write(str(position)+ ' ')
    conservation_score = t.compute_conservation(tree_test,start_pos,position,rootWeightsArray,original_acid)
    out.write(str(conservation_score)+ '\n')
|
# usage: python cv.py --genofile 'myAveImpGenotype_wheat183.csv' --phenfile 'y8new.txt' [--CVfolds 5 --ridge 0 -3 -9]
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
import random
import math
from argparse import ArgumentParser
from collections import Counter
def parse_args():
    """Parse the command line arguments for the program."""
    arg_parser = ArgumentParser(
        description="Cross validation for csv and txt files")
    arg_parser.add_argument('--CVfolds', type=int, default=3,
                            help='number of cv folds')
    arg_parser.add_argument('--genofile', required=True, type=str,
                            help='Input Genotype File')
    arg_parser.add_argument('--phenfile', required=True, type=str,
                            help='Input Phenotype File')
    arg_parser.add_argument('--ridge', type=int, default=range(-3, 3),
                            help='ridge parameters')
    arg_parser.add_argument('--it', type=int, default=10,
                            help='number of iterations')
    return arg_parser.parse_args()
def get_genotype(gen_filename):
    """Read the space-delimited genotype file and return it transposed."""
    raw = np.genfromtxt(gen_filename, delimiter=' ')
    print(gen_filename)
    return raw.T
def get_phenotype(phen_filename):
    """Read one phenotype value per line and return them as a 1-D array.

    The original implementation never closed the file handle; a context
    manager now closes it deterministically.
    """
    with open(phen_filename) as phen:
        y = [float(line.strip()) for line in phen]
    # np.transpose of a 1-D sequence returns a 1-D ndarray; kept for
    # compatibility with the original return value.
    return np.transpose(y)
def rr(xtrain, ytrain, xtest, ridgepara):
    """Ridge regression (dual form) for one ridge exponent.

    The penalty is 10**ridgepara; predictions for xtest are returned as
    an (n_test, 1) numpy matrix.
    """
    penalty = 10 ** np.array(ridgepara)
    n = len(xtrain)
    # Gram matrix X X^T regularized by the penalty on the diagonal.
    gram = np.matrix(xtrain) * np.matrix(np.transpose(xtrain))
    regularized = gram + penalty * np.mat(np.eye(n))
    inv = regularized.I
    # Primal weights recovered from the dual solution: X^T (XX^T + aI)^-1 y.
    weights = np.matrix(np.transpose(xtrain)) * np.matrix(inv) * np.matrix(ytrain).T
    return np.matrix(xtest) * weights
def crossval(k, xtrain, ytrain, ridgepara):
    """K-fold cross validation over a set of ridge exponents.

    Rotates contiguous folds through the training data, scores each ridge
    exponent by its summed test-fold correlation, and returns the exponent
    with the highest total.

    Fixes two Python 3 bugs in the original: `n/k` produced float slice
    indices (TypeError), and `.index()` was called on a `map` object.
    """
    n = len(xtrain)
    indices = np.arange(n)
    p = len(ridgepara)
    fold_size = n // k  # integer fold size; '/' gives a float on Python 3
    corr_output = np.zeros(shape=(k, p))
    for j in np.arange(p):
        newid = indices
        for fold in range(k):
            tstID = newid[0:fold_size]
            tstacc = tstID  # accumulated indices of test set
            trnID = newid[fold_size:]
            trnrmn = trnID  # remaining indices of train set
            pred = rr(xtrain[trnID, :], ytrain[trnID], xtrain[tstID, :], ridgepara[j])
            corr = np.corrcoef(np.matrix(ytrain[tstID]), np.transpose(pred))
            corr_output[fold, j] = corr[1, 0]
            # rotate so the next contiguous block becomes the test fold
            newid = np.hstack((indices[trnrmn], indices[tstacc]))
    # materialize as a list: map() objects have no .index() on Python 3
    corr_sum = list(map(sum, zip(*corr_output)))
    optpara = ridgepara[corr_sum.index(max(corr_sum))]
    return optpara
def main():
    """Run repeated hold-out evaluations of CV-tuned ridge regression.

    Fixes Python 3 bugs in the original: `print('...') % x` applied '%' to
    print()'s return value (None); `m*3/4` produced float slice bounds; and
    `random.sample` was given an ndarray, which recent Python 3 rejects.
    """
    args = parse_args()
    k = args.CVfolds
    x = get_genotype(args.genofile)
    m = len(x)
    print(x.shape)
    y = get_phenotype(args.phenfile)
    paras = args.ridge
    Iter = args.it
    predictions = []
    optparameters = []
    correlations = []
    print(':-))) Ridge Regression with CV in %i iterations' % Iter)
    split = m * 3 // 4  # integer split point: 75% train / 25% test
    for iteration in range(Iter):
        # random.sample requires a sequence, not an ndarray
        whole = random.sample(range(m), m)
        trainid = whole[0:split]
        testid = whole[split:]
        xtrain = x[trainid, :]
        xtest = x[testid, :]
        ytrain = y[trainid]
        ytest = y[testid]
        optpara = crossval(k, xtrain, ytrain, args.ridge)
        optparameters.append(optpara)
        ypred = rr(xtrain, ytrain, xtest, optpara)
        predictions.append(ypred)
        cor = np.corrcoef(np.matrix(ytest), np.transpose(ypred))
        correlations.append(cor[1, 0])
    # pick the exponent chosen most often across iterations
    freqofpara = [optparameters.count(p) for p in paras]
    opt = paras[freqofpara.index(max(freqofpara))]
    print('Output: The most frequently selected optimal parameter by CVs is %f' % 10 ** opt)
    # mean correlation over the iterations that selected the winning exponent
    subcorr = [correlations[q] for q in range(len(correlations)) if optparameters[q] == opt]
    print("Corresponding Pearson's correlation btw true and pred is %f\n" % np.mean(subcorr))
# Script entry point: only run the pipeline when executed directly.
if __name__ == '__main__':
    main()
|
from pathlib import Path
import itertools
def get_numbers():
    """Yield each whitespace-separated integer from the day-1 input file."""
    text = Path('data/day_01.txt').read_text()
    for token in text.strip().split():
        yield int(token)
def part_1():
    """Return the sum of all frequency changes in the input."""
    total = 0
    for value in get_numbers():
        total += value
    return total
def part_2():
    """Return the first running total reached twice while cycling the input."""
    seen = set()
    total = 0  # renamed from 'sum', which shadowed the builtin
    for n in itertools.cycle(get_numbers()):
        total += n
        if total in seen:
            return total
        seen.add(total)
# Print both puzzle answers when invoked as a script.
if __name__ == '__main__':
    print(f'Part 1: {part_1()}')
    print(f'Part 2: {part_2()}')
|
import numpy as np
from ..io import get_bb_all2d, get_bb_all3d
def seg2Count(seg, do_sort=True, rm_zero=False):
    """Return (ids, counts) of the labels present in a segmentation.

    Returns (None, None) for an all-zero input. With rm_zero the background
    label 0 is dropped; with do_sort ids are ordered by decreasing count.
    A binary mask (max label <= 1) short-circuits to a single nonzero count.
    """
    top = seg.max()
    if top == 0:
        return None, None
    if not top > 1:
        # binary mask: single foreground label
        return np.array([1]), np.array([np.count_nonzero(seg)])
    ids, counts = np.unique(seg, return_counts=True)
    if rm_zero:
        counts = counts[ids > 0]
        ids = ids[ids > 0]
    if do_sort:
        order = np.argsort(-counts)
        ids = ids[order]
        counts = counts[order]
    return ids, counts
def seg_iou3d(seg1, seg2, ui0=None):
    """For each label of 3-D segmentation seg1, find its best-overlap match in seg2.

    Returns an (len(ui0), 5) int array with columns:
    [seg1 id, matched seg2 id, seg1 count, matched seg2 count, overlap count].
    ui0 optionally restricts which seg1 ids are evaluated (defaults to all
    nonzero ids of seg1).
    """
    # nonzero labels of seg1 with their voxel counts
    ui,uc = np.unique(seg1,return_counts=True)
    uc=uc[ui>0]
    ui=ui[ui>0]
    ui2,uc2 = np.unique(seg2,return_counts=True)
    if ui0 is None:
        ui0=ui
    out = np.zeros((len(ui0),5),int)
    # per-id bounding boxes (drop the leading id column)
    bbs = get_bb_all3d(seg1,uid=ui0)[:,1:]
    out[:,0] = ui0
    out[:,2] = uc[np.in1d(ui,ui0)]
    for j,i in enumerate(ui0):
        bb= bbs[j]
        # histogram of seg2 labels inside the mask of seg1 == i, restricted
        # to the bounding box of label i for speed
        ui3,uc3=np.unique(seg2[bb[0]:bb[1]+1,bb[2]:bb[3]+1,bb[4]:bb[5]+1]*(seg1[bb[0]:bb[1]+1,bb[2]:bb[3]+1,bb[4]:bb[5]+1]==i), return_counts=True)
        uc3[ui3==0]=0
        # NOTE(review): unlike seg_iou2d there is no (ui3>0).any() guard here,
        # so a label fully over background matches id 0 and the uc2 lookup
        # below can be empty — verify against callers.
        out[j,1] = ui3[np.argmax(uc3)]
        out[j,3] = uc2[ui2==out[j,1]]
        out[j,4] = uc3.max()
    return out
def seg_iou2d(seg1, seg2, ui0=None, bb1=None, bb2=None):
    """2-D analogue of seg_iou3d: best-overlap match in seg2 for each seg1 label.

    Returns an (len(ui0), 5) int array with columns
    [seg1 id, matched seg2 id, seg1 count, matched seg2 count, overlap count].
    Precomputed bounding-box tables bb1/bb2 may be passed to avoid
    recomputation.
    """
    # bb1/bb2: first column of indexing, last column of size
    if bb1 is None:
        # derive ids/counts directly from seg1
        ui,uc = np.unique(seg1,return_counts=True)
        uc=uc[ui>0];ui=ui[ui>0]
    else:
        ui = bb1[:,0]
        uc = bb1[:,-1]
    if bb2 is None:
        ui2, uc2 = np.unique(seg2,return_counts=True)
    else:
        ui2 = bb2[:,0]
        uc2 = bb2[:,-1]
    if bb1 is None:
        if ui0 is None:
            bb1 = get_bb_all2d(seg1, uid=ui)
            ui0 = ui
        else:
            bb1 = get_bb_all2d(seg1, uid=ui0)
    else:
        if ui0 is None:
            ui0 = ui
        else:
            # make sure the order matches..
            bb1 = bb1[np.in1d(bb1[:,0], ui0)]
            ui0 = bb1[:,0]
    out = np.zeros((len(ui0),5),int)
    out[:,0] = ui0
    out[:,2] = uc[np.in1d(ui,ui0)]
    for j,i in enumerate(ui0):
        bb= bb1[j, 1:]
        # seg2 labels inside the mask of seg1 == i, within label i's bbox
        ui3,uc3 = np.unique(seg2[bb[0]:bb[1]+1,bb[2]:bb[3]+1]*(seg1[bb[0]:bb[1]+1,bb[2]:bb[3]+1]==i),return_counts=True)
        uc3[ui3==0] = 0
        # only record a match when some nonzero seg2 label overlaps
        if (ui3>0).any():
            out[j,1] = ui3[np.argmax(uc3)]
            out[j,3] = uc2[ui2==out[j,1]]
            out[j,4] = uc3.max()
    return out
def relabel(seg, do_dtype=False):
    """Map the nonzero labels of seg onto consecutive ids 1..K.

    Label 0 (background) is preserved. With do_dtype the result is also
    cast to the smallest sufficient unsigned dtype via relabelDtype.
    """
    if seg is None or seg.max() == 0:
        return seg
    labels = np.unique(seg)
    labels = labels[labels > 0]
    # lookup table mapping old label -> new consecutive label
    lut = np.zeros(int(max(labels)) + 1, dtype=seg.dtype)
    lut[labels] = np.arange(1, len(labels) + 1)
    remapped = lut[seg]
    return relabelDtype(remapped) if do_dtype else remapped
def relabelDtype(seg):
    """Cast seg to the smallest unsigned integer dtype that fits its maximum."""
    peak = seg.max()
    for bits, dtype in ((8, np.uint8), (16, np.uint16), (32, np.uint32)):
        if peak < 2 ** bits:
            return seg.astype(dtype)
    return seg.astype(np.uint64)
def seg_postprocess(seg, sids=[]):
    """Fill unlabeled (zero) regions of a 2-D or 3-D segmentation in place.

    NOTE(review): `mahotas` and `binary_fill_holes` are not imported in this
    module's visible imports — verify they are provided elsewhere. The
    mutable default `sids=[]` is never mutated here, so it is benign.
    """
    # watershed fill the unlabeled part
    if seg.ndim == 3:
        # process slice by slice
        for z in range(seg.shape[0]):
            seg[z] = mahotas.cwatershed(seg[z]==0, seg[z])
            # additionally close holes inside the requested ids
            for sid in sids:
                tmp = binary_fill_holes(seg[z]==sid)
                seg[z][tmp>0] = sid
    elif seg.ndim == 2:
        seg = mahotas.cwatershed(seg==0, seg)
    return seg
|
class LinkedList:
    """Singly linked list built from an optional iterable of payloads.

    The original __init__ consumed the caller's list via pop(0) (mutating
    the argument, O(n) per pop) and raised IndexError on an empty list;
    this version leaves the argument untouched and treats [] as empty.
    """

    def __init__(self, nodes=None):
        self.head = None
        if nodes:
            it = iter(nodes)
            node = Node(data=next(it))
            self.head = node
            for elem in it:
                node.next = Node(data=elem)
                node = node.next

    def __repr__(self):
        """Render as a list of payloads terminated by the string 'None'."""
        parts = []
        node = self.head
        while node is not None:
            parts.append(node.data)
            node = node.next
        parts.append("None")
        return str(parts)
class Node:
    """A single linked-list cell holding a payload and a next pointer."""

    def __init__(self, data):
        self.data = data
        self.next = None

    def __repr__(self):
        # __repr__ must return a str; the original returned self.data
        # directly, which raises TypeError for non-string payloads.
        return str(self.data)
def printLinkedList(a) -> None:
    """Print the chain of nodes starting at `a` as 'v1 -> v2 -> ...'.

    The return annotation was corrected from `LinkedList`: the function
    prints and returns None. `a` must be a non-None node with `data` and
    `next` attributes.
    """
    node = a
    result = f'{node.data}'
    node = node.next
    while node:
        result += f' -> {str(node.data)}'
        node = node.next
    print(result)
def insertNode(head, value):
    """Insert value into an ascending sorted linked list; return the (new) head."""
    fresh = Node(value)
    # Front insertion: empty list, or value belongs before the current head.
    if head is None or head.data >= value:
        fresh.next = head
        return fresh
    # Walk with a trailing cursor so cursor.data < value and
    # cursor.next is the first node (if any) with data >= value.
    cursor = head
    while cursor.next is not None and cursor.next.data < value:
        cursor = cursor.next
    # Splice the new node between cursor and cursor.next.
    fresh.next = cursor.next
    cursor.next = fresh
    return head
# Demo: middle insertion into the sorted list 1 -> 2 -> 5.
head_element = Node(1)
head_element.next = Node(2)
head_element.next.next = Node(5)
printLinkedList(insertNode(head_element, 3))
# Demo: front insertion (a new head node is returned).
head_element = Node(1)
head_element.next = Node(2)
head_element.next.next = Node(5)
printLinkedList(insertNode(head_element, 0))
# Demo: tail insertion.
head_element = Node(1)
head_element.next = Node(2)
head_element.next.next = Node(5)
printLinkedList(insertNode(head_element, 10))
# Demo: insertion into an empty list.
head_element = None
printLinkedList(insertNode(head_element, 3))
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
from sklearn.manifold import TSNE
import tensorflow as tf
from scipy import stats
import os
import time
from matplotlib import animation
from tf_util.stat_util import approx_equal
from dsn.util.dsn_util import assess_constraints
def plot_opt(
    model_dirs,
    converge_dict,
    legendstrs=None,
    xlim_its=None,
    maxconlim=3.0,
    plotR2=False,
    fontsize=16,
    T_x_labels=None,
):
    """Plot EPI optimization diagnostics for one or more model directories.

    Reads each directory's opt_info.npz, then produces (1) an entropy
    (and optionally r^2) trace figure and (2) a grid of per-constraint
    mean-T(x) traces against their targets mu.

    Returns (figs, ME_its): the list of created figures and the
    convergence iterations from assess_constraints (or (None, None) if
    no file could be read).
    """
    max_legendstrs = 10
    n_fnames = len(model_dirs)
    if legendstrs is None:
        legendstrs = n_fnames * [""]
    if (type(xlim_its) == int):
        # broadcast a single x-limit to all models
        xlim_its = n_fnames*[xlim_its]
    fnames = []
    for i in range(n_fnames):
        fnames.append(model_dirs[i] + "opt_info.npz")
    first_its, ME_its, MEs = assess_constraints(model_dirs, converge_dict)
    # read optimization diagnostics from files
    costs_list = []
    Hs_list = []
    R2s_list = []
    mean_T_xs_list = []
    T_xs_list = []
    epoch_inds_list = []
    last_inds = []
    flag = False
    for i in range(n_fnames):
        fname = fnames[i]
        if os.path.isfile(fname):
            try:
                npzfile = np.load(fname)
            except:
                # unreadable archive: drop this model and continue
                n_fnames = n_fnames - 1
                print("Could not read %s. Skipping." % fname)
                continue
        else:
            n_fnames = n_fnames - 1
            continue
        costs = npzfile["costs"]
        Hs = npzfile["Hs"]
        R2s = npzfile["R2s"]
        mean_T_xs = npzfile["mean_T_xs"]
        T_xs = npzfile["T_xs"]
        epoch_inds = npzfile["epoch_inds"]
        check_rate = npzfile["check_rate"]
        if (xlim_its is None):
            last_inds.append(npzfile["it"] // check_rate)
        else:
            last_inds.append(xlim_its[i] // check_rate)
        costs_list.append(costs)
        Hs_list.append(Hs)
        R2s_list.append(R2s)
        mean_T_xs_list.append(mean_T_xs)
        epoch_inds_list.append(epoch_inds)
        if not flag:
            # shared axes metadata taken from the first readable file
            iterations = np.arange(0, check_rate * Hs.shape[0] + 1, check_rate)
            n_suff_stats = T_xs.shape[2]
            mu = npzfile["mu"]
            flag = True
    if n_fnames == 0:
        print("Filenames invalid. Exitting.")
        return None, None
    figs = []
    # plot cost, entropy and r^2
    num_panels = 2 if plotR2 else 1
    # NOTE(review): figsize computed here is unused; subplots below uses (8, 4).
    figsize = (num_panels * 4, 4)
    fig, axs = plt.subplots(1, num_panels, figsize=(8, 4))
    figs.append(fig)
    """
    ax = axs[0]
    for i in range(n_fnames):
        costs = costs_list[i]
        last_ind = last_inds[i]
        ax.plot(iterations[:last_ind], costs[:last_ind], label=legendstrs[i])
    ax.set_xlabel("iterations", fontsize=fontsize)
    ax.set_ylabel("cost", fontsize=fontsize)
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    """
    if plotR2:
        ax = axs[0]
    else:
        ax = axs
    # entropy traces, one per model; dashed line marks convergence for a single model
    for i in range(n_fnames):
        Hs = Hs_list[i]
        epoch_inds = epoch_inds_list[i]
        last_ind = last_inds[i]
        if np.sum(np.isnan(Hs[:last_ind])) > 0:
            print("has nan")
        if i < 5:
            ax.plot(iterations[:last_ind], Hs[:last_ind], label=legendstrs[i])
        else:
            ax.plot(iterations[:last_ind], Hs[:last_ind])
        if n_fnames == 1 and ME_its[i] is not None:
            if Hs.shape[0] > T_xs.shape[0]:
                ME_it = epoch_inds[ME_its[i]]
            else:
                ME_it = iterations[ME_its[i]]
            finite_inds = np.isfinite(Hs[:last_ind])
            ax.plot(
                [ME_it, ME_it],
                [
                    np.min(Hs[:last_ind][finite_inds]),
                    np.max(Hs[:last_ind][finite_inds]),
                ],
                "k--",
            )
    # x axis labeled in EPI epochs rather than raw iterations
    xticks = epoch_inds[epoch_inds <= (last_ind*check_rate)]
    xtick_labels = ['%d' % i for i in range(len(xticks))]
    xlabel = "EPI epochs (%d iterations)" % epoch_inds[1]
    ax.set_xticks(xticks)
    ax.set_xticklabels(xtick_labels, fontsize=(fontsize-4))
    ax.set_yticks(ax.get_yticks(False))
    ax.set_yticklabels(['%d' % (int(x)) for x in ax.get_yticks(False)], fontsize=(fontsize-4))
    ax.set_xlabel(xlabel, fontsize=fontsize)
    ax.set_ylabel(r"$H(q_\theta(z))$", fontsize=fontsize)
    if plotR2:
        ax = axs[1]
        for i in range(n_fnames):
            last_ind = last_inds[i]
            R2s = R2s_list[i]
            epoch_inds = epoch_inds_list[i]
            if i < max_legendstrs:
                ax.plot(iterations[:last_ind], R2s[:last_ind], label=legendstrs[i])
            else:
                ax.plot(iterations[:last_ind], R2s[:last_ind])
            if n_fnames == 1 and ME_its[i] is not None:
                if Hs.shape[0] > T_xs.shape[0]:
                    ME_it = epoch_inds[ME_its[i]]
                else:
                    ME_it = iterations[ME_its[i]]
                ax.plot(
                    [ME_it, ME_it], [np.min(R2s[:last_ind]), np.max(R2s[:last_ind])], "k--"
                )
        ax.set_xlabel("iterations", fontsize=fontsize)
        ax.set_ylabel(r"$r^2$", fontsize=fontsize)
        ax.spines["right"].set_visible(False)
        ax.spines["top"].set_visible(False)
        if not legendstrs[0] == "":
            ax.legend(fontsize=fontsize)
    plt.tight_layout()
    plt.show()
    # plot constraints throughout optimization
    yscale_fac = 5
    n_cols = min(n_suff_stats, 4)
    n_rows = int(np.ceil(n_suff_stats / n_cols))
    xlabel = "EPI epochs"
    figsize = (n_cols * 4, n_rows * 4)
    fig, axs = plt.subplots(n_rows, n_cols, figsize=figsize)
    if n_rows == 1:
        axs = [axs]
    figs.append(fig)
    for i in range(n_suff_stats):
        ax = axs[i // n_cols][i % n_cols]
        # make ylim 2* mean abs error of last 50% of optimization
        median_abs_errors = np.zeros((n_fnames,))
        for j in range(n_fnames):
            mean_T_xs = mean_T_xs_list[j]
            epoch_inds = epoch_inds_list[j]
            num_epoch_inds = len(epoch_inds)
            last_ind = last_inds[j]
            if j < max_legendstrs:
                ax.plot(
                    iterations[:last_ind], mean_T_xs[:last_ind, i], label=legendstrs[j]
                )
            else:
                ax.plot(iterations[:last_ind], mean_T_xs[:last_ind, i])
            median_abs_errors[j] = np.median(
                np.abs(mean_T_xs[(last_ind // 2) : last_ind, i] - mu[i])
            )
        if n_fnames == 1:
            T_x_means = np.mean(T_xs[:, :, i], axis=1)
            T_x_stds = np.std(T_xs[:, :, i], axis=1)
            num_epoch_inds = len(epoch_inds)
            # ax.errorbar(epoch_inds, T_x_means[:num_epoch_inds], T_x_stds[:num_epoch_inds], c='r', elinewidth=3)
            finite_inds = np.isfinite(mean_T_xs[:last_ind, i])
            line_min = min(
                [
                    np.min(mean_T_xs[:last_ind, i][finite_inds]),
                    mu[i] - yscale_fac * median_abs_errors[j],
                    np.min(T_x_means - 2 * T_x_stds),
                ]
            )
            line_max = max(
                [
                    np.max(mean_T_xs[:last_ind, i][finite_inds]),
                    mu[i] + yscale_fac * median_abs_errors[j],
                    np.max(T_x_means + 2 * T_x_stds),
                ]
            )
            ymin = line_min
            ymax = line_max
            if ME_its[j] is not None:
                if Hs.shape[0] > T_xs.shape[0]:
                    ME_it = epoch_inds[ME_its[j]]
                else:
                    ME_it = iterations[ME_its[j]]
                ax.plot([ME_it, ME_it], [line_min, line_max], "k--")
        # solid black line marks the target value mu[i]
        ax.plot([iterations[0], iterations[max(last_inds)]], [mu[i], mu[i]], "k-")
        # make ylim 2* mean abs error of last 50% of optimization
        if not n_fnames == 1:
            ymin = mu[i] - yscale_fac * np.max(median_abs_errors)
            ymax = mu[i] + yscale_fac * np.max(median_abs_errors)
        if np.isnan(ymin) or np.isnan(ymax):
            ax.set_ylim(mu[i] - maxconlim, mu[i] + maxconlim)
        else:
            ax.set_ylim(max(ymin, mu[i] - maxconlim), min(ymax, mu[i] + maxconlim))
        if T_x_labels is not None:
            ax.set_ylabel(
                r"$E_{z\sim q_\theta}[$" + T_x_labels[i] + "$]$",
                fontsize=(fontsize + 2),
            )
        else:
            ax.set_ylabel(r"$E[T_%d(z)]$" % (i + 1), fontsize=fontsize)
        if i == (n_cols - 1):
            if not legendstrs[0] == "":
                ax.legend(fontsize=fontsize)
        if i > n_suff_stats - n_cols - 1:
            # only the bottom row of panels gets x tick labels
            ax.set_xticks(xticks)
            ax.set_xticklabels(xtick_labels, fontsize=(fontsize-2))
            ax.set_xlabel(xlabel, fontsize=fontsize)
        ax.set_yticks(ax.get_yticks(False))
        ax.set_yticklabels(['%d' % (int(x)) for x in ax.get_yticks(False)], fontsize=(fontsize-2))
        ax.spines["right"].set_visible(False)
        ax.spines["top"].set_visible(False)
    plt.tight_layout()
    plt.show()
    return figs, ME_its
def coloring_from_str(c_str, system, npzfile, AL_final_it):
    """Select per-sample scatter colors from an opt_info archive.

    Maps a color-scheme name to (c, c_label_str, cm, vmin, vmax): the
    per-sample values, an axis label, a colormap, and optional color
    limits. Unknown names fall back to uniform coloring.
    """
    cm = plt.cm.get_cmap("viridis")
    vmin = None
    vmax = None
    if c_str == "log_q_z":
        c = npzfile["log_q_zs"][AL_final_it]
        c_label_str = r"$log(q(z))$"
    elif c_str == "T_x1":
        c = npzfile["T_xs"][AL_final_it, :, 0]
        c_label_str = system.T_x_labels[0]
    elif c_str == "T_x2":
        c = npzfile["T_xs"][AL_final_it, :, 1]
        c_label_str = system.T_x_labels[1]
    elif c_str == "real part":
        c = npzfile["T_xs"][AL_final_it, :, 0]
        cm = plt.cm.get_cmap("Reds")
        c_label_str = r"real($\lambda_1$)"
    elif c_str == "dE":
        c = npzfile["T_xs"][AL_final_it, :, 0]
        cm = plt.cm.get_cmap("Greys")
        c_label_str = r"$d_{E,ss}$"
    elif c_str == "dP":
        c = npzfile["T_xs"][AL_final_it, :, 1]
        cm = plt.cm.get_cmap("Blues")
        c_label_str = r"$d_{P,ss}$"
    elif c_str == "dS":
        c = npzfile["T_xs"][AL_final_it, :, 2]
        cm = plt.cm.get_cmap("Reds")
        c_label_str = r"$d_{S,ss}$"
    elif c_str == "dV":
        c = npzfile["T_xs"][AL_final_it, :, 3]
        cm = plt.cm.get_cmap("Greens")
        c_label_str = r"$d_{V,ss}$"
    elif c_str == "ISN":
        # color by inhibition-stabilized-network status; requires running
        # the system's simulation in a TensorFlow session
        _Z = npzfile["Zs"][AL_final_it, :, :]
        n = _Z.shape[0]
        print("running simulations to figure out what steady states are.")
        Z = tf.placeholder(dtype=tf.float64, shape=(1, n, system.D))
        r_t = system.simulate(Z)
        with tf.Session() as sess:
            _r_t = sess.run(r_t, {Z: np.expand_dims(_Z, 0)})
        assert system.behavior["type"] == "difference"
        r_E_ss_1 = _r_t[-1, 0, :, 0, 0]
        r_E_ss_2 = _r_t[-1, 1, :, 0, 0]
        W_EE = system.fixed_params["W_EE"]
        # ISN criterion evaluated at both steady states
        ISN_stat = r_E_ss_1 > (1.0 / np.square(2 * W_EE))
        ISN_running = r_E_ss_2 > (1.0 / np.square(2 * W_EE))
        # encode the four (stat, running) combinations as 4 distinct levels
        c = np.zeros((n,))
        c[np.logical_and(ISN_stat, ISN_running)] = 1.0
        c[np.logical_and(np.logical_not(ISN_stat), ISN_running)] = 0.5
        c[np.logical_and(ISN_stat, np.logical_not(ISN_running))] = -0.5
        c[np.logical_and(np.logical_not(ISN_stat), np.logical_not(ISN_running))] = -1.0
        cm = plt.cm.get_cmap("rainbow")
        c_label_str = "ISN"
    elif c_str == "mu":
        c = npzfile["T_xs"][AL_final_it, :, 0]
        cm = plt.cm.get_cmap("Greys")
        c_label_str = r"$\mu$"
    elif c_str == "deltainf":
        c = npzfile["T_xs"][AL_final_it, :, 1]
        cm = plt.cm.get_cmap("Blues")
        c_label_str = r"$\Delta_\infty$"
    elif c_str == "deltaT":
        c = npzfile["T_xs"][AL_final_it, :, 2]
        cm = plt.cm.get_cmap("Reds")
        c_label_str = r"$\Delta_T$"
    elif c_str == "hubfreq":
        c = npzfile["T_xs"][AL_final_it, :, 0]
        cm = plt.cm.get_cmap("jet")
        c_label_str = r"$f_h$"
        vmin = 0.3
        vmax = 0.8
    else:
        # no coloring
        c = np.ones((npzfile["T_xs"].shape[1],))
        c_label_str = ""
    return c, c_label_str, cm, vmin, vmax
def dist_from_str(dist_str, f_str, system, npzfile, AL_final_it):
    """Pull a sample distribution from an opt_info archive and label its axes.

    dist_str selects the archive field ("Zs" or "T_xs"); f_str selects an
    optional embedding ("identity", "PCA", or "tSNE"). Returns
    (dist, axis_labels); unknown f_str raises NotImplementedError.
    """
    labels = []
    if dist_str in ["Zs", "T_xs"]:
        dist = npzfile[dist_str][AL_final_it, :, :]
    if f_str == "identity":
        if dist_str == "Zs":
            labels = system.z_labels
        if dist_str == "T_xs":
            labels = system.T_x_labels
    elif f_str == "PCA":
        dist, _evecs, _evals = PCA(dist, dist.shape[1])
        labels = ["PC%d" % d for d in range(1, system.D + 1)]
    elif f_str == "tSNE":
        # fixed seed so the embedding is reproducible
        np.random.seed(0)
        dist = TSNE(n_components=2).fit_transform(dist)
        labels = ["tSNE 1", "tSNE 2"]
    else:
        raise NotImplementedError()
    return dist, labels
def filter_outliers(c, num_stds=4):
    """Split the indices of c into (within-band, below-band, above-band).

    The band is mean +/- num_stds standard deviations, where the statistics
    are computed after discarding values with magnitude >= 1e6.
    """
    cap = 10e5
    finite_vals = c[(c < cap) & (c > -cap)]
    center = np.mean(finite_vals)
    spread = np.std(finite_vals)
    lo = center - num_stds * spread
    hi = center + num_stds * spread
    idx = np.arange(c.shape[0])
    below = idx[c < lo]
    above = idx[c > hi]
    inside = idx[(lo <= c) & (c <= hi)]
    return inside, below, above
def plot_var_ellipse(ax, x, y):
    """Draw a black one-standard-deviation ellipse at the sample mean of (x, y)."""
    center_x, center_y = np.mean(x), np.mean(y)
    spread_x, spread_y = np.std(x), np.std(y)
    return plot_ellipse(ax, center_x, center_y, spread_x, spread_y, "k")
def plot_target_ellipse(ax, i, j, system, mu):
    """Draw the red target ellipse for suff-stat dims (j, i) from the mean vector mu.

    Assumes mu packs [first moments, second moments], so the target std for
    dim d is sqrt(E[x_d^2] - E[x_d]^2).

    NOTE(review): mean_x/mean_y (and std_x/std_y) are only assigned on the
    branches below; an unrecognized behavior["type"] under "Linear2D" would
    leave them unbound — verify against callers.
    """
    mean_only = False
    if system.name == "Linear2D":
        if system.behavior["type"] == "oscillation":
            mean_x = mu[j]
            mean_y = mu[i]
            std_x = np.sqrt(mu[j + system.num_suff_stats // 2] - mu[j] ** 2)
            std_y = np.sqrt(mu[i + system.num_suff_stats // 2] - mu[i] ** 2)
    elif system.name in ["V1Circuit", "SCCircuit", "LowRankRNN"]:
        if system.behavior["type"] in ["difference", "standard", "struct_chaos"]:
            mean_x = mu[j]
            mean_y = mu[i]
            std_x = np.sqrt(mu[j + system.num_suff_stats // 2] - mu[j] ** 2)
            std_y = np.sqrt(mu[i + system.num_suff_stats // 2] - mu[i] ** 2)
        elif system.behavior["type"]:
            # any other truthy behavior type: plot the mean marker only
            mean_x = mu[j]
            mean_y = mu[i]
            mean_only = True
            std_x = None
            std_y = None
    else:
        raise NotImplementedError()
    plot_ellipse(ax, mean_x, mean_y, std_x, std_y, "r", mean_only)
def plot_ellipse(ax, mean_x, mean_y, std_x, std_y, c, mean_only=False):
    """Mark (mean_x, mean_y) with a '+' and, unless mean_only, trace the
    axis-aligned ellipse with radii (std_x, std_y) in color c.

    Returns the handle of the last artist created.
    """
    handle = ax.plot(mean_x, mean_y, c=c, marker="+", ms=20)
    if mean_only:
        return handle
    theta = 2 * np.pi * np.arange(0, 1, 0.01)
    handle = ax.plot(std_x * np.cos(theta) + mean_x,
                     std_y * np.sin(theta) + mean_y, c)
    return handle
def lin_reg_plot(x, y, xlabel="", ylabel="", pfname="images/temp.png", fontsize=30):
    """Scatter y vs x with a least-squares fit line, annotate r and p,
    save the figure to pfname, and show it."""
    gradient, intercept, r_value, p_value, std_err = stats.linregress(x, y)
    plt.figure()
    plt.scatter(x, y)
    xmin = np.min(x)
    xmax = np.max(x)
    ymin = np.min(y)
    ymax = np.max(y)
    # fit line evaluated on ~95 points spanning the x range
    x_ax = np.arange(xmin, xmax, (xmax - xmin) / 95.0)
    y_lin = intercept + gradient * x_ax
    plt.plot(x_ax, y_lin, "-r")
    # annotate the correlation near the top-left of the data extent
    plt.text(
        xmin + 0.15 * (xmax - xmin),
        ymin + 0.95 * (ymax - ymin),
        "r = %.2f, p = %.2E" % (r_value, p_value),
        fontsize=(fontsize - 10),
    )
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    ax = plt.gca()
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    plt.tight_layout()
    plt.savefig(pfname)
    plt.show()
def dsn_pairplots(
    model_dirs,
    dist_str,
    system,
    AL_final_its,
    D=None,
    f_str="identity",
    c_str="log_q_z",
    legendstrs=[],
    xlims=None,
    ylims=None,
    ticks=None,
    fontsize=14,
    line_mins=None,
    line_maxs=None,
    ellipses=False,
    tri=True,
    outlier_stds=2,
    cmaps=None,
    pfnames=None,
    cbarticks=None,
    figsize=(10, 10),
):
    """Pairplot the sampled distribution of each converged DSN model.

    For every model directory, loads opt_info.npz, extracts the requested
    distribution (dist_str/f_str) and coloring (c_str), and scatters all
    dimension pairs either as an upper triangle (tri=True) or a full grid.
    Returns (dists, cs, axs) — per-model samples and colors, plus the axes
    of the last figure created.

    Fix: the tri=False branch used `cmap=cm`, but `cm` is never defined in
    this function's scope (the colormap from coloring_from_str is
    discarded); it now uses the local `cmap` like the tri=True branch.
    """
    n_fnames = len(model_dirs)
    if D is None:
        if dist_str == "Zs":
            D = system.D
        elif dist_str == "T_xs":
            D = system.num_suff_stats
    # make sure D is greater than 1
    # if D < 2:
    #    print("Warning: D must be at least 2. Setting D = 2.")
    #    D = 2
    # If plotting ellipses, make sure D <= |T(x)|
    if system.behavior["type"] in ["means", "pvar"]:
        if ellipses and D > system.num_suff_stats:
            D = system.num_suff_stats
    else:
        if ellipses and D > system.num_suff_stats // 2:
            print("Warning: When plotting elipses, can only pairplot first moments.")
            print("Assuming T(x) = [first moments, second moments].")
            print("Setting D = |T(x)|/2.")
            D = system.num_suff_stats // 2
    # make all the legendstrs empty if no input
    if len(legendstrs) == 0:
        legendstrs = n_fnames * [""]
    # take the last aug lag iteration if haven't checked for convergence
    if len(AL_final_its) == 0:
        AL_final_its = n_fnames * [-1]
    figs = []
    dists = []
    cs = []
    models = []
    for k in range(n_fnames):
        fname = model_dirs[k] + "opt_info.npz"
        AL_final_it = AL_final_its[k]
        if AL_final_it is None:
            print("%s has not converged so not plotting." % legendstrs[k])
            continue
        try:
            npzfile = np.load(fname)
        except:
            continue
        dist, dist_label_strs = dist_from_str(
            dist_str, f_str, system, npzfile, AL_final_it
        )
        dists.append(dist)
        if D == 1:
            continue
        c, _, _, _, _, = coloring_from_str(c_str, system, npzfile, AL_final_it)
        cs.append(c)
        vmin = None
        vmax = None
        if cmaps is not None:
            cmap = cmaps[k]
        else:
            cmap = plt.cm.get_cmap("viridis")
        plot_inds, below_inds, over_inds = filter_outliers(c, outlier_stds)
        # color limits from the non-outlier samples
        if vmin is None and vmax is None:
            vmin = np.min(c[plot_inds])
            vmax = np.max(c[plot_inds])
        # levels = np.linspace(vmin, vmax, 20)
        if tri:
            # upper-triangle layout: one panel per unordered dimension pair
            fig, axs = plt.subplots(D - 1, D - 1, figsize=figsize)
            for i in range(D - 1):
                for j in range(1, D):
                    if D == 2:
                        ax = plt.gca()
                    else:
                        ax = axs[i, j - 1]
                    if j > i:
                        """
                        ax.scatter(
                            dist[below_inds, j],
                            dist[below_inds, i],
                            c="w",
                            edgecolors="k",
                            linewidths=0.25,
                        )
                        ax.scatter(
                            dist[over_inds, j],
                            dist[over_inds, i],
                            c="k",
                            edgecolors="k",
                            linewidths=0.25,
                        )
                        """
                        h = ax.scatter(
                            dist[plot_inds, j],
                            dist[plot_inds, i],
                            c=c[plot_inds],
                            cmap=cmap,
                            edgecolors="k",
                            linewidths=0.25,
                            vmin=vmin,
                            vmax=vmax,
                        )
                        # cm = plt.get_cmap('Blues')
                        # h = ax.tricontour(dist[plot_inds, j], dist[plot_inds, i], c[plot_inds], cmap=cm, levels=levels)
                        if line_mins is not None and line_maxs is not None:
                            ax.plot(
                                [line_mins[k][j], line_maxs[k][j]],
                                [line_mins[k][i], line_maxs[k][i]],
                                "m-",
                                lw=10,
                            )
                        if ellipses:
                            plot_target_ellipse(ax, i, j, system, system.mu)
                            plot_var_ellipse(ax, dist[:, j], dist[:, i])
                        if i == j - 1:
                            ax.set_xlabel(dist_label_strs[j], fontsize=fontsize)
                            ax.set_ylabel(dist_label_strs[i], fontsize=fontsize)
                        if xlims is not None:
                            if dist_str == "T_xs":
                                # limits are offsets around the target mean
                                xmin = system.mu[j] + xlims[0]
                                xmax = system.mu[j] + xlims[1]
                            else:
                                xmin = xlims[0]
                                xmax = xlims[1]
                            ax.set_xlim([xmin, xmax])
                            ax.plot([xmin, xmax], [0, 0], "--", c=[0.5, 0.5, 0.5])
                        if ylims is not None:
                            if dist_str == "T_xs":
                                ymin = system.mu[i] + ylims[0]
                                ymax = system.mu[i] + ylims[1]
                            else:
                                ymin = ylims[0]
                                ymax = ylims[1]
                            ax.set_ylim([ymin, ymax])
                            ax.plot([0, 0], [ymin, ymax], "--", c=[0.5, 0.5, 0.5])
                        if ticks is not None:
                            ax.set_xticks(ticks)
                            ax.set_xticklabels(ticks, fontsize=(fontsize - 5))
                            ax.set_yticks(ticks)
                            ax.set_yticklabels(ticks, fontsize=(fontsize - 5))
                    else:
                        ax.axis("off")
        else:
            # full D x D grid, outliers drawn in white/black
            fig, axs = plt.subplots(D, D, figsize=figsize)
            for i in range(D):
                for j in range(D):
                    ax = axs[i, j]
                    ax.scatter(
                        dist[below_inds, j],
                        dist[below_inds, i],
                        c="w",
                        edgecolors="k",
                        linewidths=0.25,
                    )
                    ax.scatter(
                        dist[over_inds, j],
                        dist[over_inds, i],
                        c="k",
                        edgecolors="k",
                        linewidths=0.25,
                    )
                    h = ax.scatter(
                        dist[plot_inds, j],
                        dist[plot_inds, i],
                        c=c[plot_inds],
                        cmap=cmap,  # BUG FIX: was `cm`, which is undefined in this scope
                        edgecolors="k",
                        linewidths=0.25,
                    )
                    if ellipses:
                        plot_target_ellipse(ax, i, j, system, system.mu)
                        plot_var_ellipse(ax, dist[:, j], dist[:, i])
                    if i == (D - 1):
                        ax.set_xlabel(dist_label_strs[j], fontsize=fontsize)
                    if j == 0:
                        ax.set_ylabel(dist_label_strs[i], fontsize=fontsize)
                    if xlims is not None:
                        ax.set_xlim(xlims)
                    if ylims is not None:
                        ax.set_ylim(ylims)
        # add the colorbar
        # if c is not None:
        #    fig.subplots_adjust(right=0.90)
        #    cbar_ax = fig.add_axes([0.92, 0.15, 0.04, 0.7])
        #    clb = fig.colorbar(h, cax=cb_ax, ticks=cbarticks)
        #    clb.ax.tick_params(labelsize=24)
        #    a = (0.8 / (D - 1)) / (0.95 / (D - 1))
        #    b = (D - 1) * 1.15
        #    cbar_ax.text(
        #        a, b-.1, c_label_str, {"fontsize": fontsize + 2}, transform=ax.transAxes
        #    )
        #    clb.ax.set_ylabel(c_label_str, rotation=270, fontsize=fontsize);
        plt.suptitle(legendstrs[k], fontsize=(fontsize + 15))
        if pfnames is not None:
            print("saving figure to ", pfnames[k])
            plt.savefig(pfnames[k])
        figs.append(fig)
    return dists, cs, axs
def pairplot(
    Z,
    dims,
    labels,
    origin=False,
    xlims=None,
    ylims=None,
    ticks=None,
    c=None,
    c_label=None,
    cmap=None,
    ss=False,
    fontsize=12,
    figsize=(12, 12),
    outlier_stds=10,
    pfname="images/temp.png",
):
    """Upper-triangle scatter pairplot of the columns of Z selected by dims.

    Optional per-sample colors c are outlier-filtered and shown with a
    colorbar; ss=True additionally draws line segments pairing consecutive
    samples. Returns (fig, axs).
    """
    num_dims = len(dims)
    # shuffle so overplotting order is unbiased
    rand_order = np.random.permutation(Z.shape[0])
    Z = Z[rand_order, :]
    if c is not None:
        c = c[rand_order]
        plot_inds, below_inds, over_inds = filter_outliers(c, outlier_stds)
    fig, axs = plt.subplots(num_dims - 1, num_dims - 1, figsize=figsize)
    for i in range(num_dims - 1):
        dim_i = dims[i]
        for j in range(1, num_dims):
            if num_dims == 2:
                ax = plt.gca()
            else:
                ax = axs[i, j - 1]
            if j > i:
                dim_j = dims[j]
                if (xlims is not None) and (ylims is not None) and origin:
                    # dashed grey axes through the origin
                    ax.plot(xlims, [0, 0], c=0.5 * np.ones(3), linestyle="--")
                    ax.plot([0, 0], ylims, c=0.5 * np.ones(3), linestyle="--")
                if ss:
                    # connect consecutive sample pairs with thin black segments
                    M = Z.shape[0]
                    ax.plot(
                        np.reshape(Z[:, dim_j].T, (M // 2, 2)),
                        np.reshape(Z[:, dim_i].T, (M // 2, 2)),
                        "k",
                        lw=0.2,
                    )
                if c is not None:
                    # below-band outliers in black, above-band in white
                    ax.scatter(
                        Z[below_inds, dim_j],
                        Z[below_inds, dim_i],
                        c="k",
                        edgecolors="k",
                        linewidths=0.25,
                    )
                    ax.scatter(
                        Z[over_inds, dim_j],
                        Z[over_inds, dim_i],
                        c="w",
                        edgecolors="k",
                        linewidths=0.25,
                    )
                    h = ax.scatter(
                        Z[plot_inds, dim_j],
                        Z[plot_inds, dim_i],
                        c=c[plot_inds],
                        cmap=cmap,
                        edgecolors="k",
                        linewidths=0.25,
                    )
                else:
                    h = ax.scatter(
                        Z[:, dim_j], Z[:, dim_i], edgecolors="k", linewidths=0.25, s=2
                    )
                if i + 1 == j:
                    # only diagonal-adjacent panels carry axis labels
                    ax.set_xlabel(labels[j], fontsize=fontsize)
                    ax.set_ylabel(labels[i], fontsize=fontsize)
                else:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                if ticks is not None:
                    ax.set_xticks(ticks, fontsize=fontsize)
                    ax.set_yticks(ticks, fontsize=fontsize)
                if xlims is not None:
                    ax.set_xlim(xlims)
                if ylims is not None:
                    ax.set_ylim(ylims)
            else:
                ax.axis("off")
    if c is not None:
        # shared colorbar on the right edge of the figure
        fig.subplots_adjust(right=0.90)
        cbar_ax = fig.add_axes([0.92, 0.15, 0.04, 0.7])
        clb = fig.colorbar(h, cax=cbar_ax)
        a = (1.01 / (num_dims - 1)) / (0.9 / (num_dims - 1))
        b = (num_dims - 1) * 1.15
        plt.text(a, b, c_label, {"fontsize": fontsize}, transform=ax.transAxes)
    # plt.savefig(pfname)
    return fig, axs
def contour_pairplot(
    Z,
    c,
    dims,
    labels,
    origin=False,
    xlims=None,
    ylims=None,
    ticks=None,
    c_label=None,
    cmap=None,
    fontsize=12,
    figsize=(12, 12),
    N=20,
    alpha=1.0,
    levels=None,
    pfname="images/temp.png",
    fig=None,
    axs=None,
):
    """Upper-triangle pairplot of Z using filled tricontours of the values c.

    Can draw onto an existing (fig, axs) pair so several distributions can
    be layered. Returns (fig, axs).
    """
    num_dims = len(dims)
    # shuffle sample order (affects tricontour triangulation input order)
    rand_order = np.random.permutation(Z.shape[0])
    Z = Z[rand_order, :]
    if (fig is None) or (axs is None):
        fig, axs = plt.subplots(num_dims - 1, num_dims - 1, figsize=figsize)
    for i in range(num_dims - 1):
        dim_i = dims[i]
        for j in range(1, num_dims):
            if num_dims == 2:
                ax = plt.gca()
            else:
                ax = axs[i, j - 1]
            if j > i:
                dim_j = dims[j]
                if (xlims is not None) and (ylims is not None) and origin:
                    # dashed grey axes through the origin
                    ax.plot(xlims, [0, 0], c=0.5 * np.ones(3), linestyle="--")
                    ax.plot([0, 0], ylims, c=0.5 * np.ones(3), linestyle="--")
                h = ax.tricontourf(
                    Z[:, dim_j],
                    Z[:, dim_i],
                    c,
                    N,
                    alpha=alpha,
                    cmap=cmap,
                    levels=levels,
                )
                if i + 1 == j:
                    ax.set_xlabel(labels[j], fontsize=fontsize)
                    ax.set_ylabel(labels[i], fontsize=fontsize)
                else:
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                if ticks is not None:
                    ax.set_xticks(ticks)
                    ax.set_yticks(ticks)
                if xlims is not None:
                    ax.set_xlim(xlims)
                if ylims is not None:
                    ax.set_ylim(ylims)
            else:
                ax.axis("off")
    if c is not None:
        # shared colorbar on the right edge of the figure
        fig.subplots_adjust(right=0.90)
        cbar_ax = fig.add_axes([0.92, 0.15, 0.04, 0.7])
        clb = fig.colorbar(h, cax=cbar_ax)
        a = (1.01 / (num_dims - 1)) / (0.9 / (num_dims - 1))
        b = (num_dims - 1) * 1.15
        plt.text(a, b, c_label, {"fontsize": fontsize}, transform=ax.transAxes)
    # plt.savefig(pfname)
    return fig, axs
def imshow_pairplot(
    c,
    dims,
    labels,
    lb,
    ub,
    a,
    b,
    ticks=None,
    c_label=None,
    cmap=None,
    fontsize=12,
    figsize=(12, 12),
    alpha=1.0,
    levels=None,
    pfname="images/temp.png",
    fig=None,
    axs=None,
    vmins=None,
    q=75,
):
    """Upper-triangle pairplot of a dense D-dim mesh c shown as 2-D images.

    c is a K^D grid of values sampled over [a, b] per dimension; for each
    dimension pair the remaining axes are averaged out, the marginal image
    is embedded in the display window [lb, ub], and padding is filled with
    the q-th percentile. Returns (fig, axs).
    """
    # average c over every axis except (ax1, ax2)
    def marginalize_mesh(c, ax1, ax2):
        D = len(c.shape)
        for i in range(D - 1, -1, -1):
            if not (i == ax1 or i == ax2):
                c = np.mean(c, i)
        return c
    num_dims = len(dims)
    K = c.shape[0]
    if (fig is None) or (axs is None):
        fig, axs = plt.subplots(num_dims - 1, num_dims - 1, figsize=figsize)
    for i in range(num_dims - 1):
        dim_i = dims[i]
        # size of the display canvas in pixels, and where the mesh starts in it
        pix_i = int(K * (ub[i] - lb[i]) / (b[i] - a[i]))
        I_start_i = int(pix_i * ((a[i] - lb[i]) / (ub[i] - lb[i])))
        for j in range(1, num_dims):
            pix_j = int(K * (ub[j] - lb[j]) / (b[j] - a[j]))
            I_start_j = int(pix_j * ((a[j] - lb[j]) / (ub[j] - lb[j])))
            if num_dims == 2:
                ax = plt.gca()
            else:
                ax = axs[i, j - 1]
            if j > i:
                dim_j = dims[j]
                c_ij = marginalize_mesh(c, i, j)
                print(np.max(c_ij))
                # background filled with the q-th percentile of the marginal
                vmin = np.percentile(c_ij, q)
                I = vmin * np.ones((pix_i, pix_j))
                I[I_start_i : (I_start_i + K), I_start_j : (I_start_j + K)] = c_ij
                extent = [lb[j], ub[j], lb[i], ub[i]]
                ax.imshow(
                    I,
                    extent=extent,
                    cmap=cmap,
                    alpha=alpha,
                    origin="lower",
                    vmin=vmin,
                    interpolation="bilinear",
                )
                if i + 1 == j:
                    ax.set_xlabel(labels[j], fontsize=fontsize)
                    ax.set_ylabel(labels[i], fontsize=fontsize)
                # else:
                #    ax.set_xticklabels([])
                #    ax.set_yticklabels([])
                # if ticks is not None:
                #    ax.set_xticks(ticks)
                #    ax.set_yticks(ticks)
            else:
                ax.axis("off")
    """
    if c is not None:
        fig.subplots_adjust(right=0.90)
        cbar_ax = fig.add_axes([0.92, 0.15, 0.04, 0.7])
        clb = fig.colorbar(h, cax=cbar_ax)
        a = (1.01 / (num_dims - 1)) / (0.9 / (num_dims - 1))
        b = (num_dims - 1) * 1.15
        plt.text(a, b, c_label, {"fontsize": fontsize}, transform=ax.transAxes)
    #plt.savefig(pfname)"""
    return fig, axs
def dsn_tSNE(
    fnames,
    dist_str,
    c_str,
    system,
    legendstrs=[],
    AL_final_its=[],
    fontsize=14,
    pfname="images/temp.png",
):
    """Scatter a 2-D tSNE embedding of each model's samples, colored by c_str.

    Saves each figure to pfname and returns the list of figures.
    """
    n_fnames = len(fnames)
    # take the last aug lag iteration if haven't checked for convergence
    if len(AL_final_its) == 0:
        AL_final_its = n_fnames * [-1]
    figsize = (8, 8)
    figs = []
    for k in range(n_fnames):
        fname = fnames[k]
        AL_final_it = AL_final_its[k]
        npzfile = np.load(fname)
        # NOTE(review): system=None is passed here; the tSNE branch of
        # dist_from_str does not use it, so this is safe for f_str="tSNE".
        dist, dist_label_strs = dist_from_str(
            dist_str, "tSNE", None, npzfile, AL_final_it
        )
        c, c_label_str, cm, _, _ = coloring_from_str(
            c_str, system, npzfile, AL_final_it
        )
        if AL_final_it is None:
            print("%s has not converged so not plotting." % legendstrs[k])
            continue
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(111)
        h = plt.scatter(
            dist[:, 0], dist[:, 1], c=c, cmap=cm, edgecolors="k", linewidths=0.25
        )
        plt.xlabel(dist_label_strs[0], fontsize=fontsize)
        plt.ylabel(dist_label_strs[1], fontsize=fontsize)
        # add the colorbar
        if c is not None:
            fig.subplots_adjust(right=0.90)
            cbar_ax = fig.add_axes([0.92, 0.15, 0.04, 0.7])
            clb = fig.colorbar(h, cax=cbar_ax)
            plt.text(-0.2, 1.02 * np.max(c), c_label_str, {"fontsize": fontsize})
            # clb.ax.set_ylabel(c_label_str, rotation=270, fontsize=fontsize);
        plt.suptitle(legendstrs[k], fontsize=fontsize)
        plt.savefig(pfname)
        figs.append(fig)
    return figs
def dsn_corrhists(fnames, dist_str, system, D, AL_final_its):
    """Histogram pairwise correlations (and their r^2) across DSN models."""
    rs, r2s, labels = dsn_correlations(fnames, dist_str, system, D, AL_final_its)
    return [
        pairhists(rs, labels, "correlation hists"),
        pairhists(r2s, labels, r"$r^2$ hists"),
    ]
def pairhists(x, dist_label_strs, title_str="", fontsize=16):
    """Draw a DxD grid of histograms, one per (row, col) pair of x's last axes."""
    D = x.shape[1]
    fig, axs = plt.subplots(D, D, figsize=(12, 12))
    # First pass: draw every histogram, collecting off-diagonal bin counts so
    # all panels can later share a common y-limit.
    offdiag_counts = []
    for row in range(D):
        for col in range(D):
            counts, _, _ = axs[row][col].hist(x[:, col, row])
            if row != col:
                offdiag_counts.append(counts)
    max_count = np.max(np.array(offdiag_counts))
    # Second pass: harmonize axis limits and label only the grid's outer edge.
    for row in range(D):
        for col in range(D):
            panel = axs[row][col]
            panel.set_xlim([-1, 1])
            panel.set_ylim([0, max_count])
            if row == D - 1:
                panel.set_xlabel(dist_label_strs[col], fontsize=fontsize)
            if col == 0:
                panel.set_ylabel(dist_label_strs[row], fontsize=fontsize)
    plt.suptitle(title_str, fontsize=fontsize + 2)
    plt.show()
    return fig
def dsn_correlations(fnames, dist_str, system, D, AL_final_its):
    """Compute per-file pairwise linear-regression r and r^2 over D dims.

    Parameters
    ----------
    fnames : list of str
        Paths to .npz result files.
    dist_str : str
        Key selecting the distribution data (see dist_from_str).
    system : object
        System descriptor forwarded to dist_from_str.
    D : int
        Number of dimensions correlated pairwise.
    AL_final_its : list of int or None
        Converged iteration per file; a None entry marks an unconverged file
        whose matrix entries are filled with NaN.

    Returns
    -------
    rs : (n_files, D, D) ndarray
        Pearson r values.
    r2s : (n_files, D, D) ndarray
        Squared r values.
    dist_label_strs : list of str
        Axis labels from the last converged file (empty if none converged).
    """
    n_fnames = len(fnames)
    rs = np.zeros((n_fnames, D, D))
    r2s = np.zeros((n_fnames, D, D))
    # Fix: initialize so the return is well-defined even when every file is
    # unconverged (previously this raised UnboundLocalError).
    dist_label_strs = []
    for k in range(n_fnames):
        fname = fnames[k]
        AL_final_it = AL_final_its[k]
        if AL_final_it is None:
            rs[k, :, :] = np.nan
            r2s[k, :, :] = np.nan
            continue
        npzfile = np.load(fname)
        dist, dist_label_strs = dist_from_str(
            dist_str, "identity", system, npzfile, AL_final_it
        )
        for i in range(D):
            for j in range(D):
                # Only r_value is used; slope/intercept/p/stderr are discarded.
                slope, intercept, r_value, p_value, stderr = scipy.stats.linregress(
                    dist[:, j], dist[:, i]
                )
                rs[k, i, j] = r_value
                r2s[k, i, j] = r_value ** 2
    return rs, r2s, dist_label_strs
def PCA(data, dims_rescaled_data=2):
    """Project data onto its leading principal components.

    Parameters
    ----------
    data : (m, n) array_like
        Data matrix with m samples and n features. The input is NOT modified
        (the original implementation mean-centered it in place via
        ``data -= data.mean(axis=0)``, a surprising caller-visible side
        effect; a centered copy is used instead).
    dims_rescaled_data : int, optional
        Number of principal components to retain.

    Returns
    -------
    transformed : (m, dims_rescaled_data) ndarray
        Data projected onto the leading eigenvectors.
    evals : (n,) ndarray
        Covariance eigenvalues, sorted in decreasing order.
    evecs : (n, dims_rescaled_data) ndarray
        The corresponding leading eigenvectors (as columns).
    """
    import numpy as NP
    from scipy import linalg as LA

    m, n = data.shape
    # Mean-center a copy of the data (fix: no in-place mutation of the input).
    data = NP.asarray(data) - NP.mean(data, axis=0)
    # Covariance of the features (rowvar=False: columns are variables).
    R = NP.cov(data, rowvar=False)
    # Use 'eigh' rather than 'eig' since R is symmetric;
    # the performance gain is substantial.
    evals, evecs = LA.eigh(R)
    # Sort eigenvalues in decreasing order and reorder eigenvectors to match.
    idx = NP.argsort(evals)[::-1]
    evecs = evecs[:, idx]
    evals = evals[idx]
    # Keep only the first dims_rescaled_data eigenvectors.
    evecs = evecs[:, :dims_rescaled_data]
    # Project the centered data onto the retained eigenvectors.
    return NP.dot(evecs.T, data.T).T, evals, evecs
def get_default_axlims(sysname):
    """Return default (xlims, ylims) plot limits for a known system name.

    Fixes two defects in the original elif chain: the final ``return`` sat
    inside the SCCircuit branch, so "STGCircuit" and "V1Circuit" set their
    limits but fell off the end and returned None; unknown names also
    silently returned None. Unknown names now raise ValueError.

    Parameters
    ----------
    sysname : str
        One of "Linear2D", "STGCircuit", "V1Circuit", "SCCircuit".

    Returns
    -------
    (list, list)
        The ([xmin, xmax], [ymin, ymax]) axis limits.

    Raises
    ------
    ValueError
        If sysname is not a recognized system name.
    """
    limits = {
        "Linear2D": ([-15, 15], [-15, 15]),
        "STGCircuit": ([0, 20], [0, 20]),
        "V1Circuit": ([0, 5], [0, 5]),
        "SCCircuit": ([-5, 5], [-5, 5]),
    }
    if sysname not in limits:
        raise ValueError("Unknown system name: %s" % sysname)
    return limits[sysname]
def plot_V1_vec(v, label, save_fname=None):
    """Visualize an 8-element V1-circuit parameter vector as a 4x4 grid.

    Each cell shows a '+' (non-negative) or '-' (negative) marker whose area
    scales with the magnitude of the corresponding entry of v; cells with no
    associated parameter stay empty. A bracket-style frame is drawn around
    the grid and the plot is titled with `label`.

    Parameters
    ----------
    v : sequence of float
        Parameter vector; entries are mapped to grid cells via `vinds` below.
    label : str
        Title drawn above the grid.
    save_fname : str or None, optional
        If given, the figure is saved there (transparent background) before
        being shown.

    Returns
    -------
    None
    """
    black = "k"
    # Per-column marker colors (RGB triples normalized to [0, 1]).
    blue = np.array([71, 105, 160]) / 255.0
    red = np.array([175, 58, 49]) / 255.0
    green = np.array([39, 124, 49]) / 255.0
    c = [
        ["k", blue, red, green],
        ["k", blue, red, green],
        ["k", blue, red, green],
        ["k", blue, red, green],
    ]
    space = 0.01  # grid spacing in axis units
    plt.figure()
    x = space * np.arange(0, 4)
    y = space * np.arange(3, -1, -1)  # reversed so row 0 sits at the top
    X, Y = np.meshgrid(x, y)
    # NOTE(review): red/blue/green are rebound here, shadowing the arrays
    # above; only the `c` table built earlier keeps the original colors.
    red = [0.8, 0, 0]
    blue = [0, 0, 0.8]
    green = [0.0, 0.8, 0.0]
    # Flattened 4x4 grid of marker magnitudes drawn from v (0.0 = empty cell).
    s = np.array(
        [
            0.0,
            v[1],
            v[4],
            0.0,
            v[0],
            v[2],
            v[5],
            0.0,
            v[0],
            0.0,
            0.0,
            v[7],
            v[0],
            v[3],
            v[6],
            0.0,
        ]
    )
    # Index of the v entry backing each grid cell (None = empty cell).
    vinds = [None, 1, 4, None, 0, 2, 5, None, 0, None, None, 7, 0, 3, 6, None]
    for ii in range(4):
        for jj in range(4):
            ind = 4 * ii + jj
            # Negative parameters get a '-' marker, everything else a '+'.
            if vinds[ind] is not None and v[vinds[ind]] < 0.0:
                marker = "_"
            else:
                marker = "+"
            plt.scatter(
                X[ii, jj],
                Y[ii, jj],
                marker=marker,
                c=c[ii][jj],
                s=2500.0 * abs(s[ind]),  # marker area proportional to |v|
                linewidth=5.0,
            )
    # Draw the bracket-style frame: left bar with ticks, then the right bar.
    lw = 8
    plt.plot([-space / 2, -space / 2], [-space / 2, np.max(x) + space / 2], "k-", lw=lw)
    plt.plot([-space / 2, -space / 8], [-space / 2, -space / 2], "k-", lw=lw)
    plt.plot(
        [-space / 2, -space / 8],
        [np.max(x) + space / 2, np.max(x) + space / 2],
        "k-",
        lw=lw,
    )
    plt.plot(
        [np.max(y) + space / 2, np.max(y) + space / 2],
        [-space / 2, np.max(x) + space / 2],
        "k-",
        lw=lw,
    )
    plt.plot(
        [np.max(y) + space / 2, np.max(y) + space / 8],
        [-space / 2, -space / 2],
        "k-",
        lw=lw,
    )
    plt.plot(
        [np.max(y) + space / 2, np.max(y) + space / 8],
        [np.max(x) + space / 2, np.max(x) + space / 2],
        "k-",
        lw=lw,
    )
    # Strip all axis decoration so only the grid and frame remain visible.
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim([-space / 2, np.max(y) + space / 2])
    ax.set_ylim([-space / 2, np.max(y) + space / 2])
    ax.set_title(label, fontsize=30)
    if save_fname is not None:
        plt.savefig(save_fname, transparent=True)
    plt.show()
    return None
def make_training_movie(model_dir, system, step, save_fname="temp", axis_lims=None):
    """Render an mp4 animation of DSN optimization from saved opt_info.npz.

    The figure combines: pairwise scatter panels of sampled Z (sized by
    log q(z), colored by mixture component), an optional alpha bar chart
    (when K > 1), an entropy trace, and one panel per sufficient statistic
    tracking its mean against the target system.mu.

    Parameters
    ----------
    model_dir : str
        Directory containing "opt_info.npz" (trailing separator expected,
        since the filename is appended by string concatenation).
    system : object
        Provides name, z_labels, T_x_labels, mu, num_suff_stats.
    step : int
        Number of recorded time-steps advanced per movie frame.
    save_fname : str, optional
        Output path stem; ".mp4" is appended.
    axis_lims : (xlims, ylims) or None, optional
        Scatter-panel limits; defaults come from get_default_axlims.

    Returns
    -------
    None
    """
    fname = model_dir + "opt_info.npz"
    npzfile = np.load(fname)
    # Unpack the optimization log: entropy traces, samples, statistics, etc.
    Hs = npzfile["Hs"]
    base_Hs = npzfile["base_Hs"]
    sum_log_det_Hs = npzfile["sum_log_det_Hs"]
    Zs = npzfile["Zs"]
    mean_T_xs = npzfile["mean_T_xs"]
    log_q_zs = npzfile["log_q_zs"]
    log_base_q_zs = npzfile["log_base_q_zs"]
    Cs = npzfile["Cs"]
    alphas = npzfile["alphas"]
    check_rate = npzfile["check_rate"]
    epoch_inds = npzfile["epoch_inds"]
    # Index of the last recorded check, derived from the final iteration.
    last_ind = npzfile["it"] // check_rate
    if axis_lims is not None:
        xlims, ylims = axis_lims
    else:
        xlims, ylims = get_default_axlims(system.name)
    cm = plt.get_cmap("tab20")
    scale = 100  # marker-size scale for the scatter panels
    # Collapse one-hot component assignments to integer labels.
    Cs = np.argmax(Cs, 2)
    def size_renorm(x, scale=30):
        # Min-max normalize x into [0, scale] for use as marker sizes.
        y = x - np.min(x)
        y = y / np.max(y)
        return scale * y
    colors = [[0.0, 0.3, 0.6], [0.0, 0.6, 0.3], [0.6, 0.0, 0.3]]
    M = 100  # number of samples shown per scatter panel
    fontsize = 20
    Writer = animation.writers["ffmpeg"]
    writer = Writer(fps=30, metadata=dict(artist="Me"), bitrate=1800)
    K = alphas.shape[1]
    N, _, D = Zs.shape
    # Reorder to (samples, checkpoints, dims) for per-frame slicing below.
    Zs = np.transpose(Zs, [1, 0, 2])
    # Grid layout: a fixed 3x2 for D == 2, otherwise grow with dimensionality.
    if D == 2:
        fig, axs = plt.subplots(3, 2, figsize=(10, 8))
    else:
        fig, axs = plt.subplots(D + 1, D - 1, figsize=(14, 12))
    scats = []
    # Map integer component labels into [0, 1) for colormap lookup.
    Cs = Cs.astype(float) / float(K)
    # Build the upper-triangle pairwise scatter panels for frame 0.
    for i in range(D - 1):
        for j in range(1, D):
            if D == 2:
                ax = axs[2, 1]
            else:
                ax = axs[i + 2, j - 1]
            if j > i:
                s = size_renorm(log_q_zs[0, :M], scale)
                scats.append(
                    ax.scatter(
                        Zs[:M, 0, j],
                        Zs[:M, 0, i],
                        s=s,
                        c=cm(Cs[0, :M]),
                        edgecolors="k",
                        linewidths=0.25,
                    )
                )
                scats[-1].set_cmap(cm)
                ax.set_xlim(xlims)
                ax.set_ylim(ylims)
            elif (i == (D - 2)) and j == 1:
                pass  # keep this axis (used elsewhere in the layout)
            else:
                ax.axis("off")
            if i == j - 1:
                # Label only the diagonal-adjacent panels.
                ax.set_xlabel(system.z_labels[j], fontsize=fontsize)
                ax.set_ylabel(system.z_labels[i], fontsize=fontsize)
    # Mixture-weight bar chart, only meaningful with more than one component.
    if K > 1:
        if D == 2:
            bar_ax = axs[2, 0]
        else:
            bar_ax = axs[-1, 0]
        rect_colors = np.arange(K) / float(K)
        bar_rects = bar_ax.bar(np.arange(1, K + 1), alphas[0], color=cm(rect_colors))
        bar_ax.set_ylim([0, 3.0 / K])
        bar_ax.set_xlabel("k")
        bar_ax.set_ylabel(r"$\alpha_k$")
        bar_ax.spines["right"].set_visible(False)
        bar_ax.spines["top"].set_visible(False)
    # plot entropy
    # NOTE(review): `alpha` and `nu` are not defined in this function's scope;
    # unless they exist as module globals this line raises NameError — they
    # look like they should be parameters. Confirm against callers.
    converge_dict = {'tol':None, 'tol_inds':[], 'alpha':alpha, 'nu':nu}
    n_suff_stats = system.num_suff_stats  # NOTE(review): unused (ncons below is used instead)
    AL_final_its, ME_its, MEs = assess_constraints(model_dir, converge_dict)
    # Iteration number at each recorded checkpoint.
    iterations = np.arange(0, check_rate * Hs.shape[0], check_rate)
    if D == 2:
        H_ax = plt.subplot(3, 1, 1)
    else:
        H_ax = plt.subplot(D + 2, 1, 1)
    # Entropy traces: total H, base-distribution H, and sum-log-det-Jacobian H.
    lines = H_ax.plot(iterations, Hs, lw=1, c=colors[0])
    lines += H_ax.plot(iterations, base_Hs, lw=1, c=colors[1])
    lines += H_ax.plot(iterations, sum_log_det_Hs, lw=1, c=colors[2])
    H_ax.legend(["H (entropy)", "base H", "SLDJ H"])
    font_fac = 0.6
    H_ax.spines["right"].set_visible(False)
    H_ax.spines["top"].set_visible(False)
    H_ax.set_xlabel("iterations", fontsize=fontsize * font_fac)
    H_ax.set_ylabel("entropy (H)", fontsize=fontsize * font_fac)
    # Mark the convergence iteration with a vertical dashed line, if known.
    if AL_final_its[0] is not None:
        conv_it = iterations[AL_final_its[0]]
        H_ax.plot([conv_it, conv_it], [np.min(Hs), np.max(Hs)], "k--")
    msize = 10
    # Moving markers that track the current frame along each entropy trace.
    pts = H_ax.plot(iterations[0], Hs[0], "o", c=colors[0], markersize=msize)
    pts += H_ax.plot(iterations[0], base_Hs[0], "o", c=colors[1], markersize=msize)
    pts += H_ax.plot(
        iterations[0], sum_log_det_Hs[0], "o", c=colors[2], markersize=msize
    )
    ncons = system.num_suff_stats
    con_pts = []
    # One panel per sufficient statistic: trace vs. its target value mu[i].
    for i in range(ncons):
        if D == 2:
            con_ax = plt.subplot(3, ncons, ncons + i + 1)
        else:
            con_ax = plt.subplot(D + 2, ncons, ncons + i + 1)
        lines = con_ax.plot(iterations, mean_T_xs[:, i], lw=1, c=colors[0])
        con_ax.plot([0, iterations[-1]], [system.mu[i], system.mu[i]], "k--")
        con_ax.spines["right"].set_visible(False)
        con_ax.spines["top"].set_visible(False)
        con_ax.set_xlabel("iterations", fontsize=fontsize * font_fac)
        con_ax.set_ylabel(system.T_x_labels[i], fontsize=fontsize * font_fac)
        # Zoom the y-axis to a few median-absolute-errors around the target,
        # computed over the second half of training.
        yfac = 5.0
        mean_abs_err = np.median(
            np.abs(mean_T_xs[(last_ind // 2) : last_ind, i] - system.mu[i])
        )
        con_ax.set_ylim(
            [system.mu[i] - yfac * mean_abs_err, system.mu[i] + yfac * mean_abs_err]
        )
        con_pts.append(
            con_ax.plot(
                iterations[0], mean_T_xs[0, i], "o", c=colors[0], markersize=msize
            )
        )
    def animate(i):
        # we'll step k time-steps per frame.
        i = (step * i) % N
        print("i", i)
        ind = 0
        # Update each pairwise scatter with this frame's samples.
        for ii in range(D - 1):
            for j in range(1, D):
                if j > ii:
                    s = size_renorm(log_q_zs[i, :M], scale)
                    scat = scats[ind]
                    scat.set_offsets(np.stack((Zs[:M, i, j], Zs[:M, i, ii]), 1))
                    scat.set_color(cm(Cs[i, :M]))
                    scat.set_sizes(s)
                    ind += 1
        # Show which augmented-Lagrangian epoch this frame falls in.
        AL_it = np.sum(epoch_inds < i * check_rate)
        H_ax.set_title("AL=%d" % AL_it)
        if K > 1:
            j = 0
            for rect in bar_rects:
                rect.set_height(alphas[i, j])
                j += 1
        print(Hs.shape, Zs.shape)
        # Translate the frame index into a checkpoint index when the entropy
        # log and iteration grid have different lengths.
        if not Hs.shape[0] == iterations.shape[0]:
            ind = epoch_inds[i] // check_rate
        else:
            ind = i
        pts[0].set_data(iterations[ind], Hs[ind])
        pts[1].set_data(iterations[ind], base_Hs[ind])
        pts[2].set_data(iterations[ind], sum_log_det_Hs[ind])
        for j in range(ncons):
            con_pts[j][0].set_data(iterations[ind], mean_T_xs[ind, j])
        fig.canvas.draw()
        return lines + scats
    # instantiate the animator.
    frames = (N - 1) // step
    print("# frames", frames)
    anim = animation.FuncAnimation(fig, animate, frames=frames, interval=30, blit=True)
    print("Making video.")
    start_time = time.time()
    anim.save("%s.mp4" % save_fname, writer=writer)
    end_time = time.time()
    print("Video complete after %.3f seconds." % (end_time - start_time))
    return None
def get_log_q_z_mesh(Z_grid, W, Z_input, Z_INV, log_q_Z, sess, feed_dict, K):
    """Evaluate the flow's log-density on a grid and reshape to a D-dim mesh."""
    num_pts = Z_grid.shape[1]
    num_dims = Z_grid.shape[2]
    # Pass 1: invert the grid points through the flow into base-space samples.
    feed_dict.update({Z_input: Z_grid, W: np.zeros((1, num_pts, num_dims))})
    inverted = sess.run(Z_INV, feed_dict)
    # Pass 2: feed the inverted samples back through to read off log q(z).
    feed_dict.update({W: inverted})
    log_density = sess.run(log_q_Z, feed_dict)
    # Fold the flat evaluation into a (K, ..., K) mesh, one axis per dimension.
    return np.reshape(log_density[0], num_dims * (K,))
|
import os
from qtgui.cli import run_gui
if __name__ == '__main__':
    # Resolve scenario files relative to this script's own directory.
    base_dir = os.path.dirname(__file__)
    # Alternative scenario: run_gui(os.path.join(base_dir, "simple_scenario.py"))
    run_gui(os.path.join(base_dir, "finlandia_talo.py"))
|
from AND_Gate import AND
from OR_Gate import OR
from NAND_Gate import NAND
def XOR(a1, a2):
    """Exclusive OR built from the NAND, OR and AND primitive gates."""
    # a1 XOR a2 == (a1 NAND a2) AND (a1 OR a2)
    return AND(NAND(a1, a2), OR(a1, a2))
if __name__ == '__main__':
    # Print the XOR truth table over all four input combinations.
    for a, b in [(0, 0), (1, 0), (0, 1), (1, 1)]:
        print(str((a, b)) + " -> " + str(XOR(a, b)))
|
'''30 sept 2018 @Mkchaudhary'''
#Nonuniform Bspline curve using python opengl
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from math import *
from time import *
import sys
t=[0 for i in range(20)]
def init():
    """One-time GL state setup: clear color, draw color, projection, point size."""
    glClearColor(0.0,1.0,1.0,0.0)  # cyan background
    glColor3f(1.0,0.0,0.0)  # draw in red
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glPointSize(3.0)
    gluOrtho2D(0,599,0,599)  # 2D orthographic projection over the 600x600 window
def setPixel(xcoordinate,ycoordinate):
    """Plot a single point at the given window coordinates and flush GL."""
    glBegin(GL_POINTS)
    glVertex2f(xcoordinate,ycoordinate)
    glEnd()
    glFlush()  # force immediate rendering (single-buffered window)
def read_controlpoint():
    """Prompt for the curve order and control points, storing them in globals.

    Sets globals: no_controlpoint (int), k (curve order, int), and px/py
    (control-point coordinate lists). Each entered point is drawn immediately
    via setPixel.
    """
    global px, py, no_controlpoint, k
    # Fix: explicit conversions make this work on Python 3, where input()
    # returns a string (the original relied on Python 2's eval-ing input()).
    no_controlpoint = int(input("Enter no of control points: "))
    k = int(input("Enter order of curve: "))
    px = [0 for x in range(no_controlpoint)]
    py = [0 for y in range(no_controlpoint)]
    for i in range(no_controlpoint):
        # float: coordinates may be non-integer; glVertex2f takes floats.
        px[i] = float(input("Enter control point_x: "))
        py[i] = float(input("Enter control point_y: "))
        setPixel(px[i], py[i])
def calc_knot_value():
    """Fill the global knot vector t for an open-uniform B-spline."""
    n = no_controlpoint - 1
    for i in range(n + k + 1):
        # Open-uniform knots: k repeated zeros, a unit ramp, then repeated ends.
        if i < k:
            t[i] = 0
        elif i <= n:
            t[i] = i - k + 1
        else:
            t[i] = n - k + 2
def bsplinefun(i, k, u):
    """Cox-de Boor recursion: value of the B-spline basis N_{i,k} at parameter u."""
    if k == 1:
        # Base case: indicator of the knot span [t[i], t[i+1]].
        return 1 if t[i] <= u <= t[i + 1] else 0
    value = 0
    # Each term is skipped when its knot span is degenerate (zero width).
    left_span = t[i + k - 1] - t[i]
    if left_span != 0:
        value += float((u - t[i]) * bsplinefun(i, k - 1, u)) / left_span
    right_span = t[i + k] - t[i + 1]
    if right_span != 0:
        value += float((t[i + k] - u) * bsplinefun(i + 1, k - 1, u)) / right_span
    return value
def Bspline():
    """Sample the B-spline curve over its valid parameter range and plot it."""
    n = no_controlpoint - 1
    calc_knot_value()
    u = 0.0
    # Valid parameter range for an open-uniform curve is [0, n - k + 2].
    while u <= n - k + 2:
        x = 0.0
        y = 0.0
        # Curve point = sum of control points weighted by the basis functions.
        for i in range(no_controlpoint):
            basis = bsplinefun(i, k, u)
            x += basis * px[i]
            y += basis * py[i]
        setPixel(x, y)
        u += 0.0005
def draw_Bspline_curve():
    """Interactive loop: read control points, draw the curve, repeat until 0."""
    while True:
        read_controlpoint()
        Bspline()
        print("Enter any decimal to continue")
        check = int(input("Enter 0 to exit: "))
        if check == 0:
            # Pause so the user can see the final curve before the window closes.
            sleep(5)
            sys.exit()
def Display():
    """GLUT display callback: clear the window, then run the interactive loop."""
    glClear(GL_COLOR_BUFFER_BIT)
    draw_Bspline_curve()
def main():
    """Create the GLUT window, register callbacks, and enter the GL main loop."""
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
    glutInitWindowSize(600,600)
    glutInitWindowPosition(50,50)
    glutCreateWindow("Bspline curve")
    glutDisplayFunc(Display)
    init()
    glutMainLoop()  # never returns; GLUT owns the event loop from here
main()
|
import os
import sys, subprocess, socket, string
import wmi, win32api, win32con
import win32com.shell.shell as sh
# SECURITY NOTE(review): a hard-coded password in source and a shell=True
# string command are both dangerous; prefer a credential store and a list
# argv with shell=False. Left as-is pending confirmation of intent.
command = 'runas /user:DOMAIN\username "D:/Python27/python.exe myscript.py"'
pst = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE)
# NOTE(review): on Python 3, communicate() requires bytes (b"password123\n");
# runas also typically reads the password from the console rather than stdin,
# so this may not deliver the password at all -- verify on the target host.
pst.communicate("password123")
|
# https://wikidocs.net/29
# import mod
# print(mod.add(10, 20))
# print(mod.sub(20, 10))
# Wildcard import pulls every public name from mod (add, sub, PI, Math)
# into this namespace. (Avoid `import *` in real code; explicit names are
# clearer -- kept here because demonstrating it is the point of the example.)
from mod import *
import sys
print(add(10, 20))
print(sub(20, 10))
print(PI)
# Math is a class defined in mod; solv's behavior lives there -- see mod.py.
a = Math()
print(a.solv(2))
# Show the module search path the import machinery used.
print(sys.path)
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
# This works but is super-slow. Not sure about the faster solution.
# Enter your code here. Read input from STDIN. Print output to STDOUT
from math import ceil, floor, sqrt
t = int(raw_input())
for _ in xrange(t):
min, max = map(int, raw_input().split())
print int(floor(sqrt(max)) - ceil(sqrt(min))) + 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.