text
stringlengths 8
6.05M
|
|---|
# Generated by Django 2.2.10 on 2020-02-24 10:47
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.10) schema migration for the `user` app:
    # re-declares two foreign keys on Message / MessageStatus.

    dependencies = [
        ('user', '0002_auto_20200223_2349'),
    ]

    operations = [
        # Message.course becomes nullable (default None); reverse accessor "notices".
        migrations.AlterField(
            model_name='message',
            name='course',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notices', to='course.Course'),
        ),
        # MessageStatus.message FK with reverse accessor "status".
        migrations.AlterField(
            model_name='messagestatus',
            name='message',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='status', to='user.Message'),
        ),
    ]
|
from Jumpscale import j
import netaddr
import random
import nacl
import os
def chat(bot):
    """
    This chat is to deploy 3bot container on the grid

    Walks the user through deploying a new 3bot container (or restoring a
    backed-up one), reserves capacity on a grid node, and hands back the
    wireguard configuration needed to reach the container.
    """
    explorer = j.clients.explorer.explorer
    # S3 credentials forwarded (encrypted) to the container for backup/restore.
    cl = j.clients.s3.get("deployer")
    AWS_ID = cl.accesskey_
    AWS_SECRET = cl.secretkey_
    user_info = bot.user_info()
    name = user_info["username"]
    email = user_info["email"]
    ips = ["IPv6", "IPv4"]
    choose = ["Deploy a new 3bot", "Restore my 3bot"]
    ip_range_choose = ["Specify IP Range", "Choose IP Range for me"]
    expiration = j.data.time.epoch + (60 * 60 * 24)  # for one day
    backup_directory = name.replace(".", "_")
    env = dict()
    secret_env = dict()
    if not name or not email:
        bot.md_show("Username or email not found in session. Please log in properly")
        # Fix: abort the flow here — without name/email the explorer lookup
        # below cannot identify the user (previously execution fell through).
        return
    user_choice = bot.single_choice("This wizard will help you deploy or restore your 3bot.", choose)
    identity = explorer.users.get(name=name, email=email)
    identity_pubkey = identity.pubkey
    if user_choice == "Restore my 3bot":
        password = bot.secret_ask("Please enter the password you configured to backup your 3bot")
        hash_restore = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
    # ask user about corex user:password and ssh-key to give him full access to his container
    pub_key = None
    while not pub_key:
        pub_key = bot.string_ask(
            """Please add your public ssh key, this will allow you to access the deployed container using ssh.
Just copy your key from `~/.ssh/id_rsa.pub`"""
        )
    form = bot.new_form()
    user_corex = form.string_ask(
        "Please create a username for your 3bot (this will allow you secure access to the 3bot from your web browser)"
    )
    password = form.secret_ask("Please create a password for your 3bot")
    form.ask()
    # create new reservation
    reservation = j.sal.zosv2.reservation_create()
    ip_version = bot.single_choice("Do you prefer to access your 3bot using IPv4 or IPv6? If unsure, chooose IPv4", ips)
    # Prefer an SSD-backed node (sru); fall back to HDD capacity (hru).
    node_selected = j.sal.chatflow.nodes_get(1, cru=4, sru=8, ip_version=ip_version)
    if len(node_selected) != 0:
        node_selected = node_selected[0]
    else:
        node_selected = j.sal.chatflow.nodes_get(1, cru=4, hru=8, ip_version=ip_version)
        # Fix: the original condition was inverted (`!= 0`), which reported
        # "no empty node" precisely when a fallback node WAS found and then
        # crashed with IndexError when none was.
        if len(node_selected) == 0:
            res = "# We are sorry we don't have empty Node to deploy your 3bot"
            res = j.tools.jinja2.template_render(text=res, **locals())
            bot.md_show(res)
            return
        node_selected = node_selected[0]
    # Encrypt AWS ID and AWS Secret to send it in secret env
    aws_id_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_ID)
    aws_secret_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, AWS_SECRET)
    user_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, user_corex.value)
    password_corex_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, password.value)
    # Create network of reservation and add peers
    if user_choice == "Restore my 3bot":
        hash_encrypt = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_restore)
        env.update({"restore": "True"})
        secret_env.update({"HASH": hash_encrypt})
    reservation, config = j.sal.chatflow.network_configure(
        bot, reservation, [node_selected], customer_tid=identity.id, ip_version=ip_version
    )
    ip_address = config["ip_addresses"][0]
    backup = bot.single_choice("Do you want your 3bot to be automatically backed up?", ["Yes", "No"])
    if backup == "Yes":
        password = bot.secret_ask(
            """The password you add here will be used to encrypt your backup to keep your 3bot safe.
please make sure to keep this password safe so you can later restore your 3bot.
Remember, this password will not be saved anywhere, so there cannot be recovery for it"""
        )
        hash_backup = nacl.hash.blake2b(password.encode(), key=identity_pubkey.encode()).decode()
        hash_encrypted = j.sal.zosv2.container.encrypt_secret(node_selected.node_id, hash_backup)
        secret_env.update({"HASH": hash_encrypted})
        env.update({"backup": "True", "FOLDER": backup_directory})
    env.update({"pub_key": pub_key})
    secret_env.update(
        {
            "AWS_ID": aws_id_encrypted,
            "AWS_SECRET": aws_secret_encrypted,
            "corex_password": password_corex_encrypted,
            "corex_user": user_corex_encrypted,
        }
    )
    container_flist = "https://hub.grid.tf/bola_nasr_1/threefoldtech-3bot-corex.flist"
    entry_point = "/usr/bin/zinit init -d"
    storage_url = "zdb://hub.grid.tf:9900"
    # Add volume and create container schema
    vol = j.sal.zosv2.volume.create(reservation, node_selected.node_id, size=8)
    rid = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
    # create container
    cont = j.sal.zosv2.container.create(
        reservation=reservation,
        node_id=node_selected.node_id,
        network_name=config["name"],
        ip_address=ip_address,
        flist=container_flist,
        storage_url=storage_url,
        env=env,
        entrypoint=entry_point,
        cpu=4,
        memory=4096,
        secret_env=secret_env,
    )
    j.sal.zosv2.volume.attach_existing(cont, vol, rid, "/sandbox/var")
    resv_id = j.sal.chatflow.reservation_register(reservation, expiration, customer_tid=identity.id)
    res = """# reservation sent. ID: {}
""".format(
        resv_id
    )
    bot.md_show(res)
    filename = "{}_{}.conf".format(name, resv_id)
    res = """
## Use the following template to configure your wireguard connection. This will give you access to your 3bot.
# Make sure you have wireguard ```https://www.wireguard.com/install/``` installed
## ```wg-quick up /etc/wireguard/{}```
Click next
to download your configuration
""".format(
        filename
    )
    res = j.tools.jinja2.template_render(text=j.core.text.strip(res), **locals())
    bot.md_show(res)
    res = j.tools.jinja2.template_render(text=config["wg"], **locals())
    bot.download_file(res, filename)
    res = "# Open your browser at ```{}:1500```".format(ip_address)
    res = j.tools.jinja2.template_render(text=res, **locals())
    bot.md_show(res)
|
# cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
import sdm
import sdm_utils
from numpy import *
def mem_write_x_at_x(count=10):
    """Store `count` random bitstrings, each written at its own address."""
    for _ in range(count):
        address = sdm.Bitstring()
        sdm.thread_write(address, address)
def mem_write_x_at_random(count=10):
    """Store `count` random bitstrings, each at an unrelated random address."""
    for _ in range(count):
        address = sdm.Bitstring()
        payload = sdm.Bitstring()
        sdm.thread_write(address, payload)
def linhares_fig7_1():
    """Initialise a fresh memory and plot the table-7.1 curve (Linhares fig. 7.1)."""
    import sdm
    import sdm_utils
    import pylab
    sdm.initialize()
    curve = sdm_utils.table_7_1()
    pylab.plot(curve)
    pylab.show()
def linhares_critical1():
    """Load a saved memory, add 5000 more writes, and plot the critical-distance curve.

    Fixes: `print "saving file"` was Python 2 syntax (a SyntaxError on
    Python 3, which the surrounding `print(...)` calls target) and
    `time.clock()` was removed in Python 3.8 — replaced by `time.perf_counter()`.
    """
    # cd /Users/AL/Dropbox/0. AL Current Work/3. To Submit/Dr K/AL/python/
    import sdm
    import sdm_utils
    import time
    start = time.perf_counter()
    # sdm.initialize()
    sdm.initialize_from_file("/Users/AL/Desktop/mem45000_n1000_10000x_at_x.sdm")
    mem_write_x_at_x(5000)
    v = sdm.Bitstring()
    sdm.thread_write(v, v)
    print("computing distances graph")
    print(time.perf_counter() - start, "seconds")
    a = sdm_utils.critical_distance2(0, 1000, 1, v)
    print(time.perf_counter() - start)
    print("saving file")
    sdm.save_to_file("/Users/AL/Desktop/mem50000_n1000_10000x_at_x.sdm")
    import pylab
    pylab.plot(a)
    pylab.show()
def scan_for_distances():
    """Grow the memory in steps of 1000 items and pickle the distance curve at each step.

    Fixes: Python 2 `print` statements and the bare `cPickle` import (renamed
    `pickle` on Python 3); file handles passed to pickle.dump are now closed
    via `with` instead of being leaked.
    """
    import time
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle  # Python 3
    sdm.initialize()
    v = sdm.Bitstring()
    for i in range(0, 10, 1):
        sdm.thread_write(v, v)
    import pylab
    for i in range(1000, 51000, 1000):
        print('Computing distances for ' + str(i) + ' items registered')
        # add 1000 itens to memory
        mem_write_x_at_x(1000)
        a = sdm_utils.critical_distance2(0, 1000, 1, v, read=sdm.thread_read_chada)
        # get new distance values in a, then save a
        with open(str(i) + '10writes_Chada_Read.cPickle', 'wb') as fh:
            pickle.dump(a, fh)
        print('saved ' + str(i) + '.cPickle')
    # print 'now lets see..'
    # for i in range (1000,11000,1000):
    #     print (cPickle.load(open(str(i)+'.cPickle','rb')))
#from pylab import *
def TestFig1():
    """Load the saved per-step curves and plot them all on one figure.

    Fixes: `cPickle` is `pickle` on Python 3; file handles are now closed.
    NOTE(review): the original (whitespace-mangled) source does not show
    whether `pylab.show()` sat inside the loop; one figure at the end is
    assumed — confirm against the original file.
    """
    import os
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle  # Python 3
    # os.chdir ("results/6_iter_readng/1000D/DrK_Read/x_at_x/")
    import pylab
    for i in range(1000, 51000, 1000):
        with open(str(i) + '_10writes.cPickle', 'rb') as fh:
            a = pickle.load(fh)
        pylab.plot(a)
    pylab.show()
from matplotlib.pylab import *
def Plot_Heatmap(data=[]):
    """Render `data` as a heatmap with colorbar plus a contour overlay.

    Fixes: `print 'minimum value=',mind` was Python 2 syntax — a SyntaxError
    on Python 3. NOTE(review): the mutable default `data=[]` is kept for
    interface compatibility; callers pass an ndarray (`.max()`/`.min()` used).
    """
    # Make plot with vertical (default) colorbar
    maxd = int(data.max())
    mind = int(data.min())
    avgd = int((maxd + mind) / 2)
    print('minimum value=', mind)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # use aspect=20 when N=1000
    # use aspect=5 when N=256
    cax = ax.imshow(data, cmap=cm.YlGnBu, aspect=5.0, interpolation=None, norm=None, origin='lower')
    ax.set_title('Critical Distance Behavior', fontsize=58)
    ax.grid(True, label='Distance')
    ax.set_xlabel('original distance', fontsize=100)
    ax.set_ylabel("# items previously stored (000's)")
    # Add colorbar, make sure to specify tick locations to match desired ticklabels
    cbar = fig.colorbar(cax, ticks=[mind, avgd, maxd])  # had ZERO here before
    cbar.ax.set_yticklabels([str(mind), str(avgd), str(maxd)])
    cbar.ax.set_ylabel('distance obtained after 20 iteractive-readings', fontsize=24)
    ######### CONTOUR DELINEATES THE CRITICAL DISTANCE
    # We are using automatic selection of contour levels;
    # this is usually not such a good idea, because they don't
    # occur on nice boundaries, but we do it here for purposes
    # of illustration.
    CS = contourf(data, 100, levels=[mind, avgd, maxd], alpha=0.1, cmap=cm.YlGnBu, origin='lower')
    # Note that in the following, we explicitly pass in a subset of
    # the contour levels used for the filled contours. Alternatively,
    # We could pass in additional levels to provide extra resolution,
    # or leave out the levels kwarg to use all of the original levels.
    # NOTE(review): `hold='on'` was removed in matplotlib 3.0 — confirm the
    # pinned matplotlib version before upgrading.
    CS2 = contour(CS, levels=[88], colors='gray', origin='lower', hold='on', linestyles='dashdot')
    title('Critical Distance Behavior', fontsize=40)
    xlabel('original distance', fontsize=24)
    ylabel("# items previously stored (000's)", fontsize=24)
    # Add the contour line levels to the colorbar
    # cbar.add_lines(CS2)
    show()
from matplotlib.pylab import *
import os, cPickle
def GetDataForPlots(folder='', filenameext='MUST_BE_PROVIDED'):
    """Load 50 pickled (256 x k) result arrays, stack them, and return the
    second column reshaped to (50, 256).

    Fixes: `p = q = r = s = []` bound four names to ONE shared list; Python 2
    `print` statements; `r.shape = (50, 256)` fails on the non-contiguous
    column slice (use `reshape`, which copies when needed); file handles are
    now closed; `cPickle` is `pickle` on Python 3.
    """
    import os
    try:
        import cPickle as pickle  # Python 2
    except ImportError:
        import pickle  # Python 3
    p = []
    if len(folder) > 0:
        os.chdir(folder)
    for i in range(1, 51):
        S = 'N=256_iter_read=2_' + str(i * 1000) + filenameext + '.cPickle'
        with open(S, 'rb') as fh:
            p.append(pickle.load(fh))
    q = concatenate(p, axis=0)
    r = q[:, 1]
    print(len(r))
    print('& shape (r)=', shape(r))
    r = r.reshape(50, 256)  # if N=256
    # r = r.reshape(50, 1000)
    print('r=', r)
    return r
def now():
    """Convenience driver: load one pre-computed result set and render its heatmap."""
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/1_write", '')
    #data=GetDataForPlots("results/6_iter_readng/1000D/DrK_Read/x_at_x/10_writes", '_10writes')
    data=GetDataForPlots('','saved items_x_at_x_0_writes_DrK_cubed')
    Plot_Heatmap (data)
|
from django.conf.urls import *
from tagging_autocomplete.views import list_tags
urlpatterns = [
    # 'tagging_autocomplete.views',
    # NOTE(review): `url()` comes from the star import above; it is deprecated
    # since Django 2.0 and removed in 4.0 — confirm the project's Django
    # version before upgrading (use `re_path` then).
    url(r'^list$', list_tags, name='tagging_autocomplete-list'),
]
|
import numpy
from numpy import array
from numpy import mean
from numpy import cov, var
from PIL import Image
from numpy.linalg import eigh, norm
from matplotlib.pyplot import *
import matplotlib.pyplot as plt
import math
# Training list file: each line is "<image-path>-<label>".
f = open('ts.txt', 'r')
train_images = []
# Test list file: one image path per line.
tf = open('tss.txt', 'r')
test_images = []
line_list1 = f.readlines()
#line_list1.pop()
line_list2 = tf.readlines()
# Load each training image as a flattened 64x64 grayscale vector, paired with its label.
for line in line_list1:
    line = line.split("-")
    train_images.append(
        ((numpy.asarray(Image.open(line[0]).convert('L').resize((64, 64))).flatten()), line[1]))
# Test images: same preprocessing, no labels.
for line in line_list2:
    test_images.append(numpy.asarray(Image.open(line.split('\n')[0]).convert('L').resize((64, 64))).flatten())
images = []
for (image, name) in train_images:
    images.append(image)
matrix = numpy.asarray(images)
#print(matrix)
# PCA on the training set: center, covariance, eigen-decomposition.
avg = mean(matrix.T, axis=1)
center = matrix - avg
variance = cov(center.T)
values, vectors = eigh(variance)
# eigh returns eigenvalues in ascending order; flip and keep 32 components.
# NOTE(review): numpy.flip with no axis reverses BOTH axes of `vectors` —
# confirm that reversing the row order too is intended.
feat_vec = numpy.flip(vectors)[:,:32]
# Project the centered training data onto the selected eigenvectors.
norm_line = feat_vec.T.dot(center.T)
vec = feat_vec
line = norm_line.T
avg = avg
# Group the projected training vectors by their class label.
classed_eigen = dict()
for index, arr in enumerate(line):
    label = train_images[index][1]
    classed_eigen.setdefault(label, list()).append(arr)
for key in classed_eigen:
    classed_eigen[key] = numpy.asarray(classed_eigen[key])
# Per-class Gaussian parameters in eigenspace: mean and variance per component.
avgg = {}
vari = {}
for name in classed_eigen:
    arr = classed_eigen[name]
    avgg[name] = [mean(col) for col in arr.T]
    vari[name] = var(arr.T, axis=1)
meuu = avgg
sigsq = vari
# Project the test images onto the same eigenspace as the training data.
matr = numpy.asarray(test_images)
#print(matr)
cc = matr - avg
test_norm_line = vec.T.dot(cc.T)
test_line = test_norm_line.T
prod = 1
max_val = -9999
max_class = list()
# Naive-Bayes classification: for each test vector, the product of
# per-component Gaussian densities per class; pick the most likely class.
# NOTE(review): the loop variable below shadows the earlier `vec`
# (eigenvector matrix); harmless only because `vec` is not reused after this.
for vec in test_line:
    temp_name = 'X'
    max_val = -9999
    for name in meuu:
        prod = 1
        for index in range(len(vec)):
            # Gaussian density: p_x_1 is the normaliser, p_x_2 the exponential.
            # NOTE(review): 3.14 approximates pi — presumably intentional
            # shorthand; math.pi would be exact (and would change the scores).
            p_x_1 = (2 * 3.14 * sigsq[name][index]) ** 0.5
            ra = (-(vec[index] - meuu[name][index]) ** 2) / (2*sigsq[name][index])
            p_x_2 = math.exp(ra)
            p_x = p_x_2/p_x_1
            prod *= p_x
        if prod > max_val:
            max_val = prod
            temp_name = name
    max_class.append(temp_name)
names = max_class
#print((len(train_images)/6), ' Images per Class have been used to Train the Model')
#print('Using ', len(test_images), ' Images per Class have been used to Test the Model')
#print('\n Training Data Size: ', len(train_images))
#print('Testing Data Size: ', len(test_images))
# Unused per-class accumulators (kept from an earlier version of the script).
droness = list()
fjets = list()
helicopts = list()
missiles = list()
pplanes = list()
rockets = list()
#dronesfound = 0;fjetsfound = 0;helicoptersfound = 0
#missilesfound = 0;pplanesfound = 0;rocketsfound = 0
# Confusion-matrix counters: t=true/f=false, n=negative/p=positive, and the
# final letter is the class (d=drone, f=fighter jet, h=helicopter, m=missile,
# p=passenger plane, r=rocket).
tnd = 0; fnd = 0; fpd = 0; tpd = 0;
tnf = 0; fnf = 0; fpf = 0; tpf = 0;
tnh = 0; fnh = 0; fph = 0; tph = 0;
tnm = 0; fnm = 0; fpm = 0; tpm = 0;
tnp = 0; fnp = 0; fpp = 0; tpp = 0;
tnr = 0; fnr = 0; fpr = 0; tpr = 0;
# Score the predictions against the known ordering of the test set: the first
# sixth of the images are drones, then fighter jets, helicopters, missiles,
# passenger planes and rockets.  Every (actual, predicted) pair contributes a
# TP or FN/FP to the classes involved and a TN to every other class.
class_letters = ['d', 'f', 'h', 'm', 'p', 'r']
tp = {letter: 0 for letter in class_letters}
fp = {letter: 0 for letter in class_letters}
fn = {letter: 0 for letter in class_letters}
tn = {letter: 0 for letter in class_letters}
total = len(test_images)
# (class, lower bound, upper bound) of each class's slice of the test set; the
# divisions intentionally mirror the original float arithmetic.
slices = [
    ('d', 0, total / 6),
    ('f', total / 6, total / 3),
    ('h', total / 3, total / 2),
    ('m', total / 2, total / (6 / 4)),
    ('p', total / (6 / 4), total / (6 / 5)),
    ('r', total / (6 / 5), total),
]
for count in range(total):
    actual = None
    for letter, low, high in slices:
        if low <= count < high:
            actual = letter
            break
    predicted = (names[count])[0]
    if actual is None or predicted not in tp:
        continue
    if predicted == actual:
        # Correct prediction: one true positive, true negatives everywhere else.
        tp[actual] += 1
        for other in class_letters:
            if other != actual:
                tn[other] += 1
    else:
        # Miss: false negative for the real class, false positive for the
        # predicted one, true negative for the remaining four classes.
        fn[actual] += 1
        fp[predicted] += 1
        for other in class_letters:
            if other != actual and other != predicted:
                tn[other] += 1
print('\n Confusion Matrix for Drones Confusion Matrix for FighterJets\n')
print(' TN : ', tn['d'], ' FP : ', fp['d'], ' TN : ', tn['f'], ' FP : ', fp['f'])
print(' FN : ', fn['d'], ' TP : ', tp['d'], ' FN : ', fn['f'], ' TP : ', tp['f'])
print('\n')
print('\n Confusion Matrix for Helicopters Confusion Matrix for Missiles\n')
print(' TN : ', tn['h'], ' FP : ', fp['h'], ' TN : ', tn['m'], ' FP : ', fp['m'])
print(' FN : ', fn['h'], ' TP : ', tp['h'], ' FN : ', fn['m'], ' TP : ', tp['m'])
print('\n')
print('\n Confusion Matrix for PassengerPlanes Confusion Matrix for Rockets\n')
print(' TN : ', tn['p'], ' FP : ', fp['p'], ' TN : ', tn['r'], ' FP : ', fp['r'])
print(' FN : ', fn['p'], ' TP : ', tp['p'], ' FN : ', fn['r'], ' TP : ', tp['r'])
print('\n')
|
# author: Wenrui Zhang
# email: wenruizhang@ucsb.edu
#
# install required package using: pip install -r requirements.txt
# run the code: python main.py
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
import sklearn.model_selection as model_s
from sklearn import neighbors
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
from sklearn.metrics import plot_confusion_matrix
# Closed set of Pokemon classes and matching abbreviations used as plot labels.
classes = ['Bulbasaur', 'Sudowoodo', 'Charmander', 'Gastly', 'Jigglypuff', 'Pidgey', 'Pikachu', 'Squirtle']
short_classes = ['Bul', 'Sud', 'Cha', 'Gas', 'Jig', 'Pid', 'Pik', 'Squ']
def preprocessing(data, labels=None):  # preprocess data
    """Normalise raw samples; with labels, also encode them and split train/validation."""
    global classes
    # Gender is stored as a char in column 9: 'F' -> 0.0, anything else -> 1.0.
    samples = []
    for sample in data:
        row = list(sample)
        row[9] = float(0) if sample[9] == 'F' else float(1)
        samples.append(np.asarray(list(map(float, row)), dtype=np.float32))
    # Zero-mean / unit-variance scaling over the whole set.
    scaler = StandardScaler()
    samples = scaler.fit_transform(samples)
    if labels is None:
        # Testing samples: nothing to encode or split.
        return samples
    # Training samples: encode the string labels to 0-7 and hold out 20%.
    le = LabelEncoder()
    le.fit(classes)
    encoded = le.transform(labels)
    train_x, validate_x, train_y, validate_y = model_s.train_test_split(samples, encoded, test_size=0.2, random_state=100)
    return train_x, validate_x, train_y, validate_y
def k_nearest_neighbor(train_x, train_y, validate_x, validate_y):
    """Grid-search a KNN classifier and plot the best model's confusion matrix."""
    max_score = 0
    best_neighbors = None
    best_weights = None
    # Exhaustive search over neighbour counts and weighting schemes.
    for n_neighbors in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
        for weights in ['uniform', 'distance']:
            clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
            clf.fit(train_x, train_y)
            score = clf.score(validate_x, validate_y)
            # >= so that ties move to the most recent combination, matching
            # the original update order.
            if score >= max_score:
                max_score = score
                best_neighbors = n_neighbors
                best_weights = weights
            print("number of neighbors: ", n_neighbors, ", weights: ", weights)
            print(score)
    print("final result of KNN")
    print("number of neighbors: ", best_neighbors, ", weights: ", best_weights)
    print(max_score)
    # Refit with the winning hyper-parameters and render the confusion matrix.
    clf = neighbors.KNeighborsClassifier(best_neighbors, weights=best_weights)
    clf.fit(train_x, train_y)
    disp = plot_confusion_matrix(clf, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of k-Nearest Neighbors")
    plt.savefig("CM_KNN.png")
def naive_bayes(train_x, train_y, validate_x, validate_y):
    """Fit a Gaussian naive-Bayes classifier and plot its confusion matrix."""
    model = GaussianNB()
    model.fit(train_x, train_y)
    print("result of Gaussian Naive Bayes")
    print(model.score(validate_x, validate_y))
    # Normalised confusion matrix on the validation split.
    display = plot_confusion_matrix(model, validate_x, validate_y,
                                    display_labels=short_classes,
                                    cmap=plt.cm.Blues,
                                    normalize='true',
                                    xticks_rotation='vertical')
    display.ax_.set_title("Confusion Matrix of Gaussian Naive Bayes")
    plt.savefig("CM_GNB.png")
def svm(train_x, train_y, validate_x, validate_y):
    """Grid-search an SVM over kernel/C/gamma and plot its confusion matrix."""
    search_space = {'kernel': ('linear', 'rbf', 'poly'),
                    'C': [1, 10, 100, 1000],
                    'gamma': [0.1, 0.01, 0.001, 0.0001]}
    # GridSearchCV refits on the best combination automatically.
    searcher = model_s.GridSearchCV(SVC(), search_space)
    searcher.fit(train_x, train_y)
    print("result of SVM")
    print(searcher.score(validate_x, validate_y))
    print(searcher.best_params_)  # the hyper parameteres with best result.
    display = plot_confusion_matrix(searcher, validate_x, validate_y,
                                    display_labels=short_classes,
                                    cmap=plt.cm.Blues,
                                    normalize='true',
                                    xticks_rotation='vertical')
    display.ax_.set_title("Confusion Matrix of SVM")
    plt.savefig("CM_SVM.png")
def decision_tree(train_x, train_y, validate_x, validate_y):
    """Fit a decision-tree classifier and plot its confusion matrix."""
    model = DecisionTreeClassifier()
    model.fit(train_x, train_y)
    print("result of decision tree")
    print(model.score(validate_x, validate_y))
    # Normalised confusion matrix on the validation split.
    display = plot_confusion_matrix(model, validate_x, validate_y,
                                    display_labels=short_classes,
                                    cmap=plt.cm.Blues,
                                    normalize='true',
                                    xticks_rotation='vertical')
    display.ax_.set_title("Confusion Matrix of decision tree")
    plt.savefig("CM_DT.png")
def lda(train_x, train_y, validate_x, validate_y):
    """Fit a linear-discriminant-analysis classifier and plot its confusion matrix."""
    model = LinearDiscriminantAnalysis()
    model.fit(train_x, train_y)
    print("result of LDA")
    print(model.score(validate_x, validate_y))
    # Normalised confusion matrix on the validation split.
    display = plot_confusion_matrix(model, validate_x, validate_y,
                                    display_labels=short_classes,
                                    cmap=plt.cm.Blues,
                                    normalize='true',
                                    xticks_rotation='vertical')
    display.ax_.set_title("Confusion Matrix of LDA")
    plt.savefig("CM_LDA.png")
def random_forest(train_x, train_y, validate_x, validate_y):
    """Fit a random-forest classifier and plot its confusion matrix."""
    model = RandomForestClassifier()
    model.fit(train_x, train_y)
    print("result of random forest")
    print(model.score(validate_x, validate_y))
    # Normalised confusion matrix on the validation split.
    display = plot_confusion_matrix(model, validate_x, validate_y,
                                    display_labels=short_classes,
                                    cmap=plt.cm.Blues,
                                    normalize='true',
                                    xticks_rotation='vertical')
    display.ax_.set_title("Confusion Matrix of random forest")
    plt.savefig("CM_RF.png")
def mlp(train_x, train_y, validate_x, validate_y):
    """Try several MLP architectures and plot the best one's confusion matrix.

    Returns the best hidden-layer configuration so the caller can retrain.
    """
    # test on different network sizes.
    hidden_size = [(400,), (800,), (1200,), (1600,), (2000,), (200, 200,), (400, 400,), (800, 800,), (400, 400, 400,),
                   (400, 400, 400, 400,)]
    max_accuracy = 0
    best_hidden = None
    for hidden in hidden_size:
        clf = MLPClassifier(hidden_layer_sizes=hidden)
        clf.fit(train_x, train_y)
        score = clf.score(validate_x, validate_y)
        # Strictly-greater comparison: ties keep the earlier architecture,
        # matching the original update order.
        if score > max_accuracy:
            best_hidden = hidden
            max_accuracy = score
        print("network size: ", hidden)
        print(score)
    print("result of MLP")
    print("network size: ", best_hidden)
    print(max_accuracy)
    # Refit the winner and render its confusion matrix.
    clf = MLPClassifier(hidden_layer_sizes=best_hidden)
    clf.fit(train_x, train_y)
    disp = plot_confusion_matrix(clf, validate_x, validate_y,
                                 display_labels=short_classes,
                                 cmap=plt.cm.Blues,
                                 normalize='true',
                                 xticks_rotation='vertical')
    disp.ax_.set_title("Confusion Matrix of MLP")
    plt.savefig("CM_MLP.png")
    return best_hidden
def mlp_predict(train_x, train_y, test_x, best_hidden):
    """Train the best MLP on all labelled data, then save and print test predictions."""
    global classes
    model = MLPClassifier(hidden_layer_sizes=best_hidden)
    model.fit(train_x, train_y)
    encoder = LabelEncoder()
    encoder.fit(classes)
    # Map the numeric predictions back to Pokemon names.
    predict_y = encoder.inverse_transform(model.predict(test_x))
    np.save("pokemon_test_y.npy", predict_y)
    print(predict_y)
# load data
train_x = np.load("pokemon_train_x.npy")
train_y = np.load("pokemon_train_y.npy")
test_x = np.load("pokemon_test_x.npy")
# Preprocess + split, then evaluate each classifier on the validation set.
train_x, validate_x, train_y, validate_y = preprocessing(train_x, train_y)
k_nearest_neighbor(train_x, train_y, validate_x, validate_y)
naive_bayes(train_x, train_y, validate_x, validate_y)
svm(train_x, train_y, validate_x, validate_y)
decision_tree(train_x, train_y, validate_x, validate_y)
lda(train_x, train_y, validate_x, validate_y)
random_forest(train_x, train_y, validate_x, validate_y)
best_hidden = mlp(train_x, train_y, validate_x, validate_y)
# Retrain the best MLP on train+validation and predict the unlabelled test set.
x = np.concatenate((train_x, validate_x), axis=0)
y = np.concatenate((train_y, validate_y), axis=0)
test_x = preprocessing(test_x)
mlp_predict(x, y, test_x, best_hidden)
|
from Pages.drag_drop import DragDropPage
from Utils.locators import DragDropLocators
import time
from Utils.Logger import Logging
import allure
from allure_commons.types import AttachmentType
@allure.severity(allure.severity_level.NORMAL)
class Test_DragDrop:
    # Pytest test class for the drag-and-drop demo page.  The `test_setup`
    # fixture (defined elsewhere in the project) is expected to yield a
    # configured Selenium WebDriver.
    logger = Logging.loggen()

    ##################
    @allure.severity(allure.severity_level.BLOCKER)
    def test_drag_drop(self, test_setup):
        """Open the demo page, perform the drag & drop, and attach a screenshot."""
        self.logger.info("*************** Test_001_Drag And Drop *****************")
        self.logger.info("*************** Drag & Drop Test Started *****************")
        self.driver = test_setup
        self.driver.get(DragDropLocators.DragDropUrl)
        self.obj = DragDropPage(self.driver)
        self.obj.drag_and_drop()
        self.logger.info("**** Drag & Drop Test Passed ****")
        # Brief pause so the drop result is visible in the screenshot.
        time.sleep(3)
        self.driver.save_screenshot(".\\Screenshots\\" + "test_drag&drop.png")
        allure.attach(self.driver.get_screenshot_as_png(), name="testDrag&Drop", attachment_type=AttachmentType.PNG)
        # close browser
        self.driver.close()
# pytest -v -s --alluredir=".\AllureReports\Drag&Drop" Tests\test_drag_drop.py
# pytest -v --html=PytestReports\drag&drop_report.html Tests\test_drag_drop.py
|
from django.shortcuts import render, get_object_or_404
from decimal import Decimal
from django.conf import settings
from django.urls import reverse
from paypal.standard.forms import PayPalPaymentsForm
from django.views.decorators.csrf import csrf_exempt
import pymysql
connection = pymysql.connect(host='localhost',user='root',password='Enter Your DB Password',db='busroad')
a =connection.cursor()
@csrf_exempt
def payment_done(request):
    """Render the PayPal 'payment completed' landing page."""
    return render(request, 'main/done.html')


@csrf_exempt
def payment_canceled(request):
    """Render the PayPal 'payment canceled' landing page."""
    return render(request, 'main/canceled.html')
def payment_process(request):
    """Look up the requested bus route and build the PayPal payment form.

    Fix: the route-query values previously referenced undefined names
    (``fromStation``, ``toStation``, ``dte``), raising ``NameError`` on every
    request; they are now read from the request parameters.
    """
    # NOTE(review): parameter names assumed from the query columns — confirm
    # against the form/template that posts to this view.
    fromStation = request.POST.get('fromStation') or request.GET.get('fromStation')
    toStation = request.POST.get('toStation') or request.GET.get('toStation')
    dte = request.POST.get('date') or request.GET.get('date')
    cursor = connection.cursor()
    # Parameterised query — values are escaped by the driver.
    cursor.execute("select * from buses_routes where source= %s AND destination = %s AND date = %s",(fromStation, toStation, dte))
    bus = cursor.fetchall()
    host = request.get_host()
    print("Total number of rows in Laptop is: ", cursor.rowcount)
    paypal_dict = {
        'business' : settings.PAYPAL_RECEIVER_EMAIL,
        'amount' : '120',
        'currency_code': 'USD',
        'notify_url': 'http://{}{}'.format(host,reverse('paypal-ipn')),
        'return_url': 'http://{}{}'.format(host, reverse('payment:done')),
        'cancel_return': 'http://{}{}'.format(host, reverse('payment:canceled')),
    }
    form = PayPalPaymentsForm(initial=paypal_dict)
    return render(request, 'main/process.html', {'bus':bus,'form':form})
|
"""
:mod:`pysgutils.sg_pt`
~~~~~~~~~~~~~~~~~~~~~~
Python port of sg_pt.h from sg3_utils
Comments from sg_pt.h:
Copyright (c) 2005-2014 Douglas Gilbert.
All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the BSD_LICENSE file.
"""
from __future__ import absolute_import
import os
import ctypes
import enum
import errno
import sys
import six
import weakref
from . import sg_lib, libsgutils2, _impl_check
class SGPTBase(ctypes.c_void_p):
    """
    This declaration hides the fact that each implementation has its own
    structure "derived" (using a C++ term) from this one. It compiles
    because 'struct sg_pt_base' is only referenced (by pointer: 'objp')
    in this interface. An instance of this structure represents the
    context of one SCSI command.

    On the Python side this is simply an opaque void pointer handed back
    by construct_scsi_pt_obj() and passed to the other wrappers.
    """
def scsi_pt_version():
    """The format of the version string is like this: "2.01 20090201".
    The leading digit will be incremented if this interface changes
    in a way that may impact backward compatibility."""
    # The C call returns bytes; decode to a Python str.
    return libsgutils2.scsi_pt_version().decode('utf-8')


@_impl_check
def scsi_pt_open_device(device_name, read_only=False, verbose=False):
    """Returns >= 0 if successful. If error in Unix returns negated errno.

    Raises:
        OSError: with the positive errno when the C call returns a negative value.
    """
    ret = libsgutils2.scsi_pt_open_device(device_name.encode('utf-8'), read_only, verbose)
    if ret < 0:
        raise OSError(-ret, sg_lib.safe_strerror(-ret))
    return ret


@_impl_check
def scsi_pt_open_flags(device_name, flags=os.O_RDWR, verbose=False):
    """Similar to scsi_pt_open_device() but takes Unix style open flags OR-ed
    together. Returns valid file descriptor( >= 0 ) if successful, otherwise
    returns -1 or a negated errno.
    In Win32 O_EXCL translated to equivalent.

    Raises:
        OSError: with the positive errno when the C call returns a negative value.
    """
    ret = libsgutils2.scsi_pt_open_flags(device_name.encode('utf-8'), flags, verbose)
    if ret < 0:
        raise OSError(-ret, sg_lib.safe_strerror(-ret))
    return ret


@_impl_check
def scsi_pt_close_device(device_fd):
    """Returns 0 if successful. If error in Unix returns negated errno.

    Raises:
        OSError: with the positive errno when the C call returns a negative value.
    """
    ret = libsgutils2.scsi_pt_close_device(device_fd)
    if ret < 0:
        raise OSError(-ret, sg_lib.safe_strerror(-ret))
@_impl_check
def construct_scsi_pt_obj():
    """Creates an object that can be used to issue one or more SCSI commands
    (or task management functions). Returns NULL if problem.
    Once this object has been created it should be destroyed with
    destruct_scsi_pt_obj() when it is no longer needed.

    Raises:
        MemoryError: if the underlying C constructor returns NULL.
    """
    ret = libsgutils2.construct_scsi_pt_obj()
    # `not ret` covers both 0 and None — ctypes represents a NULL pointer as
    # None when the restype is a pointer type, which `ret == 0` would miss.
    if not ret:
        raise MemoryError("Construction of scsi pt object failed")
    return SGPTBase(ret)
@_impl_check
def clear_scsi_pt_obj(objp):
"""Clear state information held in *objp . This allows this object to be
used to issue more than one SCSI command."""
libsgutils2.clear_scsi_pt_obj(objp)
@_impl_check
def set_scsi_pt_cdb(objp, cdb):
"""Set the CDB (command descriptor block)"""
libsgutils2.set_scsi_pt_cdb(objp, cdb, len(cdb))
@_impl_check
def set_scsi_pt_sense(objp, sense):
"""Set the sense buffer and the maximum length that it can handle"""
libsgutils2.set_scsi_pt_sense(objp, sense, len(sense))
@_impl_check
def set_scsi_pt_data_in(objp, dxferp):
"""Set a pointer and length to be used for data transferred from device"""
libsgutils2.set_scsi_pt_data_in(objp, dxferp, len(dxferp))
@_impl_check
def set_scsi_pt_data_out(objp, dxferp):
"""Set a pointer and length to be used for data transferred to device"""
libsgutils2.set_scsi_pt_data_out(objp, dxferp, len(dxferp))
@_impl_check
def set_scsi_pt_packet_id(objp, packet_id):
"""The following "set_"s implementations may be dummies"""
libsgutils2.set_scsi_pt_packet_id(objp, packet_id)
@_impl_check
def set_scsi_pt_tag(objp, tag):
    """Set the command tag (may be a dummy on some platforms)."""
    libsgutils2.set_scsi_pt_tag(objp, tag)
@_impl_check
def set_scsi_pt_task_management(objp, tmf_code):
    """Set the task-management function code (may be a dummy on some
    platforms)."""
    libsgutils2.set_scsi_pt_task_management(objp, tmf_code)
@_impl_check
def set_scsi_pt_task_attr(objp, attribute, priority):
    """Set the task attribute and priority (may be a dummy on some
    platforms)."""
    libsgutils2.set_scsi_pt_task_attr(objp, attribute, priority)
class SCSIPTFlags(enum.IntEnum):
    """OR-able flags for set_scsi_pt_flags().

    In C these values sit behind a guard define that exists only when
    set_scsi_pt_flags() is present; older library versions may lack the
    function.  If neither QUEUE_AT_HEAD nor QUEUE_AT_TAIL is given, or both
    are given, the pass-through default queueing is used.
    """
    NONE = 0
    FUNCTION = 1
    QUEUE_AT_TAIL = 0x10
    QUEUE_AT_HEAD = 0x20
@_impl_check
def set_scsi_pt_flags(objp, flags):
    """Set (potentially OS dependent) flags for the pass-through mechanism.
    Apart from contradictions, SCSIPTFlags values can be OR-ed together."""
    libsgutils2.set_scsi_pt_flags(objp, flags)
@_impl_check
def do_scsi_pt(objp, fd, timeout_secs, verbose=False):
    """Submit the prepared command on *fd*, waiting up to *timeout_secs*.

    C return value mapping:
      * negative (e.g. Unix '-errno', including -EINTR for interrupted
        system calls) -> OSError; the errno is also fetchable later with
        get_scsi_pt_os_err().
      * 0 -> success (at the very least: command sent).
      * 1 (SCSI_PT_DO_BAD_PARAMS) -> ValueError.
      * 2 (SCSI_PT_DO_TIMEOUT) -> TimeoutError on Python 3, otherwise
        OSError(ETIMEDOUT), since TimeoutError does not exist on Python 2.
    """
    ret = libsgutils2.do_scsi_pt(objp, fd, timeout_secs, verbose)
    if ret < 0:
        raise OSError(-ret, sg_lib.safe_strerror(-ret))
    elif ret == 1:
        raise ValueError("SCSI_PT_DO_BAD_PARAMS (1)")
    elif ret == 2:
        if sys.version_info > (3,):
            # noinspection PyCompatibility
            raise TimeoutError("SCSI_PT_DO_TIMEOUT (2)")
        else:
            raise OSError(errno.ETIMEDOUT, "SCSI_PT_DO_TIMEOUT (2)")
class SCSIPTResult(enum.IntEnum):
    """Result category returned by get_scsi_pt_result_category()."""
    GOOD = 0
    #: other than GOOD and CHECK CONDITION
    STATUS = 1
    # Sense data is available.
    SENSE = 2
    # Transport-level error; see get_scsi_pt_transport_err().
    TRANSPORT_ERR = 3
    # Operating-system error; see get_scsi_pt_os_err().
    OS_ERR = 4
@_impl_check
def get_scsi_pt_result_category(objp):
    """Return the highest-numbered applicable SCSIPTResult category."""
    return SCSIPTResult(libsgutils2.get_scsi_pt_result_category(objp))
@_impl_check
def get_scsi_pt_resid(objp):
    """Return the data-transfer residual count; 0 if not available."""
    return libsgutils2.get_scsi_pt_resid(objp)
@_impl_check
def get_scsi_pt_status_response(objp):
    """Return the SCSI status value (from the device that received the
    command), wrapped in sg_lib.SCSIStatusCode."""
    return sg_lib.SCSIStatusCode(libsgutils2.get_scsi_pt_status_response(objp))
@_impl_check
def get_scsi_pt_sense_len(objp):
    """Return the actual sense length.  If sense data is present but the
    actual length is not known, the C layer returns 'max_sense_len'."""
    return libsgutils2.get_scsi_pt_sense_len(objp)
@_impl_check
def get_scsi_pt_os_err(objp):
    """Return the OS error number; 0 if not available."""
    return libsgutils2.get_scsi_pt_os_err(objp)
@_impl_check
def get_scsi_pt_os_err_str(objp):
    """Return a human-readable description of the OS error as str."""
    # 512 bytes matches the fixed maximum passed to the C call below.
    buffer = ctypes.create_string_buffer(512)
    libsgutils2.get_scsi_pt_os_err_str(objp, 512, ctypes.byref(buffer))
    return buffer.value.decode('utf-8')
@_impl_check
def get_scsi_pt_transport_err(objp):
    """Return the transport error number; 0 if not available."""
    return libsgutils2.get_scsi_pt_transport_err(objp)
@_impl_check
def get_scsi_pt_transport_err_str(objp):
    """Return a human-readable description of the transport error as str."""
    buffer = ctypes.create_string_buffer(512)
    libsgutils2.get_scsi_pt_transport_err_str(objp, 512, ctypes.byref(buffer))
    return buffer.value.decode('utf-8')
@_impl_check
def get_scsi_pt_duration_ms(objp):
    """Return the command duration in milliseconds, or None when the C
    layer reports -1 (duration not available)."""
    ret = libsgutils2.get_scsi_pt_duration_ms(objp)
    if ret == -1:
        return None
    else:
        return ret
@_impl_check
def destruct_scsi_pt_obj(objp):
    """Release the C-level object's resources.  Should be invoked once per
    *objp* after other processing is complete; every successful
    construct_scsi_pt_obj() call needs one matching destruct call."""
    libsgutils2.destruct_scsi_pt_obj(objp)
@_impl_check
def scsi_pt_win32_direct(objp, state_direct):
    """Request the Win32 SPT direct interface (state_direct=1) or indirect
    (state_direct=0).  The default is selected at library compile time and
    is usually indirect.  Silently a no-op when the symbol is absent
    (non-Windows builds of the library).
    """
    try:
        libsgutils2.scsi_pt_win32_direct(objp, state_direct)
    except AttributeError:
        pass
@_impl_check
def scsi_pt_win32_spt_state():
    """Return True if the Win32 SPT direct interface is active, False if
    indirect, or None when the symbol is absent (non-Windows builds)."""
    try:
        return libsgutils2.scsi_pt_win32_spt_state() != 0
    except AttributeError:
        pass
class TransportError(RuntimeError):
    """Raised when the pass-through transport layer reports an error code."""

    def __init__(self, err, message):
        text = "[Error {0}] {1}".format(err, message)
        super().__init__(text)
class SCSIError(RuntimeError):
    """Raised when a command completes with a non-GOOD SCSI status."""

    def __init__(self, status_code, message):
        text = "[SCSI Status {0}] {1}".format(status_code, message)
        super().__init__(text)
        # Keep the raw status so callers can branch on it.
        self.status_code = status_code
class SCSIPTDevice(object):
    """An open SCSI pass-through device, usable as a context manager.

    Entering pushes the device onto a class-level stack; SCSIPTObject uses
    current() to find the innermost entered device implicitly.
    """
    # Weak registry of live instances (values dropped on garbage collection).
    _refs = weakref.WeakValueDictionary()
    # Stack of entered devices; bottom sentinel None means "no device".
    _stack = [None]

    def __init__(self, device_name, read_only_or_flags=False, verbose=False, **kwargs):
        # Accept the alternative keyword spellings 'flags' and 'read_only'.
        if 'flags' in kwargs:
            read_only_or_flags = kwargs['flags']
        elif 'read_only' in kwargs:
            read_only_or_flags = kwargs['read_only']
        # A bool selects the simple open; an int is treated as O_* open flags.
        # (bool must be tested first: bool is a subclass of int.)
        if isinstance(read_only_or_flags, bool):
            self._fd = scsi_pt_open_device(device_name, read_only_or_flags, verbose)
        elif isinstance(read_only_or_flags, six.integer_types):
            self._fd = scsi_pt_open_flags(device_name, read_only_or_flags, verbose)
        else:
            raise ValueError("read_only_or_flags must be one of bool or integer value")
        self.device_name = device_name
        self._refs[id(self)] = self

    def __repr__(self):
        return "<{}: {}, fd: {}>".format(type(self).__qualname__, self.device_name, self._fd)

    def __del__(self):
        # Close on garbage collection unless already closed explicitly.
        if self._fd is not None:
            self.close()

    def close(self):
        """Close the underlying file descriptor; safe to call once."""
        scsi_pt_close_device(self._fd)
        self._fd = None

    def enter(self):
        # Make this device the "current" one for implicit lookups.
        self._stack.append(self)

    def exit(self):
        self._stack.pop()

    @classmethod
    def current(cls):
        """Return the innermost entered device, or None if none entered."""
        return cls._stack[-1]

    def __enter__(self):
        # NOTE(review): enter() returns None, so `with SCSIPTDevice(...) as d`
        # binds d to None — confirm whether `return self` was intended.
        return self.enter()

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self.exit()
class SCSIPTObject(object):
    """Object-oriented facade over one C-level SCSI pass-through object.

    Property setters forward to the corresponding module-level ``set_*``
    function and cache the assigned Python value so it can be read back;
    result properties forward to the matching ``get_*`` function.
    """
    # Weak registry of live instances (mirrors SCSIPTDevice._refs).
    _refs = weakref.WeakValueDictionary()
    # Default timeout in seconds used by do_scsi_pt() when none is given.
    timeout = 5

    class TaskAttr(object):
        # Dict-like facade over set_scsi_pt_task_attr(); remembers what
        # was set so it can be read back (unset attributes read as None).
        def __init__(self, pt_obj):
            self._pt_obj = pt_obj
            self._attrs = dict()

        def __getitem__(self, item):
            return self._attrs.get(item, None)

        def __setitem__(self, key, value):
            set_scsi_pt_task_attr(self._pt_obj, key, value)
            self._attrs[key] = value

    def __init__(self):
        self._pt_obj = construct_scsi_pt_obj()
        self._cdb = None
        self._sense = None
        self._data_in = None
        self._data_out = None
        self._packet_id = None
        self._tag = None
        self._task_management = None
        self.task_attr = self.TaskAttr(self._pt_obj)
        self._flags = SCSIPTFlags.NONE
        try:
            # Cache the Win32 SPT state where the platform supports it.
            self._win32_direct = scsi_pt_win32_spt_state()
        except NotImplementedError:
            self._win32_direct = None
        self._refs[id(self)] = self

    def clear(self):
        """Reset the underlying C object so another command can be issued."""
        clear_scsi_pt_obj(self._pt_obj)

    def __del__(self):
        # Every construct_scsi_pt_obj() needs a matching destruct call.
        destruct_scsi_pt_obj(self._pt_obj)

    @property
    def cdb(self):
        """Last assigned CDB, normalised to sg_lib.SCSICommand."""
        return self._cdb

    @cdb.setter
    def cdb(self, val):
        set_scsi_pt_cdb(self._pt_obj, val)
        if isinstance(val, sg_lib.SCSICommand):
            self._cdb = val
        else:
            self._cdb = sg_lib.SCSICommand(bytes(val))

    @property
    def sense(self):
        """Sense buffer registered for the next command."""
        return self._sense

    @sense.setter
    def sense(self, val):
        set_scsi_pt_sense(self._pt_obj, val)
        self._sense = val

    @property
    def data_in(self):
        """Buffer receiving data transferred FROM the device."""
        return self._data_in

    @data_in.setter
    def data_in(self, val):
        set_scsi_pt_data_in(self._pt_obj, val)
        self._data_in = val

    @property
    def data_out(self):
        """Buffer holding data transferred TO the device."""
        return self._data_out

    @data_out.setter
    def data_out(self, val):
        set_scsi_pt_data_out(self._pt_obj, val)
        self._data_out = val

    @property
    def packet_id(self):
        return self._packet_id

    @packet_id.setter
    def packet_id(self, val):
        set_scsi_pt_packet_id(self._pt_obj, val)
        self._packet_id = val

    @property
    def tag(self):
        return self._tag

    @tag.setter
    def tag(self, val):
        set_scsi_pt_tag(self._pt_obj, val)
        self._tag = val

    @property
    def task_management(self):
        return self._task_management

    @task_management.setter
    def task_management(self, val):
        set_scsi_pt_task_management(self._pt_obj, val)
        self._task_management = val

    @property
    def result_category(self):
        """SCSIPTResult category of the last command."""
        return get_scsi_pt_result_category(self._pt_obj)

    @property
    def resid(self):
        return get_scsi_pt_resid(self._pt_obj)

    @property
    def status_response(self):
        return get_scsi_pt_status_response(self._pt_obj)

    @property
    def sense_len(self):
        return get_scsi_pt_sense_len(self._pt_obj)

    @property
    def os_err(self):
        return get_scsi_pt_os_err(self._pt_obj)

    @property
    def os_err_str(self):
        return get_scsi_pt_os_err_str(self._pt_obj)

    @property
    def transport_err(self):
        return get_scsi_pt_transport_err(self._pt_obj)

    @property
    def transport_err_str(self):
        return get_scsi_pt_transport_err_str(self._pt_obj)

    @property
    def duration_ms(self):
        return get_scsi_pt_duration_ms(self._pt_obj)

    @property
    def win32_direct(self):
        return self._win32_direct

    @win32_direct.setter
    def win32_direct(self, val):
        scsi_pt_win32_direct(self._pt_obj, val)
        self._win32_direct = val

    def do_scsi_pt(self, timeout=None, device=None, verbose=False):
        """Issue the prepared command on *device* (default: the innermost
        entered SCSIPTDevice), raising on every non-GOOD outcome:
        OSError, TransportError, or SCSIError (sense / bad status)."""
        if device is None:
            device = SCSIPTDevice.current()
        if device is None:
            raise ValueError("Device is not specified")
        if timeout is None:
            timeout = self.timeout
        do_scsi_pt(self._pt_obj, device._fd, timeout, verbose)
        result = self.result_category
        if result == SCSIPTResult.OS_ERR or self.os_err:
            raise OSError(self.os_err, self.os_err_str)
        elif result == SCSIPTResult.TRANSPORT_ERR or self.transport_err:
            raise TransportError(self.transport_err, self.transport_err_str)
        elif result == SCSIPTResult.SENSE:
            raise SCSIError(self.status_response, str(self.sense))
        elif result == SCSIPTResult.STATUS:
            raise SCSIError(self.status_response, '')
|
import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
def initial_plot(f):
    """Plot f over [-5, 5]^2: a 3-D surface with base contours (left) and a
    labelled contour map (right).

    f is called as f([X, Y]) on meshgrid arrays and must return Z of the
    same shape.
    """
    matplotlib.rcParams['xtick.direction'] = 'out'
    matplotlib.rcParams['ytick.direction'] = 'out'
    delta = 0.025
    x = np.arange(-5.0, 5.0, delta)
    y = np.arange(-5.0, 5.0, delta)
    X, Y = np.meshgrid(x, y)
    Z = f([X, Y])
    fig = plt.figure(figsize=(20, 6))
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
    cset = ax.contour(X, Y, Z, zdir='z', offset=0, cmap=cm.coolwarm)
    ax.set_xlim3d(-5, 5)
    ax.set_ylim3d(-5, 5)
    ax.set_zlim3d(0, 10)
    ax2 = fig.add_subplot(1, 2, 2)
    levels = [5, 10, 15, 25, 50, 100, 150]
    CS = ax2.contour(X, Y, Z, levels)
    ax2.clabel(CS, inline=1, fontsize=10, cmap=cm.coolwarm)
    ax2.set_xlabel('$x_0$')
    ax2.set_ylabel('$x_1$')
    # BUG FIX: raw string — '\m' in '\mathbf' is an invalid escape sequence
    # in a plain str (DeprecationWarning, SyntaxWarning in newer Pythons).
    ax2.set_title(r'$f(\mathbf{x})$')
def plot_gradient(f, f_grad):
    """Plot the contour map of f with the gradient field f_grad quivered on
    a coarse (every 40th point) sub-grid.
    """
    delta = 0.025
    x = np.arange(-5.0, 5.0, delta)
    y = np.arange(-5.0, 5.0, delta)
    X, Y = np.meshgrid(x, y)
    Z = f([X, Y])
    # Coarser grid for the quiver arrows so they stay readable.
    X_2, Y_2 = np.meshgrid(x[::40], y[::40])
    Z_grad = f_grad([X_2, Y_2])
    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(1, 1, 1)
    levels = [5, 10, 15, 25, 50, 100, 150]
    CS = ax.contour(X, Y, Z, levels)
    ax.clabel(CS, inline=1, fontsize=10, cmap=cm.coolwarm)
    ax.set_xlabel('$x_0$')
    ax.set_ylabel('$x_1$')
    # BUG FIX: raw string — '\m' in '\mathbf' is an invalid escape sequence
    # in a plain str (DeprecationWarning, SyntaxWarning in newer Pythons).
    ax.set_title(r'$f(\mathbf{x})$')
    plt.quiver(X_2, Y_2, Z_grad[0], Z_grad[1])
def trajectory_visualization(f, n_iter, trajectory):
    """Overlay the first n_iter points of an optimisation trajectory on the
    contour map of f.  trajectory is an (iterations, 2) array."""
    step = 0.025
    grid_x = np.arange(-5.0, 5.0, step)
    grid_y = np.arange(-5.0, 5.0, step)
    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    values = f([mesh_x, mesh_y])
    fig = plt.figure(figsize=(20, 9))
    axes = fig.add_subplot(1, 2, 2)
    contour_levels = [5, 10, 15, 25, 50, 100, 150]
    contours = axes.contour(mesh_x, mesh_y, values, contour_levels)
    axes.clabel(contours, inline=1, fontsize=10, cmap=cm.coolwarm)
    axes.set_xlabel('$x_0$')
    axes.set_ylabel('$x_1$')
    axes.set_title('{}'.format(n_iter))
    # Red dots joined by lines show the path taken so far.
    axes.plot(trajectory[:n_iter, 0], trajectory[:n_iter, 1], '-o', markersize=10, color='red')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from apps.web import views
from apps.api.views import GameViewSet
from rest_framework import routers
# DRF router: auto-generates list/detail routes for GameViewSet under /api/v1/games/.
router = routers.DefaultRouter()
router.register(r'games', GameViewSet)
# NOTE(review): `patterns()` was removed in Django 1.10; this file only runs on
# older Django versions.  Also `include(views)` receives a views *module* —
# include() expects a urlconf; confirm apps.web.views defines `urlpatterns`.
urlpatterns = patterns('',
    url(r'^', include(views)),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^api/v1/', include(router.urls)),
    url(r'^api/v1/stats/', include("apps.api.stats.urls")),
    url(r'^api/v1/wars/', include("apps.api.wars.urls")),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
)
# Dotted path to the custom 404 handler view.
handler404 = 'apps.web.views.error404'
|
"""
Heber Cooke 10/31/2019
Chapter 6 Exercise 9
Write a program that computes and prints the average of the numbers in a text file.
You should make use of two higher-order functions to simplify the design.
"""
import random
# Ask for a file of whitespace-separated numbers, or generate one on demand.
inpu = input("Enter a file name or C to create one ")
if inpu == 'C' or inpu == 'c': # create a txt file
    f = open("numbers.txt",'w')
    for i in range(100): # put 100 random integers in the txt file
        f.write(str(random.randint(1,100)))
        f.write(' ')
    f.close() #close the file
    f = open('numbers.txt', 'r')#open the created file for reading
else:
    f = open(inpu, 'r')
# s becomes a list of number tokens (strings), split on whitespace.
s = f.read().split()
f.close()
def total(s):
    """Return the sum of the tokens in *s*, each converted to int.

    Raises ValueError if any token is not a valid integer literal.
    """
    # Idiom: built-in sum over a generator replaces the manual accumulator.
    return sum(int(token) for token in s)
def count(s):
    """Return the number of items in *s*."""
    return len(s)
def average(t, c):
    """Return *t* divided by *c* (true division; raises ZeroDivisionError
    when c is 0)."""
    return t / c
# Higher-order usage required by the exercise: bind the functions to names
# and pass their results around.
t = total
c = count
a = average
print("Total",t(s))
print("Count",c(s))
print("Average", a(t(s),c(s)))
|
# Generated by Django 2.2.8 on 2020-06-09 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: loosen myapp.Teach.time to an optional CharField."""

    dependencies = [
        ('myapp', '0002_teach_time'),
    ]

    operations = [
        migrations.AlterField(
            model_name='teach',
            name='time',
            # blank=True allows empty form input; null=True allows NULL in the DB.
            field=models.CharField(blank=True, max_length=10, null=True),
        ),
    ]
|
#!/usr/bin/python
"""
Author Paula Dwan
Email paula.dwan@gmail.com
Student ID 13208660
Subject COMP47270 (Computational Network Analysis and Modeling)
Date Jan-2015
Lecturer Dr. Neil Hurley
LABORATORY | CASE STUDY 2 : laplacian.py
"""
# import the networkx network analysis package
import networkx as nx
# import the graphvisualisation package graphviz
from networkx import graphviz_layout
import pygraphviz
# import the plotting functionality from matplotlib
import matplotlib.pyplot as plt
#import Delaunay tesselation
from scipy.spatial import Delaunay
# import kmeans
from scipy.cluster.vq import vq, kmeans, whiten
import numpy as np
import scipy as sp
import random
def placement():
    """Lay out 100 random points, triangulate them, then compare random,
    spectral (Laplacian eigenvector) and k-means-clustered placements."""
    num_nodes = 100
    x = [random.random() for i in range(num_nodes)]
    y = [random.random() for i in range(num_nodes)]
    x = np.array(x)
    y = np.array(y)
    # Make a graph with num_nodes nodes and zero edges
    # Plot the nodes using x,y as the node positions
    G = nx.empty_graph(num_nodes)
    print "G.number_of_nodes() = ", G.number_of_nodes(), "\n"
    pos = dict()
    for i in range(num_nodes):
        pos[i] = x[i],y[i]
    plot_graph(G, pos, 1)
    # Now add some edges - use Delaunay tesselation to produce a planar graph.
    # Delaunay tesselation covers the convex hull of a set of points with
    # triangular simplices (in 2D)
    #
    # Aside : Paula 13-Jan-2015
    # planar graph - graph that can be plotted in 2-D with no overlaps.
    points = np.column_stack((x,y))
    dl = Delaunay(points)
    tri = dl.simplices
    # Each triangle contributes its 3 undirected edges in both directions,
    # hence 6 entries per triangle.
    edges = np.zeros((2, 6*len(tri)),dtype=int)
    data = np.ones(6*len(points))
    j=0
    for i in range(len(tri)):
        edges[0][j]=tri[i][0]
        edges[1][j]=tri[i][1]
        j = j+1
        edges[0][j]=tri[i][1]
        edges[1][j]=tri[i][0]
        j = j+1
        edges[0][j]=tri[i][0]
        edges[1][j]=tri[i][2];
        j = j+1
        edges[0][j]=tri[i][2]
        edges[1][j]=tri[i][0];
        j = j+1
        edges[0][j]=tri[i][1]
        edges[1][j]=tri[i][2]
        j=j+1
        edges[0][j]=tri[i][2]
        edges[1][j]=tri[i][1]
        j=j+1
    data=np.ones(6*len(tri))
    # Sparse adjacency matrix; shared edges sum up, so clamp weights to 1 below.
    A = sp.sparse.csc_matrix((data,(edges[0,:],edges[1,:])))
    for i in range(A.nnz):
        A.data[i] = 1.0
    G = nx.to_networkx_graph(A)
    plot_graph(G,pos,2)
    # Use the eigenvectors of the normalised Laplacian to calculate placement positions
    # for the nodes in the graph
    # eigen_pos holds the positions
    eigen_pos = dict()
    deg = A.sum(0)
    diags = np.array([0])
    D = sp.sparse.spdiags(deg,diags,A.shape[0],A.shape[1]) # diagonal matrix of degrees
    Dinv = sp.sparse.spdiags(1/deg,diags,A.shape[0],A.shape[1]) # inverse of degree matrix
    # Normalised laplacian : L = D^-1 (D - A)
    L = Dinv*(D - A)
    E, V = sp.sparse.linalg.eigs(L,3,None,100.0,'SM') # 100x100 matrix --> compress into 100 vector
    V = V.real
    # Use the 2nd and 3rd smallest eigenvectors as 2-D coordinates.
    for i in range(num_nodes):
        eigen_pos[i] = V[i,1].real,V[i,2].real
    # for n,nbrsdict in G.adjacency_iter():
    # for nbr,eattr in nbrsdict.items():
    # if 'weight' in eattr:
    # print n,nbr,eattr['weight']
    plot_graph(G,eigen_pos,3)
    # Now let's see if the eigenvectors are good for clustering
    # Use k-means to cluster the points in the vector V
    features = np.column_stack((V[:,1], V[:,2]))
    print "cluster_nodes for e-vector values :-"
    cluster_nodes(G,features,pos,eigen_pos) # e-vectors
    # Finally, use the columns of A directly for clustering
    raw_input("Press Enter to Continue ...\n")
    print "cluster_nodes for Delaunay tesselation values :-"
    cluster_nodes(G,A.todense(),pos,eigen_pos) # Delaunay tesselation values
    raw_input("Press Enter to Continue ...\n")
def plot_graph(G,pos,fignum):
    """Draw G in figure *fignum* (8x8 inches) with each node labelled by its
    index, labels offset slightly so they do not cover the nodes."""
    label = dict()
    labelpos=dict()
    for i in range(G.number_of_nodes()):
        label[i] = i
        # Offset labels up-and-right of the node position.
        labelpos[i] = pos[i][0]+0.02, pos[i][1]+0.02
    fig=plt.figure(fignum,figsize=(8,8))
    fig.clf()
    nx.draw_networkx_nodes(G,
        pos,
        node_size=40,
        hold=False,
        )
    nx.draw_networkx_edges(G,pos, hold=True)
    nx.draw_networkx_labels(G,
        labelpos,
        label,
        font_size=10,
        hold=True,
        )
    fig.show(1)
def cluster_nodes(G, feat, pos, eigen_pos):
    """k-means (k=3) on the rows of *feat*; print the three clusters and
    recolour figures 3 (eigenvector layout) and 2 (Delaunay layout)."""
    book,distortion = kmeans(feat,3)
    codes,distortion = vq(feat, book)
    nodes = np.array(range(G.number_of_nodes()))
    W0 = nodes[codes==0].tolist()
    W1 = nodes[codes==1].tolist()
    W2 = nodes[codes==2].tolist()
    print "W0 = ", W0
    print "W1 = ", W1
    print "W2 = ", W2
    # NOTE(review): W2 is printed but never recoloured below — confirm intent.
    plt.figure(3) # position of nodes as per e-vectors
    nx.draw_networkx_nodes(G,
        eigen_pos,
        node_size=40,
        hold=True,
        nodelist=W0,
        node_color='m'
        )
    nx.draw_networkx_nodes(G,
        eigen_pos,
        node_size=40,
        hold=True,
        nodelist=W1,
        node_color='b'
        )
    plt.figure(2) # positions of nodes per Delaunay tesselation
    nx.draw_networkx_nodes(G,
        pos,
        node_size=40,
        hold=True,
        nodelist=W0,
        node_color='m'
        )
    nx.draw_networkx_nodes(G,
        pos,
        node_size=40,
        hold=True,
        nodelist=W1,
        node_color='b'
        )

if __name__ == '__main__':
    placement()
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gio
from gi.repository.GdkPixbuf import Pixbuf
class openSessionWindow(Gtk.Window):
    """'Open Session' dialog: a session-name entry row plus Open/Cancel buttons."""

    def __init__(self):
        Gtk.Window.__init__(self, title="Open Session Overlay")
        self.set_border_width(10)
        # NOTE(review): this header bar is created but never attached with
        # set_titlebar(), so it is not displayed — confirm intent.
        hb = Gtk.HeaderBar(title="Open Session")
        # Quit the GTK main loop when the window is closed.
        self.connect("destroy", Gtk.main_quit)
        hbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        self.add(hbox)
        listbox = Gtk.ListBox()
        listbox.add(Gtk.Label(' Open an Existing Session '))
        listbox.add(self.sessionName())
        listbox.add(self.bottomBttn())
        hbox.pack_start(listbox, False, True, 0)

    def sessionName(self):
        """Build the row containing the 'Session Name' label, entry and Browse button."""
        # NOTE(review): bottomBttn() uses Gtk.ListBoxRow here; a nested
        # Gtk.ListBox as a row looks unintended — verify.
        row = Gtk.ListBox()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        row.add(hbox)
        vbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=15)
        hbox.pack_start(vbox, False, True, 0)
        label2 = Gtk.Label()
        label2.set_markup("Session Name")
        vbox.pack_start(label2, False, True, 0)
        entry1 = Gtk.Entry()
        entry1.set_text('Session Name')
        vbox.pack_start(entry1, False, True, 0)
        browse1 = Gtk.Button.new_with_label("Browse")
        vbox.pack_start(browse1, False, True, 0)
        return row

    def bottomBttn(self):
        """Build the row containing the Open and Cancel buttons."""
        row = Gtk.ListBoxRow()
        hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
        row.add(hbox)
        btn = Gtk.Button.new_with_label("Open")
        hbox.pack_start(btn, True, True, 0)
        btn = Gtk.Button.new_with_label("Cancel")
        hbox.pack_start(btn, True, True, 0)
        return row


# Build the window, show it, and hand control to the GTK main loop.
window = openSessionWindow()
window.show_all()
Gtk.main()
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/28 15:30
# @Author : Jeff Wang
# @Email : jeffwang987@163.com OR wangxiaofeng2020@ia.ac.cn
# @Software: PyCharm
import cv2
import numpy as np
# 300x300 black canvas with 3 channels (OpenCV images are uint8, BGR order).
canvas = np.zeros((300, 300, 3), dtype='uint8')
# Colour tuples are BGR, not RGB.
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
cv2.line(canvas, (0, 0), (300, 300), green)  # diagonal, default 1px
cv2.line(canvas, (300, 0), (0, 300), red, 5)  # anti-diagonal, 5px thick
cv2.rectangle(canvas, (100, 100), (200, 200), green, -1)  # thickness -1 = filled
cv2.rectangle(canvas, (200, 150), (300, 250), red, 3)
cv2.circle(canvas, (89, 89), 50, white, 1)
cv2.imshow('Canvas', canvas)
cv2.waitKey(0)  # block until any key is pressed
|
"""empty message
Revision ID: 76453dfdcd53
Revises: 2e68164a73a9
Create Date: 2020-03-29 00:39:54.361536
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '76453dfdcd53'
down_revision = '2e68164a73a9'
branch_labels = None
depends_on = None
def upgrade():
    """Create the department, class, courseoff and corequisite tables
    (ordered so foreign-key targets exist first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('department',
        sa.Column('name', sa.String(length=32), nullable=False),
        sa.PrimaryKeyConstraint('name')
    )
    # One row per course; boolean columns are attribute/requirement flags.
    # NOTE(review): asdecimal=1 passes an int where SQLAlchemy documents a
    # bool for Float(asdecimal=...) — confirm True was meant.
    op.create_table('class',
        sa.Column('subject', sa.String(length=32), nullable=True),
        sa.Column('num', sa.Integer(), nullable=False),
        sa.Column('unit', sa.Float(precision=2, asdecimal=1), nullable=False),
        sa.Column('alp', sa.Boolean(), nullable=False),
        sa.Column('cz', sa.Boolean(), nullable=False),
        sa.Column('ns', sa.Boolean(), nullable=False),
        sa.Column('qs', sa.Boolean(), nullable=False),
        sa.Column('ss', sa.Boolean(), nullable=False),
        sa.Column('cci', sa.Boolean(), nullable=False),
        sa.Column('ei', sa.Boolean(), nullable=False),
        sa.Column('sts', sa.Boolean(), nullable=False),
        sa.Column('fl', sa.Boolean(), nullable=False),
        sa.Column('r', sa.Boolean(), nullable=False),
        sa.Column('w', sa.Boolean(), nullable=False),
        sa.Column('rating', sa.Float(precision=2, asdecimal=1), nullable=True),
        sa.Column('desc', sa.String(length=256), nullable=True),
        sa.ForeignKeyConstraint(['subject'], ['department.name'], ),
        sa.PrimaryKeyConstraint('num')
    )
    # A course offering: weekday flags plus start/end times.
    op.create_table('courseoff',
        sa.Column('subject', sa.String(length=256), nullable=False),
        sa.Column('course_num', sa.Integer(), nullable=False),
        sa.Column('type', sa.String(length=8), nullable=False),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('mon', sa.Boolean(), nullable=False),
        sa.Column('tues', sa.Boolean(), nullable=False),
        sa.Column('wed', sa.Boolean(), nullable=False),
        sa.Column('thur', sa.Boolean(), nullable=False),
        sa.Column('fri', sa.Boolean(), nullable=False),
        sa.Column('start_time', sa.Time(), nullable=False),
        sa.Column('end_time', sa.Time(), nullable=False),
        sa.ForeignKeyConstraint(['subject', 'course_num'], ['class.subject', 'class.num'], ),
        sa.PrimaryKeyConstraint('subject', 'course_num', 'type', 'id')
    )
    # Links a main offering to a supplementary offering that must be taken with it.
    op.create_table('corequisite',
        sa.Column('main_subject', sa.String(length=32), nullable=False),
        sa.Column('main_num', sa.Integer(), nullable=False),
        sa.Column('main_type', sa.String(length=32), nullable=False),
        sa.Column('sup_subject', sa.String(length=32), nullable=False),
        sa.Column('sup_num', sa.Integer(), nullable=False),
        sa.Column('sup_type', sa.String(length=32), nullable=False),
        sa.ForeignKeyConstraint(['main_subject', 'main_num', 'main_type'], ['courseoff.subject', 'courseoff.course_num', 'courseoff.type'], ),
        sa.ForeignKeyConstraint(['sup_subject', 'sup_num', 'sup_type'], ['courseoff.subject', 'courseoff.course_num', 'courseoff.type'], ),
        sa.PrimaryKeyConstraint('sup_subject', 'sup_num', 'sup_type', name='_sup_uc')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the four tables in reverse dependency order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('corequisite')
    op.drop_table('courseoff')
    op.drop_table('class')
    op.drop_table('department')
    # ### end Alembic commands ###
|
import torch
import torch.nn as nn
import torchvision.models as models
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from dataset import TextDataset
from utils import get_val_augmentations, preprocess_data
def main():
    """Evaluate a saved ResNeXt-50 (2-class head) checkpoint on the
    validation split and print mean loss and accuracy."""
    BATCH_SIZE = 64
    NUM_WORKERS = 8
    IMAGE_SIZE = 256
    device = torch.device("cuda:0")
    #device_ids = [0, 1]
    albumentations_transform_validate = get_val_augmentations(IMAGE_SIZE)
    train_df, val_df, train_labels, val_labels = preprocess_data('input/noisy_imagewoof.csv')
    validate_data = TextDataset(dataframe=val_df,
                                labels=val_labels,
                                path='input',
                                transform=albumentations_transform_validate)
    validate_loader = DataLoader(dataset=validate_data,
                                 batch_size=BATCH_SIZE,
                                 num_workers=NUM_WORKERS,
                                 shuffle=False,
                                 drop_last=False)
    model = models.resnext50_32x4d(pretrained=False)
    # Replace the 1000-class head with a binary one before loading weights.
    model.fc = nn.Linear(2048, 2)
    checkpoint = torch.load('model_saved/weight_best.pth')
    model.load_state_dict(checkpoint)
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    model.eval()
    val_loss = 0
    correct = 0.0
    n_samples = 0
    val_len = len(validate_loader)
    for i, (imgs, labels) in tqdm(enumerate(validate_loader), total=val_len):
        with torch.no_grad():
            imgs_valid = imgs.to(device)
            labels_valid = labels.to(device)
            output_test = model(imgs_valid)
            # NOTE: loss is still averaged per batch (slight over-weighting of
            # the smaller last batch); kept to preserve the reported metric.
            val_loss += criterion(output_test, labels_valid).item()
            pred = torch.argmax(torch.softmax(output_test, 1), 1).cpu().detach().numpy()
            true = labels.cpu().numpy()
            # BUG FIX: weight each batch's accuracy by its size — with
            # drop_last=False the last batch can be smaller, so averaging
            # per-batch accuracies skews the overall figure.
            correct += accuracy_score(true, pred) * len(true)
            n_samples += len(true)
    avg_val_acc = correct / n_samples
    print(f'val_loss {val_loss / val_len} val_acc {avg_val_acc}')


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-12-15 22:48
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add CourseClass.ranking_size and relabel
    Grade.is_canceled."""

    dependencies = [
        ('course', '0003_grade_is_canceled'),
    ]

    operations = [
        migrations.AddField(
            model_name='courseclass',
            name='ranking_size',
            # Non-negative integer, default 10 for existing rows.
            field=models.IntegerField(default=10, validators=[django.core.validators.MinValueValidator(0)]),
        ),
        migrations.AlterField(
            model_name='grade',
            name='is_canceled',
            field=models.BooleanField(default=False, verbose_name='Canceled'),
        ),
    ]
|
import os,re,math
from time import gmtime, strftime
from flask import Flask,render_template,request,session,g,redirect, url_for,abort, flash
app = Flask(__name__)

@app.route('/')
def index():
    """Landing page."""
    return render_template('index.html')
# Static routing
# Please check the files ending in .html in the templates folder to understand about rendering template.
@app.route('/TheEngineer') # Replace TheEngineer with your nickname
def routeStaticTheEngineer():
    """Static-route demo: render a template with a placeholder image URL."""
    placeholder = 'http://placehold.it/350x150&text=imageOne'
    return render_template('TheEngineer.html', imageURL=placeholder)
"""
HitmanFoo # Static routing, static files and return render_template
"""
@app.route('/')
def index_biscuit():
    """Second contributor's copy of the landing page.

    BUG FIX: this originally redefined ``index``; registering two view
    functions under the same endpoint name makes Flask raise AssertionError
    at import time.  Renaming gives it a unique endpoint; the first '/' rule
    registered still serves requests.
    """
    return render_template('index.html')
@app.route('/biscuit')
def routeStaticbiscuit():
    """Static-route demo: render biscuit.html with a fixed image URL."""
    car_image = 'http://www.rides-mag.com/wp-content/uploads/2013/01/Lamborghini-Sesto-Elemento-2.jpg'
    return render_template('biscuit.html', imageURL=car_image)
"""
aronLim # Static routing, static files and return render_template
"""
# Dynamic routing
@app.route('/TheEngineer/<int:visitor>')
def routeDynamicTheEngineer(visitor):
    """Dynamic-route demo: <int:visitor> is parsed from the URL path."""
    numOfVisitor = visitor
    return render_template('DynamicTheEngineer.html', numOfVisitor=numOfVisitor)
"""
HitmanFoo # Dynamic routing
"""
# biscuit # Dynamic routing
@app.route('/biscuit/<int:visitor>')
def routeDynamicbiscuit(visitor):
    """Dynamic-route demo: <int:visitor> is parsed from the URL path."""
    numOfVisitor = visitor
    return render_template('Dynamicbiscuit.html', numOfVisitor=numOfVisitor)
"""
aronLim # Dynamic routing
"""
# HTTP methods
# N.B: The default method is GET. If no method is defined, Flask will think that it should execute GET.
@app.route('/TheEngineer/HTTPmethods',methods=['GET', 'POST'])
def httpMethodsTheEngineer():
    """Render a different value depending on the HTTP method used.

    The methods list restricts requests to GET/POST, so one of the two
    branches always runs.
    """
    if request.method == 'POST':
        # if client/browser is requesting a POST method then execute this.
        varTheEngineer = 1 + 2
        return render_template('HTTPmethodsTheEngineer.html', varTheEngineer = varTheEngineer)
    if request.method == 'GET':
        varTheEngineer = 1 + 1
        return render_template('HTTPmethodsTheEngineer.html', varTheEngineer = varTheEngineer)
"""
HitmanFoo # Dynamic routing
"""
# biscuit # HTTP methods
@app.route('/biscuit/HTTPmethods',methods=['GET', 'POST'])
def httpMethodsbiscuit():
    """Render a different value depending on the HTTP method used (GET/POST
    only, enforced by the methods list)."""
    if request.method == 'POST':
        varbiscuit = 1 + 2
        return render_template('HTTPmethodsbiscuit.html', varbiscuit = varbiscuit)
    if request.method == 'GET':
        varbiscuit = 1 + 1
        return render_template('HTTPmethodsbiscuit.html', varbiscuit = varbiscuit)
"""
aronLim # Dynamic routing
"""
# RequestData
@app.route('/TheEngineer/requestData',methods=['GET', 'POST'])
def requestDataTheEngineer():
    """Show the form on GET; on POST echo the submitted name/location back."""
    if request.method == 'POST':
        name = request.form['name']
        location = request.form['location']
        # Pass the variables explicitly instead of the opaque **locals(),
        # which silently forwards every local and hides the template contract.
        return render_template('requestDataTheEngineer.html',
                               name=name, location=location)
    return render_template('requestDataTheEngineer.html')
"""
HitmanFoo # Request Data
"""
# biscuit # Request Data
@app.route('/biscuit/requestData', methods=['GET', 'POST'])
def requestDatabiscuit():
    """Show the form on GET; on POST echo the submitted name/location back."""
    if request.method == 'POST':
        name = request.form['name']
        location = request.form['location']
        # Pass the variables explicitly instead of the opaque **locals(),
        # which silently forwards every local and hides the template contract.
        return render_template('requestDatabiscuit.html',
                               name=name, location=location)
    return render_template('requestDatabiscuit.html')
"""
aronLim # Request Data
"""
# Session & url_for & flash
# App secret should be stored in the configuration section
# Session signing key; real applications should load this from configuration,
# not commit it to source control.
app.secret_key = 'ultimate/123Aron/345Killed/456Hitman/987Foo/432By/543Eating/435Biscuit'

@app.route('/TheEngineer/storeSession')
def storeSessionTheEngineer():
    """Store the current UTC timestamp in the session, flash a notice and
    redirect to the index page."""
    session['timeEntered'] = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    flash('Data stored in session & you have been redirected to index page')
    return redirect(url_for('index'))
@app.route('/TheEngineer/checkSession')
def checkSessionTheEngineer():
    """Display the timestamp stored by /TheEngineer/storeSession.

    NOTE(review): raises KeyError if storeSession was never visited in this
    session — confirm whether a fallback is wanted.
    """
    checkSession = session['timeEntered']
    return render_template('checkSessionTheEngineer.html', checkSession=checkSession)
@app.route('/TheEngineer/popSession')
def popSessionTheEngineer():
    """Remove the stored timestamp (no-op if absent) and redirect home."""
    session.pop('timeEntered', None)
    flash('Data removed from session & you have been redirected to index page')
    return redirect(url_for('index'))
"""
HitmanFoo # Session
"""
# biscuit # Session
# Duplicate assignment kept from the second contributor; harmless since the
# value is identical to the key set earlier in the file.
app.secret_key = 'ultimate/123Aron/345Killed/456Hitman/987Foo/432By/543Eating/435Biscuit'

@app.route('/biscuit/storeSession')
def storeSessionbiscuit():
    """Store the current UTC timestamp in the session, flash a notice and
    redirect to the index page.

    BUG FIX: the decorator line had a stray trailing ':' and the def line
    ended with ';' instead of ':' — both were syntax errors that prevented
    the module from importing at all.
    """
    session['timeEntered'] = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
    flash('Data stored in session & you have been redirected to index page')
    return redirect(url_for('index'))
@app.route('/biscuit/checkSession')
def checkSessionbiscuit():
    """Display the timestamp stored by /biscuit/storeSession.

    NOTE(review): raises KeyError if storeSession was never visited in this
    session — confirm whether a fallback is wanted.
    """
    checkSession = session['timeEntered']
    return render_template('checkSessionbiscuit.html', checkSession=checkSession)
@app.route('/biscuit/popSession')
def popSessionbiscuit():
    """Remove the stored timestamp (no-op if absent) and redirect home."""
    session.pop('timeEntered', None)
    flash('Data removed from session & you have been redirected to index page')
    return redirect(url_for('index'))
"""
aronLim # Session
"""
if __name__ == '__main__':
    # Debug mode enables the reloader and interactive traceback — never use
    # in production.
    app.debug = True
    # Allow the port to be overridden via the PORT environment variable.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='127.0.0.1', port=port)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
"""dir = "cleaned_df/"
file_list = os.listdir(dir)
for i,element in enumerate(file_list) :
print(i)
df=pd.read_csv(dir+str(element))
plt.plot(df["engergy"])
plt.title(str(element))
plt.savefig("./img/"+element.replace(".csv",".png"))
plt.close()
"""
"""for i in range(1,26):
series = pd.read_csv("centroid_class_"+str(i)+".csv")
plt.plot(series)
plt.show()"""
# res.csv column "x": for sample index i (1-based), its assigned class (1-based).
list_R = np.array(pd.read_csv("res.csv")["x"])
elements = os.listdir("../classes")
# Mat maps ground-truth class number -> list of sample numbers found as
# "<n>.png" files in ../classes/class_<k>/.
Mat = {}
for element in elements :
    new_mat = []
    dirs = os.listdir("../classes/"+element)
    for dir in dirs :
        if dir[-3 :] == "png" :
            new_mat.append(int(dir.replace(".png","")))
    Mat[int(element.replace("class_",""))] = new_mat
# 18x18 confusion matrix: rows = ground-truth class, cols = predicted class
# (both shifted to 0-based indices).
matrix= np.zeros((18,18))
for key, value in Mat.items():
    for num in value :
        print(key,num,list_R[num-1]-1)
        matrix[key-1,list_R[num-1]-1] = matrix[key-1,list_R[num-1]-1] + 1
print(matrix)
import itertools
import sys
import munkres
import numpy as np
import seaborn as sn
def permute_cols(a, inds):
    """
    Return matrix `a` with permuted columns.

    `inds` is a sequence of index pairs `(from, to)` describing how columns
    should be permuted: the pairs are used to build the corresponding
    permutation matrix, which then right-multiplies `a`.
    """
    perm = np.zeros_like(a)
    for pair in inds:
        perm[pair] = 1
    return a @ perm
def maximize_trace(a):
    """
    Maximize trace by minimizing the Frobenius norm of
    `np.dot(p, a)-np.eye(a.shape[0])`, where `a` is square and
    `p` is a permutation matrix. Returns permuted version of `a` with
    maximal trace.
    """
    assert a.shape[0] == a.shape[1]
    d = np.zeros_like(a)
    n = a.shape[0]
    b = np.eye(n, dtype=int)
    # Cost d[j, i]: squared distance between row j of the identity and row i
    # of `a` — minimal when column i should end up on the diagonal at j.
    for i, j in itertools.product(range(n), range(n)):
        d[j, i] = sum((b[j, :]-a[i, :])**2)
    # Hungarian algorithm (munkres package) finds the minimal-cost assignment.
    m = munkres.Munkres()
    inds = m.compute(d)
    return permute_cols(a, inds)
# Reorder the confusion-matrix columns so the diagonal is maximised, then
# render the result as an annotated heatmap.
new_m = maximize_trace(matrix)
print(new_m)
plt.figure(figsize = (10,7))
sn.heatmap(new_m, annot=True)
plt.show()
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
import numpy as np
import torch
import rasterio
import rasterio.mask
from rasterio.windows import Window
from rasterio.plot import show
# Inference
def predictor_arg(tensor, model):
    """
    args:
        - tensor : tensor of size N,C,W,H
        - model : Model
    Return : per-sample argmax (over dim 1) of the model output for `tensor`
    """
    scores = model(tensor)
    return scores.argmax(dim=1)
def inference_roi(path_image,roi_size,predictor,output_dir,model):
    """
    Tile-wise inference over a georeferenced image; the stitched prediction
    is displayed and written as a single-band uint8 LZW-compressed GeoTIFF.

    path_image : path for the image
    roi_size : inference tile size, indexed as (roi_size[0], roi_size[1])
    predictor : model predictor, called as predictor(tensor, model)
    output_dir : output directory for the prediction
    model : Model
    Show the prediction (each tile and the final mask are displayed).
    """
    # open image with rasterio
    img = rasterio.open(os.path.join(path_image))
    height = img.height
    width = img.width
    # Number of whole tiles that fit in each direction.
    nb_col = width // roi_size[0]
    nb_row = height // roi_size[1]
    base=os.path.basename(path_image)
    base_without_ex = os.path.splitext(base)[0]
    profile = img.profile.copy()
    # And then change the band count to 1, set the
    # dtype to uint8, and specify LZW compression.
    profile.update(
        dtype=rasterio.uint8,
        count=1,
        driver = "GTiff",
        height = height,
        width = width,
        compress='lzw')
    img_transform = img.transform
    # Initialisation
    # NOTE(review): the mask is allocated as (band, width, height) but is
    # indexed below as (band, row, col); the two only agree when the image
    # is square (width == height) — confirm with real inputs.
    mask = np.zeros((1,width, height))
    #print('mask shape',np.shape(mask))
    shp_width = np.shape(mask)[1]
    shp_height = np.shape(mask)[2]
    with torch.no_grad():
        for col in range(0,nb_col):
            for row in range(0,nb_row):
                # Read one tile, run the model, paste the prediction into
                # the full-size mask.
                tile = img.read(window=Window(col*roi_size[0],row*roi_size[1],roi_size[0],roi_size[1]))
                tile_tensor = torch.from_numpy(tile).float()
                pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                pred_cm = pred.cpu().detach().numpy()
                # Display RGB tile & mask
                show(tile)
                show(pred_cm)
                mask[:,row*roi_size[1]:(row+1)*roi_size[1],col*roi_size[0]:(col+1)*roi_size[0]] = pred_cm.astype(np.uint8)
                # Unique case: last tile on the diagonal (bottom-right corner)
                if (col == nb_col -1) and (row == nb_row -1):
                    tile = img.read(window=Window( shp_width - roi_size[0], shp_height - roi_size[1],roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    # NOTE(review): roi_size[0]/roi_size[1] look swapped
                    # relative to the slice above — verify on non-square tiles.
                    mask[:,shp_height - roi_size[0] :,shp_width - roi_size[1]:] = pred_cm.astype(np.uint8)
                # Last row -> overlapping re-read so the bottom edge is covered
                if row == nb_row -1:
                    # window argument : image height, width sizes
                    tile = img.read(window=Window(col*roi_size[0],shp_height - roi_size[0],roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    mask[:,shp_height - roi_size[1]:,col*roi_size[0]:(col+1)*roi_size[0]] = pred_cm.astype(np.uint8)
                # Last column -> overlapping re-read so the right edge is covered
                if col == nb_col -1:
                    # window argument : image height, width sizes
                    tile = img.read(window=Window( shp_width - roi_size[0], row*roi_size[1] ,roi_size[0],roi_size[1]))
                    tile_tensor = torch.from_numpy(tile).float()
                    pred = predictor(tile_tensor.unsqueeze(dim=0),model)
                    pred_cm = pred.cpu().detach().numpy()
                    mask[:,row*roi_size[1]:(row+1)*roi_size[1],shp_height - roi_size[0]:] = pred_cm.astype(np.uint8)
    # Profile update (transformation): anchor the output raster at the
    # geographic coordinates of the last full tile boundary.
    x,y = rasterio.transform.xy(img_transform, nb_col*roi_size[0],nb_row*roi_size[1])
    out_transform = rasterio.transform.from_origin(x,y,nb_col*roi_size[0],nb_row *roi_size[1])
    out_tile_name = os.path.join(output_dir,f'{base_without_ex}_{nb_col:02}_{nb_row:02}_predfinal.tif')
    profile.update(transform = out_transform)
    # Plot mask
    mask = mask.astype(np.uint8)
    show(mask)
    with rasterio.open(out_tile_name,"w",**profile) as dst :
        dst.write(mask)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 17:45:23 2017
@author: modellav
"""
#The goal of this project is to scrape data from Google Finance
#To determine the top gainers and top losers of the market, with corresponding % change
#IMPORT PACKAGES
import urllib.request as ul
from bs4 import BeautifulSoup
import re
import datetime
# OPEN URL
url = "http://www.google.com/finance"
url_response = ul.urlopen(url, timeout=5)
# CREATE SOUP AND FIND SECTOR TABLE
finance_soup = BeautifulSoup(url_response, "lxml")
sector_table = finance_soup.find('div', class_ = 'id-secperf sfe-section-major')
# Regexes to pull the percent change, the link to the sector page, and the
# display name out of the raw tag markup.
regex_change = re.compile('[+-](.\...)%')
regex_link = re.compile('href=\"(.+)\">')
regex_name = re.compile('>(.+)<')
# Walk the sector rows and keep the one with the largest positive move.
maxchange = 0
for row in sector_table.find_all('tr'):
    changerow = str(row.find('span', class_='chg'))
    changevalue = regex_change.findall(changerow)
    if changevalue:
        change = float(changevalue[0])
        if change > maxchange:
            maxchange = change
            biggest_mover = regex_name.findall(str(row.a))
            nextpage_link = regex_link.findall(str(row.a))
# OPEN NEXT PAGE (SECTOR URL) AND EXTRACT TOP MOVERS TABLE
url2 = "http://www.google.com" + nextpage_link[0]
url_response2 = ul.urlopen(url2, timeout=5)
sector_soup = BeautifulSoup(url_response2, "lxml")
top_movers = sector_soup.find('table', class_ = "topmovers")
# SINCE THEY ARE ORDERED IT IS EASY TO FIND TOP GAINER AND TOP LOSER
mover_rows = top_movers.find_all('tr')
top_gainer = mover_rows[1]
top_loser = mover_rows[7]
# USE REGEX TO FIND TOP GAINER/LOSER NAMES AND CORRESPONDING PCT CHANGE
regex_change2 = re.compile('<span class="chg">\((.+\...%)\)')
regex_change3 = re.compile('<span class="chr">\(\-(.+\...%)\)')
topgainer_name = regex_name.findall(str(top_gainer.a))
toploser_name = regex_name.findall(str(top_loser.a))
topgainer_gain = regex_change2.findall(str(top_gainer))
toploser_loss = regex_change3.findall(str(top_loser))
# find today's date
today = datetime.date.today()
# PRINT FINAL RECAP STATEMENT
# BUG FIX: `today` is a datetime.date; concatenating it to a str raised
# TypeError — convert with str() first.
print('The sector that has moved the most today, '+ str(today) + " is " + biggest_mover[0] + ' +'+str(maxchange)+'%. '+topgainer_name[0] + ' gained the most ('+topgainer_gain[0]+') while ' + toploser_name[0]+ ', the biggest loser, lost '+ toploser_loss[0]+'.')
|
'''
Created on Jul 10, 2013
@author: emma
'''
from UnitTesting.page_objects.base_page_object import base_page_object
from selenium.webdriver.common.action_chains import ActionChains
import time
class booksellers(base_page_object):
    """Page object for the Zola Books booksellers listing page."""
    def __init__(self, webd_wrap):
        base_page_object.__init__(self, webd_wrap)
    def get_page(self, category):
        # NOTE(review): performs no navigation; callers are expected to be on
        # the page already. `category` is unused — confirm against callers.
        return self
    def confirm_page(self):
        ''' raises AssertionError if page is incorrect '''
        _actual_url = self._webd_wrap._driver.current_url
        _actual_title = self._webd_wrap._driver.title
        _url = self._webd_wrap._baseURL + '/people/booksellers'
        _title = 'Zola Books | ebook |'# Booksellers'
        if _url != _actual_url or _title != _actual_title:
            raise AssertionError("Not on the Booksellers list page.")
    def click_my_zola(self):
        """Open the user's personalized toolbar link after verifying the page."""
        self.confirm_page()
        # Fixed wait for the toolbar to finish rendering before clicking.
        time.sleep(2)
        self._webd_wrap._driver.find_element_by_id('h-user-personalized-toolbar').find_element_by_xpath('div/a').click()
    ########################################################################
    ########################################################################
    def click_first_bookseller(self):
        ''' clicks the first acp in the main list '''
        self.confirm_page()
        self._webd_wrap._driver.find_element_by_class_name('l-main-primary').find_element_by_xpath('div/section[1]/div/div/div[1]/h5/a').click()
|
#!/usr/bin/python
# Iskandar Setiadi 13511073@std.stei.itb.ac.id
# Institut Teknologi Bandung (ITB) - Indonesia
# Final Project (c) 2015
# mongodb_testcase2.py
__author__ = 'freedomofkeima'
import sys
import time
from pymongo import MongoClient
def main(args):
client = MongoClient('52.74.132.58', 27017) # Nearest Server location
db = client['tests_database']
tests = db['tests_collection']
max_iteration = 2000
key_size = 10
value_size = 100 * 1024
print '** Starting benchmarking **'
print '** Length key + value: %d byte(s)**' % (key_size + value_size)
print '--EMPTY TIMER--'
tx = 0 # time counter
counter = 0
while counter < max_iteration:
t0 = time.time()
tx = tx + (time.time() - t0)
counter = counter + 1
print 'Number of iteration: %d' % (max_iteration)
empty_timer = tx / max_iteration * 1000000
print 'Average elapsed time: %.10f us' % (empty_timer)
item_id = item_id = tests.distinct('_id')
print '--UPDATE--'
tx = 0 # time counter
counter = 1
for item in item_id:
value = "a" * value_size
t0 = time.time()
tests.update_one({"_id": item}, {'$set': {'mongodbkey' : value}})
tx = tx + (time.time() - t0)
counter = counter + 1
print 'Number of iteration: %d' % (counter)
print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
print '--READ--'
tx = 0 # time counter
counter = 1
for item in item_id:
t0 = time.time()
res = tests.find_one({"_id": item})
tx = tx + (time.time() - t0)
counter = counter + 1
print 'Number of iteration: %d' % (counter)
print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
print '--DELETE--'
tx = 0 # time counter
counter = 1
for item in item_id:
t0 = time.time()
tests.delete_one({"_id": item})
tx = tx + (time.time() - t0)
counter = counter + 1
print 'Number of iteration: %d' % (counter)
print 'Average elapsed time: %.10f us' % (tx / counter * 1000000 - empty_timer)
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
# Logarithms: plot the natural log and the base-0.5 log of x on one figure.
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.01,10,0.01)
y1 = np.log(x)# np.log is the natural logarithm (base e)
# Change of base: log_0.5(x) = ln(x) / ln(0.5)
y2 = np.log(x)/np.log(0.5)
plt.plot(x,y1,c='red')
plt.plot(x,y2,c='yellow')
plt.show()
|
class Call(object):
    """A single phone call record (Python 2 code, like the rest of the file).

    NOTE: the constructor prints all fields as a side effect via display_all().
    """
    def __init__(self,unique_id,class_name,caller_phone_num,timeofcall,reason_for_call):
        self.unique_id = unique_id
        self.class_name = class_name
        self.caller_phone_num = caller_phone_num
        self.timeofcall = timeofcall
        self.reason_for_call = reason_for_call
        self.display_all()
    def display_all(self):
        # Dump every field to stdout, one per line.
        print self.unique_id
        print self.class_name
        print self.caller_phone_num
        print self.timeofcall
        print self.reason_for_call
    def __str__(self):
        # NOTE(review): ' '.join(self.unique_id) space-separates the *characters*
        # of the id string ("12" -> "1 2") — confirm this is intentional.
        return "unique_id ( {} ) class_name ( {} ) caller_phone_num ( {} ) timeofcall ( {} ) reason_for_call ( {} ) ".format(' '.join(self.unique_id), self.class_name, self.caller_phone_num, self.timeofcall, self.reason_for_call)
# Sample calls used to exercise the CallCenter below (each prints itself on
# construction via Call.display_all).
call1 = Call("12","mat","408-245-1345","3:45","lsfksfal")
call2 = Call("53","ho","408-255-13345","6:45","Jav")
call3 = Call("64","ajot","408-255-1245","1:15","lav")
call4 = Call("42","matt","508-456-1345","10:45","lsfksfal")
call5 = Call("54","hoht","708-3434-1745","6:45","Jasafv")
call6 = Call("84","ahho","408-255-7435","8:45","ladv")
class CallCenter(object):
    """A FIFO queue of call objects with small management helpers."""
    def __init__(self):
        # Pending calls, oldest first.
        self.calls = []
        self.queue_size = 0
    def add(self, newcall):
        """Append a call at the back of the queue; returns self for chaining."""
        self.calls.append(newcall)
        return self
    def remove(self):
        """Drop the call at the front of the queue, if any; returns self."""
        if self.calls:
            del self.calls[0]
        return self
    def ninjalevel(self, phonenum):
        """Remove calls whose caller_phone_num matches; returns self.

        NOTE: mutates the list while enumerating it (kept as-is to preserve
        the original behavior), so back-to-back matches may be skipped.
        """
        for position, pending in enumerate(self.calls):
            if pending.caller_phone_num == phonenum:
                self.calls.pop(position)
        return self
    def hackerlever(self):
        """Re-sort the queue by each call's timeofcall (string ordering)."""
        self.calls = sorted(self.calls, key=lambda pending: pending.timeofcall)
    def __str__(self):
        joined = "".join(str(pending) + "\n" for pending in self.calls)
        return "calls ( {} ) queue_size ( {} )".format(joined, self.queue_size)
# Demo: queue the sample calls, drop the oldest, remove a caller by number,
# sort by time-of-call, then print the queue (Python 2 print statement).
callcenter = CallCenter()
callcenter.add(call1)
callcenter.add(call2)
callcenter.add(call3)
callcenter.add(call4)
callcenter.add(call5)
callcenter.add(call6)
callcenter.remove()
callcenter.ninjalevel("408-255-13345")
callcenter.hackerlever()
print callcenter
|
from django.http import JsonResponse
from index.models import Products
from .models import Message
from user.models import UserProfile
import json
from user.logging_check import logging_check
# Create your views here.
@logging_check('POST')
def message(request):
    """Product comment endpoint.

    GET  : return a product's details plus its comments, with replies nested
           under their parent comment.
    POST : create a comment (or a reply when parent_id is given) on a product;
           the request user must match the supplied username.
    """
    if request.method == 'GET':
        goods_id = request.GET.get('id')
        result = {'code':200}
        try:
            goods = Products.objects.get(id = goods_id)
        except Exception as e:
            result = {'code':20101,'error':'没有找到此商品'}
            return JsonResponse(result)
        # Flatten the product row into a JSON-friendly dict.
        goods_dict = {}
        goods_dict['id'] = goods.id
        goods_dict['title'] = goods.title
        goods_dict['market_price'] = goods.market_price
        goods_dict['supplier'] = goods.supplier
        goods_dict['repertory'] = goods.repertory
        goods_dict['sell_number'] = goods.sell_number
        goods_dict['info'] = goods.info
        goods_dict['img'] = str(goods.img)
        result['goods'] = goods_dict
        all_messages = Message.objects.filter(topic_id=goods_id).order_by('-created_time')
        m_count = 0
        # container for top-level comments
        msg_list = []
        # container for replies, keyed by parent message
        reply_home = {}
        for message in all_messages:
            m_count += 1
            if message.parent_message:
                # reply: group under its parent for later attachment
                reply_home.setdefault(message.parent_message, [])
                reply_home[message.parent_message].append({'msg_id': message.id, 'content': message.content,
                                                           'publisher': message.publisher.username,
                                                           'publisher_avatar': str(message.publisher.avatar),
                                                           'created_time': message.created_time.strftime('%Y-%m-%d %H:%M:%S')})
            else:
                # top-level comment
                dic = {}
                dic['id'] = message.id
                dic['content'] = message.content
                dic['publisher'] = message.publisher.username
                dic['publisher_avatar'] = str(message.publisher.avatar)
                dic['reply'] = []
                dic['created_time'] = message.created_time.strftime('%Y-%m-%d %H:%M:%S')
                msg_list.append(dic)
        # attach replies to their parent comments
        for m in msg_list:
            if m['id'] in reply_home:
                m['reply'] = reply_home[m['id']]
        result['messages'] = msg_list
        result['messages_count'] = m_count
        return JsonResponse(result)
    if request.method == 'POST':
        # create a comment / reply
        json_str = request.body
        json_obj = json.loads(json_str)
        content = json_obj.get('content')
        username = json_obj.get('user')
        id = json_obj.get('id')
        parent_id = json_obj.get('parent_id', 0)
        # TODO: validate parameters
        # check that the product exists
        try:
            goods = Products.objects.get(id=id)
        except Exception as e:
            result = {'code': 20102, 'error': '没有此商品 !'}
            return JsonResponse(result)
        try:
            user = UserProfile.objects.get(username=username)
        except Exception as e:
            result = {'code': 20103, 'error': '没有此用户 !'}
            return JsonResponse(result)
        if request.user != user:
            result = {'code': 20104, 'error': '用户未登陆 !'}
            return JsonResponse(result)
        # Option 1: assign related objects directly on create.
        # NOTE(review): parent_message is given the raw parent_id (an int, 0 for
        # top-level) rather than a Message instance — confirm the field accepts it.
        Message.objects.create(content=content, parent_message=parent_id, publisher=user, topic=goods)
        return JsonResponse({'code': 200})
|
下雨天 hello
下午学习git
秦岭一日游
wo men dou yi yang
|
# Read weight (kg) and height (m) from the user, then report the body-mass index.
kilograms = float(input('Enter your weight in kgs: '))
metres = float(input('Enter your height in metres: '))
bmi = kilograms / metres ** 2
print('Your BMI is {:.2f}'.format(bmi))
|
#!/usr/bin/env python3
from termcolor import cprint
from nubia import command, argument, context
@command(name_or_function="gcloud")
class GCLOUD:
    """
    Google Cloud Platform commands set. This is still not implemented.
    """
    def __init__(self) -> None:
        pass
    @command
    def info(self):
        """
        print info message
        """
        # Placeholder output until real GCP subcommands are added.
        cprint("This module is not implemented")
|
import os
import re
class LanguageModelContent:
    """One n-gram record: the words plus their raw count, as parsed."""
    def __init__(self, words, count):
        # Both fields are stored exactly as read from the source file.
        self.words = words
        self.count = count
    def __str__(self):
        # Serialize back to the tab-separated on-disk format.
        return "{}\t{}".format(self.words, self.count)
if __name__ == "__main__":
    # Collect the shard files of the n-gram corpus in name order.
    dir_list = sorted(os.listdir('/Users/geekye/Documents/Dataset/LM/UniBiGram'))
    # Keep only shards 00001..00029; per the original note,
    # ngrams-[00030 - 00036]-of-00394 have no invalid data.
    # BUG FIX: the upper bound was 'ngrams-0029-of-00394' (missing a zero),
    # which lexicographically admitted shards far beyond 00029.
    filtered_list = [shard for shard in dir_list
                     if 'ngrams-00001-of-00394' <= shard <= 'ngrams-00029-of-00394']
    for file_name in filtered_list:
        grams_2 = []
        with open('/Users/geekye/Documents/Dataset/LM/UniBiGram/'+ file_name) as file:
            for line in file:
                # Accept only "CJK-word <ws> CJK-word <ws> digits" lines.
                if re.match('^[\u4e00-\u9fa5]{1,8}[\s\t]{1,}[\u4e00-\u9fa5]{1,8}[\s\t]{1,}\d{1,}', line):
                    segments = line.split('\t')
                    words = segments[0]
                    count = segments[1]
                    grams_2.append(LanguageModelContent(words, count))
        if len(grams_2) == 0:
            continue
        # NOTE(review): the output path concatenates 'gram2' directly with the
        # file name (no '/'); preserved as-is, but it looks like a missing
        # path separator — confirm the intended output layout.
        with open('/Users/geekye/Documents/Dataset/LM/gram2'+ file_name, 'a') as file:
            print(file_name+'has been started!')
            for model in grams_2:
                file.write(str(model) + '\n')
            print(file_name+'has been processed!')
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# File Name: Action.py
# By: Daniel Lamothe
#
# Purpose: A simple object representing an Action a Creature can take. Used to house Action information.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Action:
    """A simple value object housing the information for one Creature action.

    Fields:
        name   -- display name of the action
        desc   -- human-readable description
        attack -- attack descriptor
        hit    -- effect on hit
    """
    # BUG FIX: the class previously defined two __init__ methods; Python keeps
    # only the last one, so the no-argument "default constructor" was dead
    # code and Action() raised TypeError. A single constructor with
    # empty-string defaults supports both call styles.
    def __init__(self, name='', desc='', attack='', hit=''):
        self.name = name
        self.desc = desc
        self.attack = attack
        self.hit = hit
|
"""Define test cases for KFLR."""
from test.extensions.secondorder.secondorder_settings import GROUP_CONV_SETTINGS
# Settings shared with other second-order extensions that are also
# unsupported here (group convolutions).
SHARED_NOT_SUPPORTED_SETTINGS = GROUP_CONV_SETTINGS
# Settings unsupported only by KFLR (none at present).
LOCAL_NOT_SUPPORTED_SETTINGS = []
# Full list of problem settings the KFLR tests must skip.
NOT_SUPPORTED_SETTINGS = SHARED_NOT_SUPPORTED_SETTINGS + LOCAL_NOT_SUPPORTED_SETTINGS
|
import os
import importlib
from gmc.conf import global_settings
ENVIRONMENT_VARIABLE = "GMC_SETTINGS_MODULE"
class Settings:
    """
    Module to load settings to configure gmc.

    Defaults come from `gmc.conf.global_settings`; they are overlaid by the
    module named in the GMC_SETTINGS_MODULE environment variable, when set.
    Values are exposed as attributes, e.g. settings.DATASET_DIR.
    """
    def __init__(self, *args, **kwargs):
        # Loaded lazily on first access; settings_module records which module
        # the current snapshot came from so env-var changes trigger a reload.
        self.settings = None
        self.settings_module = None
    def __getattr__(self, name):
        """
        Make settings available as the attributes.
        Like settings.DATASET_DIR
        """
        self.load_settings()
        return self.settings[name]
    def __iter__(self):
        self.load_settings()
        return iter(self.settings)
    def load_settings(self):
        """Populate the settings dict, re-reading if the env var changed."""
        settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
        if self.settings is not None and settings_module == self.settings_module:
            return
        self.settings = {}
        # Defaults first: every UPPER_CASE name from global_settings.
        for setting in dir(global_settings):
            if setting.isupper():
                self.settings[setting] = getattr(global_settings, setting)
        self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)
        if self.settings_module is not None:
            # Overlay UPPER_CASE names from the user-supplied module.
            mod = importlib.import_module(self.settings_module)
            for setting in dir(mod):
                if setting.isupper():
                    self.settings[setting] = getattr(mod, setting)
    def modify(self, new_settings):
        """Override already-known settings with values from new_settings.

        BUG FIX: previously assumed load_settings() had already run; when
        modify() was the first call, self.settings was None and the
        membership test raised TypeError. Load first.
        """
        self.load_settings()
        for name in new_settings:
            if name in self.settings:
                self.settings[name] = new_settings[name]
# Module-level singleton through which all gmc code reads configuration.
settings = Settings()
|
import os
import PIL
from PIL import Image
from PIL import ImageEnhance
from tqdm import tqdm
# Contrast-enhancement sweep: for each factor 1..10, write grayscale,
# contrast-enhanced, cropped copies of frames [first_num, last_num).
first_num = 82
last_num = 84
folder_name = "201013-vib"
for th in tqdm((1,2,3,4,5,6,7,8,9,10)):
    TH = str(th)
    # ROBUSTNESS: exist_ok makes the sweep re-runnable (os.mkdir crashed
    # when the output directory already existed).
    os.makedirs("enhance_test/"+folder_name+"_"+TH, exist_ok=True)
    for num in tqdm(range(first_num, last_num)):
        # Frame file names are zero-padded to six digits.
        fileA_name = str(num).zfill(6)+".jpg"
        im = Image.open(folder_name+"/"+fileA_name)
        im = im.convert('L')  # grayscale
        # Apply the contrast factor for this pass of the sweep.
        enhancer = ImageEnhance.Contrast(im)
        im = enhancer.enhance(th)
        # Fixed region of interest: (left, upper, right, lower).
        im = im.crop((410,977,1069,1378))
        im.save("enhance_test/"+folder_name+"_"+TH+"/"+fileA_name)
|
import unittest
import testutil
import hdbfs
class HiguQueryCases( testutil.TestCase ):
    """Exercises hdbfs query constraints (tags, names, parameters, ordering).

    setUp registers nine color files, tags six of them, and re-fetches every
    object through a fresh read-only Database handle so the tests query the
    persisted state.

    NOTE(review): the color fixtures (self.red, self.yellow, ...) and
    _load_data/init_env are presumably supplied by testutil.TestCase — confirm.
    """
    def setUp( self ):
        self.init_env()
        # Write handle used only to populate the database.
        h = hdbfs.Database()
        h.enable_write_access()
        red_obj = h.register_file( self._load_data( self.red ) )
        yellow_obj = h.register_file( self._load_data( self.yellow ) )
        green_obj = h.register_file( self._load_data( self.green ) )
        cyan_obj = h.register_file( self._load_data( self.cyan ) )
        blue_obj = h.register_file( self._load_data( self.blue ) )
        magenta_obj = h.register_file( self._load_data( self.magenta ) )
        white_obj = h.register_file( self._load_data( self.white ) )
        grey_obj = h.register_file( self._load_data( self.grey ) )
        black_obj = h.register_file( self._load_data( self.black ) )
        # Numeric 'test' parameter used by the parameter-constraint test.
        red_obj['test'] = 1
        yellow_obj['test'] = 2
        green_obj['test'] = 3
        blue_obj['test'] = 4
        warm_tag = h.make_tag( 'warm' )
        cool_tag = h.make_tag( 'cool' )
        rgb_tag = h.make_tag( 'rgb' )
        cmyk_tag = h.make_tag( 'cmyk' )
        paint_tag = h.make_tag( 'paint' )
        # Tag layout: warm={red,yellow,magenta}, cool={green,cyan,blue},
        # rgb={red,green,blue}, cmyk={cyan,magenta,yellow,black},
        # paint={red,yellow,blue}; white and grey stay untagged.
        red_obj.assign( warm_tag )
        yellow_obj.assign( warm_tag )
        magenta_obj.assign( warm_tag )
        green_obj.assign( cool_tag )
        cyan_obj.assign( cool_tag )
        blue_obj.assign( cool_tag )
        red_obj.assign( rgb_tag )
        green_obj.assign( rgb_tag )
        blue_obj.assign( rgb_tag )
        cyan_obj.assign( cmyk_tag )
        magenta_obj.assign( cmyk_tag )
        yellow_obj.assign( cmyk_tag )
        black_obj.assign( cmyk_tag )
        red_obj.assign( paint_tag )
        yellow_obj.assign( paint_tag )
        blue_obj.assign( paint_tag )
        # Re-open the database and re-fetch everything through the new handle.
        self.h = hdbfs.Database()
        self.red_obj = self.h.get_object_by_id( red_obj.get_id() )
        self.yellow_obj = self.h.get_object_by_id( yellow_obj.get_id() )
        self.green_obj = self.h.get_object_by_id( green_obj.get_id() )
        self.cyan_obj = self.h.get_object_by_id( cyan_obj.get_id() )
        self.blue_obj = self.h.get_object_by_id( blue_obj.get_id() )
        self.magenta_obj = self.h.get_object_by_id( magenta_obj.get_id() )
        self.white_obj = self.h.get_object_by_id( white_obj.get_id() )
        self.grey_obj = self.h.get_object_by_id( grey_obj.get_id() )
        self.black_obj = self.h.get_object_by_id( black_obj.get_id() )
        self.warm_tag = self.h.get_object_by_id( warm_tag.get_id() )
        self.cool_tag = self.h.get_object_by_id( cool_tag.get_id() )
        self.rgb_tag = self.h.get_object_by_id( rgb_tag.get_id() )
        self.cmyk_tag = self.h.get_object_by_id( cmyk_tag.get_id() )
        self.paint_tag = self.h.get_object_by_id( paint_tag.get_id() )
    def tearDown( self ):
        self.uninit_env()
    def test_query_all( self ):
        """All nine registered objects come back from the catch-all query."""
        rs = [ r for r in self.h.all_albums_or_free_files() ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 9, 'Result size mismatch' )
    def test_query_unowned( self ):
        """Only the untagged objects (white, grey) are unowned."""
        rs = [ r for r in self.h.unowned_files() ]
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_require( self ):
        """Require-constraints intersect: warm AND paint -> red, yellow."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_add( self ):
        """Or-constraints union: warm OR paint."""
        query = hdbfs.query.Query()
        query.add_or_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( len( rs ) == 4, 'Result size mismatch' )
    def test_query_sub( self ):
        """Not-constraints exclude: NOT warm AND NOT paint."""
        query = hdbfs.query.Query()
        query.add_not_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 5, 'Result size mismatch' )
    def test_query_add_sub( self ):
        """(rgb OR cmyk) minus cool."""
        query = hdbfs.query.Query()
        query.add_or_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cmyk_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.magenta_obj in rs, 'Magenta not in result' )
        self.assertTrue( self.black_obj in rs, 'Black not in result' )
        self.assertTrue( len( rs ) == 4, 'Result size mismatch' )
    def test_query_require_add( self ):
        """(warm AND paint) OR cool."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.cyan_obj in rs, 'Cyan not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( len( rs ) == 5, 'Result size mismatch' )
    def test_query_require_add_sub( self ):
        """((warm AND paint) OR cool) minus cmyk."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.warm_tag ) )
        query.add_require_constraint( hdbfs.query.TagConstraint( self.paint_tag ) )
        query.add_or_constraint( hdbfs.query.TagConstraint( self.cool_tag ) )
        query.add_not_constraint( hdbfs.query.TagConstraint( self.cmyk_tag ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_order_add( self ):
        """Ordering by 'add' returns objects in registration order."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.set_order( 'add' )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj == rs[0], 'Red not in pos 0' )
        self.assertTrue( self.green_obj == rs[1], 'Green not in pos 1' )
        self.assertTrue( self.blue_obj == rs[2], 'Blue not in pos 2' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_order_radd( self ):
        """Reversed 'add' ordering returns registration order backwards."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.TagConstraint( self.rgb_tag ) )
        query.set_order( 'add', True )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj == rs[2], 'Red not in pos 2' )
        self.assertTrue( self.green_obj == rs[1], 'Green not in pos 1' )
        self.assertTrue( self.blue_obj == rs[0], 'Blue not in pos 0' )
        self.assertTrue( len( rs ) == 3, 'Result size mismatch' )
    def test_query_by_name( self ):
        """A string constraint with a full name matches exactly one object."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( self.red ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.red_obj in rs, 'Red not in result' )
        self.assertTrue( len( rs ) == 1, 'Result size mismatch' )
    def test_query_by_name_subset( self ):
        """A string constraint matches as a substring of the name."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( 'e_sq.' ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.blue_obj in rs, 'Blue not in result' )
        self.assertTrue( self.white_obj in rs, 'White not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_by_name_wildcard( self ):
        """'*' in a string constraint acts as a wildcard."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.StringConstraint( 'gr*sq' ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( self.grey_obj in rs, 'Grey not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
    def test_query_by_parameters( self ):
        """Parameter constraints compare numerically: 2 <= test <= 3."""
        query = hdbfs.query.Query()
        query.add_require_constraint( hdbfs.query.ParameterConstraint( 'test', '>=', 2 ) )
        query.add_require_constraint( hdbfs.query.ParameterConstraint( 'test', '<=', 3 ) )
        rs = [ r for r in query.execute( self.h ) ]
        self.assertTrue( self.yellow_obj in rs, 'Yellow not in result' )
        self.assertTrue( self.green_obj in rs, 'Green not in result' )
        self.assertTrue( len( rs ) == 2, 'Result size mismatch' )
# Allow running this module directly as a test script.
if( __name__ == '__main__' ):
    unittest.main()
|
# -*- coding: utf-8 -*-
import scrapy
import time
import labsql
import copy
class PanyuspiderSpider(scrapy.Spider):
    """Scrapes per-station rainfall readings from qx.panyu.gov.cn and inserts
    one row per station into the SQL Server tables rf1, rf3, daily_am and
    daily_pm."""
    name = 'panyuSpider'
    allowed_domains = ['qx.panyu.gov.cn']
    start_urls = ['http://qx.panyu.gov.cn/pyinterface/wap/sk_zd.jsp']
    # create sql server session
    conn = labsql.LabSQL('172.168.1.36', 'panyu', 'sa', 'scucc')
    # initial setting: the page omits the year, so rows are stamped with the
    # current one
    year = time.localtime(time.time()).tm_year
    # station index -> [location, date, time] base row shared by all tables
    info_dict = {}
    def parse(self, response):
        """Extract station/location/time plus the four rainfall figures and
        insert them into their respective tables."""
        try:
            # get initial information (location and publication date/time)
            for id, get_location in enumerate(response.css('.station-val::text').extract()):
                self.info_dict[id] = [get_location.strip()]
            for id, date_time in enumerate(response.css('.time-val::text').extract()):
                date, pub_time = date_time.strip().split(' ')
                self.info_dict[id].append('%s-%s' % (self.year, date))
                self.info_dict[id].append(pub_time)
            # get one hour rainfall ("12mm" -> "12" via split on 'm')
            hourrf_dict = copy.deepcopy(self.info_dict)
            for id, get_hourrf in enumerate(response.css('.hourrf-val::text').extract()):
                hourrf_dict[id].append(get_hourrf.split('m')[0])
            hourrf_values = hourrf_dict.values()
            for value in hourrf_values:
                self.conn.insert("""insert into rf1 ([location]
                ,[date]
                ,[time]
                ,[rainfall_of_one_hour]
                ) values(?,?,?,?)""",
                value)
            # get three hour rainfall
            rf3_dict = copy.deepcopy(self.info_dict)
            for id, get_rf3 in enumerate(response.css('.rf3-val::text').extract()):
                rf3_dict[id].append(get_rf3.split('m')[0])
            rf3_values = rf3_dict.values()
            for value in rf3_values:
                self.conn.insert("""insert into rf3 ([location]
                ,[date]
                ,[time]
                ,[rainfall_of_three_hour]
                ) values(?,?,?,?)""",
                value)
            # get daily rainfall since 8am
            ryl_dict = copy.deepcopy(self.info_dict)
            for id, get_ryl in enumerate(response.css('.ryl-val::text').extract()):
                ryl_dict[id].append(get_ryl.split('m')[0])
            ryl_values = ryl_dict.values()
            for value in ryl_values:
                self.conn.insert("""insert into daily_am ([location]
                ,[date]
                ,[time]
                ,[rainfall_of_daily_am]
                ) values(?,?,?,?)""",
                value)
            # get daily rainfall since 20pm
            rf20_dict = copy.deepcopy(self.info_dict)
            for id, get_rf20 in enumerate(response.css('.rf20-val::text').extract()):
                rf20_dict[id].append(get_rf20.split('m')[0])
            rf20_values = rf20_dict.values()
            for value in rf20_values:
                self.conn.insert("""insert into daily_pm ([location]
                ,[date]
                ,[time]
                ,[rainfall_of_daily_pm]
                ) values(?,?,?,?)""",
                value)
        except Exception as e:
            # BUG FIX: was a bare `except:` that swallowed every error and
            # logged only a timestamp; now the failure reason is recorded too.
            with open('error.txt', 'a') as f:
                f.write(time.asctime(time.localtime(time.time())) + ' ' + repr(e) + '\n')
|
class Section:
    """A course section: number, type, meeting days and time span.

    Every field defaults to None when not supplied.
    """
    # Class-level defaults, kept for backward compatibility with any code
    # that reads these off the class itself.
    section_number = None
    type = None
    days_of_the_week = None
    start_time = None
    end_time = None
    def __init__(self, json_dict=None):
        """Populate fields from `json_dict`; leave them None when absent.

        ROBUSTNESS: uses dict.get so a partially-filled dict no longer
        raises KeyError (previously any missing key crashed the constructor).
        """
        if json_dict is None:
            json_dict = {}
        self.section_number = json_dict.get('section_number')
        self.type = json_dict.get('type')
        self.days_of_the_week = json_dict.get('days_of_the_week')
        self.start_time = json_dict.get('start_time')
        self.end_time = json_dict.get('end_time')
    def section_to_string(self):
        """Return a human-readable, multi-line description of the section."""
        output = ""
        output += "SectionNumber: " + self.var_to_string(self.section_number) + "\n"
        output += "Type: " + self.var_to_string(self.type) + "\n"
        output += "Days of the week: " + self.var_to_string(self.days_of_the_week) + "\n"
        output += "Start/End: " + self.var_to_string(self.start_time) \
                  + " to " + self.var_to_string(self.end_time) + "\n"
        return output
    def var_to_string(self, variable):
        """Render a field for display; None becomes the literal "NONE"."""
        return "NONE" if variable is None else str(variable)
    def reprJSON(self):
        """Return a plain-dict view of the section for JSON serialization."""
        return dict(section_number=self.section_number, type=self.type,
                    days_of_the_week=self.days_of_the_week,
                    start_time=self.start_time, end_time=self.end_time)
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'downloaderFNzIGb.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_Main(object):
    """Auto-generated (pyside2-uic) widget layout for the downloader main window.

    NOTE(review): this class was generated from 'downloaderFNzIGb.ui'; any edits
    here, including these comments, will be lost when the UI file is recompiled.
    """

    def setupUi(self, Main):
        """Build and style the widget tree on *Main* (expected to be a QMainWindow)."""
        if not Main.objectName():
            Main.setObjectName(u"Main")
        Main.setEnabled(True)
        Main.resize(640, 423)
        # Central container with a single vertical layout holding the styled frame.
        self.centralwidget = QWidget(Main)
        self.centralwidget.setObjectName(u"centralwidget")
        self.verticalLayout = QVBoxLayout(self.centralwidget)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.verticalLayout.setContentsMargins(10, 10, 10, 10)
        # Rounded dark frame that hosts every other widget.
        self.drop = QFrame(self.centralwidget)
        self.drop.setObjectName(u"drop")
        self.drop.setStyleSheet(u"QFrame{\n"
"	background-color: rgb(56,58,89);\n"
"	color: rgb(220, 220, 220);\n"
"	border-radius: 10px\n"
"}")
        self.drop.setFrameShape(QFrame.StyledPanel)
        self.drop.setFrameShadow(QFrame.Raised)
        # Large title label across the top of the frame.
        self.Downloader = QLabel(self.drop)
        self.Downloader.setObjectName(u"Downloader")
        self.Downloader.setGeometry(QRect(0, 0, 611, 101))
        font = QFont()
        font.setFamily(u"Segoe UI")
        font.setPointSize(40)
        self.Downloader.setFont(font)
        self.Downloader.setStyleSheet(u"color: rgb(254,121,199);")
        self.Downloader.setAlignment(Qt.AlignCenter)
        # Download progress bar with a gradient chunk style.
        self.progressBar = QProgressBar(self.drop)
        self.progressBar.setObjectName(u"progressBar")
        self.progressBar.setGeometry(QRect(10, 270, 601, 23))
        self.progressBar.setAutoFillBackground(False)
        self.progressBar.setStyleSheet(u"QProgressBar {\n"
"	background-color: rgb(98,114,164);\n"
"	color: rgb(200,200,200);\n"
"	border-style: none;\n"
"	border-radius: 10px;\n"
"	text-align: center;\n"
"}\n"
"QProgressBar::chunk{ \n"
"border-radius:10px;\n"
"	background-color: qlineargradient(spread:pad, x1:0, y1:0.42, x2:1, y2:0.443182, stop:0 rgba(254, 121, 199, 255), stop:1 rgba(170, 85, 255, 255));\n"
"}")
        self.progressBar.setValue(0)
        # Status line under the progress bar (speed / ETA text).
        self.Info_label = QLabel(self.drop)
        self.Info_label.setObjectName(u"Info_label")
        self.Info_label.setGeometry(QRect(-10, 300, 631, 31))
        font1 = QFont()
        font1.setFamily(u"Segoe UI")
        font1.setPointSize(14)
        self.Info_label.setFont(font1)
        self.Info_label.setStyleSheet(u"color: rgb(98,114,250);")
        self.Info_label.setAlignment(Qt.AlignCenter)
        # List of queued/downloaded files.
        self.fielist = QListWidget(self.drop)
        self.fielist.setObjectName(u"fielist")
        self.fielist.setGeometry(QRect(20, 81, 571, 181))
        self.fielist.setStyleSheet(u"hover{\n"
"color: rgb(98,114,250);\n"
"}")
        # Flat close button in the top-right corner.
        self.exit_button = QPushButton(self.drop)
        self.exit_button.setObjectName(u"exit_button")
        self.exit_button.setGeometry(QRect(590, 0, 31, 31))
        font2 = QFont()
        font2.setFamily(u"Segoe UI")
        font2.setPointSize(18)
        font2.setKerning(True)
        self.exit_button.setFont(font2)
        self.exit_button.setAcceptDrops(False)
        self.exit_button.setAutoFillBackground(False)
        self.exit_button.setStyleSheet(u"")
        self.exit_button.setAutoDefault(False)
        self.exit_button.setFlat(True)
        # Small label in the top-left corner (text populated at runtime).
        self.ip_label = QLabel(self.drop)
        self.ip_label.setObjectName(u"ip_label")
        self.ip_label.setGeometry(QRect(10, 0, 47, 13))
        self.ip_label.setStyleSheet(u"color: rgb(98,114,250);")
        self.verticalLayout.addWidget(self.drop)
        Main.setCentralWidget(self.centralwidget)
        self.retranslateUi(Main)
        self.exit_button.setDefault(False)
        QMetaObject.connectSlotsByName(Main)
    # setupUi

    def retranslateUi(self, Main):
        """Set all user-visible strings (kept separate by uic for translation support)."""
        Main.setWindowTitle(QCoreApplication.translate("Main", u"MainWindow", None))
        self.Downloader.setText(QCoreApplication.translate("Main", u"<html><head/><body><p>PyDownloader</p><p><br/></p></body></html>", None))
        self.Info_label.setText(QCoreApplication.translate("Main", u"<html><head/><body><p>NA/NA NA Mb/s ETA: NA TM: NA</p></body></html>", None))
        self.exit_button.setText(QCoreApplication.translate("Main", u"\u2715", None))
        self.ip_label.setText("")
    # retranslateUi
|
import board
import neopixel
import time

# Strip configuration: number of LEDs and the color to display.
NUM_PIXELS = 20
BLUE = (0, 0, 255)

# Drive a NeoPixel strip attached to GPIO pin D18.
pixels = neopixel.NeoPixel(board.D18, NUM_PIXELS)

# Light every pixel solid blue.
for i in range(NUM_PIXELS):
    pixels[i] = BLUE
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import functools
import itertools
import json
import logging
import os.path
from dataclasses import dataclass
from pathlib import PurePath
from typing import Any, Iterable, Iterator, NamedTuple, Sequence, Type, cast
from pants.base.deprecated import warn_or_error
from pants.base.specs import AncestorGlobSpec, RawSpecsWithoutFileOwners, RecursiveGlobSpec
from pants.build_graph.address import BuildFileAddressRequest, MaybeAddress, ResolveError
from pants.engine.addresses import (
Address,
Addresses,
AddressInput,
BuildFileAddress,
UnparsedAddressInputs,
)
from pants.engine.collection import Collection
from pants.engine.environment import ChosenLocalEnvironmentName, EnvironmentName
from pants.engine.fs import EMPTY_SNAPSHOT, GlobMatchErrorBehavior, PathGlobs, Paths, Snapshot
from pants.engine.internals import native_engine
from pants.engine.internals.mapper import AddressFamilies
from pants.engine.internals.native_engine import AddressParseException
from pants.engine.internals.parametrize import Parametrize, _TargetParametrization
from pants.engine.internals.parametrize import ( # noqa: F401
_TargetParametrizations as _TargetParametrizations,
)
from pants.engine.internals.parametrize import ( # noqa: F401
_TargetParametrizationsRequest as _TargetParametrizationsRequest,
)
from pants.engine.internals.target_adaptor import TargetAdaptor, TargetAdaptorRequest
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
AllTargets,
AllUnexpandedTargets,
CoarsenedTarget,
CoarsenedTargets,
CoarsenedTargetsRequest,
Dependencies,
DependenciesRequest,
DepsTraversalBehavior,
ExplicitlyProvidedDependencies,
ExplicitlyProvidedDependenciesRequest,
Field,
FieldDefaultFactoryRequest,
FieldDefaultFactoryResult,
FieldDefaults,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
FilteredTargets,
GeneratedSources,
GeneratedTargets,
GenerateSourcesRequest,
GenerateTargetsRequest,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
InvalidFieldException,
MultipleSourcesField,
OverridesField,
RegisteredTargetTypes,
SourcesField,
SourcesPaths,
SourcesPathsRequest,
SpecialCasedDependencies,
Target,
TargetFilesGenerator,
TargetFilesGeneratorSettings,
TargetFilesGeneratorSettingsRequest,
TargetGenerator,
Targets,
TargetTypesToGenerateTargetsRequests,
TransitiveTargets,
TransitiveTargetsRequest,
UnexpandedTargets,
UnrecognizedTargetTypeException,
ValidatedDependencies,
ValidateDependenciesRequest,
WrappedTarget,
WrappedTargetRequest,
_generate_file_level_targets,
)
from pants.engine.unions import UnionMembership, UnionRule
from pants.option.global_options import GlobalOptions, UnmatchedBuildFileGlobs
from pants.util.docutil import bin_name, doc_url
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.memo import memoized
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
from pants.util.strutil import bullet_list, pluralize, softwrap
# Module-level logger, named after this module per the standard convention.
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------------------------
# Address -> Target(s)
# -----------------------------------------------------------------------------------------------
@rule(_masked_types=[EnvironmentName])
async def resolve_unexpanded_targets(addresses: Addresses) -> UnexpandedTargets:
    """Resolve each address to its target, without expanding target generators."""
    wrapped_targets = await MultiGet(
        Get(
            WrappedTarget,
            WrappedTargetRequest(
                a,
                # Idiomatic rules should not be manually constructing `Addresses`. Instead, they
                # should use `UnparsedAddressInputs` or `Specs` rules.
                #
                # It is technically more correct for us to require callers of
                # `Addresses -> UnexpandedTargets` to specify a `description_of_origin`. But in
                # practice, this dramatically increases boilerplate, and it should never be
                # necessary.
                #
                # Note that this contrasts with an individual `Address`, which often is unverified
                # because it can come from the rule `AddressInput -> Address`, which only verifies
                # that it has legal syntax and does not check the address exists.
                description_of_origin="<infallible>",
            ),
        )
        for a in addresses
    )
    return UnexpandedTargets(wrapped_target.target for wrapped_target in wrapped_targets)
@rule
def target_types_to_generate_targets_requests(
    union_membership: UnionMembership,
) -> TargetTypesToGenerateTargetsRequests:
    """Map each generator target type to the registered request class that generates from it."""
    return TargetTypesToGenerateTargetsRequests(
        {
            request_cls.generate_from: request_cls  # type: ignore[misc]
            for request_cls in union_membership.get(GenerateTargetsRequest)
        }
    )
@memoized
def warn_deprecated_target_type(tgt_type: type[Target]) -> None:
    """Emit the deprecation warning for a deprecated target alias (once per target type)."""
    removal_version = tgt_type.deprecated_alias_removal_version
    assert removal_version is not None
    hint = (
        f"Instead, use `{tgt_type.alias}`, which behaves the same. Run `{bin_name()} "
        "update-build-files` to automatically fix your BUILD files."
    )
    warn_or_error(
        removal_version=removal_version,
        entity=f"the target name {tgt_type.deprecated_alias}",
        hint=hint,
    )
@memoized
def warn_deprecated_field_type(field_type: type[Field]) -> None:
    """Emit the deprecation warning for a deprecated field alias (once per field type)."""
    removal_version = field_type.deprecated_alias_removal_version
    assert removal_version is not None
    hint = (
        f"Instead, use `{field_type.alias}`, which behaves the same. Run `{bin_name()} "
        "update-build-files` to automatically fix your BUILD files."
    )
    warn_or_error(
        removal_version=removal_version,
        entity=f"the field name {field_type.deprecated_alias}",
        hint=hint,
    )
@dataclass(frozen=True)
class _AdaptorAndType:
    """A parsed BUILD-file entry paired with its registered target type."""

    adaptor: TargetAdaptor
    target_type: type[Target]
@dataclass(frozen=True)
class _RequestAdaptorAndType:
    """Request to look up the `TargetAdaptor` and registered target type for an address."""

    address: Address
    description_of_origin: str
@rule
async def _determine_target_adaptor_and_type(
    req: _RequestAdaptorAndType, registered_target_types: RegisteredTargetTypes
) -> _AdaptorAndType:
    """Fetch the TargetAdaptor for an address and resolve its alias to a target type.

    Raises `UnrecognizedTargetTypeException` if the alias is not registered. Warns
    (once per type) when a deprecated alias is used directly in a BUILD file.
    """
    target_adaptor = await Get(
        TargetAdaptor,
        TargetAdaptorRequest(req.address, description_of_origin=req.description_of_origin),
    )
    target_type = registered_target_types.aliases_to_types.get(target_adaptor.type_alias, None)
    if target_type is None:
        raise UnrecognizedTargetTypeException(
            target_adaptor.type_alias,
            registered_target_types,
            req.address,
            target_adaptor.description_of_origin,
        )
    # Only warn when the deprecated alias was literally written in the BUILD file
    # (generated targets inherit the alias and should not re-trigger the warning).
    if (
        target_type.deprecated_alias is not None
        and target_type.deprecated_alias == target_adaptor.type_alias
        and not req.address.is_generated_target
    ):
        warn_deprecated_target_type(target_type)
    return _AdaptorAndType(target_adaptor, target_type)
@dataclass(frozen=True)
class _TargetGeneratorOverridesRequest:
    """Request for the flattened `overrides` field values of a target generator."""

    target_generator: TargetGenerator
@dataclass(frozen=True)
class ResolvedTargetGeneratorRequests:
    """The `GenerateTargetsRequest`s produced for a (possibly parametrized) generator."""

    requests: tuple[GenerateTargetsRequest, ...] = tuple()
@dataclass(frozen=True)
class ResolveTargetGeneratorRequests:
    """Request to compute the generation requests for the generator at `address`."""

    address: Address
    # Excluded from hash/eq so that requests differing only in origin share cache entries.
    description_of_origin: str = dataclasses.field(hash=False, compare=False)
@dataclass(frozen=True)
class ResolveAllTargetGeneratorRequests:
    """Request to compute the generation requests for every generator in the repository."""

    # Excluded from hash/eq so that requests differing only in origin share cache entries.
    description_of_origin: str = dataclasses.field(hash=False, compare=False)
    # When set, only generators with this type's alias are considered.
    of_type: type[TargetGenerator] | None = None
@rule
async def resolve_all_generator_target_requests(
    req: ResolveAllTargetGeneratorRequests,
) -> ResolvedTargetGeneratorRequests:
    """Scan every BUILD file and collect the generation requests of all matching generators."""
    address_families = await Get(
        AddressFamilies,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),),
            description_of_origin="the `ResolveAllTargetGeneratorRequests` rule",
        ),
    )
    # Resolve each matching generator's requests in parallel, then flatten.
    results = await MultiGet(
        Get(
            ResolvedTargetGeneratorRequests,
            ResolveTargetGeneratorRequests(address, req.description_of_origin),
        )
        for family in address_families
        for address, target_adaptor in family.addresses_to_target_adaptors.items()
        if not req.of_type or target_adaptor.type_alias == req.of_type.alias
    )
    return ResolvedTargetGeneratorRequests(
        tuple(itertools.chain.from_iterable(result.requests for result in results))
    )
async def _target_generator_overrides(
    target_generator: TargetGenerator, unmatched_build_file_globs: UnmatchedBuildFileGlobs
) -> dict[str, dict[str, Any]]:
    """Return the generator's flattened `overrides` field.

    For `TargetFilesGenerator`s, override keys that are file globs are additionally
    resolved to the concrete matching paths.
    """
    address = target_generator.address
    if target_generator.has_field(OverridesField):
        overrides_field = target_generator[OverridesField]
        overrides_flattened = overrides_field.flatten()
    else:
        overrides_flattened = {}
    if isinstance(target_generator, TargetFilesGenerator):
        override_globs = OverridesField.to_path_globs(
            address, overrides_flattened, unmatched_build_file_globs
        )
        override_paths = await MultiGet(
            Get(Paths, PathGlobs, path_globs) for path_globs in override_globs
        )
        return OverridesField.flatten_paths(
            address, zip(override_paths, override_globs, overrides_flattened.values())
        )
    return overrides_flattened
@rule
async def resolve_generator_target_requests(
    req: ResolveTargetGeneratorRequests,
    union_membership: UnionMembership,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    unmatched_build_file_globs: UnmatchedBuildFileGlobs,
) -> ResolvedTargetGeneratorRequests:
    """Compute the `GenerateTargetsRequest`s for the target generator at `req.address`.

    Returns an empty result when the target is not a generator or no request type is
    registered for it. One request is produced per parametrization of the generator.
    """
    adaptor_and_type = await Get(
        _AdaptorAndType, _RequestAdaptorAndType(req.address, req.description_of_origin)
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    if not issubclass(target_type, TargetGenerator):
        return ResolvedTargetGeneratorRequests()
    generate_request = target_types_to_generate_requests.request_for(target_type)
    if not generate_request:
        return ResolvedTargetGeneratorRequests()
    # NOTE: the helper below pops "moved" fields out of this dict in place, so
    # `base_generator` is constructed from the remaining (non-moved) fields.
    generator_fields = dict(target_adaptor.kwargs)
    generators = _parametrized_target_generators_with_templates(
        req.address,
        target_adaptor,
        target_type,
        generator_fields,
        union_membership,
    )
    base_generator = target_type(
        generator_fields,
        req.address,
        name_explicitly_set=target_adaptor.name_explicitly_set,
        union_membership=union_membership,
        description_of_origin=target_adaptor.description_of_origin,
    )
    overrides = await _target_generator_overrides(base_generator, unmatched_build_file_globs)
    return ResolvedTargetGeneratorRequests(
        requests=tuple(
            generate_request(
                generator,
                template_address=generator.address,
                template=template,
                # Overrides may themselves be parametrized; expand them per generator.
                overrides={
                    name: dict(Parametrize.expand(generator.address, override))
                    for name, override in overrides.items()
                },
            )
            for generator, template in generators
        )
    )
@rule
async def resolve_target_parametrizations(
    request: _TargetParametrizationsRequest, union_membership: UnionMembership
) -> _TargetParametrizations:
    """Resolve all parametrizations (and generated targets) for the target at an address."""
    address = request.address
    adaptor_and_type = await Get(
        _AdaptorAndType, _RequestAdaptorAndType(request.address, request.description_of_origin)
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    parametrizations: list[_TargetParametrization] = []
    requests: ResolvedTargetGeneratorRequests | None = None
    if issubclass(target_type, TargetGenerator):
        requests = await Get(
            ResolvedTargetGeneratorRequests,
            ResolveTargetGeneratorRequests(address, request.description_of_origin),
        )
    if requests and requests.requests:
        # Generator path: run every generation request and pair each generated batch
        # with the generator instance that produced it.
        all_generated = await MultiGet(
            Get(GeneratedTargets, GenerateTargetsRequest, generate_request)
            for generate_request in requests.requests
        )
        parametrizations.extend(
            _TargetParametrization(generate_request.generator, generated_batch)
            for generated_batch, generate_request in zip(all_generated, requests.requests)
        )
    else:
        # Non-generator path: instantiate the target directly (expanding `parametrize`).
        parametrizations.append(
            _target_parametrizations(address, target_adaptor, target_type, union_membership)
        )
    return _TargetParametrizations(parametrizations)
def _target_parametrizations(
    address: Address,
    target_adaptor: TargetAdaptor,
    target_type: type[Target],
    union_membership: UnionMembership,
) -> _TargetParametrization:
    """Instantiate a non-generator target, expanding any `parametrize` groups in its fields."""
    first, *rest = Parametrize.expand(address, target_adaptor.kwargs)
    if not rest:
        # Not parametrized: a single concrete target exists at the literal address.
        target = target_type(
            target_adaptor.kwargs,
            address,
            name_explicitly_set=target_adaptor.name_explicitly_set,
            union_membership=union_membership,
            description_of_origin=target_adaptor.description_of_origin,
        )
        # Surface deprecation warnings for any deprecated field aliases used explicitly.
        for field_type in target.field_types:
            deprecated = field_type.deprecated_alias
            if deprecated is not None and deprecated in target_adaptor.kwargs:
                warn_deprecated_field_type(field_type)
        return _TargetParametrization(target, FrozenDict())

    # Parametrized: the original address has no concrete target; each expanded
    # parameter combination yields its own target instance.
    generated = FrozenDict(
        (
            parameterized_address,
            target_type(
                parameterized_fields,
                parameterized_address,
                name_explicitly_set=target_adaptor.name_explicitly_set,
                union_membership=union_membership,
                description_of_origin=target_adaptor.description_of_origin,
            ),
        )
        for parameterized_address, parameterized_fields in (first, *rest)
    )
    return _TargetParametrization(None, generated)
def _parametrized_target_generators_with_templates(
    address: Address,
    target_adaptor: TargetAdaptor,
    target_type: type[TargetGenerator],
    generator_fields: dict[str, Any],
    union_membership: UnionMembership,
) -> list[tuple[TargetGenerator, dict[str, Any]]]:
    """Expand the generator's parametrizations into (generator instance, template) pairs.

    The "template" is the dict of copied/moved field values that generated targets will
    receive. NOTE: moved fields are popped from `generator_fields` in place, mutating
    the caller's dict.

    Raises `InvalidFieldException` if a field that is not moved to generated targets is
    parametrized.
    """
    # Split out the `propagated_fields` before construction.
    template_fields = {}
    copied_fields = (
        *target_type.copied_fields,
        *target_type._find_plugin_fields(union_membership),
    )
    # Copied fields appear both on the generator and in the template.
    for field_type in copied_fields:
        field_value = generator_fields.get(field_type.alias, None)
        if field_value is not None:
            template_fields[field_type.alias] = field_value
    # Moved fields are removed from the generator and live only on generated targets.
    for field_type in target_type.moved_fields:
        field_value = generator_fields.pop(field_type.alias, None)
        if field_value is not None:
            template_fields[field_type.alias] = field_value
    field_type_aliases = target_type._get_field_aliases_to_field_types(
        target_type.class_field_types(union_membership)
    ).keys()
    # Only moved/copied (template) fields may be parametrized; anything else is an error.
    generator_fields_parametrized = {
        name
        for name, field in generator_fields.items()
        if isinstance(field, Parametrize) and name in field_type_aliases
    }
    if generator_fields_parametrized:
        noun = pluralize(len(generator_fields_parametrized), "field", include_count=False)
        generator_fields_parametrized_text = ", ".join(
            repr(f) for f in generator_fields_parametrized
        )
        raise InvalidFieldException(
            f"Only fields which will be moved to generated targets may be parametrized, "
            f"so target generator {address} (with type {target_type.alias}) cannot "
            f"parametrize the {generator_fields_parametrized_text} {noun}."
        )
    # One generator instance per parametrization of the template fields.
    return [
        (
            target_type(
                generator_fields,
                address,
                name_explicitly_set=target_adaptor.name is not None,
                union_membership=union_membership,
                description_of_origin=target_adaptor.description_of_origin,
            ),
            template,
        )
        for address, template in Parametrize.expand(address, template_fields)
    ]
@rule(_masked_types=[EnvironmentName])
async def resolve_target(
    request: WrappedTargetRequest,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    local_environment_name: ChosenLocalEnvironmentName,
) -> WrappedTarget:
    """Resolve a single (possibly generated or parametrized) target for an address.

    Raises `ResolveError` when the address refers to a generated/parametrized target
    that its base target did not actually produce.
    """
    address = request.address
    # Generated addresses must be looked up through their generator's parametrizations.
    base_address = address.maybe_convert_to_target_generator()
    parametrizations = await Get(
        _TargetParametrizations,
        {
            _TargetParametrizationsRequest(
                base_address, description_of_origin=request.description_of_origin
            ): _TargetParametrizationsRequest,
            local_environment_name.val: EnvironmentName,
        },
    )
    target = parametrizations.get(address, target_types_to_generate_requests)
    if target is None:
        raise ResolveError(
            softwrap(
                f"""
                The address `{address}` from {request.description_of_origin} was not generated by
                the target `{base_address}`. Did you mean one of these addresses?

                {bullet_list(str(t.address) for t in parametrizations.all)}
                """
            )
        )
    return WrappedTarget(target)
@dataclass(frozen=True)
class WrappedTargetForBootstrap:
    """Used to avoid a rule graph cycle when evaluating bootstrap targets.
    This does not work with target generation and parametrization. It also ignores any unrecognized
    fields in the target, to accommodate plugin fields which are not yet registered during
    bootstrapping.
    This should only be used by bootstrapping code.
    """

    # The resolved bootstrap target.
    val: Target
@rule
async def resolve_target_for_bootstrapping(
    request: WrappedTargetRequest,
    union_membership: UnionMembership,
) -> WrappedTargetForBootstrap:
    """Resolve a target directly from its BUILD entry, skipping generation/parametrization.

    Unrecognized fields are ignored because plugin fields may not be registered yet
    during bootstrap (see `WrappedTargetForBootstrap`).
    """
    adaptor_and_type = await Get(
        _AdaptorAndType,
        _RequestAdaptorAndType(
            request.address,
            description_of_origin=request.description_of_origin,
        ),
    )
    target_adaptor = adaptor_and_type.adaptor
    target_type = adaptor_and_type.target_type
    target = target_type(
        target_adaptor.kwargs,
        request.address,
        name_explicitly_set=target_adaptor.name_explicitly_set,
        union_membership=union_membership,
        ignore_unrecognized_fields=True,
        description_of_origin=target_adaptor.description_of_origin,
    )
    return WrappedTargetForBootstrap(target)
@rule(_masked_types=[EnvironmentName])
async def resolve_targets(
    targets: UnexpandedTargets,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Targets:
    """Expand target generators in `targets` into the targets they generate."""
    # Replace all generating targets with what they generate. Otherwise, keep them. If a target
    # generator does not generate any targets, keep the target generator.
    # TODO: This method does not preserve the order of inputs.
    expanded_targets: OrderedSet[Target] = OrderedSet()
    generator_targets = []
    parametrizations_gets = []
    for tgt in targets:
        if (
            target_types_to_generate_requests.is_generator(tgt)
            and not tgt.address.is_generated_target
        ):
            generator_targets.append(tgt)
            parametrizations_gets.append(
                Get(
                    _TargetParametrizations,
                    {
                        _TargetParametrizationsRequest(
                            tgt.address.maybe_convert_to_target_generator(),
                            # Idiomatic rules should not be manually creating `UnexpandedTargets`, so
                            # we can be confident that the targets actually exist and the addresses
                            # are already legitimate.
                            description_of_origin="<infallible>",
                        ): _TargetParametrizationsRequest,
                        local_environment_name.val: EnvironmentName,
                    },
                )
            )
        else:
            expanded_targets.add(tgt)
    # Resolve all generators' parametrizations in parallel, then merge the results in.
    all_generated_targets = await MultiGet(parametrizations_gets)
    expanded_targets.update(
        tgt
        for generator, parametrizations in zip(generator_targets, all_generated_targets)
        for tgt in parametrizations.generated_or_generator(generator.address)
    )
    return Targets(expanded_targets)
@rule(desc="Find all targets in the project", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def find_all_targets() -> AllTargets:
    """Resolve every (expanded) target in the repository via a recursive glob over all specs."""
    tgts = await Get(
        Targets,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),), description_of_origin="the `AllTargets` rule"
        ),
    )
    return AllTargets(tgts)
@rule(
    desc="Find all (unexpanded) targets in the project",
    level=LogLevel.DEBUG,
    _masked_types=[EnvironmentName],
)
async def find_all_unexpanded_targets() -> AllUnexpandedTargets:
    """Resolve every target in the repository without expanding target generators."""
    tgts = await Get(
        UnexpandedTargets,
        RawSpecsWithoutFileOwners(
            recursive_globs=(RecursiveGlobSpec(""),),
            # Fixed: previously said "the `AllTargets` rule" (copy-paste from
            # `find_all_targets`), which mislabeled the origin in error messages.
            description_of_origin="the `AllUnexpandedTargets` rule",
        ),
    )
    return AllUnexpandedTargets(tgts)
# -----------------------------------------------------------------------------------------------
# TransitiveTargets
# -----------------------------------------------------------------------------------------------
class CycleException(Exception):
    """Raised when the (non-file-level) dependency graph contains an intolerable cycle."""

    def __init__(self, subject: Address, path: tuple[Address, ...]) -> None:
        # Render the path with the offending address pointed out by an arrow.
        rendered = []
        for addr in path:
            marker = "-> " if addr == subject else "   "
            rendered.append(f"{marker}{addr}")
        path_string = "\n".join(rendered)
        super().__init__(
            f"The dependency graph contained a cycle:\n{path_string}\n\nTo fix this, first verify "
            "if your code has an actual import cycle. If it does, you likely need to re-architect "
            "your code to avoid the cycle.\n\nIf there is no cycle in your code, then you may need "
            "to use more granular targets. Split up the problematic targets into smaller targets "
            "with more granular `sources` fields so that you can adjust the `dependencies` fields "
            "to avoid introducing a cycle.\n\nAlternatively, use Python dependency inference "
            "(`--python-infer-imports`), rather than explicit `dependencies`. Pants will infer "
            "dependencies on specific files, rather than entire targets. This extra precision "
            "means that you will only have cycles if your code actually does have cycles in it."
        )
        self.subject = subject
        self.path = path
def _detect_cycles(
    roots: tuple[Address, ...], dependency_mapping: dict[Address, tuple[Address, ...]]
) -> None:
    """Depth-first search from `roots`, raising `CycleException` on an intolerable cycle.

    Cycles that pass through a file-level address are tolerated and not reported.
    """
    # The current DFS path (ordered) and the set of all nodes ever visited.
    path_stack: OrderedSet[Address] = OrderedSet()
    visited: set[Address] = set()

    def maybe_report_cycle(address: Address) -> None:
        # NB: File-level dependencies are cycle tolerant.
        if address.is_file_target or address not in path_stack:
            return

        # The path of the cycle is shorter than the entire path to the cycle: if the suffix of
        # the path representing the cycle contains a file dep, it is ignored.
        in_cycle = False
        for path_address in path_stack:
            if in_cycle and path_address.is_file_target:
                # There is a file address inside the cycle: do not report it.
                return
            elif in_cycle:
                # Not a file address.
                continue
            else:
                # We're entering the suffix of the path that contains the cycle if we've reached
                # the address in question.
                in_cycle = path_address == address
        # If we did not break out early, it's because there were no file addresses in the cycle.
        raise CycleException(address, (*path_stack, address))

    def visit(address: Address):
        # A revisit of an already-seen node is only interesting if it closes a cycle.
        if address in visited:
            maybe_report_cycle(address)
            return
        path_stack.add(address)
        visited.add(address)

        for dep_address in dependency_mapping[address]:
            visit(dep_address)

        path_stack.remove(address)

    for root in roots:
        visit(root)
    # Sanity check: every visit() must pop what it pushed.
    if path_stack:
        raise AssertionError(
            f"The stack of visited nodes should have been empty at the end of recursion, "
            f"but it still contained: {path_stack}"
        )
@dataclass(frozen=True)
class _DependencyMappingRequest:
    """Request to compute the transitive dependency mapping for a set of roots."""

    tt_request: TransitiveTargetsRequest
    # Whether to expand target generators while traversing dependencies.
    expanded_targets: bool
@dataclass(frozen=True)
class _DependencyMapping:
    """The transitive adjacency list, all visited targets, and the resolved roots."""

    mapping: FrozenDict[Address, tuple[Address, ...]]
    visited: FrozenOrderedSet[Target]
    roots_as_targets: Collection[Target]
@rule
async def transitive_dependency_mapping(request: _DependencyMappingRequest) -> _DependencyMapping:
    """This uses iteration, rather than recursion, so that we can tolerate dependency cycles.
    Unlike a traditional BFS algorithm, we batch each round of traversals via `MultiGet` for
    improved performance / concurrency.
    """
    roots_as_targets = await Get(UnexpandedTargets, Addresses(request.tt_request.roots))
    visited: OrderedSet[Target] = OrderedSet()
    queued = FrozenOrderedSet(roots_as_targets)
    dependency_mapping: dict[Address, tuple[Address, ...]] = {}
    # Each round resolves the direct dependencies of everything queued, in parallel.
    while queued:
        direct_dependencies: tuple[Collection[Target], ...]
        if request.expanded_targets:
            direct_dependencies = await MultiGet(  # noqa: PNT30: this is inherently sequential
                Get(
                    Targets,
                    DependenciesRequest(
                        tgt.get(Dependencies),
                        should_traverse_deps_predicate=request.tt_request.should_traverse_deps_predicate,
                    ),
                )
                for tgt in queued
            )
        else:
            direct_dependencies = await MultiGet(  # noqa: PNT30: this is inherently sequential
                Get(
                    UnexpandedTargets,
                    DependenciesRequest(
                        tgt.get(Dependencies),
                        should_traverse_deps_predicate=request.tt_request.should_traverse_deps_predicate,
                    ),
                )
                for tgt in queued
            )

        # Record each queued target's direct-dependency addresses in the adjacency list.
        dependency_mapping.update(
            zip(
                (t.address for t in queued),
                (tuple(t.address for t in deps) for deps in direct_dependencies),
            )
        )

        # Next round: everything newly discovered this round.
        queued = FrozenOrderedSet(itertools.chain.from_iterable(direct_dependencies)).difference(
            visited
        )
        visited.update(queued)

    # NB: We use `roots_as_targets` to get the root addresses, rather than `request.roots`. This
    # is because expanding from the `Addresses` -> `Targets` may have resulted in generated
    # targets being used, so we need to use `roots_as_targets` to have this expansion.
    # TODO(#12871): Fix this to not be based on generated targets.
    _detect_cycles(tuple(t.address for t in roots_as_targets), dependency_mapping)
    return _DependencyMapping(
        FrozenDict(dependency_mapping), FrozenOrderedSet(visited), roots_as_targets
    )
@rule(desc="Resolve transitive targets", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def transitive_targets(
    request: TransitiveTargetsRequest, local_environment_name: ChosenLocalEnvironmentName
) -> TransitiveTargets:
    """Find all the targets transitively depended upon by the target roots."""
    dependency_mapping = await Get(_DependencyMapping, _DependencyMappingRequest(request, True))

    # Apply any transitive excludes (`!!` ignores).
    transitive_excludes: FrozenOrderedSet[Target] = FrozenOrderedSet()
    unevaluated_transitive_excludes = []
    for t in (*dependency_mapping.roots_as_targets, *dependency_mapping.visited):
        unparsed = t.get(Dependencies).unevaluated_transitive_excludes
        if unparsed.values:
            unevaluated_transitive_excludes.append(unparsed)
    if unevaluated_transitive_excludes:
        # Resolve every exclude spec to concrete targets, then remove them from the closure.
        nested_transitive_excludes = await MultiGet(
            Get(Targets, UnparsedAddressInputs, unparsed)
            for unparsed in unevaluated_transitive_excludes
        )
        transitive_excludes = FrozenOrderedSet(
            itertools.chain.from_iterable(excludes for excludes in nested_transitive_excludes)
        )

    return TransitiveTargets(
        tuple(dependency_mapping.roots_as_targets),
        FrozenOrderedSet(dependency_mapping.visited.difference(transitive_excludes)),
    )
# -----------------------------------------------------------------------------------------------
# CoarsenedTargets
# -----------------------------------------------------------------------------------------------
@rule(_masked_types=[EnvironmentName])
def coarsened_targets_request(addresses: Addresses) -> CoarsenedTargetsRequest:
    """Convert raw addresses into a default `CoarsenedTargetsRequest`."""
    return CoarsenedTargetsRequest(addresses)
@rule(desc="Resolve coarsened targets", level=LogLevel.DEBUG, _masked_types=[EnvironmentName])
async def coarsened_targets(
    request: CoarsenedTargetsRequest, local_environment_name: ChosenLocalEnvironmentName
) -> CoarsenedTargets:
    """Group the transitive closure of the roots into strongly connected components."""
    dependency_mapping = await Get(
        _DependencyMapping,
        _DependencyMappingRequest(
            TransitiveTargetsRequest(
                request.roots,
                should_traverse_deps_predicate=request.should_traverse_deps_predicate,
            ),
            expanded_targets=request.expanded_targets,
        ),
    )
    addresses_to_targets = {
        t.address: t for t in [*dependency_mapping.visited, *dependency_mapping.roots_as_targets]
    }

    # Because this is Tarjan's SCC (TODO: update signature to guarantee), components are returned
    # in reverse topological order. We can thus assume when building the structure shared
    # `CoarsenedTarget` instances that each instance will already have had its dependencies
    # constructed.
    components = native_engine.strongly_connected_components(
        list(dependency_mapping.mapping.items())
    )

    coarsened_targets: dict[Address, CoarsenedTarget] = {}
    root_coarsened_targets = []
    root_addresses_set = set(request.roots)
    try:
        for component in components:
            component = sorted(component)
            component_set = set(component)

            # For each member of the component, include the CoarsenedTarget for each of its external
            # dependencies.
            coarsened_target = CoarsenedTarget(
                (addresses_to_targets[a] for a in component),
                (
                    coarsened_targets[d]
                    for a in component
                    for d in dependency_mapping.mapping[a]
                    if d not in component_set
                ),
            )

            # Add to the coarsened_targets mapping under each of the component's Addresses.
            for address in component:
                coarsened_targets[address] = coarsened_target

            # If any of the input Addresses was a member of this component, it is a root.
            if component_set & root_addresses_set:
                root_coarsened_targets.append(coarsened_target)
    except KeyError:
        # TODO: This output is intended to help uncover a non-deterministic error reported in
        # https://github.com/pantsbuild/pants/issues/17047.
        mapping_str = json.dumps(
            {str(a): [str(d) for d in deps] for a, deps in dependency_mapping.mapping.items()}
        )
        components_str = json.dumps([[str(a) for a in component] for component in components])
        logger.warning(f"For {request}:\nMapping:\n{mapping_str}\nComponents:\n{components_str}")
        raise
    return CoarsenedTargets(tuple(root_coarsened_targets))
# -----------------------------------------------------------------------------------------------
# Find the owners of a file
# -----------------------------------------------------------------------------------------------
def _log_or_raise_unmatched_owners(
    file_paths: Sequence[PurePath],
    owners_not_found_behavior: GlobMatchErrorBehavior,
    ignore_option: str | None = None,
) -> None:
    """Warn or error (per `owners_not_found_behavior`) about files with no owning target.

    Args:
        file_paths: The un-owned files to report.
        owners_not_found_behavior: `warn` logs a warning; anything else raises.
        ignore_option: If given, a CLI flag to mention for suppressing the report.

    Raises:
        ResolveError: when the behavior is not `warn`.
    """
    option_msg = (
        f"\n\nIf you would like to ignore un-owned files, please pass `{ignore_option}`."
        if ignore_option
        else ""
    )
    if len(file_paths) == 1:
        prefix = (
            f"No owning targets could be found for the file `{file_paths[0]}`.\n\n"
            f"Please check that there is a BUILD file in the parent directory "
            f"{file_paths[0].parent} with a target whose `sources` field includes the file."
        )
    else:
        # Fixed: the message previously had a stray, unbalanced backtick after the
        # file list (`...)}`.`).
        prefix = (
            f"No owning targets could be found for the files {sorted(map(str, file_paths))}.\n\n"
            f"Please check that there are BUILD files in each file's parent directory with a "
            f"target whose `sources` field includes the file."
        )
    msg = (
        f"{prefix} See {doc_url('targets')} for more information on target definitions."
        f"\n\nYou may want to run `{bin_name()} tailor` to autogenerate your BUILD files. See "
        f"{doc_url('create-initial-build-files')}.{option_msg}"
    )
    if owners_not_found_behavior == GlobMatchErrorBehavior.warn:
        logger.warning(msg)
    else:
        raise ResolveError(msg)
@dataclass(frozen=True)
class OwnersRequest:
    """A request for the owners of a set of file paths.
    TODO: This is widely used as an effectively-public API. It should probably move to
    `pants.engine.target`.
    """
    # File paths (not globs) whose owning targets should be found.
    sources: tuple[str, ...]
    # What to do for sources that end up with no owner: ignore, warn, or error.
    owners_not_found_behavior: GlobMatchErrorBehavior = GlobMatchErrorBehavior.ignore
    # If true, the live-file lookup goes through FilteredTargets (global target filtering).
    filter_by_global_options: bool = False
    # If true, a target also counts as an owner when the requested path is its own BUILD file.
    match_if_owning_build_file_included_in_sources: bool = False
class Owners(FrozenOrderedSet[Address]):
    """The addresses of the targets that own the requested file paths."""
    pass
@rule(desc="Find which targets own certain files", _masked_types=[EnvironmentName])
async def find_owners(
    owners_request: OwnersRequest,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Owners:
    """Find the addresses of all targets whose `sources` field matches the requested paths.

    Handles both files that currently exist on disk ("live") and files that have been
    deleted: deleted files can only be matched against the original `sources` globs
    declared on target generators, since their generated targets no longer exist.
    """
    # Determine which of the sources are live and which are deleted.
    sources_paths = await Get(Paths, PathGlobs(owners_request.sources))
    live_files = FrozenOrderedSet(sources_paths.files)
    deleted_files = FrozenOrderedSet(s for s in owners_request.sources if s not in live_files)
    live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
    deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)
    def create_live_and_deleted_gets(
        *, filter_by_global_options: bool
    ) -> tuple[Get[FilteredTargets | Targets], Get[UnexpandedTargets],]:
        """Walk up the buildroot looking for targets that would conceivably claim changed sources.
        For live files, we use Targets, which causes generated targets to be used rather than their
        target generators. For deleted files we use UnexpandedTargets, which have the original
        declared `sources` globs from target generators.
        We ignore unrecognized files, which can happen e.g. when finding owners for deleted files.
        """
        live_raw_specs = RawSpecsWithoutFileOwners(
            ancestor_globs=tuple(AncestorGlobSpec(directory=d) for d in live_dirs),
            filter_by_global_options=filter_by_global_options,
            description_of_origin="<owners rule - unused>",
            unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
        )
        live_get: Get[FilteredTargets | Targets] = (
            Get(FilteredTargets, RawSpecsWithoutFileOwners, live_raw_specs)
            if filter_by_global_options
            else Get(Targets, RawSpecsWithoutFileOwners, live_raw_specs)
        )
        deleted_get = Get(
            UnexpandedTargets,
            RawSpecsWithoutFileOwners(
                ancestor_globs=tuple(AncestorGlobSpec(directory=d) for d in deleted_dirs),
                filter_by_global_options=filter_by_global_options,
                description_of_origin="<owners rule - unused>",
                unmatched_glob_behavior=GlobMatchErrorBehavior.ignore,
            ),
        )
        return live_get, deleted_get
    live_get, deleted_get = create_live_and_deleted_gets(
        filter_by_global_options=owners_request.filter_by_global_options
    )
    live_candidate_tgts, deleted_candidate_tgts = await MultiGet(live_get, deleted_get)
    result = set()
    # Any requested source still in this set after both passes has no owner.
    unmatched_sources = set(owners_request.sources)
    for live in (True, False):
        candidate_tgts: Sequence[Target]
        if live:
            candidate_tgts = live_candidate_tgts
            sources_set = live_files
        else:
            candidate_tgts = deleted_candidate_tgts
            sources_set = deleted_files
        build_file_addresses = await MultiGet(  # noqa: PNT30: requires triage
            Get(
                BuildFileAddress,
                BuildFileAddressRequest(
                    tgt.address, description_of_origin="<owners rule - cannot trigger>"
                ),
            )
            for tgt in candidate_tgts
        )
        for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
            # Which of the requested sources does this candidate's `sources` field cover?
            matching_files = set(
                candidate_tgt.get(SourcesField).filespec_matcher.matches(list(sources_set))
            )
            # A candidate can also "own" a path when that path is its own BUILD file,
            # if the request opted into that behavior.
            if not matching_files and not (
                owners_request.match_if_owning_build_file_included_in_sources
                and bfa.rel_path in sources_set
            ):
                continue
            unmatched_sources -= matching_files
            result.add(candidate_tgt.address)
    if (
        unmatched_sources
        and owners_request.owners_not_found_behavior != GlobMatchErrorBehavior.ignore
    ):
        _log_or_raise_unmatched_owners(
            [PurePath(path) for path in unmatched_sources], owners_request.owners_not_found_behavior
        )
    return Owners(result)
# -----------------------------------------------------------------------------------------------
# Resolve SourcesField
# -----------------------------------------------------------------------------------------------
@rule
def extract_unmatched_build_file_globs(
    global_options: GlobalOptions,
) -> UnmatchedBuildFileGlobs:
    """Expose the `unmatched_build_file_globs` global option as its own rule product."""
    behavior = global_options.unmatched_build_file_globs
    return UnmatchedBuildFileGlobs(behavior)
class AmbiguousCodegenImplementationsException(Exception):
    """Exception for when there are multiple codegen implementations and it is ambiguous which to
    use."""
    @classmethod
    def create(
        cls,
        generators: Iterable[type[GenerateSourcesRequest]],
        *,
        for_sources_types: Iterable[type[SourcesField]],
    ) -> AmbiguousCodegenImplementationsException:
        """Build the exception with a message describing the ambiguous `generators`.

        `generators` is expected to contain 2+ codegen request types that all apply;
        `for_sources_types` is the set of output source types the caller asked for.
        """
        # If every generator maps the same input type to the same output type, the conflict
        # is a plain duplicate registration; otherwise we list each generator's output type.
        all_same_generator_paths = (
            len({(generator.input, generator.output) for generator in generators}) == 1
        )
        example_generator = list(generators)[0]
        input = example_generator.input.__name__
        if all_same_generator_paths:
            output = example_generator.output.__name__
            return cls(
                f"Multiple registered code generators can generate {output} from {input}. "
                "It is ambiguous which implementation to use.\n\nPossible implementations:\n\n"
                f"{bullet_list(sorted(generator.__name__ for generator in generators))}"
            )
        # Only report output types that the caller actually requested.
        possible_output_types = sorted(
            generator.output.__name__
            for generator in generators
            if issubclass(generator.output, tuple(for_sources_types))
        )
        possible_generators_with_output = [
            f"{generator.__name__} -> {generator.output.__name__}"
            for generator in sorted(generators, key=lambda generator: generator.output.__name__)
        ]
        return cls(
            f"Multiple registered code generators can generate one of "
            f"{possible_output_types} from {input}. It is ambiguous which implementation to "
            f"use. This can happen when the call site requests too many different output types "
            f"from the same original protocol sources.\n\nPossible implementations with their "
            f"output type:\n\n"
            f"{bullet_list(possible_generators_with_output)}"
        )
@rule(desc="Hydrate the `sources` field")
async def hydrate_sources(
    request: HydrateSourcesRequest,
    unmatched_build_file_globs: UnmatchedBuildFileGlobs,
    union_membership: UnionMembership,
) -> HydratedSources:
    """Expand a target's `sources` field into a snapshot, running codegen when enabled.

    Returns `HydratedSources` with an empty snapshot and `sources_type=None` when the
    field is compatible with none of `request.for_sources_types`, either directly or
    through a registered code generator.
    """
    sources_field = request.field
    # First, find if there are any code generators for the input `sources_field`. This will be used
    # to determine if the sources_field is valid or not.
    # We could alternatively use `sources_field.can_generate()`, but we want to error if there are
    # 2+ generators due to ambiguity.
    generate_request_types = union_membership.get(GenerateSourcesRequest)
    relevant_generate_request_types = [
        generate_request_type
        for generate_request_type in generate_request_types
        if isinstance(sources_field, generate_request_type.input)
        and issubclass(generate_request_type.output, request.for_sources_types)
    ]
    if request.enable_codegen and len(relevant_generate_request_types) > 1:
        raise AmbiguousCodegenImplementationsException.create(
            relevant_generate_request_types, for_sources_types=request.for_sources_types
        )
    generate_request_type = next(iter(relevant_generate_request_types), None)
    # Now, determine if any of the `for_sources_types` may be used, either because the
    # sources_field is a direct subclass or can be generated into one of the valid types.
    def compatible_with_sources_field(valid_type: type[SourcesField]) -> bool:
        is_instance = isinstance(sources_field, valid_type)
        can_be_generated = (
            request.enable_codegen
            and generate_request_type is not None
            and issubclass(generate_request_type.output, valid_type)
        )
        return is_instance or can_be_generated
    sources_type = next(
        (
            valid_type
            for valid_type in request.for_sources_types
            if compatible_with_sources_field(valid_type)
        ),
        None,
    )
    if sources_type is None:
        return HydratedSources(EMPTY_SNAPSHOT, sources_field.filespec, sources_type=None)
    # Now, hydrate the `globs`. Even if we are going to use codegen, we will need the original
    # protocol sources to be hydrated.
    path_globs = sources_field.path_globs(unmatched_build_file_globs)
    snapshot = await Get(Snapshot, PathGlobs, path_globs)
    sources_field.validate_resolved_files(snapshot.files)
    # Finally, return if codegen is not in use; otherwise, run the relevant code generator.
    if not request.enable_codegen or generate_request_type is None:
        return HydratedSources(snapshot, sources_field.filespec, sources_type=sources_type)
    wrapped_protocol_target = await Get(
        WrappedTarget,
        WrappedTargetRequest(
            sources_field.address,
            # It's only possible to hydrate sources on a target that we already know exists.
            description_of_origin="<infallible>",
        ),
    )
    generated_sources = await Get(
        GeneratedSources,
        GenerateSourcesRequest,
        generate_request_type(snapshot, wrapped_protocol_target.target),
    )
    return HydratedSources(
        generated_sources.snapshot, sources_field.filespec, sources_type=sources_type
    )
@rule(desc="Resolve `sources` field file names")
async def resolve_source_paths(
    request: SourcesPathsRequest, unmatched_build_file_globs: UnmatchedBuildFileGlobs
) -> SourcesPaths:
    """Expand a `SourcesField`'s globs into concrete file and directory paths."""
    field = request.field
    globs = field.path_globs(unmatched_build_file_globs)
    resolved = await Get(Paths, PathGlobs, globs)
    field.validate_resolved_files(resolved.files)
    return SourcesPaths(files=resolved.files, dirs=resolved.dirs)
# -----------------------------------------------------------------------------------------------
# Resolve addresses, including the Dependencies field
# -----------------------------------------------------------------------------------------------
class SubprojectRoots(Collection[str]):
    """Paths taken from the `subproject_roots` global option, used when parsing addresses."""
    pass
@rule
def extract_subproject_roots(global_options: GlobalOptions) -> SubprojectRoots:
    """Expose the `subproject_roots` global option as its own rule product."""
    roots = global_options.subproject_roots
    return SubprojectRoots(roots)
class ParsedDependencies(NamedTuple):
    """A `dependencies` field split into includes and `!`-prefixed ignores."""
    addresses: list[AddressInput]
    ignored_addresses: list[AddressInput]
class TransitiveExcludesNotSupportedError(ValueError):
    """Raised when a `!!` transitive exclude is used on a target type that does not support it."""

    def __init__(
        self,
        *,
        bad_value: str,
        address: Address,
        registered_target_types: Iterable[type[Target]],
        union_membership: UnionMembership,
    ) -> None:
        def allows_transitive_excludes(tgt_type: type[Target]) -> bool:
            # A target type qualifies when it has a Dependencies field that opted in.
            if not tgt_type.class_has_field(Dependencies, union_membership=union_membership):
                return False
            deps_field = tgt_type.class_get_field(Dependencies, union_membership=union_membership)
            return deps_field.supports_transitive_excludes

        applicable_target_types = sorted(
            tgt_type.alias
            for tgt_type in registered_target_types
            if allows_transitive_excludes(tgt_type)
        )
        super().__init__(
            f"Bad value '{bad_value}' in the `dependencies` field for {address}. "
            "Transitive excludes with `!!` are not supported for this target type. Did you mean "
            "to use a single `!` for a direct exclude?\n\nTransitive excludes work with these "
            f"target types: {applicable_target_types}"
        )
@rule
async def convert_dependencies_request_to_explicitly_provided_dependencies_request(
    request: DependenciesRequest,
) -> ExplicitlyProvidedDependenciesRequest:
    """This rule discards any deps predicate from DependenciesRequest.

    Calculating ExplicitlyProvidedDependencies does not use any deps traversal predicates as it is
    meant to list all explicit deps from the given field. By stripping the predicate from the
    request, we ensure that the cache key for ExplicitlyProvidedDependencies calculation does not
    include the predicate, increasing the cache-hit rate.
    """
    # TODO: Maybe require Get(ExplicitlyProvidedDependencies, ExplicitlyProvidedDependenciesRequest)
    # and deprecate Get(ExplicitlyProvidedDependencies, DependenciesRequest) via this rule.
    return ExplicitlyProvidedDependenciesRequest(request.field)
@rule
async def determine_explicitly_provided_dependencies(
    request: ExplicitlyProvidedDependenciesRequest,
    union_membership: UnionMembership,
    registered_target_types: RegisteredTargetTypes,
    subproject_roots: SubprojectRoots,
) -> ExplicitlyProvidedDependencies:
    """Parse the raw `dependencies` field into explicit includes and ignores.

    Entries prefixed with `!` are direct ignores. A `!!` prefix marks a transitive
    exclude, which is only legal on target types whose `dependencies` field opted in.
    """
    parse = functools.partial(
        AddressInput.parse,
        relative_to=request.field.address.spec_path,
        subproject_roots=subproject_roots,
        description_of_origin=(
            f"the `{request.field.alias}` field from the target {request.field.address}"
        ),
    )
    includes: list[AddressInput] = []
    ignores: list[AddressInput] = []
    for raw_entry in request.field.value or ():
        if not raw_entry.startswith("!"):
            includes.append(parse(raw_entry))
            continue
        if raw_entry.startswith("!!"):
            # Transitive exclude, rather than a direct exclude.
            if not request.field.supports_transitive_excludes:
                raise TransitiveExcludesNotSupportedError(
                    bad_value=raw_entry,
                    address=request.field.address,
                    registered_target_types=registered_target_types.types,
                    union_membership=union_membership,
                )
            ignores.append(parse(raw_entry[2:]))
        else:
            ignores.append(parse(raw_entry[1:]))
    parsed_includes = await MultiGet(Get(Address, AddressInput, ai) for ai in includes)
    parsed_ignores = await MultiGet(Get(Address, AddressInput, ai) for ai in ignores)
    return ExplicitlyProvidedDependencies(
        request.field.address,
        FrozenOrderedSet(sorted(parsed_includes)),
        FrozenOrderedSet(sorted(parsed_ignores)),
    )
async def _fill_parameters(
    field_alias: str,
    consumer_tgt: Target,
    addresses: Iterable[Address],
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    field_defaults: FieldDefaults,
    local_environment_name: ChosenLocalEnvironmentName,
) -> tuple[Address, ...]:
    """Fill partially-/un-parametrized `addresses` from the `field_alias` field of `consumer_tgt`."""
    # `addresses` is iterated twice below, so a one-shot iterator would silently misbehave.
    assert not isinstance(addresses, Iterator)
    all_parametrizations = await MultiGet(
        Get(
            _TargetParametrizations,
            {
                _TargetParametrizationsRequest(
                    address.maybe_convert_to_target_generator(),
                    description_of_origin=f"the `{field_alias}` field of the target {consumer_tgt.address}",
                ): _TargetParametrizationsRequest,
                local_environment_name.val: EnvironmentName,
            },
        )
        for address in addresses
    )
    filled: list[Address] = []
    for address, parametrization in zip(addresses, all_parametrizations):
        subset = parametrization.get_subset(
            address, consumer_tgt, field_defaults, target_types_to_generate_requests
        )
        filled.append(subset.address)
    return tuple(filled)
@rule(desc="Resolve direct dependencies of target", _masked_types=[EnvironmentName])
async def resolve_dependencies(
    request: DependenciesRequest,
    target_types_to_generate_requests: TargetTypesToGenerateTargetsRequests,
    union_membership: UnionMembership,
    subproject_roots: SubprojectRoots,
    field_defaults: FieldDefaults,
    local_environment_name: ChosenLocalEnvironmentName,
) -> Addresses:
    """Compute the full set of direct dependency addresses for one target.

    Combines: generated-target addresses (for target generators), explicitly provided
    deps, inferred deps, and special-cased deps fields — then removes explicit
    `!`-ignores and inference-reported excludes, and runs registered validators.
    """
    environment_name = local_environment_name.val
    wrapped_tgt = await Get(
        WrappedTarget,
        # It's only possible to find dependencies for a target that we already know exists.
        WrappedTargetRequest(request.field.address, description_of_origin="<infallible>"),
    )
    tgt = wrapped_tgt.target
    # This predicate allows the dep graph to ignore dependencies of selected targets
    # including any explicit deps and any inferred deps.
    # For example, to avoid traversing the deps of package targets.
    if request.should_traverse_deps_predicate(tgt, request.field) == DepsTraversalBehavior.EXCLUDE:
        return Addresses([])
    try:
        explicitly_provided = await Get(
            ExplicitlyProvidedDependencies, DependenciesRequest, request
        )
    except Exception as e:
        # Surface the target's origin so users can locate the bad `dependencies` value.
        raise InvalidFieldException(
            f"{tgt.description_of_origin}: Failed to get dependencies for {tgt.address}: {e}"
        )
    # Infer any dependencies (based on `SourcesField` field).
    inference_request_types = cast(
        "Sequence[Type[InferDependenciesRequest]]", union_membership.get(InferDependenciesRequest)
    )
    inferred: tuple[InferredDependencies, ...] = ()
    if inference_request_types:
        relevant_inference_request_types = [
            inference_request_type
            for inference_request_type in inference_request_types
            if inference_request_type.infer_from.is_applicable(tgt)
        ]
        inferred = await MultiGet(
            Get(
                InferredDependencies,
                {
                    inference_request_type(
                        inference_request_type.infer_from.create(tgt)
                    ): InferDependenciesRequest,
                    environment_name: EnvironmentName,
                },
            )
            for inference_request_type in relevant_inference_request_types
        )
    # If it's a target generator, inject dependencies on all of its generated targets.
    generated_addresses: tuple[Address, ...] = ()
    if target_types_to_generate_requests.is_generator(tgt) and not tgt.address.is_generated_target:
        parametrizations = await Get(
            _TargetParametrizations,
            {
                _TargetParametrizationsRequest(
                    tgt.address.maybe_convert_to_target_generator(),
                    description_of_origin=(
                        f"the target generator {tgt.address.maybe_convert_to_target_generator()}"
                    ),
                ): _TargetParametrizationsRequest,
                environment_name: EnvironmentName,
            },
        )
        generated_addresses = tuple(parametrizations.generated_for(tgt.address).keys())
    # See whether any explicitly provided dependencies are parametrized, but with partial/no
    # parameters. If so, fill them in.
    explicitly_provided_includes: Iterable[Address] = explicitly_provided.includes
    if explicitly_provided_includes:
        explicitly_provided_includes = await _fill_parameters(
            request.field.alias,
            tgt,
            explicitly_provided_includes,
            target_types_to_generate_requests,
            field_defaults,
            local_environment_name,
        )
    explicitly_provided_ignores: FrozenOrderedSet[Address] = explicitly_provided.ignores
    if explicitly_provided_ignores:
        explicitly_provided_ignores = FrozenOrderedSet(
            await _fill_parameters(
                request.field.alias,
                tgt,
                tuple(explicitly_provided_ignores),
                target_types_to_generate_requests,
                field_defaults,
                local_environment_name,
            )
        )
    # If the target has `SpecialCasedDependencies`, such as the `archive` target having
    # `files` and `packages` fields, then we possibly include those too. We don't want to always
    # include those dependencies because they should often be excluded from the result due to
    # being handled elsewhere in the calling code. So, we only include fields based on
    # the should_traverse_deps_predicate.
    # Unlike normal, we don't use `tgt.get()` because there may be >1 subclass of
    # SpecialCasedDependencies.
    special_cased_fields = tuple(
        field
        for field in tgt.field_values.values()
        if isinstance(field, SpecialCasedDependencies)
        and request.should_traverse_deps_predicate(tgt, field) == DepsTraversalBehavior.INCLUDE
    )
    # We can't use the normal `Get(Addresses, UnparsedAddressInputs)` due to a graph cycle.
    special_cased = await MultiGet(
        Get(
            Address,
            AddressInput,
            AddressInput.parse(
                addr,
                relative_to=tgt.address.spec_path,
                subproject_roots=subproject_roots,
                description_of_origin=(
                    f"the `{special_cased_field.alias}` field from the target {tgt.address}"
                ),
            ),
        )
        for special_cased_field in special_cased_fields
        for addr in special_cased_field.to_unparsed_address_inputs().values
    )
    # Excludes are explicit `!` ignores plus any excludes reported by inference.
    excluded = explicitly_provided_ignores.union(
        *itertools.chain(deps.exclude for deps in inferred)
    )
    result = Addresses(
        sorted(
            {
                addr
                for addr in (
                    *generated_addresses,
                    *explicitly_provided_includes,
                    *itertools.chain.from_iterable(deps.include for deps in inferred),
                    *special_cased,
                )
                if addr not in excluded
            }
        )
    )
    # Validate dependencies.
    _ = await MultiGet(
        Get(
            ValidatedDependencies,
            {
                vd_request_type(vd_request_type.field_set_type.create(tgt), result): ValidateDependenciesRequest,  # type: ignore[misc]
                environment_name: EnvironmentName,
            },
        )
        for vd_request_type in union_membership.get(ValidateDependenciesRequest)
        if vd_request_type.field_set_type.is_applicable(tgt)  # type: ignore[misc]
    )
    return result
@rule(desc="Resolve addresses")
async def resolve_unparsed_address_inputs(
    request: UnparsedAddressInputs, subproject_roots: SubprojectRoots
) -> Addresses:
    """Parse and resolve raw address strings into `Addresses`.

    When `request.skip_invalid_addresses` is set, unparseable or nonexistent addresses
    are dropped (logged at debug level); otherwise parse/resolution errors propagate.
    """
    address_inputs = []
    invalid_addresses = []
    for v in request.values:
        try:
            address_inputs.append(
                AddressInput.parse(
                    v,
                    relative_to=request.relative_to,
                    subproject_roots=subproject_roots,
                    description_of_origin=request.description_of_origin,
                )
            )
        except AddressParseException:
            if not request.skip_invalid_addresses:
                raise
            invalid_addresses.append(v)
    if request.skip_invalid_addresses:
        # Resolve leniently: MaybeAddress carries either an Address or a failure.
        maybe_addresses = await MultiGet(
            Get(MaybeAddress, AddressInput, ai) for ai in address_inputs
        )
        valid_addresses = []
        for maybe_address, address_input in zip(maybe_addresses, address_inputs):
            if isinstance(maybe_address.val, Address):
                valid_addresses.append(maybe_address.val)
            else:
                invalid_addresses.append(address_input.spec)
        if invalid_addresses:
            logger.debug(
                softwrap(
                    f"""
                    Invalid addresses from {request.description_of_origin}:
                    {sorted(invalid_addresses)}. Skipping them.
                    """
                )
            )
        return Addresses(valid_addresses)
    addresses = await MultiGet(Get(Address, AddressInput, ai) for ai in address_inputs)
    # Validate that the addresses exist. We do this eagerly here because
    # `Addresses -> UnexpandedTargets` does not preserve the `description_of_origin`, so it would
    # be too late, per https://github.com/pantsbuild/pants/issues/15858.
    await MultiGet(
        Get(
            WrappedTarget,
            WrappedTargetRequest(addr, description_of_origin=request.description_of_origin),
        )
        for addr in addresses
    )
    return Addresses(addresses)
# -----------------------------------------------------------------------------------------------
# Dynamic Field defaults
# -----------------------------------------------------------------------------------------------
@rule
async def field_defaults(union_membership: UnionMembership) -> FieldDefaults:
    """Collect dynamic field defaults from every registered `FieldDefaultFactoryRequest`."""
    factory_request_types = list(union_membership.get(FieldDefaultFactoryRequest))
    factory_results = await MultiGet(
        Get(FieldDefaultFactoryResult, FieldDefaultFactoryRequest, request_type())
        for request_type in factory_request_types
    )
    defaults = {
        request_type.field_type: result.default_factory
        for request_type, result in zip(factory_request_types, factory_results)
    }
    return FieldDefaults(FrozenDict(defaults))
# -----------------------------------------------------------------------------------------------
# Find applicable field sets
# -----------------------------------------------------------------------------------------------
@rule
def find_valid_field_sets(
    request: FieldSetsPerTargetRequest, union_membership: UnionMembership
) -> FieldSetsPerTarget:
    """For each requested target, create every applicable field set of the requested superclass."""
    candidate_types = union_membership.get(request.field_set_superclass)

    def applicable_field_sets(target):
        # One field set instance per registered type that applies to this target.
        return [
            candidate.create(target) for candidate in candidate_types if candidate.is_applicable(target)
        ]

    return FieldSetsPerTarget(applicable_field_sets(target) for target in request.targets)
class GenerateFileTargets(GenerateTargetsRequest):
    """Request to expand any `TargetFilesGenerator` into per-file generated targets."""
    generate_from = TargetFilesGenerator
@rule
async def generate_file_targets(
    request: GenerateFileTargets,
    union_membership: UnionMembership,
) -> GeneratedTargets:
    """Expand a `TargetFilesGenerator` into one generated target per matched source file."""
    try:
        sources_paths = await Get(
            SourcesPaths, SourcesPathsRequest(request.generator[MultipleSourcesField])
        )
    except Exception as e:
        # Re-raise with the generator's origin so users can locate the bad field value.
        tgt = request.generator
        fld = tgt[MultipleSourcesField]
        raise InvalidFieldException(
            softwrap(
                f"""
                {tgt.description_of_origin}: Invalid field value for {fld.alias!r} in target {tgt.address}:
                {e}
                """
            )
        ) from e
    add_dependencies_on_all_siblings = False
    if request.generator.settings_request_cls:
        # The generator type may expose plugin-tunable settings via a settings request.
        generator_settings = await Get(
            TargetFilesGeneratorSettings,
            TargetFilesGeneratorSettingsRequest,
            request.generator.settings_request_cls(),
        )
        add_dependencies_on_all_siblings = generator_settings.add_dependencies_on_all_siblings
    return _generate_file_level_targets(
        type(request.generator).generated_target_cls,
        request.generator,
        sources_paths.files,
        request.template_address,
        request.template,
        request.overrides,
        union_membership,
        add_dependencies_on_all_siblings=add_dependencies_on_all_siblings,
    )
def rules():
    """All rules in this module, plus the union registrations it contributes."""
    module_rules = list(collect_rules())
    module_rules.append(UnionRule(GenerateTargetsRequest, GenerateFileTargets))
    return module_rules
|
from django.db import models
# Django-style choices for Artist.genre: (stored value, human-readable label) pairs.
GENRE_CHOICES = (
    ("rock", "Rock"),
    ("blues", "Blues"),
)
class Artist(models.Model):
    """A musical artist with basic profile information."""
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # Stage name, as shown to listeners.
    artistic_name = models.CharField(max_length=255)
    picture_url = models.URLField()
    # Popularity score; no range is enforced here — presumably 0-100, TODO confirm.
    popularity = models.IntegerField()
    # Limited (at form/validation level) to the module-level GENRE_CHOICES.
    genre = models.CharField(choices=GENRE_CHOICES, max_length=255)
class Song(models.Model):
    """A song by an Artist; deleting the artist cascades to its songs."""
    artist = models.ForeignKey(Artist, on_delete=models.CASCADE)
    title = models.CharField(max_length=255)
    # May be blank, e.g. for singles not attached to an album.
    album_name = models.CharField(max_length=255, blank=True)
|
#!/usr/bin/python
#
# Sample python code using the standard http lib only
#
import httplib
## Your Infinispan WAR server host
hostname = "localhost:8080"
webapp_name = "infinispan-server-rest"
cache_name = "___defaultcache"
key = "my_key"
#putting data in
print "Storing data on server %s under key [%s] over REST" % (hostname, key)
try:
conn = httplib.HTTPConnection(hostname)
data = "This is some test data." #could be string, or a file...
conn.request("POST", "/%s/rest/%s/%s" % (webapp_name, cache_name, key), data, {"Content-Type": "text/plain"})
response = conn.getresponse()
print "HTTP status: %s" % response.status
except:
print "Unable to connect to the REST server on %s. Is it running?" % hostname
#getting data out
print "Retrieving data from server %s under key [%s]" % (hostname, key)
try:
conn = httplib.HTTPConnection(hostname)
conn.request("GET", "/%s/rest/%s/%s" % (webapp_name, cache_name, key))
response = conn.getresponse()
print "HTTP status: %s" % response.status
print "Value retrieved: %s" % response.read()
except:
print "Unable to connect to the REST server on %s. Is it running?" % hostname
## For more information on usage see http://community.jboss.org/wiki/InfinispanRESTserver
|
# Generated by Django 3.2.4 on 2021-07-07 13:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a unique constraint to `Produto.nome`.

    Do not hand-edit once applied; create a follow-up migration instead.
    """
    dependencies = [
        ('churrasco', '0002_auto_20210706_1629'),
    ]
    operations = [
        migrations.AlterField(
            model_name='produto',
            name='nome',
            field=models.CharField(max_length=100, unique=True, verbose_name='Nome'),
        ),
    ]
|
End of preview. Expand
in Data Studio
README.md exists but content is empty.
- Downloads last month
- 4