blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ef96178e01ca9c202b4ca5bacb677ada821ee78 | 260b693b52bb8a9b38cd7a10c73928436d318c7b | /Python实现项目进度管理 - 副本.py | f5116a58b6fd50f87dd353f473d56a627c87d91e | [] | no_license | YuweiShen/Simulation | 5c0bcb7f6fd286d2c9cfa5c33e0a8b570a3383dd | 6c99234527fa51d73a6a46331e3c148caaa33056 | refs/heads/master | 2021-10-20T22:17:08.919143 | 2021-10-17T08:44:04 | 2021-10-17T08:44:04 | 174,316,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,684 | py | #!/usr/bin/env python
# coding: utf-8
# ## 随机工程进度优化
#
# 问题设置:项目成本$c_{ij} = i+j$是一个常数,日利率是0.6%,项目完成时间为154天,子工程完成的时间服从U(i,j)形式的均匀分布。其中决策变量$x_i$是所有以(i,j)表示的子项目所需贷款的到位时间。
#
# ### 期望费用最小化模型(EVM)
#
# $$
# \begin{align*}
# min &\;\; E[C(x,\xi)]\\
# s.t.&\;\; E[T(x,\xi)] \le T^0\\
# &\;\; x\ge 0
# \end{align*}
# $$
# 其中
#
# $$T(x,\xi) = \max_{(k,n+1)}\{T_k(x,\xi)+\xi_{k,n+1}\}\\
# C(x,\xi) = \sum_{(i,j)}c_{ij}(1+r)^{[T(x,\xi)-x_i]}
# $$
# In[3]:
import time
import numpy as np
import random
## 生成决策变量样本x,每个xi都是整数
## 为了让生成的样本点有效,根据图结构生成样本
# 保证前面的x_i小,后面的x_i相对大
def sample_one():
    """Draw one feasible decision vector (loan-arrival times) that respects
    the precedence structure of the activity network: each later x[i] is
    bounded below by the windows of its predecessors.

    Returns:
        numpy array of 8 values, with x[0] fixed to 1.
    """
    x = np.zeros(8)
    x[0] = 1
    x[1] = random.randint(4, 7)
    x[2] = random.randint(4, 10)
    x[3] = random.randint(4, 13)
    # Cast the numpy-float lower bounds to int: random.randint on non-integer
    # arguments is deprecated and raises on recent Python versions.
    x[4] = random.randint(int(max(x[1], x[2], 13)), 25)
    x[5] = random.randint(int(max(x[2], 13)), 28)
    x[6] = random.randint(int(max(x[2], x[3], 16)), 34)
    x[7] = random.randint(int(max(x[4], x[5], x[6], 37)), 58)
    return x
def sample_two():
    """Draw a decision vector using only the static per-activity windows;
    precedence between the x values is NOT enforced here (see sample_one)."""
    chrom = np.zeros(8)
    chrom[0] = 1
    windows = ((4, 7), (4, 10), (4, 13), (13, 25), (13, 28), (16, 34), (37, 58))
    for pos, (lo, hi) in enumerate(windows, start=1):
        chrom[pos] = random.randint(lo, hi)
    return chrom
def is_notvalid(xarr):
    """Check each sample in *xarr* against the precedence graph.

    Returns:
        (zeros, indices): a zero array and the array of indices of samples
        that violate the graph; both are empty when every sample is valid.
    """
    bad = [idx for idx, x in enumerate(xarr)
           if not (min(x[1], x[2], x[3]) > x[0]
                   and min(x[5], x[6], x[4]) > x[2]
                   and x[6] > x[3]
                   and x[4] > x[1])]
    return (np.zeros(len(bad)), np.asarray(bad))
def initiate(n, FieldDR=None):
    """Build an initial population of *n* graph-consistent chromosomes and
    print the elapsed sampling time.

    FieldDR is accepted for API compatibility with the GA template but is
    not used by this sampler.
    """
    started = time.time()
    pop = [sample_one() for _ in range(n)]
    print(time.time() - started)
    return np.asarray(pop)
def initiate1(n):
    """Build a population of *n* chromosomes by rejection sampling: draw with
    sample_two() and keep only candidates that satisfy the precedence graph.

    Prints the elapsed sampling time, like initiate().
    """
    started = time.time()
    pop = []
    while len(pop) < n:
        candidate = sample_two()
        # Keep the validated candidate itself; the original code discarded it
        # and appended a fresh sample_one() draw instead, defeating the
        # rejection-sampling comparison this function exists for.
        if len(is_notvalid([candidate])[0]) == 0:
            pop.append(candidate)
    print(time.time() - started)
    return np.asarray(pop)
#%%
# Compare the two initialisation strategies: generate 1000 chromosomes with
# each sampler and print the spread (sum of squared deviations) of x[7].
parents = initiate(1000) # graph-aware sampler
prt = initiate1(1000) # rejection sampling on sample_two()
#import matplotlib.pyplot as plt
print(sum((parents.T[7]-parents.T[7].mean())**2))
print(sum((prt.T[7]-prt.T[7].mean())**2))
# In[4]:
import math
# 计算任务开始时间
def calculate_T(x):
    """Simulate one realisation of the project schedule.

    Each activity starts once all its predecessors have finished AND its
    loan x[i] has arrived; edge durations are drawn uniformly at random.

    Returns:
        The start time of the final node (project completion driver).
    """
    start = np.zeros(8)
    start[0] = 1
    start[1] = max(start[0] + random.randint(3, 6), x[1])
    start[2] = max(start[0] + random.randint(3, 9), x[2])
    start[3] = max(start[0] + random.randint(3, 12), x[3])
    start[4] = max(start[1] + random.randint(6, 15), start[2] + random.randint(9, 15), x[4])
    start[5] = max(start[2] + random.randint(9, 18), x[5])
    start[6] = max(start[2] + random.randint(9, 21), start[3] + random.randint(12, 21), x[6])
    start[7] = max(start[4] + random.randint(15, 24),
                   start[5] + random.randint(18, 24),
                   start[6] + random.randint(21, 24),
                   x[7])
    return start[7]
##测试
calculate_T(parents[0])
# In[5]:
def percentile(array, alpha, lower=False):
    """Return the empirical alpha-percentile of *array*.

    Args:
        array: sequence of samples.
        alpha: percentile level in [0, 1]; values above 1 yield None.
        lower: if True, index from 1 - alpha (lower percentile point);
            by default the upper percentile point is returned.

    Returns:
        The percentile value, or None when alpha > 1.
    """
    if alpha > 1:
        return None
    array = np.sort(array)
    n = len(array)
    if lower == False:
        ind = int(round(n * alpha))
    else:
        ind = int(round(n * (1 - alpha)))
    # Clamp the index: round(n * alpha) can reach n (e.g. alpha == 1),
    # which raised IndexError in the original implementation.
    ind = min(max(ind, 0), n - 1)
    return array[ind]
# In[6]:
# 计算花费
def calculate_C(totaltime, r, x):
    """Total loan cost: each arc cost c_ij accrues compound interest at
    daily rate *r* from its loan-arrival time x[i] until project completion
    *totaltime* (rounded up to whole days)."""
    node_costs = ((3 + 4 + 5, 0), (7, 1), (8 + 9 + 10, 2), (11, 3),
                  (13, 4), (14, 5), (15, 6))
    total = 0
    for cost, node in node_costs:
        total += cost * (1 + r) ** math.ceil(totaltime - x[node])
    return total
def expectation(x, r, n):
    """Monte-Carlo estimates for every solution in *x*.

    For each decision vector, run *n* schedule simulations and collect the
    expected completion time, expected cost, Pr(T < 60), the 0.85
    percentile of cost, and Pr(C < 900).

    Returns:
        [E[T], E[C], Pr(T<60), C_0.85, Pr(C<900)] as numpy arrays, one
        entry per solution.
    """
    etime, ecost = [], []
    ptime, pcost = [], []
    c0cost = []
    count = 0
    for solution in x:
        # Coarse progress indicator (count advances by n per solution).
        if count % 100 == 0:
            print(count)
        times, costs = [], []
        for _ in range(n):
            finish = calculate_T(solution)
            times.append(finish)
            costs.append(calculate_C(finish, r, solution))
            count += 1
        etime.append(sum(times) / n)
        ptime.append(sum(np.asarray(times) < 60) / n)
        ecost.append(sum(costs) / n)
        c0cost.append(percentile(costs, 0.85))
        pcost.append(sum(np.asarray(costs) < 900) / n)
    return [np.asarray(etime), np.asarray(ecost), np.asarray(ptime),
            np.asarray(c0cost), np.asarray(pcost)]
# Smoke-test the cost function on one sampled chromosome.
calculate_C(50,0.06,parents[0])
# In[7]:
## Random sampling: draw 5000 decision vectors as surrogate training inputs.
x = initiate(5000)
x
# In[ ]:
# Monte-Carlo labels: 3000 simulations per vector at 6% interest.
y = expectation(x,0.06,3000)
Etime,Ecost = y[0],y[1]
# In[132]:
# Train a neural-network surrogate for the expected completion time.
from sklearn.neural_network import MLPRegressor as MLP
timefunc = MLP(activation='relu', learning_rate='adaptive',max_iter = 5000)
timefunc.fit(x,Etime)
# In[133]:
# Surrogate for the expected cost.
costfunc = MLP(activation='relu', learning_rate='adaptive',max_iter = 5000)
costfunc.fit(x,Ecost)
# In[137]:
# 定义目标和约束函数
def aim(variables,legV):
    '''Objective for the expected-value model (EVM).

    Predicts expected cost (objective) and expected completion time
    (constraint) with the trained surrogate networks, and flags as
    infeasible every individual whose predicted time exceeds 60 days or
    whose chromosome violates the precedence graph.

    Args:
        variables: population matrix, one chromosome per row.
        legV: feasibility column vector (1 feasible / 0 infeasible).

    Returns:
        [objective column vector, updated legV].
    '''
    #y = expectation(variables,0.06,500)
    cost = np.array([abs(costfunc.predict(variables))])
    constraint = np.array([timefunc.predict(variables)])
#    cost = np.array([y[1]])
#    constraint = np.array([y[0]])
    #print(constraint)
    idx1 = np.where(constraint>60)# penalise individuals predicted to exceed 60 days
    #print(idx1)
    #print(idx1[0])
    idx2 = is_notvalid(variables)
    # NOTE(review): idx1 is np.where's (rows, cols) tuple and idx2 is
    # is_notvalid's (zeros, indices) tuple; hstack flattens all four arrays,
    # so the zero-filled arrays also contribute index 0 whenever any
    # violation exists - confirm this is intended.
    exIdx = np.unique(np.hstack([idx1,idx2])) # indices of infeasible individuals
    exIdx = exIdx.astype(int)
    legV[exIdx] = 0 # 0 marks infeasible, 1 feasible
    return [cost.T,legV]
def punishing(LegV, FitnV):
    """Zero the fitness (in place) of every individual whose feasibility
    flag is 0, and return the updated fitness column."""
    infeasible = np.where(LegV == 0)[0]
    FitnV[infeasible] = 0
    return FitnV
# Smoke tests of the objective and the Monte-Carlo estimator.
# NOTE(review): legV here is shaped (2, 8) while `parents` holds 1000
# individuals - looks like leftover debugging; verify before reuse.
aim(parents,np.ones((2,8)))
expectation(np.asarray([0,1,1,2,5,2,3,3]).reshape((1,8)),0.06,1000)
# In[ ]:
## GA算法
import time
import numpy as np
import sys
import random
import geatpy as ga
## 交叉
rd = np.vectorize(round)  # element-wise rounding for chromosome arrays
def crossover(parents, recopt):
    """Whole-arithmetic crossover.

    With probability *recopt* a parent mates with a randomly chosen partner
    and contributes two blended (rounded) children to the next generation;
    otherwise the parent passes through unchanged.
    """
    pop_size = parents.shape[0]
    children = []
    for parent in parents:
        if np.random.rand() < recopt:
            mate_idx = np.random.randint(0, pop_size, size=1)
            weight = random.random()
            mate = parents[mate_idx]
            children.append(rd(weight * parent + (1 - weight) * mate).reshape(8))
            children.append(rd(weight * mate + (1 - weight) * parent).reshape(8))
        else:
            children.append(parent)
    return np.asarray(children)
## 评价适应度函数
def judge(ObjV, maxormin, alpha=0.7):
    """Rank-based fitness: the k-th best individual gets alpha*(1-alpha)**k.

    Args:
        ObjV: (POP_SIZE, 1) objective values.
        maxormin: -1 when smaller objectives are better, otherwise larger
            objectives are treated as better.
        alpha: geometric decay parameter in (0, 1).

    Returns:
        (POP_SIZE, 1) fitness column vector.
    """
    pop_size = ObjV.shape[0]
    values = ObjV.reshape(pop_size)
    # argsort gives every individual a unique rank, so tied objective values
    # no longer collapse onto one index (the original first-match scan left
    # duplicate values with fitness 0, wrongly starving them of selection).
    if maxormin == -1:
        order = np.argsort(values, kind='stable')    # ascending: smaller is better
    else:
        order = np.argsort(-values, kind='stable')   # descending: larger is better
    FitnV = np.zeros(pop_size)
    for rank, idx in enumerate(order):
        FitnV[idx] = alpha * (1 - alpha) ** rank
    return FitnV.reshape((pop_size, 1))
## 变异
def mutation(offspring_crossover, pm):
    """Whole-chromosome mutation.

    Each input row is emitted unchanged followed by a copy that, with
    probability *pm*, has every gene shifted by one random offset in
    [-2, 2] (absolute value keeps genes non-negative). The population
    therefore doubles in size.
    """
    mut_pop = []
    for row in offspring_crossover:
        parent = row.copy()
        # Append a copy: the original code appended a live view of the row
        # and then mutated the row in place, corrupting the "unchanged"
        # parent entry (and the caller's array) as well.
        mut_pop.append(parent)
        shift = random.randint(-2, 2)
        if np.random.rand() < pm:
            mut_pop.append(abs(parent + shift))  # new array; parent untouched
        else:
            mut_pop.append(parent.copy())
    return np.asarray(mut_pop)
## 自然选择,采用精英策略 + 轮盘赌
def select(Chrom, FitnV, NIND, maxormin):
    """Roulette-wheel selection with elitism.

    Keeps only feasible individuals (fitness != 0), draws 90% of the next
    generation proportionally to fitness, and appends the fittest survivors
    unchanged (those ranked above the 90% cut).
    """
    feasible = np.where(FitnV[:, 0] != 0)
    Chrom = Chrom[feasible]
    FitnV = FitnV[feasible]
    n_roulette = math.floor(NIND * 0.9)
    probs = FitnV[:, 0] / FitnV.sum()
    drawn = np.random.choice(np.arange(Chrom.shape[0]), size=n_roulette,
                             replace=True, p=probs)
    winners = Chrom[drawn]
    elites = Chrom[np.argsort(FitnV[:, 0])][n_roulette:]  # fitness ascending
    return np.vstack([winners, elites])
def GATemplate(AIM, PUN, FieldDR, maxormin, MAXGEN, NIND,recopt, pm,alpha):
    """Custom genetic-algorithm driver.

    Args:
        AIM: objective callable (Chrom, LegV) -> [ObjV, LegV].
        PUN: penalty callable (LegV, FitnV) -> FitnV, or None.
        FieldDR: geatpy field descriptor; only its shape is read (may be None).
        maxormin: -1 to minimise the objective, +1 to maximise (must match
            the convention used by judge()).
        MAXGEN: number of generations.
        NIND: population size.
        recopt: crossover probability.
        pm: mutation probability.
        alpha: rank-fitness decay parameter passed to judge().

    Returns:
        [pop_trace, var_trace, elapsed_seconds].
    """
    aimfuc = AIM
    if PUN is not None:
        punishing = PUN # penalty function
    if FieldDR is not None:
        NVAR = FieldDR.shape[0] # number of decision variables
    # NOTE(review): NVAR is unconditionally overwritten to 8 below, so the
    # FieldDR-derived value above is never used.
    NVAR = 8
    # Evolution recorder (per-generation best objective), initialised to NaN.
    pop_trace = (np.zeros((MAXGEN ,2)) * np.nan)
    # Variable recorder (per-generation best chromosome), initialised to NaN.
    var_trace = (np.zeros((MAXGEN ,NVAR)) * np.nan)
    #print(var_trace)
    # Build the initial population.
    Chrom = initiate(NIND, FieldDR)
    LegV = np.ones((Chrom.shape[0], 1)) # feasibility column: 1 feasible, 0 infeasible
    #print(LegV)
    [ObjV, LegV] = aimfuc(Chrom, LegV) # evaluate the population
    # Resample until at least one individual is feasible.
    while sum(LegV)==0:
        Chrom = initiate(NIND, FieldDR)
        LegV = np.ones((Chrom.shape[0], 1)) # feasibility column: 1 feasible, 0 infeasible
        #print(LegV)
        [ObjV, LegV] = aimfuc(Chrom, LegV) # evaluate the population
    #print(Chrom)
    gen = 0
    # Main evolutionary loop.
    start_time = time.time() # start timing
    badcount = 0
    while gen < MAXGEN:
        #print(gen)
        # Genetic operators: recombination then mutation build the offspring.
        SelCh = crossover1(Chrom, recopt) # recombination
        Chrom = mutation1(SelCh, pm) # mutation
        #print(Chrom)
        LegV = np.ones((Chrom.shape[0], 1)) # reset feasibility for the new population
        #print(LegV)
        [ObjV, LegV] = aimfuc(Chrom, LegV) # evaluate the new population
        #print('ObjV',ObjV.shape)
        #print('LegV',LegV.shape)
        FitnV = judge(ObjV,maxormin,alpha)
        if PUN is not None:
            FitnV = punishing(LegV, FitnV) # zero fitness of infeasible individuals
        # Track the best individual of this generation.
        bestIdx = np.nanargmax(FitnV)
        #print(bestIdx)
        if LegV[bestIdx] != 0:# only record feasible solutions
            pop_trace[gen,0] = ObjV[bestIdx] # best objective of this generation
            var_trace[gen,:] = Chrom[bestIdx,:] # best chromosome of this generation
        else:
            gen -= 1 # "forget" this generation
            badcount += 1
            if badcount >100:
                break
        if sum(FitnV)!=0:
            #print(FitnV)
            Chrom = select(Chrom, FitnV,NIND,maxormin)
        gen += 1
        if gen % 50 ==0:
            print(gen)
    end_time = time.time() # stop timing
    times = end_time - start_time
    # Plot the trace and report the best solution found.
    ga.trcplot(pop_trace, [['种群最优个体目标函数值']])
    if maxormin ==1 :
        best_gen = np.nanargmax(pop_trace[:, 0]) # generation holding the best solution
        best_ObjV = np.nanmax(pop_trace[:, 0])
    else:
        best_gen = np.nanargmin(pop_trace[:, 0]) # generation holding the best solution
        best_ObjV = np.nanmin(pop_trace[:, 0])
    if np.isnan(best_ObjV):
        raise RuntimeError('error: no feasible solution. (没找到可行解。)')
    print('最优的目标函数值为:', best_ObjV)
    print('最优的控制变量值为:')
    for i in range(NVAR):
        print(var_trace[best_gen, i])
    print('最优的一代是第', best_gen + 1, '代')
    print('时间已过', times, '秒')
    # Return the evolution recorder, variable recorder and elapsed time.
    return [pop_trace, var_trace, times]
# In[1]:
def crossover1(parents, recopt):
    """Single-point flat crossover.

    Every parent passes into the next generation unchanged; with
    probability *recopt* it additionally produces one child whose gene at a
    random locus is the rounded average with a random mate's gene.
    """
    pop_size = parents.shape[0]
    rd = np.vectorize(round)
    sub_pop = []
    for parent in parents:
        # Copy before appending: the original appended a live row view and
        # then mutated it in place, corrupting both the "unchanged" parent
        # entry and the caller's population.
        sub_pop.append(parent.copy())
        if np.random.rand() < recopt:
            mate = np.random.randint(0, pop_size, size=1)
            locus = np.random.randint(0, parents.shape[1], size=1)
            child = parent.copy()
            child[locus] = rd((parent[locus] + parents[mate, locus]) / 2)
            sub_pop.append(child)
    return np.asarray(sub_pop)
## 变异
def mutation1(offspring_crossover, pm):
    """Single-gene mutation.

    Each row is emitted unchanged followed by a copy that, with probability
    *pm*, has one randomly chosen gene shifted by a random offset in
    [-2, 2] (absolute value keeps it non-negative).
    """
    mut_pop = []
    for row in offspring_crossover:
        parent = row.copy()
        # Copy first: the original appended a live view that the in-place
        # mutation below silently overwrote, losing the unmutated parent.
        mut_pop.append(parent)
        shift = random.randint(-2, 2)
        locus = np.random.randint(0, offspring_crossover.shape[1], size=1)
        child = parent.copy()
        if np.random.rand() < pm:
            child[locus] = abs(child[locus] + shift)
        mut_pop.append(child)
    return np.asarray(mut_pop)
#%%
#def GAtemplate(AIM, PUN, FieldDR, problem, maxormin, GGAP, MAXGEN, NIND, SUBPOP, selectStyle, recombinStyle, recopt, pm, drawing = 1):
# GGAP = 0.5 # 因为父子合并后选择,因此要将代沟设为0.5以维持种群规模
# aimfuc = AIM
# if PUN is not None:
# punishing = PUN # 获得罚函数
# NVAR = FieldDR.shape[1] # 得到控制变量的个数
# # 定义进化记录器,初始值为nan
# pop_trace = (np.zeros((MAXGEN ,2)) * np.nan)
# # 定义变量记录器,记录控制变量值,初始值为nan
# var_trace = (np.zeros((MAXGEN ,NVAR)) * np.nan)
# repnum = 0 # 初始化重复个体数为0
# ax = None # 存储上一帧图形
# if problem == 'R':
# Chrom = ga.crtrp(NIND, FieldDR) # 生成初始种群
# elif problem == 'I':
# Chrom = initiate(NIND, FieldDR)
# LegV = np.ones((NIND, 1)) # 生成可行性列向量,元素为1表示对应个体是可行解,0表示非可行解
# [ObjV, LegV] = aimfuc(Chrom, LegV) # 求种群的目标函数
# while sum(LegV) == 0:
# #print(LegV)
# Chrom = initiate(NIND, FieldDR)
# LegV = np.ones((Chrom.shape[0], 1)) # 生成可行性列向量,元素为1表示对应个体是可行解,0表示非可行解
# #print(LegV)
# [ObjV, LegV] = aimfuc(Chrom, LegV) # 求种群的目标函数
# gen = 0
# badCounter = 0 # 用于记录在“遗忘策略下”被忽略的代数
# # 开始进化!!
# start_time = time.time() # 开始计时
# while gen < MAXGEN:
# if badCounter >= 10 * MAXGEN: # 若多花了10倍的迭代次数仍没有可行解出现,则跳出
# break
# # 进行遗传算子,生成子代
# SelCh=ga.recombin(recombinStyle, Chrom, recopt, SUBPOP) # 重组
# if problem == 'R':
# SelCh=ga.mutbga(SelCh,FieldDR, pm) # 变异
# elif problem == 'I':
# #SelCh=ga.mutint(SelCh, FieldDR, pm) #变异:整数
# SelCh = mutation(SelCh,pm)
# LegVSel = np.ones((SelCh.shape[0], 1)) # 初始化育种种群的可行性列向量
# [ObjVSel, LegVSel] = aimfuc(SelCh, LegVSel) # 求育种种群的目标函数值
# # 父子合并
# Chrom = np.vstack([Chrom, SelCh])
# ObjV = np.vstack([ObjV, ObjVSel])
# LegV = np.vstack([LegV, LegVSel])
# # 对合并的种群进行适应度评价
# FitnV = ga.ranking(maxormin * ObjV, LegV, None, SUBPOP)
# if PUN is not None:
# FitnV = punishing(LegV, FitnV) # 调用罚函数
# # 记录进化过程
# bestIdx = np.nanargmax(FitnV) # 获取最优个体的下标
# if LegV[bestIdx] != 0:
# feasible = np.where(LegV != 0)[0] # 排除非可行解
# pop_trace[gen,0] = np.sum(ObjV[feasible]) / ObjV[feasible].shape[0] # 记录种群个体平均目标函数值
# pop_trace[gen,1] = ObjV[bestIdx] # 记录当代目标函数的最优值
# var_trace[gen,:] = Chrom[bestIdx, :] # 记录当代最优的控制变量值
# repnum = len(np.where(ObjV[bestIdx] == ObjV)[0]) # 计算最优个体重复数
# # 绘制动态图
# if drawing == 2:
# ax = ga.sgaplot(pop_trace[:,[1]],'种群最优个体目标函数值', False, ax, gen)
# else:
# gen -= 1 # 忽略这一代
# badCounter += 1
# [Chrom,ObjV,LegV]=ga.selecting(selectStyle, Chrom, FitnV, GGAP, SUBPOP, ObjV, LegV) # 选择个体生成新一代种群
# gen += 1
# if gen % 50 ==0 :
# print(gen)
# end_time = time.time() # 结束计时
# times = end_time - start_time
# # 绘图
# if drawing != 0:
# ga.trcplot(pop_trace, [['种群个体平均目标函数值', '种群最优个体目标函数值']])
# # 输出结果
# if maxormin == 1:
# best_gen = np.nanargmin(pop_trace[:, 1]) # 记录最优种群是在哪一代
# best_ObjV = np.nanmin(pop_trace[:, 1])
# if maxormin == -1:
# best_gen = np.nanargmax(pop_trace[:, 1]) # 记录最优种群是在哪一代
# best_ObjV = np.nanmax(pop_trace[:, 1])
# #print(pop_trace)
# #print(best_ObjV)
# if np.isnan(best_ObjV):
# raise RuntimeError('error: no feasible solution. (没找到可行解。)')
# print('最优的目标函数值为:', best_ObjV)
# print('最优的控制变量值为:')
# for i in range(NVAR):
# print(var_trace[best_gen, i])
# print('最优的一代是第', best_gen + 1, '代')
# print('时间已过', times, '秒')
# # 返回进化记录器、变量记录器以及执行时间
# return [pop_trace, var_trace, times]
#%%
# Variable setup: x[0] is fixed (range [1, 1]); the remaining seven decision
# variables range over [1, 60] with both borders included.
variable = [[1,1],]
boundary = [[1,1],]
for i in range(7):
    variable.append([1,60])
    boundary.append([1,1])
ranges = np.vstack(variable).T # range matrix of the decision variables
borders = np.vstack(boundary).T # border-inclusion matrix
FieldDR = ga.crtfld(ranges, borders) # geatpy region descriptor
#[pop_trace, var_trace, times] = GAtemplate(aim, punishing, FieldDR, problem = 'I', maxormin = 1, MAXGEN = 2000, NIND = 100, SUBPOP = 1, GGAP = 0.9, selectStyle = 'rws', recombinStyle = 'xovdp', recopt = 0.9, pm = 0.5, drawing = 1)
# Run the EVM optimisation. NOTE(review): FieldDR is built above but passed
# as None here, so the custom template falls back to its hard-coded NVAR=8.
[pop_trace, var_trace, times] = GATemplate(aim, punishing, FieldDR=None, maxormin = -1, MAXGEN = 2000, NIND = 30, recopt = 0.3, pm = 0.05,alpha = 0.8)
# ### $\alpha$费用最小模型
# $$
# \begin{align*}
# min &\;\; C^0\\
# s.t.&\;\; Pr\{C(x,\xi)\le C^0\}\ge \alpha\\
# &\;\; Pr\{T(x,\xi)\le T^0\}\ge \beta\\
# &\;\; x\ge 0
# \end{align*}
# $$
#
# 在这里,分别让$\alpha,\beta$为0.95
# In[170]:
# Surrogates for the chance-constrained model: Pr(T < 60) and the 0.85
# cost percentile, both trained on the Monte-Carlo labels computed above.
Ptime, C0cost = y[2],y[3]
from sklearn.neural_network import MLPRegressor as MLP
timecons = MLP(activation='relu', learning_rate='adaptive',max_iter = 5000)
timecons.fit(x,Ptime)
c0cost = MLP(activation='relu', learning_rate='adaptive',max_iter = 5000)
c0cost.fit(x,C0cost)
# In[173]:
## 定义目标函数
def aim(variables,legV):
    '''Objective for the alpha-cost-minimisation model.

    The objective is the predicted 0.85 cost percentile; individuals are
    infeasible when the predicted Pr(T < 60) falls below 0.95 or the
    chromosome violates the precedence graph.

    Args:
        variables: population matrix, one chromosome per row.
        legV: feasibility column vector (1 feasible / 0 infeasible).

    Returns:
        [objective column vector, updated legV].
    '''
    cost = np.array([c0cost.predict(variables)])
    constraint = np.array([timecons.predict(variables)])
    idx1 = np.where(constraint<0.95)# penalise Pr(T<60) below the 0.95 target
    idx2 = is_notvalid(variables)
    # NOTE(review): hstack flattens np.where's (rows, cols) tuple together
    # with is_notvalid's (zeros, indices) tuple - see the EVM aim() above.
    exIdx = np.unique(np.hstack([idx1,idx2])) # indices of infeasible individuals
    exIdx = exIdx.astype(int)
    legV[exIdx] = 0 # 0 marks infeasible, 1 feasible
    return [cost.T,legV]
# In[181]:
#[pop_trace, var_trace, times] = GAtemplate(aim, punishing, FieldDR, problem = 'I', maxormin = 1, MAXGEN = 5000, NIND = 100, SUBPOP = 1, GGAP = 0.9, selectStyle = 'rws', recombinStyle = 'xovdp', recopt = 0.5, pm = 0.3, drawing = 1)
[pop_trace, var_trace, times] = GATemplate(aim, punishing, FieldDR=None, maxormin = 1, MAXGEN = 1000, NIND = 100, recopt = 0.3, pm = 0.05,alpha = 0.7)
#### 期望最小模型2
# In[183]:
def aim(variables,legV):
    '''Objective for the second expected-cost model: minimise the predicted
    expected cost subject to the chance constraint Pr(T < 60) >= 0.95 and
    the precedence graph.

    Args:
        variables: population matrix, one chromosome per row.
        legV: feasibility column vector (1 feasible / 0 infeasible).

    Returns:
        [objective column vector, updated legV].
    '''
    cost = np.array([costfunc.predict(variables)])
    constraint = np.array([timecons.predict(variables)])
    idx1 = np.where(constraint<0.95)# penalise Pr(T<60) below the 0.95 target
    idx2 = is_notvalid(variables)
    # NOTE(review): hstack flattens np.where's (rows, cols) tuple together
    # with is_notvalid's (zeros, indices) tuple - see the EVM aim() above.
    exIdx = np.unique(np.hstack([idx1,idx2])) # indices of infeasible individuals
    exIdx = exIdx.astype(int)
    legV[exIdx] = 0 # 0 marks infeasible, 1 feasible
    return [cost.T,legV]
# In[186]:
#[pop_trace, var_trace, times] = GAtemplate(aim, punishing, FieldDR, problem = 'I', maxormin = 1, MAXGEN = 5000, NIND = 100, SUBPOP = 1, GGAP = 0.9, selectStyle = 'rws', recombinStyle = 'xovdp', recopt = 0.5, pm = 0.3, drawing = 1)
[pop_trace, var_trace, times] = GATemplate(aim, punishing, FieldDR=None, maxormin = -1, MAXGEN = 1000, NIND = 30, recopt = 0.3, pm = 0.05,alpha = 0.7)
# In[ ]:
# ## 用不确定理论计算
# $
# \begin{align*}
# \min_x & \int_0^1 \gamma^{-1}(x,\alpha)d\alpha\\
# s.t.
# &\;\;\Psi^{-1}(x,\alpha_0)\le T_0\\
# &\;\; x\ge 0
# \end{align*}
# $
#
# 其中,$\Psi^{-1}(x,\alpha_0)$是$T(x,\xi)$的逆不确定分布,$\gamma^{-1}(x,\alpha)$是$C(x,\xi)$的逆不确定分布。在这里,让$\alpha_0 = 0.95$
#
# 可以简化成
#
# $
# \begin{align*}
# \min_x & (c_1+c_2+\cdots +c_{99})/99\\
# s.t.
# &\;\;k/100 \ge \alpha \text{ if } s_k \ge T^0 \\
# &\;\; x\ge 0
# \end{align*}
# $
# In[214]:
def expect_c(samples):
    """Approximate inverse uncertainty distributions with 99-point sampling.

    For each candidate x: simulate 99 completion times and costs. The
    expected cost is the mean of the 99 cost samples; the time statistic is
    k, the 1-based rank of the first sorted completion time reaching
    T0 = 60 (100 when no sample reaches it, i.e. always on time).

    Returns:
        [costlist, timelist], one entry per sample.
    """
    timelist, costlist = [], []
    for sample in samples:
        sample = np.asarray(sample)
        t_samples = []
        c_samples = []
        for _ in range(99):
            finish = calculate_T(sample)
            t_samples.append(finish)
            c_samples.append(calculate_C(finish, 0.06, sample))
        sorted_t = np.sort(np.asarray(t_samples))
        for rank in range(99):
            if sorted_t[rank] >= 60:
                timelist.append(rank + 1)  # first k with s_k >= T0
                break
        else:
            timelist.append(100)  # no sample reached T0
        costlist.append(sum(c_samples) / 99)
    return [costlist, timelist]
# In[217]:
def aim(variables,legV):
    '''Objective for the uncertainty-theory model.

    Uses expect_c's 99-point inverse-distribution sampling: the objective
    is the mean cost; an individual is infeasible when fewer than 95 of
    the 99 sorted completion times stay below T0 = 60, or when the
    chromosome violates the precedence graph.

    Args:
        variables: population matrix, one chromosome per row.
        legV: feasibility column vector (1 feasible / 0 infeasible).

    Returns:
        [cost column vector, updated legV].
    '''
    a = expect_c(variables)
    cost = np.array([a[0]])
    constraint = np.array([a[1]])
    #print(constraint)
    idx1 = np.where(constraint<95) # k must reach 95 so that Pr{T <= 60} >= 0.95
    idx2 = is_notvalid(variables)
    # NOTE(review): hstack flattens np.where's (rows, cols) tuple together
    # with is_notvalid's (zeros, indices) tuple - see the EVM aim() above.
    exIdx = np.unique(np.hstack([idx1,idx2])) # indices of infeasible individuals
    exIdx = exIdx.astype(int)
    legV[exIdx] = 0 # 0 marks infeasible, 1 feasible
    return [cost.T,legV]
#aim(parents,np.ones((2,19)))
# In[224]:
#[pop_trace, var_trace, times] = GATemplate(aim, punishing, FieldDR, problem = 'I', maxormin = 1, MAXGEN = 1000, NIND = 80, SUBPOP = 1, GGAP = 0.9, selectStyle = 'rws', recombinStyle = 'xovdp', recopt = 0.4, pm = 0.3, drawing = 1)
[pop_trace, var_trace, times] = GATemplate(aim, punishing, FieldDR=None, maxormin = -1, MAXGEN = 1000, NIND = 30, recopt = 0.3, pm = 0.05,alpha = 0.7)
# In[ ]:
# In[ ]:
| [
"noreply@github.com"
] | YuweiShen.noreply@github.com |
710b472a33fb6c8509f4df5c743745e611e706cc | 7e8a001d13b11bfa6ff5d51907d93b92174289d2 | /test.py | 969113c100c0f8e2e097e01df1727d19900c746e | [] | no_license | daludaluking/TF2-AI | 8eb99a80b7e090d1758efb21863fcaff3017a0b4 | cf5f2245d18ed38d26eaa470570b8d86f7452a7d | refs/heads/main | 2023-07-22T23:59:28.859518 | 2021-08-30T05:58:58 | 2021-08-30T05:58:58 | 392,513,154 | 0 | 0 | null | 2021-08-04T01:53:12 | 2021-08-04T01:53:12 | null | UTF-8 | Python | false | false | 742 | py | import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import numpy as np
tf.executing_eagerly()
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu',
input_shape=(None, None, 1)),
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
for images,labels in dataset.take(1):
print("로짓: ", mnist_model(images[0:1]).numpy()) | [
"swdalu74@gmail.com"
] | swdalu74@gmail.com |
e6f473220e6ba826010f02a19ed5052645008b2f | 179c9b2983ba3d4a3757f84fd55ac1356850c363 | /jinete/algorithms/heuristics/local_search/strategies/routes/one_shift.py | 67ed11e8558cde8355886ac88d94ef2aa23b27ae | [
"MIT"
] | permissive | garciparedes/jinete | 0d2fbf68a88b0ec565b8c1ed5c417f8f7cacceb0 | 6ed5687b2016aa7eb1f6499470c6ea21a9a57b8a | refs/heads/master | 2023-03-29T00:35:34.575828 | 2020-09-19T11:02:05 | 2020-09-19T11:02:05 | 150,865,909 | 9 | 2 | MIT | 2023-03-16T21:41:17 | 2018-09-29T13:17:05 | Python | UTF-8 | Python | false | false | 1,834 | py | import logging
from ......models import (
Route,
Stop,
)
from ..abc import (
LocalSearchStrategy,
)
logger = logging.getLogger(__name__)
class OneShiftLocalSearchStrategy(LocalSearchStrategy):
    """Local-search strategy that tries to improve each route by flipping
    ("one-shift") adjacent stop pairs, keeping only feasible flips that
    improve the objective."""

    def _improve(self) -> None:
        """Scan every route; for each adjacent stop pair, flip the pair and
        revert the flip when the route becomes infeasible or the objective
        does not improve on the current cost."""
        logger.info(f'Starting to improve "Result" with "{self.__class__.__name__}"...')
        for route in self._routes:
            cost = self._objective.optimization_function(route)
            for i in range(1, len(route.stops) - 1):
                j = i + 1
                k = i + 2
                first = route.stops[i]
                second = route.stops[j]
                # The stop after the pair, needed to rewire its back-pointer
                # (None when the pair sits at the end of the route).
                third = route.stops[k] if k < len(route.stops) else None
                # A pickup may not move after its own delivery: skip pairs
                # sharing a trip between first's pickups and second's deliveries.
                if not set(first.pickup_planned_trips).isdisjoint(second.delivery_planned_trips):
                    continue
                self._flip(route, first, second, third)
                # Revert when infeasible, or when the flip did not beat the
                # stored cost (presumably `best` returns the better of the
                # two - confirm against the objective implementation).
                if not route.feasible or cost == self._objective.best(cost, route):
                    self._flip(route, second, first, third)
                    continue
                cost = self._objective.optimization_function(route)
                logger.info(f'Flipped "{i}"-th and "{j}"-th stops from "{route}".')

    def _flip(self, route: Route, previous: Stop, other: Stop, following: Stop = None) -> None:
        """Swap two consecutive stops (*previous* directly before *other*) in
        *route*: exchange their positions in the stop list, rewire the
        `previous` linked-list pointers, and flush cached per-stop state
        from the swap point onwards."""
        assert following is None or following.previous == other
        assert other.previous == previous
        self_index = route.stops.index(other)
        other_index = route.stops.index(previous)
        route.stops[self_index], route.stops[other_index] = route.stops[other_index], route.stops[self_index]
        if following is not None:
            following.previous = previous
        other.previous = previous.previous
        previous.previous = other
        # Invalidate cached data on every stop at/after the swap position.
        for stop in route.stops[self_index:]:
            stop.flush()
| [
"sergio@garciparedes.me"
] | sergio@garciparedes.me |
7c209754538f2496dd447913c18b11e34790c8d0 | de0d7e99d970d75b5356744bacaf7f44fcc731a8 | /bin/nsite/http.py | a8e7808b45c2ee5dd4c4819781814f43e2ef4c6a | [] | no_license | Valvador/NEEShubloader | a3a9e6f072def12423d4516e560974a7f4a744da | a1007cfe37b895d38191d9380ba42b01d45430b3 | refs/heads/master | 2020-05-20T04:24:30.381117 | 2013-03-20T21:58:03 | 2013-03-20T21:58:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,380 | py | #Adapts the abstracted http layer to our NEEShub-specific interfaces.
#Created by Val Gorbunov for the use of NEES@UCSB
from nees.config import *
import nees.http as nh
import nees.nees_logging as nees_logging
#
# NEESHUB SPECIFIC GET FUNCTIONS
#
def get_experiment_id_dictionary():
    """UCSB NEES-specific wrapper around ``nh.get_experiment_id_dictionary``.

    Returns:
        dict mapping experiment numbers to NEEShub IDs, in the form
        {'Experiment#': 'ID#', ...}.
    """
    return nh.get_experiment_id_dictionary(sitenees_proj)
# This variable dictionary used to be updated manually. Now it initiates automatically
# in order to simplify that process.
experiment_id_dic = get_experiment_id_dictionary()
def get_trial_id_dictionary(experiment_num):
    """Look up the trial-to-ID mapping for one experiment.

    Args:
        experiment_num: NEEShub experiment number (key of experiment_id_dic).

    Returns:
        dict in the form {'Trial-#': 'ID#', ...}.
    """
    return nh.get_trial_id_dictionary(sitenees_proj,
                                      experiment_id_dic[experiment_num])
def get_trial_metadata_dictionaries_partial(experiment_num, experimentdict):
    """UCSB NEES-specific partial metadata lookup: event-ID, magnitude and
    distance dictionaries restricted to the trials in *experimentdict*.

    Returns:
        (evid_dict, ml_dict, distance_dict) keyed by 'Trial-#'.
    """
    exp_id = experiment_id_dic[experiment_num]
    return nh.get_trial_metadata_dictionaries_partial(sitenees_proj, exp_id,
                                                      experimentdict)
def get_trial_metadata_dictionaries(experiment_num):
    """Gets metadata parsed from trial descriptions.

    WARNING: requires that trial descriptions define metadata in the
    'datatype: data' format.

    Args:
        experiment_num: NEEShub experiment number (key of experiment_id_dic).

    Returns:
        (evid_dict, ml_dict, dist_dict): event-ID, magnitude and distance
        dictionaries, all keyed by 'Trial-#'.
    """
    exp_id = experiment_id_dic[experiment_num]
    return nh.get_trial_metadata_dictionaries(sitenees_proj, exp_id)
#
# NEESHUB SPECIFIC POSTING FUNCTIONS
#
def post_experiment(title, description, start_date, experiment_num = ''):
    """Create an experiment on the NEEShub via the webservices layer.

    Args:
        title: desired experiment title.
        description: experiment description.
        start_date: experiment start date.
        experiment_num: optional experiment number (recommended to leave
            blank so the hub assigns one).

    Returns:
        str: the NEEShub ID of the created experiment.
    """
    # Forward the caller's experiment_num; the original hard-coded '' here,
    # silently discarding any number the caller supplied.
    experiment_id = nh.post_experiment(sitenees_proj, title, description,
                                       start_date,
                                       experiment_num=experiment_num)
    return experiment_id
def post_trial(experiment_id, trialtitle, description, trial_num = ''):
    """Create a trial under *experiment_id* on the NEEShub.

    Args:
        experiment_id: parent experiment's NEEShub ID.
        trialtitle: desired trial title.
        description: trial description. RECOMMENDED: include metadata lines
            such as "evid: EVTID#", "ml: MAGNITUDE", "dist: DISTANCE".
        trial_num: accepted for API symmetry but not forwarded to the hub.

    Returns:
        str: the new trial's ID.
    """
    return nh.post_trial(sitenees_proj, experiment_id, trialtitle, description)
def post_rep(experiment_id, trial_id):
    """Create a repetition within a trial.

    Args:
        experiment_id: NEEShub experiment ID inside the project.
        trial_id: NEEShub trial ID inside the experiment.

    Returns:
        str: the repetition's ID.
    """
    return nh.post_rep(sitenees_proj, experiment_id, trial_id)
def post_full_trial(experiment_id, trialtitle, description, trial_num = ''):
    """Create a trial plus one repetition folder, and log the creation.

    Args:
        experiment_id: NEEShub experiment ID inside the project.
        trialtitle: desired trial title.
        description: trial description. RECOMMENDED: include metadata lines
            such as "evid: EVTID#", "ml: MAGNITUDE", "dist: DISTANCE".
        trial_num: trial number used only for the log entry.

    Returns:
        (trial_id, rep_id): tuple of ID strings.
    """
    trial_id, rep_id = nh.post_full_trial(sitenees_proj, experiment_id,
                                          trialtitle, description)
    nees_logging.log_trial_creation(trial_num, experiment_id, trial_id, rep_id)
    return trial_id, rep_id
def multipart_post(filename, expnum, trialnum, rep_num, datafolder, request_path = http_file_path, verb = False):
    '''Assign a previously FTP-uploaded file to a repetition data folder.

    This is technically an upload post: it assumes the file has already been
    uploaded to the NEEShub via FTP and is waiting for assignment; this post
    performs that assignment.

    Args:
        filename: name of the file to assign.
        expnum: experiment number.
        trialnum: trial number.
        rep_num: repetition number.
        datafolder: folder within the repetition to place the file in.
        request_path: HTTP request path on the server (defaults to the
            configured http_file_path).
        verb: when True, show progress. NOTE: not yet implemented upstream.

    Returns:
        (post_status, post_data, post_location) from the underlying
        nh.multipart_post call.
    '''
    post_status, post_data, post_location = nh.multipart_post(filename,
                                                              nees_path_id,
                                                              expnum,
                                                              trialnum,
                                                              rep_num,
                                                              datafolder,
                                                              request_path,
                                                              verbose = verb)
    return post_status, post_data, post_location
#
# SITE NEESHUB SPECIFIC DELETE FUNCTIONS
#
def delete_experiment(experiment_num):
    """Delete a site-specific experiment.

    Args:
        experiment_num: experiment number within the project.

    Returns:
        int: HTTP status of the delete request.
    """
    return nh.delete_experiment(sitenees_proj,
                                experiment_id_dic[experiment_num])
def delete_trial(experiment_num, trial_number):
    """Delete one trial of a site-specific experiment.

    Args:
        experiment_num: experiment number containing the trial.
        trial_number: trial number to delete within the experiment.

    Returns:
        int: HTTP status of the delete request.
    """
    experiment_id = experiment_id_dic[experiment_num]
    trial_id = get_trial_id_dictionary(experiment_num)['Trial-%s' % (trial_number,)]
    return nh.delete_trial(sitenees_proj, experiment_id, trial_id)
| [
"valvador@valvador-VirtualBox.(none)"
] | valvador@valvador-VirtualBox.(none) |
4b098cac73eb5c2008da63916de685407c0a3356 | 7d5df1608e315e860ec342373caa0df06280ab98 | /users/forms.py | 2cb08e12ea5caef0dce956cfc3acd6e46a1f4e61 | [
"MIT"
] | permissive | Joshua-Chiu/PGDBWebServer | beb247d7c231a16eb776f88997cb3660304bd725 | 9777773db763a13f168da633c69b9271f9da24b1 | refs/heads/master | 2022-12-01T20:35:21.921999 | 2021-09-15T20:25:30 | 2021-09-15T20:25:30 | 130,128,689 | 1 | 0 | MIT | 2022-11-22T08:50:01 | 2018-04-18T22:24:53 | Python | UTF-8 | Python | false | false | 927 | py | from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
    """User-creation form for the CustomUser model, exposing username and email."""

    # NOTE(review): Meta typically inherits from UserCreationForm.Meta rather
    # than the form class itself - confirm this is intended.
    class Meta(UserCreationForm):
        model = CustomUser
        fields = ('username', 'email',)
class CustomUserChangeForm(UserChangeForm):
    """Admin change form for CustomUser with customised help texts for the
    email and password fields."""

    def __init__(self, *args, **kwargs):
        # Override the default field help texts after the base form builds them.
        super(UserChangeForm, self).__init__(*args, **kwargs)
        self.fields['email'].help_text = "Only enter an email if you grant user permission to self reset password."
        self.fields['password'].help_text = "Raw passwords are not stored, so there is no way to see this user's " \
                                            "password, but you can change the password using" \
                                            " <a href=\"../password/\">this form.</a>"

    class Meta:
        model = CustomUser
        fields = ('username', 'email',)
| [
"pgry@null.net"
] | pgry@null.net |
98b95e5b95c2fca1ffb90674c1dc3c922496957a | 470ab293f1ebfcbec1ce7ab18c2749357d735a9d | /eps_rapid/models/property_content_ratings_guest.py | c7ed1a4eb28a4a32726c46e163e2bd7b79961944 | [] | no_license | TripElephant/eps_rapid_client | 53b51308a5032935ac3db7ce97e17e359b081859 | 2a21a00c3b12cccb16585035ed59845c4ffe1604 | refs/heads/master | 2020-06-28T02:49:22.514338 | 2019-08-01T22:04:46 | 2019-08-01T22:04:46 | 200,124,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,229 | py | # coding: utf-8
"""
Rapid
EPS Rapid V2.3 # noqa: E501
OpenAPI spec version: 2.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PropertyContentRatingsGuest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'float',
'average': 'str'
}
attribute_map = {
'count': 'count',
'average': 'average'
}
    def __init__(self, count=None, average=None):  # noqa: E501
        """PropertyContentRatingsGuest - a model defined in Swagger.

        Args:
            count: total number of guest review ratings for the property.
            average: overall guest rating averaged from all reviews.
        """  # noqa: E501
        self._count = None
        self._average = None
        self.discriminator = None
        # Only route through the property setters when a value was supplied.
        if count is not None:
            self.count = count
        if average is not None:
            self.average = average
@property
def count(self):
"""Gets the count of this PropertyContentRatingsGuest. # noqa: E501
A count of all of the guest review ratings which currently exist for this property. # noqa: E501
:return: The count of this PropertyContentRatingsGuest. # noqa: E501
:rtype: float
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this PropertyContentRatingsGuest.
A count of all of the guest review ratings which currently exist for this property. # noqa: E501
:param count: The count of this PropertyContentRatingsGuest. # noqa: E501
:type: float
"""
self._count = count
@property
def average(self):
"""Gets the average of this PropertyContentRatingsGuest. # noqa: E501
Overall rating for the property averaged from all guest reviews. Returns a value between 1.0 and 5.0. # noqa: E501
:return: The average of this PropertyContentRatingsGuest. # noqa: E501
:rtype: str
"""
return self._average
@average.setter
def average(self, average):
"""Sets the average of this PropertyContentRatingsGuest.
Overall rating for the property averaged from all guest reviews. Returns a value between 1.0 and 5.0. # noqa: E501
:param average: The average of this PropertyContentRatingsGuest. # noqa: E501
:type: str
"""
self._average = average
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PropertyContentRatingsGuest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PropertyContentRatingsGuest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"daniel.vu@protonmail.ch"
] | daniel.vu@protonmail.ch |
2f78919c7c166ae165af1bec6e2fa9173abf8503 | 819feabab120759291bd2e28107e970e6e3de827 | /Bubble sort visualizer/bubble_sort.py | 18b438b599557853014f79b1613aab71aad0a129 | [] | no_license | Nushrat-Jahan/Python-codes | 0bbd838cd8a3c8c2a0ec46c5c73852c02186750b | 01636fab19b2f7337d44126611b716e669eb1765 | refs/heads/master | 2023-02-24T19:15:10.010829 | 2021-01-28T16:43:46 | 2021-01-28T16:43:46 | 319,365,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import time
def bubble_sort(arr, displayBar, animSpeed):
    """Sort *arr* in place with bubble sort, animating each swap.

    displayBar(arr, colors) is invoked after every swap with the swapped
    pair highlighted in blue and every other bar red, and once more at the
    end with all bars blue.  animSpeed is the delay (seconds) between frames.

    Improvements over the naive version: the inner scan stops before the
    already-sorted tail, and the sort exits early once a full pass makes
    no swaps.
    """
    n = len(arr)
    for done in range(n - 1):
        swapped = False
        # After `done` passes the `done` largest elements are already at the
        # tail, so the scan can stop that much earlier.
        for j in range(n - 1 - done):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
                displayBar(arr, ['blue' if a == j or a == j + 1 else 'red'
                                 for a in range(n)])
                time.sleep(animSpeed)
        if not swapped:
            # No swaps in a complete pass: the array is sorted.
            break
    displayBar(arr, ['blue' for a in range(n)])
| [
"nushratjahan296@gmail.com"
] | nushratjahan296@gmail.com |
71cbe10c570bb6d05b46f89c055bb5a2e86a09bf | d0c82aa02b0b572eee74ddc2492e575a4ed6cb0b | /aco_attempt.py | 5fcff409676ebf31f8ff015ea9798a5ac2961293 | [] | no_license | osKlonowski/BachelorThesis | c37f086d6993b77ce85ada45c052ee547b887812 | 370185a5a589530da293d575858a002f31339e9d | refs/heads/master | 2023-06-17T16:40:57.013112 | 2021-07-17T08:25:36 | 2021-07-17T08:25:36 | 340,762,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,397 | py | from preliminaries import getListOfSectionsCompleted
import Formigueiro as Formigueiro
import random
class BRIDGEInstance():
    """Problem instance for balancing pair meetings in one bridge section.

    Holds the meeting-history matrix (a pandas DataFrame whose index and
    columns are pair ids 1..numPairs -- TODO confirm against preliminaries)
    and precomputes theoretical best/worst fitness bounds used to normalise
    candidate schedules.
    """
    ##### FOR NOW ----> IT WILL BE ONLY ONE SECTION #####

    def __init__(self, numOfRounds, numOfPairs, listPairIds, prev_meetings_matrix):
        self.numRounds = numOfRounds
        self.numPairs = numOfPairs
        self.pairIds = listPairIds
        self.prev_meetings_matrix = prev_meetings_matrix
        # Order matters: fitness_worst's computation prints a ratio that
        # divides by fitness_best, so best must be computed first.
        self.fitness_best = self.get_theoretical_best_fitness()
        self.fitness_worst = self.get_theoretical_worst_fitness()

    def getMeetingMatrix(self):
        return self.prev_meetings_matrix

    def compute_meeting_factor(self, meeting_factor_matrix=None):
        pass

    def compute_meeting_factor(self, meeting_matrix):
        """Sum of cubed cell values over the full pair-by-pair matrix.

        Cubing penalises repeated meetings between the same two pairs much
        more heavily than meetings spread evenly.
        """
        meeting_factor = 0
        for i in range(1, self.numPairs + 1):
            for j in range(1, self.numPairs + 1):
                cell_value = meeting_matrix.at[i, j] ** 3
                meeting_factor += cell_value
        return meeting_factor

    def compute_theoretical_best_meeting_matrix(self, meeting_history_matrix, numPairs, numRounds):
        """Greedy lower-bound matrix: each pair repeatedly meets whichever
        pair it has met least so far (4 boards per scheduled meeting)."""
        theoretical_optimum_matrix = meeting_history_matrix.copy()
        for pair_num in range(1, numPairs + 1):
            for i in range(0, numRounds):
                column = theoretical_optimum_matrix[[pair_num]].copy()
                # Drop the diagonal so a pair never "meets" itself.
                column.drop([pair_num], axis=0, inplace=True)
                # idxmin() on the single-column frame yields a one-element
                # Series; int(...) extracts the pair id -- presumably relies
                # on the deprecated single-element-Series-to-int coercion.
                pair_id_least_meetings = column.idxmin()
                theoretical_optimum_matrix[pair_num][int(
                    pair_id_least_meetings)] += 4
                theoretical_optimum_matrix[int(
                    pair_id_least_meetings)][pair_num] += 4
        fitness = self.compute_meeting_factor(theoretical_optimum_matrix)
        # NOTE(review): fitness/fitness is always 1.0 -- probably meant to
        # print `fitness` itself; confirm before changing.
        print(f'\nTheoretical OPTIMUM Matrix: {fitness/fitness}')
        print(theoretical_optimum_matrix)
        return theoretical_optimum_matrix

    def get_theoretical_best_fitness(self):
        """Meeting factor of the greedy best-case matrix (even pair counts only;
        the odd case is unimplemented)."""
        fitness = 0
        if (self.numPairs % 2 == 0):
            matrix = self.compute_theoretical_best_meeting_matrix(
                self.prev_meetings_matrix, self.numPairs, self.numRounds)
            fitness = self.compute_meeting_factor(matrix)
        else:
            # TODO: Still need to do
            pass
        return fitness

    def compute_theoretical_worst_meeting_matrix(self, meeting_history_matrix, numPairs, numRounds):
        """Greedy upper-bound matrix: each pair repeatedly meets whichever
        pair it has met MOST so far (mirror image of the best case)."""
        theoretical_worst_matrix = meeting_history_matrix.copy()
        for pair_num in range(1, numPairs + 1):
            for i in range(0, numRounds):
                column = theoretical_worst_matrix[[pair_num]].copy()
                column.drop([pair_num], axis=0, inplace=True)
                pair_id_least_meetings = column.idxmax()
                theoretical_worst_matrix[pair_num][int(
                    pair_id_least_meetings)] += 4
                theoretical_worst_matrix[int(
                    pair_id_least_meetings)][pair_num] += 4
        fitness = self.compute_meeting_factor(theoretical_worst_matrix)
        print(f'\nTheoretical WORST Matrix: {fitness/self.fitness_best}')
        print(theoretical_worst_matrix)
        return theoretical_worst_matrix

    def get_theoretical_worst_fitness(self):
        """Meeting factor of the greedy worst-case matrix (even pair counts only)."""
        fitness = 0
        if (self.numPairs % 2 == 0):
            matrix = self.compute_theoretical_worst_meeting_matrix(
                self.prev_meetings_matrix, self.numPairs, self.numRounds)
            fitness = self.compute_meeting_factor(matrix)
        else:
            # TODO: Still need to do
            pass
        return fitness

    def computePairMeetingCost(self, x, y):
        """Cost of scheduling pairs x and y together: cubed meeting history
        plus 1 so unseen pairings still have a positive cost."""
        return (self.prev_meetings_matrix.at[x, y] ** 3) + 1

    # Where "x" is a schedule
    # calculate a new meetings_matrix
    # return a fitness value in range(1, 2)
    def compute_fitness(self, x):
        """Apply schedule *x* (iterable of (pair1, pair2)) to a copy of the
        history matrix and return its meeting factor normalised by the
        theoretical best (>= 1.0; lower is better)."""
        sample_solution_matrix = self.prev_meetings_matrix.copy()
        for pair1, pair2 in x:
            # print(f'Pair1ID: {pair1}, Pair2ID: {pair2}')
            # NOTE(review): chained indexing (df[col][row] += 4) stops
            # mutating under pandas copy-on-write; prefer .at if upgrading.
            sample_solution_matrix[int(pair1)][int(pair2)] += 4
            sample_solution_matrix[int(pair2)][int(pair1)] += 4
        meeting_factor = self.compute_meeting_factor(sample_solution_matrix)
        # Get overhead
        return meeting_factor / self.fitness_best
class BRIDGEAnt(Formigueiro.ACS_Ant):
    """ACS ant whose solution components are (pair1, pair2) meeting tuples.

    Delegates cost and fitness evaluation to the wrapped BRIDGEInstance;
    pheromone bookkeeping lives in the Formigueiro base class.
    """
    # THIS WILL RECEIVE A BRIDGE INSTANCE --- AN INSTANCE OF THE MEETING MATRIX + WAITING VECTOR (if applicable)
    def __init__(self, instance, **kwargs):
        self.instance = instance
        super().__init__(**kwargs)

    def getSolutionComponents(self):
        # Generator over the components collected so far (base-class state).
        return (c for c in self.components)

    def addSolutionComponent(self, component):
        # print(
        #     f'Component to be added: {component}: length: {len(list(self.components))}')
        return super().addSolutionComponent(component)

    # OVERRIDE with FITNESS VALUE
    # Compute the generated schedule onto a new meeting matrix
    # Compute the FITNESS VALUE in Respect to the Theoretical Best???
    def getSolutionValue(self):
        """Fitness of the full schedule relative to the theoretical best."""
        return self.instance.compute_fitness(self.getSolutionComponents())

    def getComponentCost(self, component):
        # Component is a tuple, pair of players
        return self.instance.computePairMeetingCost(*component)

    # THIS WILL GENERATE A SINGLE MEETING SCHEDULE
    def constructSolution(self):
        """Build one complete schedule: for each round, repeatedly pick a
        random unmatched pair and let the ACS decision rule choose its
        opponent from the remaining unmatched pairs.

        Assumes an even number of pairs, so the candidate list handed to
        makeDecision is never empty -- TODO confirm for odd sections.
        """
        # A set of the form: [1, 2, 3, 4, 5...14, 15, 16]
        V = set(range(1, self.instance.numPairs + 1))
        for i in range(0, self.instance.numRounds):
            S = set([])  # pairs already matched this round
            while S != V:
                remaining_pairIds = [pairId for pairId in V - S]
                pair1 = random.choice(remaining_pairIds)
                S.add(pair1)
                components = [(pair1, pair2) for pair2 in V - S]
                # makeDecision records the chosen component via
                # addSolutionComponent and returns it.
                pair1, pair2 = self.makeDecision(components)
                S.add(pair2)
S.add(pair2)
# --- Script entry: load the pre-balanced section data and run the ACO solver.
meeting_history_file = 'bridge_schedules/data2021_pre_balanced/meeting history april 2021'
pre_schedule_file = 'bridge_schedules/data2021_pre_balanced/48 pairs_(3 sections,no_waiting_table)'
listOfSections = getListOfSectionsCompleted(
    meeting_history_file, pre_schedule_file)
# Only the first section is optimised for now (see BRIDGEInstance note).
section = listOfSections.sections[0]
numOfPairs = len(section.listPairIds)
listPairIds = section.listPairIds
prev_meetings_matrix = section.meetings_matrix.copy()

# GENERATE INSTANCE OF THE PROBLEM (6 rounds to schedule)
instance = BRIDGEInstance(6, numOfPairs, listPairIds,
                          prev_meetings_matrix)

# ANT-COLONY OPTIMIZATION
# BEST FITNESS IN ITERATION ## GLOBAL BEST FITNESS ## BEST FITNESS FROM ALL ANTS
print('BEST ITER FITNESS -- GLOBAL BEST FITNESS -- BEST ANT FITNESS')
obj, components = Formigueiro.Solve(
    antCls=BRIDGEAnt, instance=instance, numIterations=200, numAnts=25, alpha=1, beta=1)
antCls=BRIDGEAnt, instance=instance, numIterations=200, numAnts=25, alpha=1, beta=1)
def compute_final_meeting_matrix_from_solution(meeting_matrix, schedule):
    """Return a copy of *meeting_matrix* with the scheduled meetings applied.

    schedule is an iterable of (pair1, pair2) id tuples; each scheduled
    meeting adds 4 boards to both symmetric cells of the matrix.  The input
    matrix is left untouched.
    """
    result = meeting_matrix.copy()
    for pair1, pair2 in schedule:
        a, b = int(pair1), int(pair2)
        # Use .at rather than chained indexing (m[col][row] += 4): chained
        # assignment is a SettingWithCopy hazard and silently stops mutating
        # under pandas copy-on-write.
        result.at[a, b] += 4
        result.at[b, a] += 4
    return result
# --- Report the solver result and the meeting matrix it implies.
print(f'Fitness Overhead: {obj}')
print(f'\nThe solution components are: {components}\n')
print(f'Num of Pair Meetings in Solution Schedule is: {len(components)}')
final_matrix = compute_final_meeting_matrix_from_solution(
    prev_meetings_matrix, components)
print(f'\nORIGINAL MATRIX\n{prev_meetings_matrix}')
print(f'\n\nFINAL MATRIX\n{final_matrix}')
| [
"osklonowski@gmail.com"
] | osklonowski@gmail.com |
8cef7439cfc2680d9b2889fa9559a4c1184b7d58 | f508da1fd2e65491e1e6b5dd3a64d8bf6039bc6c | /eho/openstack/common/jsonutils.py | f800779f11987bc90c847efa74d36a2629bee971 | [
"Apache-2.0"
] | permissive | senhuang/eho | b9595739b109829b44e6d538b36348ac84529af8 | ca4dba1d0e7ab24d748b746e115ca6dc2191997a | refs/heads/master | 2021-01-21T00:36:50.271363 | 2013-03-19T07:55:50 | 2013-03-19T07:55:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,110 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
import xmlrpclib
from eho.openstack.common import timeutils
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # Anything introspective (modules, classes, frames, ...) is not data;
    # represent it by its string form instead of descending into it.
    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
             inspect.isfunction, inspect.isgeneratorfunction,
             inspect.isgenerator, inspect.istraceback, inspect.isframe,
             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
             inspect.isabstract]
    for test in nasty:
        if test(value):
            return unicode(value)

    # value of itertools.count doesn't get caught by inspects
    # above and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return unicode(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        # NOTE(review): list/tuple recursion reuses the *same* level, so the
        # max_depth guard only advances through dict/instance branches --
        # a self-referencing list would still recurse without bound.
        if isinstance(value, (list, tuple)):
            return [recursive(v) for v in value]
        elif isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return unicode(value)
def dumps(value, default=to_primitive, **kwargs):
    """json.dumps with to_primitive() as the fallback serializer."""
    return json.dumps(value, default=default, **kwargs)


def loads(s):
    """json.loads pass-through (kept so anyjson can register this module)."""
    return json.loads(s)


def load(s):
    """json.load pass-through; *s* is a file-like object despite the name."""
    return json.load(s)
# If anyjson is installed, register this module as its JSON implementation so
# the wrappers above (and their to_primitive handling) are used everywhere.
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
| [
"slukjanov@mirantis.com"
] | slukjanov@mirantis.com |
428a84ee3e5423c47985d083c3808c96631a088a | 544e04ccb9b792e969e586d9c21e488f5a08fc50 | /appChat/models.py | a2fc72ec5a6ca4ef04b0096dc050eacedec7c742 | [] | no_license | LIkelion-at-KOREATECH/Lotte-Is-Horse | 0fa23d8337d8bf9c73d7b011872b95d00d32ee7b | 8798488e2f1bcd9cfe0d9e7cd5fad6af8e74cbd9 | refs/heads/master | 2023-01-05T07:49:34.132268 | 2020-10-25T23:20:56 | 2020-10-25T23:20:56 | 305,366,555 | 0 | 1 | null | 2020-11-03T03:26:42 | 2020-10-19T11:50:07 | JavaScript | UTF-8 | Python | false | false | 870 | py | # from __future__ import unicode_literals
# from django.db import models
# from django.utils import timezone
# class Room(models.Model):
# name = models.TextField()
# label = models.SlugField(unique=True)
# def __unicode__(self):
# return self.label
# class Message(models.Model):
# room = models.ForeignKey(Room, related_name='messages')
# handle = models.TextField()
# message = models.TextField()
# timestamp = models.DateTimeField(default=timezone.now, db_index=True)
# def __unicode__(self):
# return '[{timestamp}] {handle}: {message}'.format(**self.as_dict())
# @property
# def formatted_timestamp(self):
# return self.timestamp.strftime('%b %-d %-I:%M %p')
# def as_dict(self):
# return {'handle': self.handle, 'message': self.message, 'timestamp': self.formatted_timestamp}
| [
"gomdalp1701@gmail.com"
] | gomdalp1701@gmail.com |
6c5e12aaa4a16fb19a082bd90a169bfa78990d94 | d701c8568aa4a7767dd69534d264b29f7c329964 | /model/model.py | 3d8320d395b8e63fc611cbb8fbe8413a5712f7ad | [] | no_license | jack870601/SE_project_meow | 09f2603f1675396bd16bfcae3ba6f3bedec21fda | 670869a2ce02738b4ed4c6ce70c00ab7d3cf8583 | refs/heads/master | 2020-04-13T19:10:17.189165 | 2019-01-03T07:38:07 | 2019-01-03T07:38:07 | 163,394,728 | 2 | 3 | null | 2019-01-03T07:38:08 | 2018-12-28T09:52:30 | CSS | UTF-8 | Python | false | false | 207 | py | #from models.pg_model import pg_model
from model.postgreSQL import postgreSQL
class model():
dao = postgreSQL()
# dao = pg_model()
def create(self):
model.dao.create(type(self).__name__, self.__dict__) | [
"jack870601@gmail.com"
] | jack870601@gmail.com |
868ea6b3432430b2a859faf729510df6f3772d68 | 76f6391deafac34654dec09639858f001c31bc11 | /random-admin/app/scrapings/bbc.py | 3573c951d43cf6e2cdcc6784480ddf62a119ea89 | [] | no_license | kanazawa-pri/random-admin | 4ab6f7fda319484fc98b2ea910a1c71fc444aeff | 2bbd44466eb3a0ca3f17d2e58785b5552756b3e1 | refs/heads/master | 2020-06-28T21:42:17.557399 | 2019-07-30T14:09:09 | 2019-07-30T14:09:09 | 200,349,232 | 0 | 0 | null | 2019-08-03T08:02:31 | 2019-08-03T08:02:30 | null | UTF-8 | Python | false | false | 962 | py | from bs4 import BeautifulSoup
import requests
import random
import re
def scraping():
    """Scrape the lead article from BBC Japanese.

    Returns a dict with the article text, title, url, image and site name,
    or None when the headline contains a blocked word.
    """
    blocked_words = ["bbc"]

    top_page = BeautifulSoup(
        requests.get('https://www.bbc.com/japanese').text, "html.parser")
    lead = top_page.find(class_="pigeon__column pigeon__column--a")
    headline = lead.find(class_="title-link__title-text").string

    # Skip the article entirely when any blocked word appears in the headline.
    if any(word in headline for word in blocked_words):
        return

    thumb = lead.find(class_="js-delayed-image-load")
    print(thumb)
    thumb_src = thumb.get("data-src")

    article_url = "https://www.bbc.com/" + lead.a.get("href")
    article_page = BeautifulSoup(
        requests.get(article_url).text, "html.parser")
    body = article_page.find(class_="story-body__inner")
    body_text = ''.join(p.text for p in body.find_all("p"))

    return {'article_text': body_text, 'article_title': headline,
            'article_url': article_url, 'article_reporter': '',
            'site_name': 'bbc', 'article_image': thumb_src}
return {'article_text':text,'article_title':title, 'article_url':url,'article_reporter':'', 'site_name':'bbc','article_image': image}
if __name__ == "__main__":
    # Manual smoke test: scrape once and dump the resulting dict.
    print(scraping())
| [
"kanazawak20@yahoo.co.jp"
] | kanazawak20@yahoo.co.jp |
36935416071cd5847133b045897f228cc54e94b1 | c233a3ca87a8c4d99d07008a0833f1317a5ff466 | /run_ColorMatcher.py | ac8ed43f5cd4a8c66dbd5c0f70831af2b16451d7 | [
"MIT"
] | permissive | tknapen/RL_7T_experiment | a5d0d14e66597792343f7dfb95af2136b68d00b3 | 1f2d637db749511f94e36c07fe33ac8184e89f1d | refs/heads/master | 2021-01-22T22:56:45.179301 | 2016-11-14T15:39:34 | 2016-11-14T15:39:34 | 85,590,531 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | import sys
sys.path.append( 'exp_tools' )
from ColorMatcherSession import *
from plot_staircases import *
import appnope
def main():
    """Collect session parameters interactively and run a color-matcher session."""
    initials = raw_input('Your initials: ')
    run_nr = int(raw_input('Run number: '))
    scanner = raw_input('Are you in the scanner (y/n)?: ')
    track_eyes = raw_input('Are you recording gaze (y/n)?: ')
    # Fix: previously any answer other than exactly 'y' or 'n' left
    # tracker_on undefined and crashed below with a NameError.  Now only an
    # explicit 'y' enables gaze recording; anything else disables it.
    tracker_on = track_eyes == 'y'
    # Keep OS X App Nap from throttling the process mid-experiment.
    appnope.nope()
    ts = ColorMatcherSession(initials, run_nr, scanner, tracker_on)
    ts.run()
if __name__ == '__main__':
    # Script entry point: run one interactive session.
    main()
"daan.van.es@gmail.com"
] | daan.van.es@gmail.com |
751fe9d21328194b1acc39adfcbb9d3d11fd3d90 | 7b71da9189de3358ef73b37a3083a56c1ab10772 | /robobench/calibration/pipette_calibration/bradley_thresh.py | 94a9b93a29c8a7910d1f2b93ceba99c66ca91418 | [] | no_license | EndyLab/opentrons | 91ff3b8364c9b1746f7d073875651baa5efaf4c3 | 75d1789ad6ddef556a2c46e6608d5496f9f5ec7d | refs/heads/master | 2021-01-13T13:39:32.443502 | 2019-11-19T22:23:47 | 2019-11-19T22:23:47 | 76,410,678 | 8 | 0 | null | 2020-10-27T21:05:49 | 2016-12-14T00:42:02 | Roff | UTF-8 | Python | false | false | 2,134 | py | # from: https://stackoverflow.com/questions/33091755/bradley-roth-adaptive-thresholding-algorithm-how-do-i-get-better-performance
import numpy as np
from scipy import ndimage
from PIL import Image
import copy
import time
import cv2
def faster_bradley_threshold(image, threshold=75, window_r=5):
percentage = threshold / 100.
window_diam = 2*window_r + 1
# convert image to numpy array of grayscale values
img = np.array(image.convert('L')).astype(np.float) # float for mean precision
# matrix of local means with scipy
means = ndimage.uniform_filter(img, window_diam)
# result: 0 for entry less than percentage*mean, 255 otherwise
height, width = img.shape[:2]
result = np.zeros((height,width), np.uint8) # initially all 0
result[img >= percentage * means] = 255 # numpy magic :)
# convert back to PIL image
return Image.fromarray(result)
def bradley_threshold(image, threshold=75, windowsize=5):
ws = windowsize
image2 = copy.copy(image).convert('L')
w, h = image.size
l = image.convert('L').load()
l2 = image2.load()
threshold /= 100.0
for y in range(h):
for x in range(w):
#find neighboring pixels
neighbors =[(x+x2,y+y2) for x2 in range(-ws,ws) for y2 in range(-ws, ws) if x+x2>0 and x+x2<w and y+y2>0 and y+y2<h]
#mean of all neighboring pixels
mean = sum([l[a,b] for a,b in neighbors])/len(neighbors)
if l[x, y] < threshold*mean:
l2[x,y] = 0
else:
l2[x,y] = 255
return image2
if __name__ == '__main__':
img = Image.open('img_fail.jpg')
"""
t0 = time.process_time()
threshed0 = bradley_threshold(img)
print('original approach:', round(time.process_time()-t0, 3), 's')
threshed0.show()
"""
t0 = time.process_time()
threshed1 = faster_bradley_threshold(img)
print('w/ numpy & scipy :', round(time.process_time()-t0, 3), 's')
open_cv_image = np.array(threshed1)
cv2.imshow("screen cropped", open_cv_image)
threshed1.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"natagoh@stanford.edu"
] | natagoh@stanford.edu |
79ce11ae807730b501809588bdbc2b9dec1e9067 | effce116340b7d937bd285e43b49e1ef83d56156 | /data_files/662 Maximum Width of Binary Tree.py | ee04fcc76e296c46188c5e41ed7b3c9c54194e8b | [] | no_license | DL2021Spring/CourseProject | a7c7ef57d69bc1b21e3303e737abb27bee3bd585 | 108cdd906e705e9d4d05640af32d34bfc8b124da | refs/heads/master | 2023-04-11T18:52:30.562103 | 2021-05-18T09:59:59 | 2021-05-18T09:59:59 | 365,733,976 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 668 | py |
class TreeNode:
    """Plain binary-tree node (LeetCode-style)."""

    def __init__(self, x):
        self.val = x        # node payload
        self.left = None    # left child or None
        self.right = None   # right child or None
class Solution:
    def widthOfBinaryTree(self, root: TreeNode) -> int:
        """Return the maximum width of the binary tree.

        Width of a level is the distance between its leftmost and rightmost
        nodes, counting the missing nodes between them.  Each node is given
        a heap-style position index (children of position p are 2p and
        2p + 1), and the tree is walked level by level.
        """
        if root is None:
            return 0

        widest = 0
        level = [(0, root)]  # (position, node) pairs, left to right
        while level:
            first_pos = level[0][0]
            last_pos = level[-1][0]
            widest = max(widest, last_pos - first_pos + 1)

            next_level = []
            for pos, node in level:
                for child, child_pos in ((node.left, pos * 2),
                                         (node.right, pos * 2 + 1)):
                    if child is not None:
                        next_level.append((child_pos, child))
            level = next_level

        return widest
| [
"1042448815@qq.com"
] | 1042448815@qq.com |
e94107b3d0f8b3efc736784daf10fda144d7be2a | e86f40099817f4bf16f695040ef6096bc026b4a5 | /week9/day1/daily_challenge/gif_site/manage.py | 69f2d398a2f26f2fd87b99bfa13532d95d43e9bd | [] | no_license | idosarue/DI_django | 7474e38c8b8cd668df3af5a0d88c021ada8da887 | cd3f91e51458941a5834fb65d6b8d6698770fc6c | refs/heads/main | 2023-07-16T12:10:07.549009 | 2021-09-02T17:42:30 | 2021-09-02T17:42:30 | 396,374,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module for this project; an existing
    # DJANGO_SETTINGS_MODULE in the environment takes precedence.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gif_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"dodojoker67@gmail.com"
] | dodojoker67@gmail.com |
a7891d5d0f42e0c9354c2dceac5b592e0cd4b985 | 919b71e265c4e10854768d5baa478aea88c9b00f | /encrypt.py | b5128fbe3f61afffbc18eda3cb2b4231d52b2534 | [] | no_license | AlekHadzidedic/linear-cryptanalysis | 1548c6077ef20e75cf7b3e94789a34a95561b077 | 594dd25f0153518155be437b8a5fc6483e44615b | refs/heads/master | 2020-08-27T17:10:38.915296 | 2019-10-25T19:53:27 | 2019-10-25T19:53:27 | 217,441,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,010 | py | import random
from linear_approximation import s_box_encrypt
from utils import binary_splice
def encrypt(plaintext, round_keys):
    """Encrypt a 16-bit *plaintext* int with the toy SPN cipher.

    Runs 3 full rounds (key mix, S-box substitution, bit permutation),
    a 4th round without the permutation, then a final whitening XOR with
    the 5th round key.  round_keys must hold at least 5 16-bit keys.
    Returns the ciphertext as a 16-character binary string.
    """
    num_rounds = 5
    temp_plaintext = plaintext
    rounds_counter = 0
    for i in range(num_rounds - 2):
        # 1 XOR Plaintext with round key
        v = key_xor(temp_plaintext, round_keys[rounds_counter])
        # 2 Substitute each nibble through the S-box
        u = substitute_16_bit(v)
        # 3 Permutate binary string
        w = permutate(u)
        temp_plaintext = w
        rounds_counter += 1
    # Round 4 (no permutation)
    a = key_xor(temp_plaintext, round_keys[rounds_counter])
    b = substitute_16_bit(a)
    # Round 5 (xor with K5) -- key whitening only, no substitution
    rounds_counter += 1
    c = key_xor(int(f'0b{b}', 2), round_keys[rounds_counter])
    # Reassemble the four 4-bit values into one 16-character bit string.
    c_string = ""
    for i in range(len(c)):
        c_string += binary_splice(bin(c[i]), 4)
    return c_string
def substitute_16_bit(v):
    """Run each 4-bit value of *v* (a list of 4 ints) through the S-box.

    Returns the substituted block as a 16-character binary string.
    """
    substituted_string = ""
    for i in range(len(v)):
        # binary_splice pads each substituted nibble back to 4 bits.
        substituted_string += binary_splice(bin(s_box_encrypt(v[i])), 4)
    return substituted_string
def key_xor(plaintext, key):
    """XOR a 16-bit block with a round key and split it into S-box inputs.

    Returns a list of four ints, one per 4-bit nibble (MSB first).
    """
    xor_sum = plaintext ^ key
    bin_num = binary_splice(bin(xor_sum), 16)
    s_box_partition = []
    for i in range(4):
        s_box_partition.append(int('0b' + bin_num[4 * i:4 * (i + 1)], 2))
    return s_box_partition
def permutate(ciphertext):
    """Apply the SPN bit permutation to a 16-character binary string.

    The intended permutation (evident from the old permutation_map) is the
    transpose of the 4x4 bit matrix: the bit at position i (row i // 4,
    column i % 4) moves to position 4 * (i % 4) + i // 4.  Returns the
    permuted block as an int.

    Bug fixed: the previous swap-based loop both swapped the 1<->4 pair
    twice (i=1 and i=4 cancelled each other) and never swapped 11<->14,
    leaving four bit positions unpermuted.
    """
    bits = list(ciphertext)
    permuted = bits[:]
    for i in range(16):
        permuted[4 * (i % 4) + i // 4] = bits[i]
    return int('0b' + ''.join(permuted), 2)
def generate_plaintext_ciphertext_pairs(n, round_keys, seed):
    """Return *n* distinct (plaintext_bits, ciphertext_bits) string pairs.

    Plaintexts are sampled without replacement from 1..2**16-1 using the
    given *seed*, so runs are reproducible for the cryptanalysis step.
    """
    random.seed(seed)
    plaintext_sample = random.sample(range(1, 2 ** 16), n)
    pairs = []
    for plaintext in plaintext_sample:
        pairs.append((binary_splice(bin(plaintext), 16),
                      encrypt(plaintext, round_keys)))
    return pairs
| [
"akhia013@uottawa.ca"
] | akhia013@uottawa.ca |
2e5e31aa710ebee37466a831c41c0e519be2c5f9 | 115a8f05878d89338c49c804444d13ee1b8b6bde | /mbta.py | 0b4c1c03774fbcef33e55e72df79f7779c610f3c | [] | no_license | chautieu/mbta-departure-board | 50389d02b33b61825ea190badd3740a34e7393d8 | 9df0e6c54158b7947cf670c62ec7eb218522c6c3 | refs/heads/master | 2022-05-29T06:28:57.345556 | 2020-05-04T21:27:59 | 2020-05-04T21:27:59 | 261,066,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,817 | py | import requests
import json
import datetime
class RouteType:
    """GTFS route_type codes used in MBTA API query strings."""
    COMMUTER_RAIL_TYPE = "2"  # commuter rail
class MbtaStation:
    """Departure/arrival board state for one MBTA stop.

    Wraps the MBTA v3 REST API; all HTTP access goes through mbta_get().
    Call initialize_data() to populate departures, arrivals and live
    trip-status predictions for the current time.
    """

    def __init__(self, stop_name, stop_id):
        self.stop_name = stop_name
        self.stop_id = stop_id
        # route_id -> route info dict plus 'departure' and 'trip_status'
        self.departures = {}
        # route_id -> {'arrival', 'trip_status', 'line_name'}
        self.arrivals = {}
        # trip_id -> live status string from the predictions endpoint
        self.predictions = {}
        # "HH:MM" lower bound used when querying schedules
        self.start_time = ""

    def get_stop_name(self):
        return self.stop_name

    def get_departures(self):
        return self.departures

    def get_arrivals(self):
        return self.arrivals

    def get_start_time(self):
        return self.start_time

    def initialize_data(self):
        """Refresh the clock, then fetch routes, predictions and schedules."""
        self.update_time()
        routes = self.get_commuter_rail_routes()
        self.extract_route_ids(routes)
        self.update_predictions()
        self.extract_departure_times()
        self.extract_arrival_times()

    def mbta_get(self, api_url, params):
        """GET *api_url* and return the raw requests response.

        Network errors are re-raised as a generic Exception carrying the
        failing URL.  HTTP error statuses are NOT checked here.
        """
        headers = {'user-agent': 'Dataquest'}
        try:
            response = requests.get(api_url, headers=headers, params=params)
            return response
        except Exception:
            raise Exception(f"Unable to get data from MBTA for url: {api_url}")

    def get_commuter_rail_routes(self):
        """Fetch all commuter-rail routes serving this stop (raw response)."""
        api_url = "https://api-v3.mbta.com/routes?"
        # NOTE(review): `filter` shadows the builtin; harmless locally.
        filter = "filter[stop]=" + self.stop_id
        filter += "&filter[type]=" + RouteType.COMMUTER_RAIL_TYPE
        commuter_rails = self.mbta_get(api_url + filter, {})
        return commuter_rails

    def get_next_departure_time(self, route_id):
        """Return the first outbound (direction 0) schedule entry for
        *route_id* at or after start_time."""
        api_url = "https://api-v3.mbta.com/schedules?include=route"
        filter = "&filter[route]=" + route_id
        filter += "&filter[stop]=" + self.stop_id
        filter += "&filter[direction_id]=0"
        filter += "&filter[min_time]=" + self.start_time
        try:
            departure_time = self.mbta_get(api_url + filter, {}).json()
            # [0] is the earliest remaining departure for this route.
            return departure_time['data'][0]
        except Exception:
            raise Exception("Unable to get the next departure time")

    def get_arrival_times(self):
        """Fetch all inbound (direction 1) schedules for this stop at or
        after start_time (raw response)."""
        api_url = "https://api-v3.mbta.com/schedules?"
        filter = "filter[stop]=" + self.stop_id
        filter += "&filter[direction_id]=1"
        filter += "&filter[min_time]=" + self.start_time
        try:
            arrival_times = self.mbta_get(api_url + filter, {})
            return arrival_times
        except Exception:
            raise Exception("Unable to get the arrival times")

    def update_predictions(self):
        """Cache the live status of every predicted commuter-rail trip at
        this stop, keyed by trip id."""
        api_url = "https://api-v3.mbta.com/predictions?"
        filter = "&filter[stop]=" + self.stop_id
        filter += "&filter[route_type]=" + RouteType.COMMUTER_RAIL_TYPE
        predictions = self.mbta_get(api_url + filter, {}).json()['data']
        for prediction in predictions:
            trip_id = prediction['relationships']['trip']['data']['id']
            status = prediction['attributes']['status']
            self.predictions[trip_id] = status

    def get_route_info(self, route_id):
        """Return destination/direction/name/color for the outbound
        direction of *route_id*."""
        api_url = "https://api-v3.mbta.com/routes/"
        route_info = self.mbta_get(api_url + route_id, {}).json()['data']['attributes']
        result = {
            'destination': route_info['direction_destinations'][0],
            'direction': route_info['direction_names'][0],
            'long_name': route_info['long_name'],
            'color': route_info['color']
        }
        return result

    def extract_route_ids(self, routes_response):
        """Seed self.departures with one route-info dict per route id."""
        routes_data = routes_response.json()["data"]
        for route in routes_data:
            route_id = route["id"]
            # NOTE(review): the empty-dict assignment is immediately
            # overwritten by the route_info assignment below.
            self.departures[route_id] = {}
            route_info = self.get_route_info(route_id)
            self.departures[route_id] = route_info

    def update_time(self):
        """Set start_time to the current local time as "HH:MM"."""
        current_time = datetime.datetime.now()
        self.start_time = current_time.strftime("%H:%M")

    def convert_to_readable_time(self, time_string):
        '''
        Convert an API timestamp to a 12-hour "HH:MM AM/PM" display string.

        :params time_string in the format YYYY-MM-DDTHH:MM:SS-HH:MM
                ex: 2020-05-03T23:30:00-04:00
        '''
        # Strip the trailing UTC offset (always 6 characters, e.g. "-04:00").
        utc_start_index = len(time_string) - 6
        time_string = time_string[:utc_start_index]
        time_obj = datetime.datetime.strptime(time_string, '%Y-%m-%dT%H:%M:%S')
        return time_obj.strftime("%I:%M %p")

    def extract_departure_times(self):
        """Attach the next departure time and live status to every route
        already present in self.departures."""
        for route_id in self.departures.keys():
            # get the departure time
            schedule = self.get_next_departure_time(route_id)
            departure = self.convert_to_readable_time(schedule['attributes']['departure_time'])
            trip_id = schedule['relationships']['trip']['data']['id']
            trip_status = "Status Unavailable" if trip_id not in self.predictions else self.predictions[trip_id]
            # add it to the routes
            self.departures[route_id]['departure'] = departure if departure else "No departure for this route at this time"
            self.departures[route_id]['trip_status'] = trip_status

    def extract_arrival_times(self):
        """Populate self.arrivals with one entry per known route.

        NOTE(review): later schedule entries overwrite earlier ones, so
        each route keeps only the LAST arrival in the response -- confirm
        whether the first (soonest) arrival was intended.
        """
        arrivals = self.get_arrival_times().json()['data']
        for schedule in arrivals:
            route_id = schedule['relationships']['route']['data']['id']
            if route_id in self.departures.keys():
                # get the arrival time
                arrival_time = self.convert_to_readable_time(schedule['attributes']['arrival_time'])
                trip_id = schedule['relationships']['trip']['data']['id']
                trip_status = "Status Unavailable" if trip_id not in self.predictions else self.predictions[trip_id]
                arrival = {
                    'arrival': arrival_time,
                    'trip_status': trip_status,
                    'line_name': self.departures[route_id]['long_name']
                }
                # add it to the routes
                self.arrivals[route_id] = arrival
| [
"noreply@github.com"
] | chautieu.noreply@github.com |
8767c9e005f5d20fe17afbf38fe8051ce2f8419e | 7596b2187ecc558aee4ac527b600f2776bea2aa2 | /student/views.py | b066997036220f708f944a8611b20cce000ac82c | [] | no_license | gajendrarahul/newlms | f3d9acb0b920466005600597f23fa7a36656d090 | 7a33d261363ae174195ddaef91c0d52db7b8c576 | refs/heads/master | 2020-09-10T08:17:58.184586 | 2019-11-14T13:15:29 | 2019-11-14T13:15:29 | 221,699,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | from django.shortcuts import render,redirect
from Account.models import Account
from django.contrib.auth.hashers import make_password
from new_lms.password import randomPassword
from new_lms.mail import Mail
from student.models import Student
from django.contrib import messages
# Create your views here.
def student_view(request):
    """Render the student dashboard."""
    return render(request, 'student/dashboard.html')
def manager_studentview(request):
    """Manager's "add student" view.

    GET renders the empty form; POST creates the auth Account (password is
    randomly generated, stored hashed, and e-mailed to the student) plus the
    linked Student profile, then redirects back to the manager dashboard.
    """
    if request.method == 'GET':
        return render(request,'manager/manager_studentview.html')
    else:
        email = request.POST.get('email')
        name = request.POST.get('name')
        # NOTE(review): unlike email/name above this uses indexing, so a
        # missing 'contact' field raises instead of yielding None -- confirm
        # whether that inconsistency is intended.
        contact = request.POST['contact']
        # Plain-text password exists only long enough to hash and e-mail it.
        password = randomPassword()
        user = Account(email=email, password=make_password(password), is_teacher=False,is_manager=False,is_student=True)
        user.save()
        msg = f'{name}, your account is created successfully \n use the following credential to login \n email:{email} \n password:{password}'
        Mail(subject='Account created', message=msg,recipient_list=[email])
        # Student row is linked to the freshly created Account via user_id.
        student = Student(name=name, contact=contact, user_id=user.id)
        student.save()
        messages.add_message(request,messages.SUCCESS,'student Account is created successfully')
return redirect('manager_view') | [
"mahatogajen123@gmail.com"
] | mahatogajen123@gmail.com |
d6ba9957ecd45606826386e031558efca3474a0b | 2c85f5dbef0d4208e6afd6fb6416648badc979af | /vehicle_prices.py | 25730e46af341b915281b8c0797a1028b6c60e88 | [] | no_license | jtanium/turbo-garbanzo | 697c63a2df5c736530af88df60782858f90e3ae6 | 289cdd0d25e97c8b2179e137205f944c2ba0f498 | refs/heads/main | 2023-08-14T16:12:41.007343 | 2021-10-03T03:03:21 | 2021-10-03T03:03:21 | 412,959,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | import numpy as np
def predict(X, w):
    """Return the linear-model output X @ w for every sample in X."""
    return X @ w
def loss(X, Y, w):
    """Mean squared error of the predictions X.w against the labels Y."""
    residuals = np.matmul(X, w) - Y
    return np.average(residuals ** 2)
def gradient(X, Y, w):
    """Gradient of the mean-squared-error loss with respect to the weights w."""
    residuals = np.matmul(X, w) - Y
    return 2 * np.matmul(X.T, residuals) / X.shape[0]
def train(X, Y, iterations, lr):
    """Run batch gradient descent for `iterations` steps at learning rate
    `lr`, logging the loss every 1000 steps, and return the fitted weights."""
    w = np.zeros((X.shape[1], 1))
    step = 0
    while step < iterations:
        if step % 1000 == 0:
            print("Iteration %4d => Loss: %.15f" % (step, loss(X, Y, w)))
        w = w - gradient(X, Y, w) * lr
        step += 1
    return w
# ---- Script entry point: fit a linear model on the used-car dataset ----
# Every CSV column except the last is a feature; the last column is the target.
dataset = np.loadtxt("car_details_v3.csv", delimiter=",", skiprows=1)
y = dataset[:, -1]
dataset = dataset[:, :-1]
# Prepend a bias column of ones so w[0] acts as the intercept.
X = np.column_stack((np.ones(dataset[:, 0].size), dataset))
Y = y.reshape(-1, 1)
w = train(X, Y, iterations=1000000, lr=0.0000000001)
print("\nWeights: %s" % w.T)
print("\nA few predictions:")
for sample in range(5):
    print("X[%d] -> %.4f (label: %d)" % (sample, predict(X[sample], w), Y[sample]))
| [
"jtanium@jtaniums-MacBook-Pro.local"
] | jtanium@jtaniums-MacBook-Pro.local |
881083827d5bbb4da11a5b9d7edec6b217bc34d4 | 033da72a51c76e5510a06be93229a547a538cf28 | /Data Engineer with Python Track/21. Cleaning Data in SQL Server Databases/Chapter/04. Combining, splitting, and transforming data/01-Combining cities and states using +.py | c42e7743cf2ea24d935eb8842b2408c0e660b1ad | [] | no_license | ikhwan1366/Datacamp | d5dcd40c1bfeb04248977014260936b1fb1d3065 | 7738614eaebec446842d89177ae2bc30ab0f2551 | refs/heads/master | 2023-03-06T13:41:06.522721 | 2021-02-17T22:41:54 | 2021-02-17T22:41:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,403 | py | '''
Combining cities and states using +
In this lesson, you learned how to combine columns into one.
The clients table has one column, city, to store the cities where the clients live, and another column, state, to store the state of the city.
| client_id | client_name | client_surname | city | state |
|-----------|-------------|----------------|-----------|----------|
| 1 | Miriam | Antona | Las Vegas | Nevada |
| 2 | Astrid | Harper | Chicago | Illinois |
| 3 | David | Madden | Phoenix | Arizona |
| ... | ... | ... | ... | ... |
You need to combine city and state columns into one, to have the following format: 'Las Vegas, Nevada'.
You will use the + operator to do it.
Instructions 1/2
50 XP
- Concatenate the names of the cities with the states using the + operator without worrying about NULL values.
'''
-- T-SQL string concatenation: + yields NULL when either operand is NULL
-- (NULLs are deliberately ignored in this first step).
SELECT
 	client_name,
	client_surname,
    -- Concatenate city with state
    city + ', ' + state AS city_state
FROM clients
'''
Instructions 2/2
50 XP
- Replace each instance of NULL in city and state with an ISNULL() function, so that if either column has a NULL value, an empty string '' is returned instead.
'''
-- ISNULL substitutes '' for NULL; note that ', ' + state is itself NULL when
-- state is NULL, so the separator disappears together with the state.
SELECT
		client_name,
		client_surname,
    -- Consider the NULL values
	ISNULL(city, '') + ISNULL(', ' + state, '') AS city_state
FROM clients
| [
"surel.chandrapratama@gmail.com"
] | surel.chandrapratama@gmail.com |
805b0ca6e8e91d7b162777326c1e5fd61f5e0315 | f7a4bcf17c8eec0244a657c79136db4cfd73a6cb | /sshkeyrotation.py | 951551a9ca1d272759a10f511d398a2ae39ec75a | [] | no_license | hemanth4984/AWS-Maintanace-Scripts | a1166cba956be12197b47c014810dc98e4d7ee2b | 9eefb5cd445816231b5399d481d86020e68cd567 | refs/heads/master | 2022-07-06T15:53:44.919780 | 2020-05-18T13:00:19 | 2020-05-18T13:00:19 | 263,061,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,718 | py | import boto3
import paramiko
import time
import cmd
import sys
# SECURITY NOTE(review): AWS credentials are hard-coded placeholders here;
# load them from the environment or use an instance role instead of
# committing keys to the repository.
client = boto3.client('ec2', region_name='ap-south-1',aws_access_key_id="*****",aws_secret_access_key="****")
########## Filter Instances With Specified Tags ############
response = client.describe_instances(
    Filters=[
        {
            'Name': 'tag:Name',
            'Values': [
                'TeamDE',
            ]
        }
    ]
)
# Walk every reservation/instance that matched the tag filter and print
# its identifiers.
for reservation in (response["Reservations"]):
    for instance in reservation["Instances"]:
        ####### REMOTE EC2 SERVER DEFAULTS #############
        Id = instance["InstanceId"]
        PrivateIP = instance["PrivateIpAddress"]
        PrivateDNS = instance["PrivateDnsName"]
        # NOTE(review): username/key_filename are assigned but never used;
        # key_filename also reads 'tes.pem' while the SSH calls below use
        # 'test.pem' -- likely a typo.
        username = "ec2-user"
        key_filename = "/home/ec2-user/tes.pem"
        print(Id)
        print(PrivateIP)
        print(PrivateDNS)
print(response)
########### SSH INTO SERVERS ###############
# NOTE(review): this section sits OUTSIDE the loops above, so it runs once
# and connects only to whichever instance the loop variable last held.
# It looks like it was meant to be indented inside the instance loop --
# confirm before relying on it for key rotation across all servers.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
privkey = paramiko.RSAKey.from_private_key_file('/home/ec2-user/test.pem')  # NOTE(review): privkey is never used below
ssh.connect(instance["PrivateIpAddress"],username='ec2-user',key_filename='/home/ec2-user/test.pem')
############ Add Bash Script file for server configuration ###########
bash_script = open("script.sh").read()  # NOTE(review): handle never closed; prefer a `with` block
# execute the BASH script
stdin, stdout, stderr = ssh.exec_command(bash_script)
######### Read the standard output and print it #########
print(stdout.read().decode())
######### print errors if there are any #########
err = stderr.read().decode()
if err:
    print(err)
############ close the connection ##############
ssh.close()
| [
"noreply@github.com"
] | hemanth4984.noreply@github.com |
e8b0605af7202c3f0a99d727e2425a9f5e1106b7 | 86a3d71a0cc887bc7ace611b7d708fdde2ca9770 | /checklines/files.py | 576132ef9315e861592783768c5ffcdb7f382da4 | [
"MIT"
] | permissive | iRyukizo/check_lines | 7ef9db0f558b8ee9f6ccb35ac5b846a489385284 | 1fbf2e335fd6cccb37bab3bf9a4dee184d1ce786 | refs/heads/main | 2023-03-27T18:28:09.122387 | 2021-02-01T08:50:11 | 2021-02-01T08:50:11 | 307,573,525 | 4 | 0 | MIT | 2021-02-01T08:50:12 | 2020-10-27T03:21:13 | Python | UTF-8 | Python | false | false | 1,231 | py | from colorama import Fore, Style
from . import func
class File:
    """A source file plus the per-function line-count checks run on it."""

    def __init__(self, location, fun):
        self._location = location          # path of the file being checked
        self._size = len(fun)
        self._res = [-1] * self._size      # per-function check results (-1 = not run yet)
        self._functions = [func.Function(elmt) for elmt in fun]

    def __str__(self):
        # Location on the first line, then one function per line.
        # join() also handles an empty function list (the previous version
        # indexed self._functions[-1] and raised IndexError on empty input).
        return self._location + "\n" + "\n".join(str(f) for f in self._functions)

    def check(self, ignore):
        """Run the line check for every 'function' entry against the file's
        stripped lines, recording each result in self._res."""
        # Context manager guarantees the handle is closed even if reading
        # raises (the old open()/close() pair leaked it on error).
        with open(self._location) as f:
            lines = [s.strip() for s in f.readlines()]
        for i, elmt in enumerate(self._functions):
            if elmt._type == "function":
                self._res[i] = elmt.check(lines, ignore)

    def default(self, max_lines):
        """OR together every function's default-report flags."""
        res = 0
        for elmt in self._functions:
            res |= elmt.default(max_lines)
        return res

    def remain(self, max_lines, max_len):
        """Print the file header, then OR together each function's
        remaining-budget flags."""
        res = 0
        print("File:", Style.BRIGHT + Fore.CYAN + self._location + Style.RESET_ALL)
        for elmt in self._functions:
            res |= elmt.remain(max_lines, max_len)
        return res
| [
"hugo.moreau@epita.fr"
] | hugo.moreau@epita.fr |
e6b897190c3bf62f874ee374fb18f6eeef1cbeb8 | 475e5c34e673f73829f5fc36783096ebafe48102 | /breakstatement.py | 7f34fc015d199e6b951a2a0c7e97e40fc4f3ac0c | [] | no_license | rushikadlag0911/Python_Practice | 76ec2d02492ce0e14e600371751493f8c8bc7af5 | 3d3487a89bc9853c768b370e93af82feff25b834 | refs/heads/main | 2023-07-18T22:38:39.755689 | 2021-09-16T11:57:27 | 2021-09-16T11:57:27 | 407,145,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | for n in range(2, 10):
    # Look for the smallest factor of n.
    for x in range(2, n):
        if n % x == 0:
            # n is composite: report one factorisation and stop searching.
            print(n, 'equals', x, '*', n//x)
            break
    else:
        # for/else: this branch runs only when the loop finished WITHOUT
        # hitting `break`, i.e. no factor was found, so n is prime.
        print(n, 'is a prime number')
| [
"noreply@github.com"
] | rushikadlag0911.noreply@github.com |
2a751775c9018415f52294943ef56468d380a992 | ec4ae20f4edfca76388d727c73ee757778cbee44 | /src/PhoneMessaging/example.py | 9ddabe4aacb63dbd98b38e6d355363fa4da96647 | [] | no_license | zeaktorres/SecureSpaces | 35a3105ec09caa0b9bf26027b76041b243fb8844 | f210791e58ea9111f66e02f259c03784a171e79a | refs/heads/master | 2022-04-06T00:19:25.897557 | 2020-02-26T01:11:45 | 2020-02-26T01:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | import send_message as SMS
# Build a text message and send it through the send_message (SMS) helper.
# NOTE(review): Message's constructor and sendMessage's transport live in
# the project-local send_message module, not visible here.
myMessage = SMS.Message("Howdy!")
myMessage.sendMessage()
| [
"zeak@Zeaks-MacBook-Air.local"
] | zeak@Zeaks-MacBook-Air.local |
6ff7601ddb4bdacfca03abb8b60d53678d9926dc | e94408865d15b1afc0965a4d0525f124d2d2924c | /round1_code_backup/baseline/parpare_data.py | 9f2802b5318999ae21a00b8af37f08139c0a08b6 | [] | no_license | ngc7292/tianchi-oppo-matching | cc1d266a7faa3aa74fdfa492d6045a9671836c26 | 2d5f9a8759f3e96db36477501bce2ee0c49cf9da | refs/heads/master | 2023-05-09T00:42:36.143363 | 2021-05-11T06:46:25 | 2021-05-11T06:46:25 | 353,206,029 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # -*- coding: utf-8 -*-
"""
__title__="parpare_data"
__author__="ngc7293"
__mtime__="2021/3/17"
"""
train_data_path = "/remote-home/zyfei/project/tianchi/data/gaiic_track3_round1_train_20210228.tsv"
test_data_path = "/remote-home/zyfei/project/tianchi/data/gaiic_track3_round1_testA_20210228.tsv"
vocab_data_path = "./vocab.txt"
raw_text = './raw_text.txt'
print("loading train data...")
train_mlm_data = []
vocab = set()
with open(train_data_path, encoding="utf-8") as f:
for line in f.readlines():
rows = line.split('\t')
for sent in rows[0:2]:
vocab.update(sent.split(' '))
train_mlm_data.append(rows[0] + ' [SEP] ' + rows[1])
train_mlm_data.append(rows[1] + ' [SEP] ' + rows[0])
print("loading test data...")
test_mlm_data = []
with open(test_data_path, encoding="utf-8") as f:
for line in f.readlines():
rows = line.replace("\n","").split('\t')
for sent in rows[0:2]:
vocab.update(sent.split(' '))
test_mlm_data.append(rows[0] + ' [SEP] ' + rows[1])
test_mlm_data.append(rows[1] + ' [SEP] ' + rows[0])
print("save pretrain data...")
all_mlm_data = train_mlm_data + test_mlm_data
with open(raw_text, 'w') as f:
for key in all_mlm_data:
f.write(str(key) + '\n')
vocab = ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]'] + list(vocab)
print("save vocab data...")
with open(vocab_data_path, "w") as fp:
for key in vocab:
fp.write(str(key) + "\n")
| [
"feizhaoye@gmail.com"
] | feizhaoye@gmail.com |
d4b97eea5aae1a2c11ea2460377ccdc186e66704 | 3fd84aef8c5fc5da1e77404e4b242c0b539bd3cd | /challenges/algorithms/implementation/0gradingStudents.py | 26ab38d8d7e4abaecc582579b47abf628f17b1ef | [] | no_license | mrogove/hackerrank | 6948898c219fd4038cf3c6ec5317fa84719faef2 | ebdd9f10c7054c5cd8a1887fad29a2101d687320 | refs/heads/master | 2021-05-12T08:05:18.206578 | 2018-02-23T06:18:26 | 2018-02-23T06:18:26 | 117,267,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | """
every student gets a grade
Any grade <40 is failing
Professor Sam wants to round up to next multiple of 5 IF diff < 3 (e.g. 88 -> 90; 87 -> 87)
Do not round failing grades.
assume 1 <= n <= 60
assume 0 <= grade <= 100
"""
#!/bin/python3
import sys
fail = 40 #definte this constant first - in case failure benchmark changes.
def solve(grades):
# Complete this function
result = []
for e in grades:
d = (e // 5 + 1) * 5 #how many times does it guzinta? (plus next multiple)
if ((d - e) < 3) and (d >= fail):
e = d
result.append(e)
return result
n = int(input().strip())
grades = []
grades_i = 0
for grades_i in range(n):
grades_t = int(input().strip())
grades.append(grades_t)
result = solve(grades)
print ("\n".join(map(str, result)))
| [
"mrogove@gmail.com"
] | mrogove@gmail.com |
956a4a1dda129b01c57a16b2b87b7d0254c5efd0 | d2845579ea6aa51a2e150f0ffe6ccfda85d035ce | /common/python/calculation/spark/table.py | d6895cad3dcd329649fa6bfc23376bd7b4b0091c | [
"Apache-2.0"
] | permissive | as23187/WeFe | d8de9ff626f9f3e5d98e0850b0b717a80fd73e72 | ba92871d4b1d2eef6c606c34795f4575e84703bd | refs/heads/main | 2023-08-22T12:01:06.718246 | 2021-10-28T01:54:05 | 2021-10-28T01:54:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,065 | py | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from typing import Iterable
from common.python.calculation.spark import util
from common.python.common import consts
from common.python.common.consts import NAMESPACE
from common.python.table import Table
from common.python.utils import conf_utils
from common.python.utils.profile_util import log_elapsed
from common.python.utils.split import split_put, split_get
class RDDSource(Table):
    """Table backed by a Spark RDD, a persistent key-value storage
    ("dsource"), or both; each representation is materialized lazily from
    the other on first use."""
    # noinspection PyProtectedMember
    @classmethod
    def from_dsource(cls, session_id: str, dsource):
        # Wrap an existing storage object; the RDD view is built lazily.
        namespace = dsource._namespace
        name = dsource._name
        partitions = dsource._partitions
        return RDDSource(session_id=session_id, namespace=namespace, name=name, partitions=partitions, dsource=dsource)
    @classmethod
    def from_rdd(cls, rdd, job_id: str, namespace: str, name: str):
        # Wrap an existing RDD; the storage view is built lazily.
        partitions = rdd.getNumPartitions()
        return RDDSource(session_id=job_id, namespace=namespace, name=name, partitions=partitions, rdd=rdd)
    def __init__(self, session_id: str,
                 namespace: str,
                 name: str = None,
                 partitions: int = 1,
                 rdd=None,
                 dsource=None):
        """Either ``rdd`` or ``dsource`` must be given; a uuid1-based name
        is generated when ``name`` is omitted."""
        self._valid_param_check(rdd, dsource, namespace, partitions)
        setattr(self, util.RDD_ATTR_NAME, rdd)
        self._rdd = rdd
        self._partitions = partitions
        self._dsource = dsource
        self.schema = {}
        self._name = name or str(uuid.uuid1())
        self._namespace = namespace
        self._session_id = session_id
    def get_name(self):
        return self._name
    def get_namespace(self):
        return self._namespace
    def __str__(self):
        return f"{self._namespace}, {self._name}, {self._dsource}"
    def __repr__(self):
        return f"{self._namespace}, {self._name}, {self._dsource}"
    def _tmp_table_from_rdd(self, rdd, name=None):
        """
        tmp table, with namespace == job_id
        """
        rdd = util.materialize(rdd)
        name = name or f"{self._session_id}_{str(uuid.uuid1())}"
        return RDDSource(session_id=self._session_id,
                         # namespace=self._namespace,
                         namespace=NAMESPACE.PROCESS,
                         name=name,
                         partitions=rdd.getNumPartitions(),
                         rdd=rdd,
                         dsource=None)
    # self._rdd should not be pickled (Spark requires all transformers/actions
    # to be invoked in the driver), so it is dropped from the pickled state.
    def __getstate__(self):
        state = dict(self.__dict__)
        if "_rdd" in state:
            del state["_rdd"]
        return state
    @staticmethod
    def _valid_param_check(rdd, dtable, namespace, partitions):
        # Constructor invariants: at least one data source, a namespace,
        # and a positive partition count.
        assert (rdd is not None) or (dtable is not None), "params rdd and storage are both None"
        assert namespace is not None, "namespace is None"
        assert partitions > 0, "invalid partitions={0}".format(partitions)
    def rdd(self):
        """Return the RDD view, materializing it from storage if needed."""
        if hasattr(self, "_rdd") and self._rdd is not None:
            return self._rdd
        if self._dsource is None:
            raise AssertionError("try create rdd from None storage")
        return self._rdd_from_dtable()
    # noinspection PyProtectedMember,PyUnresolvedReferences
    @log_elapsed
    def _rdd_from_dtable(self):
        """Build (and cache) the RDD by parallelizing the storage contents."""
        storage_iterator = self._dsource.collect(use_serialize=True)
        if self._dsource.count() <= 0:
            storage_iterator = []
        num_partition = self._dsource._partitions
        # If the system forces to specify the number of shards, use the specified number
        num_slices = conf_utils.get_comm_config(consts.COMM_CONF_KEY_SPARK_NUM_SLICES)
        num_partition = int(num_slices) if num_slices else num_partition
        from pyspark import SparkContext
        self._rdd = SparkContext.getOrCreate() \
            .parallelize(storage_iterator, num_partition) \
            .persist(util.get_storage_level())
        return self._rdd
    def dsource(self):
        """
        rdd -> storage
        """
        if self._dsource:
            return self._dsource
        else:
            if not hasattr(self, "_rdd") or self._rdd is None:
                raise AssertionError("try create dtable from None")
            return self._rdd_to_dtable()
    # noinspection PyUnusedLocal
    @log_elapsed
    def _rdd_to_dtable(self, **kwargs):
        # Persist the RDD under this table's own name/namespace and cache
        # the resulting storage handle.
        self._dsource = self.save_as(name=self._name,
                                     namespace=self._namespace,
                                     partition=self._partitions,
                                     persistent=False)._dsource
        return self._dsource
    def get_partitions(self):
        return self._partitions
    # --- RDD transformations: each delegates to an rdd_func helper and
    # --- wraps the resulting RDD in a new temporary table.
    @log_elapsed
    def map(self, func, **kwargs):
        from common.python.calculation.spark.rdd_func import _map
        rtn_rdd = _map(self.rdd(), func)
        return self._tmp_table_from_rdd(rtn_rdd)
    @log_elapsed
    def mapValues(self, func, **kwargs):
        from common.python.calculation.spark.rdd_func import _map_value
        rtn_rdd = _map_value(self.rdd(), func)
        return self._tmp_table_from_rdd(rtn_rdd)
    @log_elapsed
    def mapPartitions(self, func, **kwargs):
        from common.python.calculation.spark.rdd_func import _map_partitions
        rtn_rdd = _map_partitions(self.rdd(), func)
        return self._tmp_table_from_rdd(rtn_rdd)
    @log_elapsed
    def mapPartitions2(self, func, **kwargs):
        # Variant that applies Spark's native mapPartitions directly.
        return self._tmp_table_from_rdd(self.rdd().mapPartitions(func))
    @log_elapsed
    def mapReducePartitions(self, mapper, reducer, **kwargs):
        return self._tmp_table_from_rdd(self.rdd().mapPartitions(mapper).reduceByKey(reducer))
    @log_elapsed
    def applyPartitions(self, func, **kwargs):
        return self.mapPartitions(func)
    @log_elapsed
    def reduce(self, func, key_func=None, **kwargs):
        """Reduce all values with ``func``; when ``key_func`` is given,
        reduce per derived key and return a dict instead."""
        if key_func is None:
            return self.rdd().values().reduce(func)
        return dict(self.rdd().map(lambda x: (key_func(x[0]), x[1])).reduceByKey(func).collect())
    def join(self, other, func=None, **kwargs):
        rdd1 = self.rdd()
        rdd2 = other.rdd()
        # noinspection PyUnusedLocal,PyShadowingNames
        @log_elapsed
        def _join(rdda, rddb, **kwargs):
            # Local wrapper exists so @log_elapsed times only the join itself.
            from common.python.calculation.spark.rdd_func import _join
            return self._tmp_table_from_rdd(_join(rdda, rddb, func))
        return _join(rdd1, rdd2, **kwargs)
    @log_elapsed
    def glom(self, **kwargs):
        from common.python.calculation.spark.rdd_func import _glom
        return self._tmp_table_from_rdd(_glom(self.rdd()))
    @log_elapsed
    def sample(self, fraction, seed=None, **kwargs):
        from common.python.calculation.spark.rdd_func import _sample
        return self._tmp_table_from_rdd(_sample(self.rdd(), fraction, seed))
    @log_elapsed
    def subtractByKey(self, other, **kwargs):
        from common.python.calculation.spark.rdd_func import _subtract_by_key
        return self._tmp_table_from_rdd(_subtract_by_key(self.rdd(), other.rdd()))
    @log_elapsed
    def filter(self, func, **kwargs):
        from common.python.calculation.spark.rdd_func import _filter
        return self._tmp_table_from_rdd(_filter(self.rdd(), func))
    @log_elapsed
    def union(self, other, func=lambda v1, v2: v1, **kwargs):
        # On key collision ``func`` picks the winning value (default: self's).
        from common.python.calculation.spark.rdd_func import _union
        return self._tmp_table_from_rdd(_union(self.rdd(), other.rdd(), func))
    @log_elapsed
    def flatMap(self, func, **kwargs):
        from common.python.calculation.spark.rdd_func import _flat_map
        return self._tmp_table_from_rdd(_flat_map(self.rdd(), func))
    @log_elapsed
    def collect(self, min_chunk_size=0, use_serialize=True, **kwargs):
        # Prefer the storage iterator when available; otherwise collect
        # the RDD into the driver.
        if self._dsource:
            return self._dsource.collect(min_chunk_size, use_serialize)
        else:
            return iter(self.rdd().collect())
    """
    storage api
    """
    def put(self, k, v, use_serialize=True, maybe_large_value=False):
        # Large values are split across multiple storage entries.
        if not maybe_large_value:
            rtn = self.dsource().put(k, v, use_serialize)
        else:
            rtn = split_put(k, v, use_serialize=use_serialize, put_call_back_func=self.dsource().put)
        # Storage changed: drop the cached RDD so it is rebuilt on next use.
        self._rdd = None
        return rtn
    @log_elapsed
    def put_all(self, kv_list: Iterable, use_serialize=True, chunk_size=100000):
        rtn = self.dsource().put_all(kv_list, use_serialize, chunk_size)
        # Storage changed: invalidate the cached RDD.
        self._rdd = None
        return rtn
    def get(self, k, use_serialize=True, maybe_large_value=False):
        if not maybe_large_value:
            return self.dsource().get(k, use_serialize)
        else:
            return split_get(k=k, use_serialize=use_serialize, get_call_back_func=self.dsource().get)
    def delete(self, k, use_serialize=True):
        rtn = self.dsource().delete(k, use_serialize)
        # Storage changed: invalidate the cached RDD.
        self._rdd = None
        return rtn
    def destroy(self):
        # Destroy the backing storage if any; otherwise just drop the RDD.
        if self._dsource:
            self._dsource.destroy()
        else:
            self._rdd = None
        return True
    def put_if_absent(self, k, v, use_serialize=True):
        rtn = self.dsource().put_if_absent(k, v, use_serialize)
        # Storage changed: invalidate the cached RDD.
        self._rdd = None
        return rtn
    # noinspection PyPep8Naming
    def take(self, n=1, keysOnly=False, use_serialize=True):
        """Return up to ``n`` items (or keys when ``keysOnly``) from whichever
        representation is currently materialized."""
        if self._dsource:
            return self._dsource.take(n, keysOnly, use_serialize)
        else:
            rtn = self._rdd.take(n)
            if keysOnly:
                rtn = [pair[0] for pair in rtn]
            return rtn
    # noinspection PyPep8Naming
    def first(self, keysOnly=False, use_serialize=True):
        first = self.take(1, keysOnly, use_serialize)
        return first[0] if first else None
    def count(self, **kwargs):
        if self._dsource:
            return self._dsource.count()
        else:
            return self._rdd.count()
    @log_elapsed
    def save_as(self, name, namespace, partition=None, use_serialize=True, persistent=True, **kwargs) -> 'RDDSource':
        """Persist this table under ``name``/``namespace`` and return the
        new RDDSource wrapping the saved data."""
        if partition is None:
            partition = self._partitions
        partition = partition or self._partitions
        from common.python import RuntimeInstance
        persistent_engine = RuntimeInstance.SESSION.get_persistent_engine()
        if self._dsource:
            _dtable = self._dsource.save_as(name, namespace, partition,
                                            use_serialize=use_serialize,
                                            persistent_engine=persistent_engine)
            return RDDSource.from_dsource(session_id=self._session_id, dsource=_dtable)
        else:
            from common.python.calculation.spark.rdd_func import _save_as_func
            return _save_as_func(self._rdd, name=name, namespace=namespace, partition=partition, persistent=persistent)
| [
"winter.zou@welab-inc.com"
] | winter.zou@welab-inc.com |
65f649c9c12a0a4648e29cef731ddb6c40fbe6ef | 100193a599cd9961356b2c2ee13c734e467b9713 | /cc/apps/coder/migrations/0003_auto__add_participant.py | e0a754b87b3635d299c77eea42dd6f992853ed21 | [
"MIT"
] | permissive | mavroskardia/codechallenge | bd3678003d933b834eddc1d36dda74e53b5afa52 | a5fee4ba73be186d90daafca50819a6817ad3d27 | refs/heads/master | 2016-09-09T17:16:57.818465 | 2015-01-13T14:45:00 | 2015-01-13T14:45:00 | 15,058,542 | 0 | 0 | null | 2014-03-21T21:25:37 | 2013-12-09T20:42:54 | Python | UTF-8 | Python | false | false | 6,721 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Participant'
db.create_table('coder_participant', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('coder', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['coder.Coder'])),
('challenge', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['challenge.Challenge'])),
('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 3, 10, 0, 0))),
))
db.send_create_signal('coder', ['Participant'])
# Removing M2M table for field challenges on 'Coder'
db.delete_table(db.shorten_name('coder_coder_challenges'))
def backwards(self, orm):
# Deleting model 'Participant'
db.delete_table('coder_participant')
# Adding M2M table for field challenges on 'Coder'
m2m_table_name = db.shorten_name('coder_coder_challenges')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('coder', models.ForeignKey(orm['coder.coder'], null=False)),
('challenge', models.ForeignKey(orm['challenge.challenge'], null=False))
))
db.create_unique(m2m_table_name, ['coder_id', 'challenge_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False'})
},
'auth.permission': {
'Meta': {'object_name': 'Permission', 'unique_together': "(('content_type', 'codename'),)", 'ordering': "('content_type__app_label', 'content_type__model', 'codename')"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'blank': 'True', 'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Group']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['auth.Permission']", 'symmetrical': 'False', 'related_name': "'user_set'"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenge.challenge': {
'Meta': {'object_name': 'Challenge'},
'duration': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['coder.Coder']"})
},
'coder.coder': {
'Meta': {'object_name': 'Coder'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '256'}),
'tagline': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '1024'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'unique': 'True', 'to': "orm['auth.User']"}),
'xp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'coder.level': {
'Meta': {'object_name': 'Level'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'starting_xp': ('django.db.models.fields.BigIntegerField', [], {})
},
'coder.participant': {
'Meta': {'object_name': 'Participant'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenge.Challenge']"}),
'coder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['coder.Coder']"}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 10, 0, 0)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'db_table': "'django_content_type'", 'object_name': 'ContentType', 'unique_together': "(('app_label', 'model'),)", 'ordering': "('name',)"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['coder'] | [
"chryso@gmail.com"
] | chryso@gmail.com |
56e0057402c131e7fa97841a81c53100ca87f338 | 288d641777ed1cfe74608e970898256268021537 | /Labs/Lab9_airflow_CNN/.virtualenv/bin/tf_upgrade_v2 | 283e5fababe9be59680f8b969597bec8e116ced5 | [] | no_license | rishvita/Big-Data-Systems-and-Intelligence-Analytics | fbd4a4196616048d3ba716eea31022a21c8e7dc8 | a4cfa6e534f354c39f44f34fc214b6455cb5c21a | refs/heads/main | 2023-05-14T05:31:03.513210 | 2021-05-31T05:25:06 | 2021-05-31T05:25:06 | 334,040,296 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | #!/Users/ng/Downloads/CSYE7245_NidhiGoyal/Team6_CSYE7245_Spring2021/Labs/airflow_CNN/.virtualenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tensorflow.tools.compatibility.tf_upgrade_v2_main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"rishvita@gmail.com"
] | rishvita@gmail.com | |
f6de5f780c60294f59c4fd49a6ee574b9a0d8d34 | 1316cd6763e784811c769c1de577235c921af0de | /Apps/AlignOnBPMs/SAMPL/sourceCode/SAMPLcore/Components/ComponentBase.py | f2ed9848ab5bc4adadc7b8a0aa16629bf0f7f015 | [] | no_license | VELA-CLARA-software/Software | a6fb6b848584e5893fd6939a447d23134ce636cc | 2e2a88ac0b2b03a495c868d2e11e6481e05097c3 | refs/heads/master | 2023-02-05T07:40:58.260798 | 2023-01-27T09:39:09 | 2023-01-27T09:39:09 | 69,860,536 | 7 | 3 | null | 2021-04-07T14:17:07 | 2016-10-03T10:20:46 | Mathematica | UTF-8 | Python | false | false | 565 | py | # SAM to Python Conversion
# DJS August 2017
# Version 0.1
#
from ..SAMPLlab import Beam
class ComponentBase(object):
def __init__(self, length=0, name="", aperture=[]):
#super(ComponentBase, self).__init__(**kwargs)
# device length, in meters
self.length = length
# device name, string
self.name = name
# 1x2 array of elliptical aperture half-axes, in metres
self.aperture = aperture
# Each componet stores last beam that was tracked last (TP added)
self.lastTrackedBeam = Beam.Beam()
| [
"david.dunning@stfc.ac.uk"
] | david.dunning@stfc.ac.uk |
9b2e55341c9f7148c5dfe553e2bec953871d0db2 | 377fc6e13101a2a45826cd118110c790f396a805 | /utpc2014-a.py | 9ac43857b565674f15f63bbc23a3181047f353ff | [] | no_license | number09/atcoder | 4076e7223f424b9923754e73992d6442e0bb0de7 | f521ca1205b254d99744abaf6a7a5bfe69845fe0 | refs/heads/master | 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | li_w = input().split()
li_answer = list()
flag = False
for w in reversed(li_w):
if w != 'not':
flag = True
li_answer.append(w)
else:
if flag == False:
li_answer.append(w)
else:
if li_answer[-1] == 'not':
li_answer = li_answer[:-1]
else:
li_answer.append(w)
print(' '.join(reversed(li_answer)))
| [
"cielo.abierto09@gmail.com"
] | cielo.abierto09@gmail.com |
f98658569da2852dc39597141a14f473e098e230 | d51b4c766661af65b4ee6e7c30f8cb4bdd8603e3 | /python/algorithm/leetcode/91.py | 514aea9145662dfce4819b437de33ec85483955a | [] | no_license | yanxurui/keepcoding | 3e988c76b123d55b32cf7cc35fbffb12c4ccb095 | d6b9f07e2d1437681fa77fee0687ea9b83cab135 | refs/heads/master | 2021-01-24T09:01:41.306597 | 2020-05-21T05:36:04 | 2020-05-21T05:36:04 | 93,400,267 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # https://leetcode.com/problems/decode-ways/discuss/30358/Java-clean-DP-solution-with-explanation
class Solution(object):
    def numDecodings(self, s):
        """Count the decodings of a digit string ('1'->A ... '26'->Z).

        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        # Rolling DP over prefix lengths: two_back / one_back hold the
        # number of decodings of the prefixes ending two and one
        # characters ago (dp[0] = 1 for the empty prefix).
        two_back = 1
        one_back = 0 if s[0] == '0' else 1
        for pos in range(2, len(s) + 1):
            current = 0
            single = int(s[pos - 1])
            pair = int(s[pos - 2:pos])
            if 1 <= single <= 9:
                current += one_back
            if 10 <= pair <= 26:
                current += two_back
            two_back, one_back = one_back, current
        return one_back
if __name__ == '__main__':
    # ``test`` is a project-local helper that runs a callable against a
    # list of (input, expected-output) pairs.
    from testfunc import test
    # (digit string, expected number of decodings) pairs.
    test_data = [
        (
            "12",
            2
        ),
        (
            "226",
            3
        ),
        (
            '0',
            0
        ),
        (
            '10',
            1
        ),
        (
            '00',
            0
        ),
        (
            '01',
            0
        )
    ]
    test(Solution().numDecodings, test_data)
| [
"617080352@qq.com"
] | 617080352@qq.com |
2e36cb560ae64073c54586ed8fd8919c948cf9c4 | 36d6f0b1fe580cc6132116ef883f9fe2e53b8fe3 | /main.py | b7ab6f79d7620d21717aa14278c6f9a49080da2f | [] | no_license | lokkjo/Py27_Task_9 | f4f73bacf082fdaec5b071b788619560fb45be18 | 68bbdfb871c0bcaf53fa7592ed407fd2bddf6caa | refs/heads/master | 2020-07-21T22:36:29.638049 | 2019-09-08T15:13:29 | 2019-09-08T15:13:29 | 206,989,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | import datetime
class OpenWithTimeCount:
    """Context manager that opens a file and reports how long the
    ``with`` block took to execute (start/end timestamps and the
    elapsed time in microseconds)."""

    def __init__(self, file_path, encoding='utf8', t_start=None):
        # BUG FIX: the default used to be ``datetime.datetime.now()``,
        # which is evaluated once at class-definition time, so every
        # instance shared the same stale start time.  ``None`` now means
        # "use the current time at construction".
        self.file_path = file_path
        # NOTE(review): ``encoding`` is stored but never passed to
        # ``open()`` below; at least one caller passes a file *mode*
        # ('rt') in this position, so wiring it up would break them —
        # confirm the intended meaning before changing.
        self.encoding = encoding
        self.t_start = t_start if t_start is not None else datetime.datetime.now()

    def __enter__(self):
        print(f'Время запуска кода: {self.t_start}')
        self.file = open(self.file_path)
        return self.file

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.file.close()
        t_end = datetime.datetime.now()
        print(f'Время окончания работы: {t_end}')
        # BUG FIX: the original subtracted only the ``.microsecond``
        # components of the two timestamps, which is wrong (and can be
        # negative) whenever the span crosses a second boundary; use the
        # full timedelta instead.
        elapsed_us = int((t_end - self.t_start).total_seconds() * 1_000_000)
        print(f'Обработка файла заняла '
              f'{elapsed_us} '
              f'микросекунд.')
if __name__ == '__main__':
    # Scan the text once, logging each matching line (with a running
    # counter) to a per-keyword context file.
    # NOTE(review): 'rt' is passed into the ``encoding`` parameter of
    # OpenWithTimeCount — it looks like a file *mode*; confirm.
    with OpenWithTimeCount('third_crusade.txt', 'rt') as f:
        i_1 = 0  # occurrences of 'Third Crusade'
        i_2 = 0  # occurrences of 'Richard' (only on non-crusade lines)
        for line in f:
            if 'Third Crusade' in line:
                i_1 += 1
                with open('crusade_context.txt', 'a') as doc:
                    doc.write(f'{i_1}: {line}\n')
            elif 'Richard' in line:
                i_2 += 1
                with open('richard_context.txt', 'a') as doc:
                    doc.write(f'{i_2}: {line}\n')
    print(f'\nКрестовый поход упоминается {i_1} раз.')
    print(f'Ричард упоминается {i_2} раз.\n')
| [
"dra-katt@yandex.ru"
] | dra-katt@yandex.ru |
0812527774fef2d427c2e1e56a7966441f10632c | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/Sudoku_II_005_20180620140617.py | 2ded808b8af91e0513d070dc92160645c14e005e | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,433 | py | from random import randint
# Nearly-solved 9x9 boards; blank cells are " " strings, filled cells ints.
sudoku1 = [
    [5, 9, 8, 6, 1, 2, 3, 4, 7],
    [2, 1, 7, 9, 3, 4, 8, 6, 5],
    [6, 4, 3, 5, 8, 7, 1, 2, 9],
    [1, 6, 5, 4, 9, 8, 2, 7, 3],
    [3, 2, 9, 7, 6, 5, 4, 1, 8],
    [7, 8, 4, 3, 2, 1, 5, 9, 6],
    [8, 3, 1, 2, 7, 6, 9, 5, 4],
    [4, 7, 2, 8, 5, 9, 6, 3, 1],
    [9, 5, 6, 1, 4, 3, 7, 8, " "]
]
sudoku2 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
# NOTE(review): sudoku3 is identical to sudoku2 — presumably a
# placeholder for a third puzzle; confirm and replace.
sudoku3 = [
    [9, 8, 7, 4, 3, 2, 5, 6, 1],
    [2, 4, 3, 5, 1, 6, 8, 7, 9],
    [5, 6, 1, 7, 9, 8, 4, 3, 2],
    [3, 9, 5, 6, 4, 7, 2, 1, 8],
    [8, 2, 4, 3, 5, 1, 6, 9, 7],
    [1, 7, 6, 2, 8, 9, 3, 4, 5],
    [7, 1, 2, 8, 6, 3, 9, 5, 4],
    [4, 3, 8, 9, 7, 5, 1, 2, 6],
    [' ', 5, ' ', ' ', 2, ' ', 7, ' ', ' ']
]
def printSudoku():
    """Pretty-print the module-level ``sudoku`` board as a 9x9 grid
    with separator rules before rows 1, 4 and 7 and after row 9.

    NOTE(review): reads the global ``sudoku``, which is only assigned
    by the menu code below — calling this before a board is chosen
    raises NameError.
    """
    i = 0
    while i < 10:
        if i == 0:
            # Column header plus the top rule.
            print(" 1 2 3 4 5 6 7 8 9")
            print(" -------------------------")
        elif i == 3 or i == 6 or i == 9:
            # Horizontal rule between 3x3 boxes (and the bottom rule).
            print(" -------------------------")
        line = "|"  # vertical box separator used by the format string
        if i < 9:
            print('{2} {1} {0[0]} {0[1]} {0[2]} {1} {0[3]} {0[4]} {0[5]} {1} {0[6]} {0[7]} {0[8]} {1}'.format(sudoku[i], line, i+1))
        i = i + 1
    print(" ")
print(" %@@@@@@@ @@@ @@@ (@@@@@@@@@ ,@@@@2@@@@@ @@@, /@@@/ @@@, @@@ ")
print(" @@@* @@@ @@@ (@@( /@@@# .@@@% (@@@ @@@, @@@% @@@, @@@. ")
print(" @@@& @@@ @@@ (@@( @@@* @@@% #@@% @@@,.@@@. @@@, @@@. ")
print(" ,@@@@@@* @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@%@@% @@@, @@@. ")
print(" /@@@@@# @@@ @@@ (@@( (@@% .@@@* ,@@@ @@@,@@@( @@@, @@@. ")
print(" *@@@. @@@ .@@& (@@( @@@. @@@% &@@( @@@, &@@@. @@@* .@@@. ")
print(" &, &@@@ #@@@. ,@@@, (@@( ,&@@@* ,@@@& .@@@@ @@@, (@@@/ #@@@* @@@# ")
print(",@@@@@@@@( (@@@@@@@@% (@@@@@@@@@( #@@@@@@@@@, @@@, ,@@@% ,@@@@@@@@@. \n ")
print("To start game input:")
print(" r - to load random puzzle:")
print(" 1 - to load chart nr 1:")
print(" 2 - to load chart nr 2:")
print(" 3 - to load chart nr 3:")
choice = input("Input here: ")
if choice == "R" or choice == "r":
listaSudoku = [sudoku1, sudoku2, sudoku3]
sudoku_number = randint(0, 2)
print("dupa", sudoku_number)
sudoku = listaSudoku[sudoku_number]
#print("ktore = ", sudoku)
elif int(choice) == 1:
s = 1
sudoku = sudoku
elif int(choice) == 2:
sudoku = sudoku2
elif int(choice) == 3:
sudoku = sudoku3
# Main game loop: show the board, read a move, apply it, test for a win.
while True: # prints Sudoku until is solved
    print("Your sudoku to solve:")
    printSudoku()
    print("Input 3 numbers in format a b c, np. 4 5 8")
    print(" a - row number")
    print(" b - column number ")
    print(" c - value")
    # vprint(" r - reset chart to start\n ")
    x = input("Input a b c: ")
    print("")
    numbers = " 0123456789" # conditions of entering the numbers !
    # Validate the "a b c" shape: 5 chars, digits at 0/2/4, spaces at 1/3.
    # NOTE(review): ``numbers`` includes a space, so e.g. "  1 2" slips
    # through the digit checks — confirm whether that is intended.
    if (len(x) != 5) or (str(x[0]) not in numbers) or (str(x[2]) not in numbers) or (
            str(x[4]) not in numbers) or (str(x[1]) != " ") or (str(x[3]) != " "):
        if x == "r": # reset
            # sudoku =
            print(" Function reset() will be ready in Next Week")
        else:
            print("Error - wrong number format \n ")
        continue
    # Apply the move (1-based row/column from the user, 0-based here).
    sudoku[int(x[0])-1][int(x[2])-1] = int(x[4])
    column1 = 0
    column2 = 0
    try:
        # Win check: every column and every row must sum to 45.
        # NOTE(review): sum-based checking can be fooled by duplicates
        # (e.g. two 9s and no 8) — a real validity check would compare
        # sets; also ``list`` shadows the builtin of the same name.
        i = 0
        list = []
        while i < 9:
            column = 0
            for item in sudoku:
                column = column + item[i]
            list.append(column)
            #p rint(list)
            # print("Suma columny ", i, " = ", column)
            i += 1
        is45 = 0
        for listElement in list:
            if listElement == 45:
                is45 = is45 + 1
        # print("Ile kolumen OK", is45)
        i = 0
        for item in sudoku:
            if sum(item) == 45 and is45 == 9:
                i = i + 1
        if i == 9:
            printSudoku()
            print("@@@@@@@@@@ YOU WIN @@@@@@@@@@")
            break
    except TypeError:
        # Blank cells are " " strings, so summing a row/column with a
        # blank raises TypeError: the board is simply not finished yet.
        print()
| [
"inz.kamil.wos@gmail.com"
] | inz.kamil.wos@gmail.com |
af5fa945e95e98d19143e86961449053c5baa009 | e93ef436dd2cfdd8e3badefd721b639838fd9ef5 | /overview/models.py | a432b8b56ea623e8052b007ca0cd686d07bea4e5 | [] | no_license | sphere-net/spnintra | 38b2384d43bf73842b59c8b30fd4aa6c1a025651 | be2edf021733a239bfc3df5da86e1ac372ff4431 | refs/heads/master | 2023-08-11T02:57:36.772923 | 2021-10-12T01:22:57 | 2021-10-12T01:22:57 | 414,019,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,246 | py | from accounts.models import CustomUser
from django.db import models
#アカウント登録マスタ
class MstAccount(models.Model):
    """Account master (アカウント登録マスタ): one row per login account,
    with display names, default group/company codes, validity flags and
    audit timestamps.  Keyed by the 5-character account code."""
    account_cd = models.CharField(verbose_name='アカウントコード', primary_key=True, unique=True, max_length=5)
    disp_name = models.CharField(verbose_name='表示名', default='', max_length=50)
    disp_name_yomi = models.CharField(verbose_name='ふりがな', default='', max_length=50)
    # '99999' acts as the "unset" sentinel for the code fields below.
    use_func_type_cd = models.CharField(verbose_name='使用機能種別コード', null=True, blank=True, default='99999', max_length=5)
    officer_cd = models.CharField(verbose_name='役職コード', null=True, blank=True, default='99999', max_length=5)
    default_group_cd = models.CharField(verbose_name='デフォルトグループコード', null=True, blank=True, default='99999', max_length=5)
    default_company_cd = models.CharField(verbose_name='デフォルト会社コード', null=True, blank=True, default='99999', max_length=5)
    icon_file_url = models.ImageField(verbose_name='icon画像', null=True, blank=True, upload_to=None, height_field=None, width_field=None, max_length=256)
    user_id = models.ForeignKey(CustomUser, verbose_name='ユーザID', null=True, blank=True, on_delete=models.PROTECT)
    create_dt = models.DateTimeField(verbose_name='作成日', null=True, blank=True, auto_now=False, auto_now_add=False)
    create_account = models.CharField(verbose_name='作成者', null=True, blank=True, max_length=5)
    is_valid = models.BooleanField(verbose_name='無効フラグ', default=False)
    delete_dt = models.DateTimeField(verbose_name='失効日', null=True, blank=True, auto_now=False, auto_now_add=False)
    delete_memo = models.CharField(verbose_name='失効理由', null=True, blank=True, default='', max_length=16)
    last_login_dt = models.DateTimeField(verbose_name='最終ログイン', null=True, blank=True, auto_now=False, auto_now_add=False)

    class Meta:
        verbose_name_plural = 'アカウント'

    def __str__(self):
        return self.account_cd
#管理者マスタ
class MstAdmin(models.Model):
    """Administrator master (管理者マスタ): account codes that have
    administrator rights."""
    account_id = models.AutoField(verbose_name='ID', primary_key=True, unique=True)
    account_cd = models.CharField(verbose_name='アカウントコード', default='', max_length=5)

    class Meta:
        verbose_name_plural = '管理者マスタ'

    def __str__(self):
        # BUG FIX: __str__ must return a str; the AutoField value is an
        # int, which raised TypeError whenever the row was rendered.
        return str(self.account_id)
#使用機能種別マスタ
class MstUseFncType(models.Model):
    """Feature-set master (使用機能種別マスタ): named bundles of
    on/off flags, one per intranet feature, assigned to accounts via
    ``MstAccount.use_func_type_cd``."""
    use_func_type_cd = models.CharField(verbose_name='使用機能種別コード', primary_key=True, unique=True, max_length=5)
    use_func_type_name = models.CharField(verbose_name='使用機能種別名', default='', max_length=24)
    disp_order = models.PositiveIntegerField(verbose_name='表示順', null=True, blank=True, default=0)
    # One boolean per feature; schedule/board/message/task default on.
    is_fnc_t_schedule = models.BooleanField(verbose_name='スケジュール', default=True)
    is_fnc_t_file = models.BooleanField(verbose_name='ファイル管理', default=False)
    is_fnc_t_timecard = models.BooleanField(verbose_name='タイムカード', default=False)
    is_fnc_t_workflow = models.BooleanField(verbose_name='ワークフロー', default=False)
    is_fnc_t_infoboard = models.BooleanField(verbose_name='掲示板', default=True)
    is_fnc_t_message = models.BooleanField(verbose_name='メッセージ', default=True)
    is_fnc_t_mail = models.BooleanField(verbose_name='メール', default=False)
    is_fnc_t_task = models.BooleanField(verbose_name='タスク管理', default=True)
    is_fnc_t_equipment = models.BooleanField(verbose_name='備品管理', default=False)
    is_fnc_t_employee_info = models.BooleanField(verbose_name='社員情報', default=False)
    is_fnc_t_employee = models.BooleanField(verbose_name='社員評価', default=False)

    class Meta:
        verbose_name_plural = '使用機能種別マスタ'

    def __str__(self):
        return self.use_func_type_cd
#グループマスタ
class MstGroup(models.Model):
    """Group master (グループマスタ): group codes and display names;
    accounts join groups through ``MstBelongGroup``."""
    group_cd = models.CharField(verbose_name='グループコード', primary_key=True, unique=True, max_length=5)
    group_name = models.CharField(verbose_name='グループ名', default='', max_length=24)
    disp_order = models.PositiveIntegerField(verbose_name='表示順', null=True, blank=True, default=0)

    class Meta:
        verbose_name_plural = 'グループマスタ'

    def __str__(self):
        return self.group_cd
#グループ登録マスタ
class MstBelongGroup(models.Model):
    """Group membership (グループ登録マスタ): links an account code to a
    group code (many-to-many join table kept as plain code columns)."""
    belong_group_id = models.AutoField(verbose_name='ID', primary_key=True, unique=True)
    group_cd = models.CharField(verbose_name='グループコード', default='', max_length=5)
    account_cd = models.CharField(verbose_name='アカウントコード', default='', max_length=5)

    class Meta:
        verbose_name_plural = 'グループ登録マスタ'

    def __str__(self):
        # BUG FIX: __str__ must return a str; the AutoField value is an
        # int, which raised TypeError whenever the row was rendered.
        return str(self.belong_group_id)
#役職マスタ
class MstOfficer(models.Model):
    """Officer/job-title master (役職マスタ)."""
    # NOTE(review): officer_cd is an auto-increment int here, while
    # other tables store officer_cd as a 5-char CharField — confirm the
    # intended key type.
    officer_cd = models.AutoField(verbose_name='役職コード', primary_key=True, unique=True)
    officer_name = models.CharField(verbose_name='役職名', default='', max_length=24)
    disp_order = models.PositiveIntegerField(verbose_name='表示順', null=True, blank=True, default=0)
    officer_level = models.PositiveIntegerField(verbose_name='役職レベル', null=True, blank=True, default=0)

    class Meta:
        verbose_name_plural = '役職マスタ'

    def __str__(self):
        # BUG FIX: __str__ must return a str; the AutoField value is an
        # int, which raised TypeError whenever the row was rendered.
        return str(self.officer_cd)
#部署マスタ
class MstDepartment(models.Model):
    """Department master (部署マスタ): department codes and names;
    accounts join departments through ``MstBelongDepartment``."""
    department_cd = models.CharField(verbose_name='部署コード', primary_key=True, unique=True, max_length=5)
    # NOTE(review): verbose_name '署名' ("signature") looks like a typo
    # for '部署名' ("department name") — confirm before changing.
    department_name = models.CharField(verbose_name='署名', default='', max_length=24)
    disp_order = models.PositiveIntegerField(verbose_name='表示順', null=True, blank=True, default=0)

    class Meta:
        verbose_name_plural = '部署登録マスタ'

    def __str__(self):
        return self.department_cd
#部署登録マスタ
class MstBelongDepartment(models.Model):
    """Department membership (部署登録マスタ): links an account code to a
    department code together with the account's officer code there."""
    belong_department_id = models.AutoField(verbose_name='ID', primary_key=True, unique=True)
    department_cd = models.CharField(verbose_name='部署コード', default='', max_length=5)
    account_cd = models.CharField(verbose_name='アカウントコード', default='', max_length=5)
    officer_cd = models.CharField(verbose_name='役職コード', default='', max_length=5)

    class Meta:
        verbose_name_plural = '部署登録マスタ'

    def __str__(self):
        # BUG FIX: __str__ must return a str; the AutoField value is an
        # int, which raised TypeError whenever the row was rendered.
        return str(self.belong_department_id)
#プロジェクト設定マスタ
class MstProjectConfig(models.Model):
    """Project configuration master (プロジェクト設定マスタ):
    key/value settings grouped by a category code."""
    pj_config_id = models.AutoField(verbose_name='ID', primary_key=True, unique=True)
    pj_config_kubun_cd = models.CharField(verbose_name='区分ID', default='', max_length=8)
    pj_config_key = models.CharField(verbose_name='キー項目', default='', max_length=8)
    pj_config_value = models.CharField(verbose_name='設定値', default='', max_length=16)
    pj_config_memo = models.CharField(verbose_name='メモ', default='', max_length=32)

    class Meta:
        verbose_name_plural = 'プロジェクト設定マスタ'

    def __str__(self):
        # BUG FIX: returned ``self.belong_department_id``, an attribute
        # that does not exist on this model (copy-paste from
        # MstBelongDepartment) and raised AttributeError; return this
        # model's own primary key as a string instead.
        return str(self.pj_config_id)
| [
"torikoshi4473@gmail.com"
] | torikoshi4473@gmail.com |
24c90e773275e7f451a54c1d751508a4e0f170da | 6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386 | /google/cloud/securitycenter/v1/securitycenter-v1-py/google/cloud/securitycenter_v1/services/security_center/pagers.py | bef157b5d55df896bbcd1981ea658c13a78e2b7b | [
"Apache-2.0"
] | permissive | oltoco/googleapis-gen | bf40cfad61b4217aca07068bd4922a86e3bbd2d5 | 00ca50bdde80906d6f62314ef4f7630b8cdb6e15 | refs/heads/master | 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,304 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.securitycenter_v1.types import notification_config
from google.cloud.securitycenter_v1.types import securitycenter_service
from google.cloud.securitycenter_v1.types import source
class GroupAssetsPager:
    """A pager for iterating through ``group_assets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.GroupAssetsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``group_by_results`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``GroupAssets`` requests and continue to iterate
    through the ``group_by_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.GroupAssetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.GroupAssetsResponse],
            request: securitycenter_service.GroupAssetsRequest,
            response: securitycenter_service.GroupAssetsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.GroupAssetsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.GroupAssetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.GroupAssetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[securitycenter_service.GroupAssetsResponse]:
        # Lazily yield each page; the next request is issued only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[securitycenter_service.GroupResult]:
        # Flatten all pages into a single stream of GroupResult items.
        for page in self.pages:
            yield from page.group_by_results

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class GroupAssetsAsyncPager:
    """A pager for iterating through ``group_assets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.GroupAssetsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``group_by_results`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``GroupAssets`` requests and continue to iterate
    through the ``group_by_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.GroupAssetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.GroupAssetsResponse]],
            request: securitycenter_service.GroupAssetsRequest,
            response: securitycenter_service.GroupAssetsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.GroupAssetsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.GroupAssetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.GroupAssetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.GroupAssetsResponse]:
        # Lazily yield each page; the next request is awaited only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterable[securitycenter_service.GroupResult]:
        # Flatten all pages into a single async stream of GroupResult items.
        async def async_generator():
            async for page in self.pages:
                for response in page.group_by_results:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class GroupFindingsPager:
    """A pager for iterating through ``group_findings`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.GroupFindingsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``group_by_results`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``GroupFindings`` requests and continue to iterate
    through the ``group_by_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.GroupFindingsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.GroupFindingsResponse],
            request: securitycenter_service.GroupFindingsRequest,
            response: securitycenter_service.GroupFindingsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.GroupFindingsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.GroupFindingsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.GroupFindingsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[securitycenter_service.GroupFindingsResponse]:
        # Lazily yield each page; the next request is issued only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[securitycenter_service.GroupResult]:
        # Flatten all pages into a single stream of GroupResult items.
        for page in self.pages:
            yield from page.group_by_results

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class GroupFindingsAsyncPager:
    """A pager for iterating through ``group_findings`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.GroupFindingsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``group_by_results`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``GroupFindings`` requests and continue to iterate
    through the ``group_by_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.GroupFindingsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.GroupFindingsResponse]],
            request: securitycenter_service.GroupFindingsRequest,
            response: securitycenter_service.GroupFindingsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.GroupFindingsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.GroupFindingsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.GroupFindingsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.GroupFindingsResponse]:
        # Lazily yield each page; the next request is awaited only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterable[securitycenter_service.GroupResult]:
        # Flatten all pages into a single async stream of GroupResult items.
        async def async_generator():
            async for page in self.pages:
                for response in page.group_by_results:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListAssetsPager:
    """A pager for iterating through ``list_assets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListAssetsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``list_assets_results`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListAssets`` requests and continue to iterate
    through the ``list_assets_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.ListAssetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.ListAssetsResponse],
            request: securitycenter_service.ListAssetsRequest,
            response: securitycenter_service.ListAssetsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.ListAssetsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListAssetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.ListAssetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[securitycenter_service.ListAssetsResponse]:
        # Lazily yield each page; the next request is issued only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[securitycenter_service.ListAssetsResponse.ListAssetsResult]:
        # Flatten all pages into a single stream of ListAssetsResult items.
        for page in self.pages:
            yield from page.list_assets_results

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListAssetsAsyncPager:
    """A pager for iterating through ``list_assets`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListAssetsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``list_assets_results`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListAssets`` requests and continue to iterate
    through the ``list_assets_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.ListAssetsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.ListAssetsResponse]],
            request: securitycenter_service.ListAssetsRequest,
            response: securitycenter_service.ListAssetsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.ListAssetsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListAssetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.ListAssetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.ListAssetsResponse]:
        # Lazily yield each page; the next request is awaited only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterable[securitycenter_service.ListAssetsResponse.ListAssetsResult]:
        # Flatten all pages into a single async stream of ListAssetsResult items.
        async def async_generator():
            async for page in self.pages:
                for response in page.list_assets_results:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListFindingsPager:
    """A pager for iterating through ``list_findings`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListFindingsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``list_findings_results`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListFindings`` requests and continue to iterate
    through the ``list_findings_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.ListFindingsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.ListFindingsResponse],
            request: securitycenter_service.ListFindingsRequest,
            response: securitycenter_service.ListFindingsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.ListFindingsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListFindingsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.ListFindingsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[securitycenter_service.ListFindingsResponse]:
        # Lazily yield each page; the next request is issued only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[securitycenter_service.ListFindingsResponse.ListFindingsResult]:
        # Flatten all pages into a single stream of ListFindingsResult items.
        for page in self.pages:
            yield from page.list_findings_results

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListFindingsAsyncPager:
    """A pager for iterating through ``list_findings`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListFindingsResponse` object, and
    provides an ``__aiter__`` method to iterate through its
    ``list_findings_results`` field.

    If there are more pages, the ``__aiter__`` method will make additional
    ``ListFindings`` requests and continue to iterate
    through the ``list_findings_results`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.ListFindingsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.ListFindingsResponse]],
            request: securitycenter_service.ListFindingsRequest,
            response: securitycenter_service.ListFindingsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.ListFindingsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListFindingsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.ListFindingsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.ListFindingsResponse]:
        # Lazily yield each page; the next request is awaited only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            yield self._response

    def __aiter__(self) -> AsyncIterable[securitycenter_service.ListFindingsResponse.ListFindingsResult]:
        # Flatten all pages into a single async stream of ListFindingsResult items.
        async def async_generator():
            async for page in self.pages:
                for response in page.list_findings_results:
                    yield response

        return async_generator()

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListNotificationConfigsPager:
    """A pager for iterating through ``list_notification_configs`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListNotificationConfigsResponse` object, and
    provides an ``__iter__`` method to iterate through its
    ``notification_configs`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``ListNotificationConfigs`` requests and continue to iterate
    through the ``notification_configs`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.securitycenter_v1.types.ListNotificationConfigsResponse`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.ListNotificationConfigsResponse],
            request: securitycenter_service.ListNotificationConfigsRequest,
            response: securitycenter_service.ListNotificationConfigsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.securitycenter_v1.types.ListNotificationConfigsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListNotificationConfigsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token can be mutated safely below.
        self._request = securitycenter_service.ListNotificationConfigsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are served from the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterable[securitycenter_service.ListNotificationConfigsResponse]:
        # Lazily yield each page; the next request is issued only while
        # the current response carries a next_page_token.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterable[notification_config.NotificationConfig]:
        # Flatten all pages into a single stream of NotificationConfig items.
        for page in self.pages:
            yield from page.notification_configs

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListNotificationConfigsAsyncPager:
    """Asynchronous pager over ``list_notification_configs`` results.

    Wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListNotificationConfigsResponse`
    and exposes ``__aiter__`` so callers can walk every entry of the
    ``notification_configs`` field, transparently awaiting follow-up
    ``ListNotificationConfigs`` requests while a ``next_page_token`` remains.

    All attributes of the most recently fetched response are reachable
    directly on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.ListNotificationConfigsResponse]],
            request: securitycenter_service.ListNotificationConfigsRequest,
            response: securitycenter_service.ListNotificationConfigsResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.
        Args:
            method (Callable): The awaitable RPC method that produced
                ``response`` and is re-invoked to fetch subsequent pages.
            request (google.cloud.securitycenter_v1.types.ListNotificationConfigsRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListNotificationConfigsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = securitycenter_service.ListNotificationConfigsRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Fall through to the latest response for unknown attributes.
        return getattr(self._response, name)
    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.ListNotificationConfigsResponse]:
        # Yield the page already in hand, then keep awaiting new pages while
        # the service reports more via next_page_token.
        page = self._response
        while True:
            yield page
            token = page.next_page_token
            if not token:
                return
            self._request.page_token = token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
    def __aiter__(self) -> AsyncIterable[notification_config.NotificationConfig]:
        async def _flatten():
            async for page in self.pages:
                for item in page.notification_configs:
                    yield item
        return _flatten()
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListSourcesPager:
    """Synchronous pager over ``list_sources`` results.

    Wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListSourcesResponse` and
    exposes ``__iter__`` so callers can walk every entry of the ``sources``
    field, transparently issuing follow-up ``ListSources`` requests while a
    ``next_page_token`` remains.

    All attributes of the most recently fetched response are reachable
    directly on the pager.
    """
    def __init__(self,
            method: Callable[..., securitycenter_service.ListSourcesResponse],
            request: securitycenter_service.ListSourcesRequest,
            response: securitycenter_service.ListSourcesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.
        Args:
            method (Callable): The RPC method that produced ``response`` and
                is re-invoked to fetch subsequent pages.
            request (google.cloud.securitycenter_v1.types.ListSourcesRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListSourcesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = securitycenter_service.ListSourcesRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Fall through to the latest response for unknown attributes.
        return getattr(self._response, name)
    @property
    def pages(self) -> Iterable[securitycenter_service.ListSourcesResponse]:
        # Yield the page already in hand, then keep fetching while the
        # service reports more pages via next_page_token.
        page = self._response
        while True:
            yield page
            token = page.next_page_token
            if not token:
                return
            self._request.page_token = token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
    def __iter__(self) -> Iterable[source.Source]:
        for page in self.pages:
            for item in page.sources:
                yield item
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
class ListSourcesAsyncPager:
    """Asynchronous pager over ``list_sources`` results.

    Wraps an initial
    :class:`google.cloud.securitycenter_v1.types.ListSourcesResponse` and
    exposes ``__aiter__`` so callers can walk every entry of the ``sources``
    field, transparently awaiting follow-up ``ListSources`` requests while a
    ``next_page_token`` remains.

    All attributes of the most recently fetched response are reachable
    directly on the pager.
    """
    def __init__(self,
            method: Callable[..., Awaitable[securitycenter_service.ListSourcesResponse]],
            request: securitycenter_service.ListSourcesRequest,
            response: securitycenter_service.ListSourcesResponse,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiates the pager.
        Args:
            method (Callable): The awaitable RPC method that produced
                ``response`` and is re-invoked to fetch subsequent pages.
            request (google.cloud.securitycenter_v1.types.ListSourcesRequest):
                The initial request object.
            response (google.cloud.securitycenter_v1.types.ListSourcesResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = securitycenter_service.ListSourcesRequest(request)
        self._response = response
        self._metadata = metadata
    def __getattr__(self, name: str) -> Any:
        # Fall through to the latest response for unknown attributes.
        return getattr(self._response, name)
    @property
    async def pages(self) -> AsyncIterable[securitycenter_service.ListSourcesResponse]:
        # Yield the page already in hand, then keep awaiting new pages while
        # the service reports more via next_page_token.
        page = self._response
        while True:
            yield page
            token = page.next_page_token
            if not token:
                return
            self._request.page_token = token
            page = await self._method(self._request, metadata=self._metadata)
            self._response = page
    def __aiter__(self) -> AsyncIterable[source.Source]:
        async def _flatten():
            async for page in self.pages:
                for item in page.sources:
                    yield item
        return _flatten()
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}<{self._response!r}>"
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
0e41504d380692e4f959215ab53f08b4da54d528 | 3e3baf6632f4a4245883c14c05dc986db9bef352 | /demo/openmrs/wsgi.py | 4720ccb52c9925008356e4214283a3ede623f563 | [
"MIT"
] | permissive | skrulcik/jeeves | 6a1054343d94db93fbb1d0bd31cb6d551884fa40 | d62ec0bf6d1fd5061a5e734283265b8b5ec60423 | refs/heads/master | 2021-05-16T04:08:42.705885 | 2017-09-28T19:30:50 | 2017-09-28T19:30:50 | 105,823,242 | 1 | 0 | null | 2017-10-04T22:02:37 | 2017-10-04T22:02:36 | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for conf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Point Django at the project settings unless the environment already
# names a settings module (e.g. a production override).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"jean.yang.writeme@gmail.com"
] | jean.yang.writeme@gmail.com |
74ff6f2478ac5ca15368de4e447eb193e326a6b7 | 479a62137c28330843973b79b75b107008f755d7 | /ch07/mpg.py | c298917cb8cd9a325996bd73fe4fd1e624bf2106 | [] | no_license | shanester85/Murach-Python-Programming | 1079b911bc791008aa911929e422e9034f7437a4 | 73b14589c090cc38d3ec0a15df1a4e297386faa7 | refs/heads/master | 2023-01-10T01:05:28.364797 | 2020-11-16T22:02:23 | 2020-11-16T22:02:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,558 | py | #!/usr/bin/env python3
import csv #Import of the "CSV module" that allows you to use writer functions and writer objects to write a list to disk.
# Name of the CSV file, resolved against the current working directory,
# in which the trip history is persisted.
FILENAME = "trips.csv"
def write_trips(trips):
    """Overwrite FILENAME with every row of the two-dimensional trips list.

    Each inner list (miles, gallons, mpg) becomes one CSV record.
    ``newline=""`` enables universal-newlines handling as the csv module
    requires.
    """
    with open(FILENAME, "w", newline="") as csv_file:
        csv.writer(csv_file).writerows(trips)
def read_trips():
    """Load the trip history from FILENAME.

    Returns:
        list[list[str]]: one inner list per CSV record (all values are
        strings, exactly as the csv reader produces them).
    """
    with open(FILENAME, newline="") as csv_file:
        return list(csv.reader(csv_file))
def get_miles_driven():
    """Prompt until the user enters a valid, positive number of miles.

    Returns:
        float: the miles-driven value entered by the user.
    """
    while True:
        try:
            miles_driven = float(input("Enter miles driven : "))
        except ValueError:
            # Bug fix: non-numeric input previously crashed the program.
            print("Entry must be a number. Please try again.\n")
            continue
        if miles_driven > 0:
            return miles_driven
        # Zero or negative mileage is rejected and the prompt repeats.
        print("Entry must be greater than zero. Please try again.\n")
def get_gallons_used():
    """Prompt until the user enters a valid, positive number of gallons.

    Returns:
        float: the gallons-of-gas value entered by the user.
    """
    while True:
        try:
            gallons_used = float(input("Enter gallons of gas: "))
        except ValueError:
            # Bug fix: non-numeric input previously crashed the program.
            print("Entry must be a number. Please try again.\n")
            continue
        if gallons_used > 0:
            return gallons_used
        # Zero or negative amounts are rejected and the prompt repeats.
        print("Entry must be greater than zero. Please try again.\n")
def list_trips(trips):
    """Print the trip history as a tab-separated table followed by a blank line."""
    print("Distance\tGallons\tMPG")
    for trip in trips:
        # Columns: miles driven, gallons used, miles per gallon.
        print(f"{trip[0]}\t\t{trip[1]}\t\t{trip[2]}")
    print()
def main():
    """Run the interactive Miles Per Gallon console application.

    Loads the saved history, shows it, then repeatedly collects a trip,
    computes its MPG, persists the updated history, and redisplays it,
    until the user declines to continue.
    """
    print("The Miles Per Gallon application")
    print()
    trips = read_trips()      # history loaded from the CSV file
    list_trips(trips)
    while True:
        miles_driven = get_miles_driven()
        gallons_used = get_gallons_used()
        mpg = round(miles_driven / gallons_used, 2)
        print(f"Miles Per Gallon:\t{mpg}")
        print()
        trips.append([miles_driven, gallons_used, mpg])
        write_trips(trips)    # persist before showing the updated table
        list_trips(trips)
        if input("More entries? (y or n): ").lower() != "y":
            break
    print("Bye")


if __name__ == "__main__":
    # Only start the interactive loop when run as a script.
    main()
#Reference pages used for this exercise include: pages 210 and 211 provides examples for how to write data from a two-dimensional list named "trips" to a CSV file called "trips.csv".
#Pages 212-215 on examples how to read data from a CSV file named "trips.csv" using the "reader() function" as well as returning the data for the trips into a two-dimensional list named "trips".
#Pages 216 and 217 on examples that show how to use a "list_trips() function" that displays the data in the trips list on the console.
#Page 216 and 217 on example how to enhance the "main() function" to get data from the CSV file and listing it to the console by using a "read_trips() function" as well as a "list_trips() function" before use of the "while loop".
#Page 216 and 217 on examples how to enhance the "main() function" to add the last trip that's entered to the trips list after it calculates the "MPG value" as well as displaying the data for the updated "trips" list using the "list_trips() function" inside of the "while loop".
#Note: Also used Chapter 3 for review on relational operators, boolean expressions, string methods, for statements and while loops, the "range() function" and review of the "Miles Per Gallon" program.
| [
"noreply@github.com"
] | shanester85.noreply@github.com |
8e4afcc449f15b7d6b73cbcd4fc8e9b213912c94 | bc441bb06b8948288f110af63feda4e798f30225 | /container_sdk/api/workload/get_summary_pb2.pyi | 14f7cdbe1ae990f7ac049e645c1ccfa3bde880a7 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,748 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from container_sdk.model.container.pod_detail_pb2 import (
PodDetail as container_sdk___model___container___pod_detail_pb2___PodDetail,
)
from container_sdk.model.container.workload_pb2 import (
Workload as container_sdk___model___container___workload_pb2___Workload,
)
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetSummaryRequest(google___protobuf___message___Message):
    """Stub for the GetSummary request message: a single ``instanceId`` field."""
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    instanceId = ... # type: typing___Text
    def __init__(self,
        *,
        instanceId : typing___Optional[typing___Text] = None,
        ) -> None: ...
    # Signature differs by Python major version (bytes vs str/buffer inputs).
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> GetSummaryRequest: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryRequest: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"instanceId",b"instanceId"]) -> None: ...
class GetSummaryResponse(google___protobuf___message___Message):
    """Stub for the GetSummary response: one ``workload`` plus repeated ``pods``."""
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    @property
    def workload(self) -> container_sdk___model___container___workload_pb2___Workload: ...
    @property
    def pods(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[container_sdk___model___container___pod_detail_pb2___PodDetail]: ...
    def __init__(self,
        *,
        workload : typing___Optional[container_sdk___model___container___workload_pb2___Workload] = None,
        pods : typing___Optional[typing___Iterable[container_sdk___model___container___pod_detail_pb2___PodDetail]] = None,
        ) -> None: ...
    # Signature differs by Python major version (bytes vs str/buffer inputs).
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> GetSummaryResponse: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryResponse: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"workload",b"workload"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"pods",b"pods",u"workload",b"workload"]) -> None: ...
class GetSummaryResponseWrapper(google___protobuf___message___Message):
    """Stub for the response envelope: status ``code``/``codeExplain``/``error``
    plus the ``data`` payload (a GetSummaryResponse)."""
    DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
    code = ... # type: builtin___int
    codeExplain = ... # type: typing___Text
    error = ... # type: typing___Text
    @property
    def data(self) -> GetSummaryResponse: ...
    def __init__(self,
        *,
        code : typing___Optional[builtin___int] = None,
        codeExplain : typing___Optional[typing___Text] = None,
        error : typing___Optional[typing___Text] = None,
        data : typing___Optional[GetSummaryResponse] = None,
        ) -> None: ...
    # Signature differs by Python major version (bytes vs str/buffer inputs).
    if sys.version_info >= (3,):
        @classmethod
        def FromString(cls, s: builtin___bytes) -> GetSummaryResponseWrapper: ...
    else:
        @classmethod
        def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetSummaryResponseWrapper: ...
    def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
    def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
    def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
| [
"service@easyops.cn"
] | service@easyops.cn |
fce03c8e1456ccbcd305145c27e222f513c4d844 | c2f92d75d235ff5ed7b213c02c4a0657545ba02f | /oliveapp/home/urls.py | ab2bdd20079d2d83da9374cda7ebebccbcb65b68 | [] | no_license | cash2one/tstpthon | fab6112691eb15a8a26bd168af3f179913e0c4e0 | fc5c42c024065c7b42bea2b9de1e3874a794a30d | refs/heads/master | 2021-01-20T01:52:06.519021 | 2017-04-14T09:50:55 | 2017-04-14T09:50:55 | 89,338,193 | 0 | 1 | null | 2017-04-25T08:46:06 | 2017-04-25T08:46:06 | null | UTF-8 | Python | false | false | 715 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from home import views
import os
# Absolute paths to the static-asset folders that live inside templates/.
css_media = os.path.join(
    os.path.dirname(__file__), 'templates/css/'
)
images_media = os.path.join(
    os.path.dirname(__file__), 'templates/images/'
)
js_media = os.path.join(
    os.path.dirname(__file__), 'templates/js/'
)

# Route the site root to the home view and serve each static folder through
# Django's static-file view (string view paths: legacy Django <= 1.9 style;
# suitable for development serving only).
urlpatterns = patterns('',
    url(r'^$', views.index, name='home'),
    url(r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': images_media}),
    url(r'^css/(?P<path>.*)$', 'django.views.static.serve', {'document_root': css_media}),
    url(r'^js/(?P<path>.*)$', 'django.views.static.serve', {'document_root': js_media}),
)
| [
"yxlz_0910@163.com"
] | yxlz_0910@163.com |
429ff554ac3fbec982f6543fabdf502cbf11eaf3 | 520cc1c536985f72a19e4183e736537a0660029c | /Network/urls.py | 4985dd2a706da8aafe87702c72f79b12e4def8ee | [] | no_license | kswelch53/Network | 9bcce1ebec1ae75100a3198dde779e353daaeb0e | 646a01cb52ffae63ea5d59635f06e9324c248b13 | refs/heads/master | 2021-09-09T04:44:46.666563 | 2018-03-13T23:24:06 | 2018-03-13T23:24:06 | 125,127,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | """Network URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Root URLs go to app_one, /app2/ to app_two, and /admin/ to the Django admin.
urlpatterns = [
    url(r'^', include('apps.app_one.urls', namespace='app1')),
    url(r'^app2/', include('apps.app_two.urls', namespace='app2')),
    url(r'^admin/', admin.site.urls),
]
| [
"kswelch53@gmail.com"
] | kswelch53@gmail.com |
f9a7d373fd0a22027404c7c536075e139ac3a6b3 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/site-packages/django/contrib/gis/db/models/__init__.py | 9c0171e697081762f1b1e195bdfdad25bd682f41 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:051de440079f22fe0ed4d92c8950944a1c2548ee0e08da1419a9fa7424462325
size 817
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
536d2bb7c20e1b661abe24f0f95926c9918497d4 | aff16ae12059c195b33d52939d7c1eafd3bfa365 | /helper/classbook.py | f3ec49e471c7f1f4ef87aa999960bfdb47bd98f9 | [] | no_license | tritochky/Helper-project | 05afb413ce0f61fd730995bf9b052aaa779ddb65 | 1cbd6b6a70318d8d300fd44efba693452e108fc5 | refs/heads/main | 2023-06-25T19:38:14.689067 | 2021-06-30T06:47:40 | 2021-06-30T06:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,003 | py | import re
from collections import UserList
from datetime import datetime
class Field:
    """Base container for a single record attribute, exposed via ``value``."""

    def __init__(self, value):
        self.__payload = value

    @property
    def value(self):
        """Current payload of the field."""
        return self.__payload

    @value.setter
    def value(self, replacement):
        self.__payload = replacement
class AddressBook(UserList):
    """Container of contact records (dicts) with search and paged text display."""

    # NOTE(review): class-level mutable attribute -- this list is shared by
    # every AddressBook instance, so records added through add_record are
    # visible from all books. Confirm this sharing is intentional.
    data = []

    def add_record(self, record):
        # Append one contact record (a dict) to the book.
        self.data.append(record)

    def find_value(self, f_value):
        """Case-insensitively search every field of every record for ``f_value``.

        String fields are substring-matched; list fields (e.g. the phone
        list) are searched element by element. Returns the matching records.
        """
        f_value = f_value.lower()
        result = []
        for i in self:
            for value in i.values():
                if (isinstance(value, str)):
                    value = value.lower()
                    if value.find(f_value) != -1:
                        if i not in result:
                            result.append(i)
                        break
                elif value != None:
                    if (isinstance(value, list)):
                        for j in value:
                            j = j.lower()
                            if j.find(f_value) != -1:
                                result.append(i)
                                break
        return result

    def iterator1(self, n):
        """Yield the records rendered as a text table, ``n`` rows at a time."""
        counter = 0
        result = ""
        for i in self:
            # One header line per contact; empty/None fields print as blanks.
            result += f'|{i["Id"]:<5}| {i["Name"]:<25}| { i["Phones"][0] if len(i["Phones"])>=1 else " ":<15} | {i["Birthday"]if i["Birthday"] else " ":<11}|{i["Address"]if i["Address"] else " ":<30}| {i["E-mail"]if i["E-mail"] else " ":<30}| {i["Tags"] if i["Tags"] else " ":<15}|\n'
            if len(i["Phones"]) > 1:
                # Extra phones each get their own continuation line.
                for elem in i["Phones"][1:]:
                    result += f'| | | {elem: <15} | | | | | \n'
            result += f"{145*'_'}\n"
            # end of the formatted row block for one contact
            counter += 1
            if counter == n:
                result = result.rstrip("\n")
                yield result
                result = ""
                counter = 0
        if result:
            result = result.rstrip("\n")
            yield result
#START OF CHANGING
class Address(Field):
    # NOTE(review): does not call super().__init__, so Field's ``value``
    # property machinery is unused; the raw string lives in ``self.address``.
    def __init__(self, address):
        self.address = address
class Tags(Field):
    # NOTE(review): does not call super().__init__, so Field's ``value``
    # property machinery is unused; the raw tags live in ``self.tags``.
    def __init__(self, tags):
        self.tags = tags
class Id(Field):
    # NOTE(review): does not call super().__init__, so Field's ``value``
    # property machinery is unused; the raw id lives in ``self.id_n``.
    def __init__(self, id_n):
        self.id_n = id_n
class Email(Field):
    # NOTE(review): does not call super().__init__, so Field's ``value``
    # property machinery is unused; the raw address lives in ``self.email``.
    def __init__(self, email):
        self.email = email
class Birthday(Field):
    """Field holding a birthday parsed from 'day.month.year' strings."""

    def __init__(self, value):
        self.__birthday = None
        # Route through the validating setter below.
        self.birthday = value

    @property
    def birthday(self):
        """Birthday formatted as 'dd.mm.YYYY', or None if never set validly."""
        # Bug fix: previously strftime was called on None when the setter
        # had rejected an invalid date string, raising AttributeError.
        if self.__birthday is None:
            return None
        return self.__birthday.strftime('%d.%m.%Y')

    @birthday.setter
    def birthday(self, birthday):
        try:
            self.__birthday = datetime.strptime(birthday, '%d.%m.%Y')
        except Exception:
            print("Incorrect format, expected day.month.year (Example:25.12.1970)")
class Record:
    """One contact: id, name, phone list, and optional extra attributes."""

    def __init__(self, name, id_n, phones=None, birthday=None, address=None, email=None, tags=None):
        self.id_n = id_n
        self.phones = []
        self.birthday = None
        self.address = None
        self.email = None
        self.tags = None
        # NOTE(review): the phones/birthday/address/email/tags constructor
        # arguments are accepted but ignored; values must be supplied through
        # the add_* methods below.
        self.user = {'Id': self.id_n, 'Name': name.name,
                     'Phones': self.phones,
                     'Birthday': self.birthday,
                     'Address': self.address,
                     'E-mail': self.email,
                     'Tags': self.tags}

    def add_address(self, address):
        self.address = address

    def add_email(self, email):
        self.email = email

    def add_id(self, id_n):
        self.id_n = id_n

    def add_phone(self, phone):
        """Append ``phone`` if it matches an optional '+' plus 3-12 digits."""
        phone = str(phone)
        # Bug fix: re.fullmatch never raises, so the old try/except silently
        # dropped invalid numbers without ever printing its message.
        if re.fullmatch('[+]?[0-9]{3,12}', phone):
            self.phones.append(phone)
        else:
            print('Phone must start with + and have 12 digits. Example +380501234567 ADD')

    def remove_phone(self, phone):
        """Remove the first occurrence of ``phone`` from the phone list."""
        # Bug fix: entries of self.phones are plain strings, but the old code
        # compared self.phones[i].phone (AttributeError). The break also
        # prevents indexing past the shortened list after a pop.
        for i in range(len(self.phones)):
            if self.phones[i] == phone:
                self.phones.pop(i)
                break

    def edit_phone(self, phone, new_phone):
        """Replace ``phone`` with ``new_phone`` (validated by add_phone)."""
        self.remove_phone(phone)
        self.add_phone(new_phone)
class Name(Field):
    # NOTE(review): does not call super().__init__, so Field's ``value``
    # property machinery is unused; the raw name lives in ``self.name``.
    def __init__(self, name):
        self.name = name
class Phone(Field):
    """Field holding a single phone number validated as optional '+' plus 3-12 digits."""

    def __init__(self, phone):
        # Removed dead code: a never-read local ``phones`` list and the
        # commented-out __str__ that duplicated __repr__.
        self.phones = list()
        # NOTE(review): assigns the private attribute directly, bypassing the
        # validating ``phone`` setter below -- confirm that is intended.
        self.__phone = phone

    @property
    def phone(self):
        return self.__phone

    @phone.setter
    def phone(self, value):
        # NOTE(review): on invalid input this prints and leaves the phone as
        # an empty string rather than raising.
        self.__phone = ''
        if re.fullmatch('[+]?[0-9]{3,12}', value):
            self.__phone = value
        else:
            print(
                'Phone must start with + and have 12 digits. Example +380501234567')

    def __repr__(self):
        return self.phone
| [
"nvova@i.ua"
] | nvova@i.ua |
bdfe284fa0ae3b2a512b04d8940ec3f261a6a829 | 8efc4b613540ed08e2dceb6c2720c45e58e637ac | /Parity_Check.py | 64ff4b2ffc56c80b21f21952ca3a2e107a0aaee7 | [] | no_license | monishnarendra/Python_Programs | 9fca3e17c9792566ccde1a632491fbc815bab4f7 | 2d5a7c0fa32ad34f116dee1be8e3ad3840813bd0 | refs/heads/master | 2020-04-02T00:12:09.746481 | 2018-10-19T14:12:28 | 2018-10-19T14:12:28 | 153,793,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | class Sender:
def Get_Bits():
n = int(input("Enter the number of bits u are going to send"))
print("Enter the bits in binary")
for i in range(n):
a = int(input())
print(a)
if a == 1 or a == 0:
list1.append(a)
else:
print("please Enter again 0 ot 1 ")
i = i - 1
print(list1)
def Generator(A):
count_ones = 0
for i in range(n):
if A[i] == 1:
count_ones = count_ones + 1
if count_ones % 2 == 0:
return 0
else:
return 1
class Reciver:
    """Receiver side: consumes the transmitted bits and recomputes parity."""

    def Recive_Data():
        print("Data has been recived")

    def Checker(A):
        """Return the parity syndrome of A: 0 if the number of ones is even, else 1.

        BUG FIX: the original looped over range(n) using the module-global n
        (always 0 here), so the syndrome was always 0 and corruption could
        never be detected. Count over the received sequence itself.
        """
        count_ones = 0
        for bit in A:
            if bit == 1:
                count_ones = count_ones + 1
        return count_ones % 2
def Currupt_data():
    """Flip user-selected bit positions in the received copy (global list2)."""
    flips = int(input("Enter the number of bits to be currupted"))
    for _ in range(flips):
        pos = int(input("Enter the Data position to be Currupted"))
        # Toggle the bit at the chosen index.
        list2[pos] = 0 if list2[pos] == 1 else 1
# --- driver: simulate sender -> (optional corruption) -> receiver ---
list1 = []   # bits entered by the sender (parity bit appended afterwards)
list2 = []   # the transmitted copy the receiver sees
n = 0        # number of data bits (set interactively by Sender.Get_Bits)
M = Sender
M.Get_Bits()
Parity_Bit = M.Generator(list1)
list1.append(Parity_Bit)
list2 = list1.copy()
if input("Do u wish to currupt the Data??") == 'y':
    Currupt_data()
if input("Do u wish to send the bit to the Reciver??") == 'y':
    N = Reciver
    N.Recive_Data()
    Syndrome = N.Checker(list2)
    # NOTE(review): for an even-parity codeword (data + parity bit) the
    # accept/reject test should be `Syndrome != 0`; comparing against the
    # sender-side variable only works by accident.
    if Syndrome != Parity_Bit:
        print("Data has been Discarded")
    else:
        print("Bit has no error")
    print("Sender Bits")
    print(list1)
    print("Party_bit = ",Parity_Bit)
    print("Reciver Bits")
    print(list2)
    print("Syndrome_bit = ",Syndrome)
| [
"noreply@github.com"
] | monishnarendra.noreply@github.com |
ab3b7da4ed78ef5e96dfff8f00d0f0fa5bd71a7c | 4020a0415ddcd7215205abc255575529370ab466 | /Module 06 - System Hacking/buffer/verify DLL address.py | 5b346837322f52a02e7ef96518f735afc38c8e8a | [] | no_license | markroxor/CEH-Notes | 0c42bee1c056c27a9acd068d20fd3714f4ecc2e4 | 59e32af042cc839df3ccbd1a69998d737ba822fd | refs/heads/main | 2023-07-30T06:46:55.971063 | 2021-09-30T12:33:40 | 2021-09-30T12:33:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import sys
import socket
# Payload: 1052 filler bytes followed by the DLL pointer (little-endian form
# of 68a98a7b) that overwrites the saved return address.
# BUG FIX: socket.send() requires a bytes-like object on Python 3; the old
# str payload raised TypeError before anything went on the wire.
junk = b"A" * 1052 + b"\x7b\x8a\xa9\x68"

# Establish Connection and Send Junk
try:
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect(("127.0.0.1", 8888))
    conn.send(junk)
    conn.close()
    sys.exit()
except Exception as error:
    # BUG FIX: "str" + Exception raises TypeError on Python 3; stringify it.
    print("Error Occur " + str(error))
## DLL Pointer Address = 68a98a7b ( Little Endian )
## DLL Pointer Address in Big Endian = \x7b\x8a\xa9\x68 | [
"noreply@github.com"
] | markroxor.noreply@github.com |
cdd5297894bf5ee4f0daaa2ae94052135a491e24 | bb90bd8c4cad0dc757568944f0f654a938363c6d | /ecomsite/store/admin.py | 6b86f90e8a05cd848c04d219da64af9d3455ee9b | [] | no_license | jayshah52/E-commerce-django | 153267b21dc5e9187785bb926d7d1e2ed603ad1a | 336895e5ac71ed87e966592c8d48fe7cf5d18258 | refs/heads/main | 2023-04-16T19:58:14.303280 | 2021-04-29T20:38:53 | 2021-04-29T20:38:53 | 356,398,669 | 0 | 1 | null | 2021-04-29T19:00:23 | 2021-04-09T21:08:39 | Python | UTF-8 | Python | false | false | 237 | py | from django.contrib import admin
from .models import *
# Register your models here.
# Expose the store models in the Django admin interface.
admin.site.register(Product)
admin.site.register(Order)
admin.site.register(OrderItem)
admin.site.register(Customer)
admin.site.register(ShippingAddress) | [
"sjay5200@gmail.com"
] | sjay5200@gmail.com |
afc73c49dd4b103e78381366945e0d57107d6148 | f9abe32ae9a9dd25a01187dde74d98214ca32de4 | /migrations/versions/a8679a4d7166_link_icon_and_format.py | 4478ae9dd33e427802abfcc3dcf8cc326f6c4cb9 | [] | no_license | treetrnk/flask_writer | 79bdab2f952d2667d92824b5abab0876b69ff97d | c7b7443b5afc065626850bbde21a38aa30570d1e | refs/heads/master | 2023-09-03T11:41:41.412642 | 2023-08-31T18:54:55 | 2023-08-31T18:54:55 | 181,212,814 | 5 | 3 | null | 2023-02-16T04:40:38 | 2019-04-13T18:28:35 | Python | UTF-8 | Python | false | false | 782 | py | """Link Icon and Format
Revision ID: a8679a4d7166
Revises: 524dbf864261
Create Date: 2020-02-28 08:30:18.512757
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'a8679a4d7166'        # this migration's id
down_revision = '524dbf864261'   # immediate predecessor in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable 100-char string columns 'format' and 'icon' to link."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('link', sa.Column('format', sa.String(length=100), nullable=True))
    op.add_column('link', sa.Column('icon', sa.String(length=100), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the 'icon' and 'format' columns from link."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('link', 'icon')
    op.drop_column('link', 'format')
    # ### end Alembic commands ###
| [
"nhare330@gmail.com"
] | nhare330@gmail.com |
78d2f029f66ce32ec0db128467765b7e3b1410fc | 79453d52907cb83eec7a7b661245dfa459b5e6a9 | /srt_to_txt.py | 7e165751d20852395d07d923abbe79121ca601e5 | [] | no_license | HavinLeung/memeify | a446031bdf6f1328cdb5273cdf0cccab98784c4f | 356d9cfc1323ccd3ea87f40c589d202bf9d17be3 | refs/heads/master | 2023-03-27T09:42:08.116349 | 2021-03-23T18:55:54 | 2021-03-23T18:55:54 | 308,989,216 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | #! /usr/bin/env python3
import sys
import os
import subprocess
def timestr_to_millis(timestr):
    """Convert an SRT 'HH:MM:SS,mmm' timestamp into total milliseconds."""
    parts = timestr.split(':')
    # 'SS,mmm' with the comma removed reads as SS*1000 + mmm directly.
    total = int(parts[2].replace(',', ''))
    total += int(parts[1]) * 60 * 1000
    total += int(parts[0]) * 60 * 60 * 1000
    return total
def transform(s):
    """Turn a (time-range, text) pair into (start_ms, end_ms, text)."""
    times, text = s
    start_str, end_str = times.split(' --> ')
    return timestr_to_millis(start_str), timestr_to_millis(end_str), text
# --- command-line driver ---
if len(sys.argv) != 2:
    print('usage:', sys.argv[0], '<filename>')
    exit(-1)

filename = sys.argv[1]
# Escape spaces so the shell command below sees a single path argument.
escaped_filename = filename.replace(' ', '''\ ''')
# Extract the embedded subtitle track with ffmpeg; 0 means it succeeded.
if os.system(F'ffmpeg -i {escaped_filename} out.srt') == 0:
    with open('out.srt', 'r') as f:
        output = [l.strip() for l in f.readlines()]
    # Stride of 4 pairs each timestamp line with its text line.
    # NOTE(review): this presumes single-line cue text (index / timestamps /
    # text / blank); multi-line cues would break the stride.
    output = zip(output[1::4], output[2::4])
    output = list(map(transform, output))
    out = []
    for i, (start, _, text) in enumerate(output):
        if i == 0:
            out.append(text)
        else:
            _, end, _ = output[i-1]
            # A gap of 2.5s or more between cues starts a new paragraph.
            if start - end >= 2.5*1000:
                out.append(F'\n\n{text}')
            else:
                out.append(text)
    # pbcopy is the macOS clipboard tool; the joined transcript replaces it.
    subprocess.run('pbcopy', input=' '.join(out), universal_newlines=True)
    os.system('rm out.srt')
    print('\n\nSUCCESS\ncopied transcript to clipboard\n')
else:
    print('FAILED')
| [
"havinleung98@gmail.com"
] | havinleung98@gmail.com |
9bed6ed6c401fac8dd4b07157b505d6d45bf5404 | 0a65d42f4f0e491cb2aada408401b94909f821c2 | /Attendance_Monitoring/hrg/hrg_hr/migrations/0007_auto_20200625_1027.py | f121581d75c52217f396b1f22613076fa26f7155 | [] | no_license | jmadlansacay/_Office | 3acde7655784e91c7dcecfc853d4f36cdfeef028 | 7f46449b9f7e8e892e2e0025ba493259197fa592 | refs/heads/main | 2023-07-28T10:23:54.680822 | 2021-09-11T02:28:07 | 2021-09-11T02:28:07 | 379,155,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.2.5 on 2020-06-25 02:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint tblmaster.employeestatus at hrg_hr_ref.employeestatuscode (nullable FK)."""

    dependencies = [
        ('hrg_hr', '0006_auto_20200625_1006'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tblmaster',
            name='employeestatus',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='hrg_hr_ref.employeestatuscode'),
        ),
    ]
| [
"Q034800@mhi.co.jp"
] | Q034800@mhi.co.jp |
46e038b9982caa16616d2ef6759de3e9189f49b5 | e17d9d2aacec03af99188a873f30646c06633d52 | /tests/test_pyshock.py | 71cf3a6eefbbd654747f99a76ce570936af4814b | [
"MIT"
] | permissive | numshub/pyshock | 10978d13f70c1eff832bb957e7d27a199b65e2b6 | 57cef0dbb1481fec6acc02ee3a064a15dc059e51 | refs/heads/master | 2020-05-18T15:06:36.602844 | 2017-03-07T23:17:07 | 2017-03-07T23:28:43 | 84,256,545 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pyshock
----------------------------------
Tests for `pyshock` module.
"""
import numpy as np
import pytest
from pyshock import pyshock
@pytest.fixture
def bands():
    """Band indices 0..20 shared as input by the frequency tests."""
    return np.arange(21)
def test_nth_band():
    """Spot-check nth_octave against known (band count, fraction) pairs."""
    assert pyshock.nth_octave(10) == 3
    assert pyshock.nth_octave(20) == 6
    assert pyshock.nth_octave(40) == 12
    assert pyshock.nth_octave(80) == 24
def test_center_frequency(bands):
    """Center frequencies for bands 0..20 span one decade, 1 Hz to 10 Hz."""
    freqs = pyshock.center_frequency(bands)
    # Expected values are rounded to two decimals; assert_array_equal thus
    # presumes center_frequency rounds its output the same way.
    right = np.array([1., 1.12, 1.26, 1.41, 1.58,
                      1.78, 2., 2.24, 2.51, 2.82,
                      3.16, 3.55, 3.98, 4.47, 5.01,
                      5.62, 6.31, 7.08, 7.94, 8.91, 10.])
    np.testing.assert_array_equal(freqs, right)
| [
"pierpaolo.dafieno@gmail.com"
] | pierpaolo.dafieno@gmail.com |
a5016bfefccde0f94ae1caf65fdcdc907a1c441f | fa7f66e12223a11a17d42c9a672d03c845b604bd | /pyvisa/resources/helpers.py | 4fa00202bba32d4cd5c69c74c7aa0cf9c8952d9e | [
"MIT"
] | permissive | caryan/pyvisa | 5756e65c42810553f6f4b9f14800b5007b9dee0a | 1529fce2ac42ac8b47cf6f2c8ad1de22c9e88488 | refs/heads/master | 2020-12-03T03:31:59.688014 | 2014-08-29T22:44:24 | 2014-08-29T22:44:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,584 | py | # -*- coding: utf-8 -*-
"""
pyvisa.resources.helpers
~~~~~~~~~~~~~~~~~~~~~~~~
Helper functions.
This file is part of PyVISA.
:copyright: 2014 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
from ..compat import string_types
from .. import constants
def _redoc(attribute_name, doc, extra_doc=''):
    """Append VISA-attribute info to *doc* and resolve the attribute name.

    A string name is looked up on ``constants`` and the docstring is
    extended; any other value is returned untouched with its doc.
    """
    if not isinstance(attribute_name, string_types):
        return attribute_name, doc

    text = doc
    if text is None:
        text = ''
    if not text.endswith('\n\n'):
        text += '\n\n'
    text += ':VISA Attribute: %s.' % attribute_name
    if extra_doc:
        text += '\n' + extra_doc
    return getattr(constants, attribute_name), text
def attr(attribute_name, doc=None, ro=False):
    """Build a property proxying a raw VISA attribute.

    When *ro* is true the returned property has no setter.
    """
    attribute_name, doc = _redoc(attribute_name, doc)

    def fget(self):
        return self.get_visa_attribute(attribute_name)

    if ro:
        return property(fget=fget, doc=doc)

    def fset(self, value):
        self.set_visa_attribute(attribute_name, value)

    return property(fget=fget, fset=fset, doc=doc)
def enum_attr(attribute_name, enum_type, doc=None, ro=False):
    """Build a property exposing a VISA attribute as *enum_type*.

    :param attribute_name: VISA attribute name (or resolved constant).
    :param enum_type: enum class used to wrap and validate values.
    :param doc: base docstring; attribute/type info is appended.
    :param ro: when True, no setter is generated.
    """
    attribute_name, doc = _redoc(attribute_name, doc,
                                 ':type: :class:%s.%s' % (enum_type.__module__, enum_type.__name__))

    def getter(self):
        return enum_type(self.get_visa_attribute(attribute_name))

    if ro:
        return property(fget=getter, doc=doc)

    def setter(self, value):
        if value not in enum_type:
            # BUG FIX: the message and its arguments were passed as separate
            # ValueError arguments (logging style), so the %-placeholders
            # were never interpolated; format the message explicitly.
            raise ValueError('%r is an invalid value for attribute %s, should be a %r'
                             % (value, attribute_name, enum_type))
        self.set_visa_attribute(attribute_name, value)

    return property(fget=getter, fset=setter, doc=doc)
def range_attr(attribute_name, min_value, max_value, doc=None, ro=False):
    """Build a property exposing an integer VISA attribute limited to a range.

    :param min_value: smallest accepted value (inclusive).
    :param max_value: largest accepted value (inclusive).
    :param ro: when True, no setter is generated.
    """
    attribute_name, doc = _redoc(attribute_name, doc,
                                 ':range: %s <= value <= %s\n' % (min_value, max_value))

    def getter(self):
        return int(self.get_visa_attribute(attribute_name))

    if ro:
        return property(fget=getter, doc=doc)

    def setter(self, value):
        if not min_value <= value <= max_value:
            # BUG FIX: the message and its arguments were passed as separate
            # ValueError arguments (logging style), so the %-placeholders
            # were never interpolated; format the message explicitly.
            raise ValueError('%r is an invalid value for attribute %s, should be between %r and %r'
                             % (value, attribute_name, min_value, max_value))
        self.set_visa_attribute(attribute_name, value)

    return property(fget=getter, fset=setter, doc=doc)
def boolean_attr(attribute_name, doc=None, ro=False):
    """Build a property exposing a VISA attribute as a Python bool."""
    attribute_name, doc = _redoc(attribute_name, doc,
                                 ':type: bool')

    def fget(self):
        return self.get_visa_attribute(attribute_name) == constants.VI_TRUE

    if ro:
        return property(fget=fget, doc=doc)

    def fset(self, value):
        flag = constants.VI_TRUE if value else constants.VI_FALSE
        self.set_visa_attribute(attribute_name, flag)

    return property(fget=fget, fset=fset, doc=doc)
def char_attr(attribute_name, doc=None, ro=False):
    """Build a property exposing a one-character VISA attribute.

    The raw integer value is converted with chr() on read and ord() on write.
    """
    attribute_name, doc = _redoc(attribute_name, doc,
                                 ':range: 0 <= x <= 255\n:type: int')

    def fget(self):
        return chr(self.get_visa_attribute(attribute_name))

    if ro:
        return property(fget=fget, doc=doc)

    def fset(self, value):
        self.set_visa_attribute(attribute_name, ord(value))

    return property(fget=fget, fset=fset, doc=doc)
| [
"hernan.grecco@gmail.com"
] | hernan.grecco@gmail.com |
16ea2defea7c60d56fa537579e5a2fdc02392c5b | accd72054c5d1ccd198be46b40f8d3b8d7585fad | /version1.0.py | 5ef9797d303fb415388b385528c05a80de4cc435 | [] | no_license | yan-yf/ExcelTools | 34f69005ad9357f0e87fd96bf43a83db8d75c93a | 7e2af639754d78bcbb33414afdaab4ce924f504c | refs/heads/master | 2021-01-19T20:30:47.459157 | 2017-03-03T04:02:26 | 2017-03-03T04:02:26 | 83,755,426 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,309 | py | # -*- coding:utf-8 -*-
import sys
import win32com.client as win32
import xlrd
import os
import wx
import wx.calendar as cal
# Force GBK as the process default codec so the Chinese UI strings survive
# implicit conversions (Python 2 only: reload() re-exposes the otherwise
# hidden sys.setdefaultencoding).
default_encoding = 'gbk'
if sys.getdefaultencoding() != default_encoding:
    reload(sys)
    sys.setdefaultencoding(default_encoding)
version="1.00"   # shown in the main window title
test_date = 0    # set (as a string) by the Calendar dialog
choice1 = 0      # currently selected index into testerList
choice2 = 0      # currently selected index into versionList
versionList = ['0.91','0.92','0.93','0.94','0.95','0.96','0.97','0.98','0.99']
testerList = [u'史建航', u'董森', u'韩伟强', u'任晓莉', u'李婷婷', u'范倩雯', u'董爽', u'仲诗禹']
fileList = [
u'SX5_HMI_测试项目_AIR.xlsx',
u'SX5_HMI_测试项目_BT music.xlsx',
u'SX5_HMI_测试项目_BT Pairing.xlsx',
u'SX5_HMI_测试项目_BT_Calls.xlsx',
u'SX5_HMI_测试项目_CAN Settings.xlsx',
u'SX5_HMI_测试项目_CarPlay.xlsx',
u'SX5_HMI_测试项目_Engineering Mode.xlsx',
u'SX5_HMI_测试项目_General.xlsx',
u'SX5_HMI_测试项目_Home.xlsx',
u'SX5_HMI_测试项目_IPOD.xlsx',
u'SX5_HMI_测试项目_Link.xlsx',
u'SX5_HMI_测试项目_Maintenance.xlsx',
u'SX5_HMI_测试项目_PDC.xlsx',
u'SX5_HMI_测试项目_PhoneContacts.xlsx',
u'SX5_HMI_测试项目_Power_Moding.xlsx',
u'SX5_HMI_测试项目_RADIO.xlsx',
u'SX5_HMI_测试项目_Setting.xlsx',
u'SX5_HMI_测试项目_SWDL.xlsx',
u'SX5_HMI_测试项目_USB.xlsx',
u'SX5_HMI_测试项目_VR.xlsx'
]
class Calendar(wx.Dialog):
    """Dialog showing a calendar control; double-clicking a date stores it
    in the module-global test_date and in the parent's date label."""

    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title, size=(340, 240))
        # Label on the parent panel that displays the chosen date.
        self.datectrl = parent.datectrl
        vbox = wx.BoxSizer(wx.VERTICAL)
        calend = cal.CalendarCtrl(self, -1, wx.DateTime_Now(), \
                                  style = cal.CAL_SHOW_HOLIDAYS|\
                                  cal.CAL_SEQUENTIAL_MONTH_SELECTION)
        vbox.Add(calend, 0, wx.EXPAND|wx.ALL, 20)
        self.Bind(cal.EVT_CALENDAR, self.OnCalSelected, \
                  id=calend.GetId())
        vbox.Add((-1, 20))
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        vbox.Add(hbox, 0, wx.LEFT, 8)
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)
        vbox.Add(hbox2, 0, wx.ALIGN_CENTER|wx.TOP|wx.BOTTOM, 20)
        self.SetSizer(vbox)
        self.Show(True)
        self.Center()

    def OnCalSelected(self, event):
        """Record the picked date and close the dialog."""
        global test_date
        # Drop the 9-character time suffix from the date's string form.
        date = str(event.GetDate())[:-9]
        # Rebuild as '20' + last-field + '/' + leading-fields.
        # NOTE(review): verify the resulting year/day/month field order is
        # the one the spreadsheets expect.
        date = "20" +date[date.rfind("/")+1:]+'/'+date[:-3]
        test_date = date
        print test_date
        self.datectrl.SetLabel(str(date))
        self.Destroy()
class iForm(wx.Frame):
    """Top-level application window hosting the main iPanel."""
    def __init__(self):
        wx.Frame.__init__(self, parent=None, title=u"数据表格处理"+version, size = (800, 600))
        panel = iPanel(self, -1)
class iPanel(wx.Panel):
    """Main panel: pick a folder of test spreadsheets, then stamp every
    workbook with the tester, version and date chosen in the controls."""

    def __init__(self, parent, id):
        wx.Panel.__init__(self, parent, -1, style=wx.TAB_TRAVERSAL|wx.CLIP_CHILDREN)
        # Left side: list of matched files (index / name / status columns).
        self.listctrl = wx.ListCtrl(self, -1, pos=(0,0), size=(500,600),style=wx.LB_SINGLE )#wx.LC_NO_HEADER
        self.listctrl.InsertColumn(0, u"序列",width=60)
        self.listctrl.InsertColumn(1, u"输入文件名",width=380)
        self.listctrl.InsertColumn(2, u"状态",width=60)
        # Right side: folder path, action buttons and tester/version/date pickers.
        self.dirpath_StaticText=wx.StaticText(self, -1, u"作业路径:", (540, 20))
        self.searfile_button = wx.Button(self, -1,u'打开文件夹', pos=(550, 50),size = (90, 60))
        self.irun_button = wx.Button(self, -1,u'开始工作!!', pos=(550, 120),size = (90, 60))
        self.idate_button = wx.Button(self, -1,u'日期', pos=(540, 255),size = (60, 20))
        wx.StaticText(self, -1, u"测试版本:", (540, 225))
        wx.StaticText(self, -1, u"测试人:", (540, 195))
        self.TesterChoice = wx.Choice(self, -1, (610, 190), choices=testerList)
        self.VersionChoice = wx.Choice(self, -1, (610, 220), choices=versionList)
        self.TesterChoice.Bind(wx.EVT_CHOICE, self.onTesterList)
        self.VersionChoice.Bind(wx.EVT_CHOICE, self.onVersionList)
        self.Bind(wx.EVT_BUTTON,self.OnAddLocalWork,self.searfile_button)
        self.Bind(wx.EVT_BUTTON,self.OnRun,self.irun_button)
        self.Bind(wx.EVT_BUTTON,self.OnDate,self.idate_button)
        self.datectrl=wx.StaticText(self, -1, "", pos=(600, 255))
        self.logText = wx.TextCtrl(self, -1, "", pos=(540, 300),size = (200,200),style = wx.TE_MULTILINE )

    def onTesterList(self,event):
        # Remember which tester was selected (index into testerList).
        global choice1
        choice1 = self.TesterChoice.GetSelection()
        print testerList[choice1]

    def onVersionList(self,event):
        # Remember which version was selected (index into versionList).
        global choice2
        choice2 = self.VersionChoice.GetSelection()
        print versionList[choice2]

    def OnDate(self,event):
        # Open the calendar dialog; it writes the chosen date to self.datectrl.
        mydate = Calendar(self,-1,u'请双击选择日期')

    def OnAddLocalWork(self,event):
        """Ask for a working folder and list the recognised Excel files in it."""
        if self.listctrl.GetItemCount() > 0:
            # NOTE(review): ClearList is not defined anywhere on this class,
            # so re-opening a folder would raise AttributeError here.
            self.ClearList()
        imessage="Add Input Excel files"
        dlg = wx.DirDialog(self, message=imessage,
                           defaultPath=os.getcwd(),
                           style=wx.DD_CHANGE_DIR | wx.DEFAULT_DIALOG_STYLE )
        if dlg.ShowModal() == wx.ID_OK:
            self.path = dlg.GetPath()
            self.dirpath_StaticText.SetLabel(self.path)
            self.GetFileList(self.path)
            self.OutPutFileList()
        dlg.Destroy()

    def OnRun(self,event):
        """Stamp every listed workbook with the chosen tester/version/date."""
        global test_date
        global choice2
        global choice1
        if self.listctrl.GetItemCount() <= 0:
            wx.MessageBox(u'No file exist!','Info',wx.OK|wx.ICON_INFORMATION)
            return
        print self.path
        self.doExcel(self.path,testerList[choice1],versionList[choice2],test_date)
        wx.MessageBox(u'完成!',u'哈哈',wx.OK|wx.ICON_INFORMATION)

    def GetFileList(self,filestr):
        """Collect the known test workbooks found directly inside *filestr*."""
        self.FileList = []
        try:
            FileNames=os.listdir(filestr)
        except Exception, e:
            wx.MessageBox(u'No file exist!'+str(e),'Info',wx.OK|wx.ICON_INFORMATION)
        for EachFile in FileNames:
            # NOTE(review): 'and' binds tighter than 'or', so any .xls file
            # passes this test even when it is not listed in fileList.
            if ( os.path.splitext(EachFile)[1][1:] == "xls" \
                 or os.path.splitext(EachFile)[1][1:] == "xlsx" \
                 and EachFile in fileList ):
                self.FileList.append(EachFile)
        if len(self.FileList) <= 0:
            # NOTE(review): 'e' is only bound when listdir failed above;
            # reaching this branch without an exception raises NameError.
            wx.MessageBox(u'No file exist!'+str(e),'Info',wx.OK|wx.ICON_INFORMATION)

    def OutPutFileList(self):
        """Show every collected file in the list control, status '×' (pending)."""
        for inum in range(0,len(self.FileList)):
            self.listctrl.InsertStringItem(inum, str(inum+1))
            self.listctrl.SetStringItem(inum, 1, self.FileList[inum])
            self.listctrl.SetStringItem(inum, 2, u"×")

    def doexcel_row_abc(self,number):
        """Convert a 1-based column number into Excel letters (e.g. 27 -> 'AA').

        NOTE(review): despite the name this produces COLUMN letters, and for
        number <= 26 chr(64 + number/26) yields '@' (e.g. 26 -> 'A@'); the
        callers appear to always pass numbers past column Z.
        """
        if number%26==0 and number != 26:
            return chr(64+number/26-1)+'Z'
        else:
            return chr(64+number/26)+chr(64+number%26)

    def search_file(self,path,file_type):
        """Breadth-first walk under *path* collecting fileList entries ending in *file_type*."""
        queue = []
        queue.append(path);
        fpath=[]
        while len(queue) > 0:
            tmp = queue.pop(0)
            if(os.path.isdir(tmp)):
                for item in os.listdir(tmp):
                    queue.append(os.path.join(tmp, item))
            elif(os.path.isfile(tmp)):
                name= os.path.basename(tmp)
                dirname= os.path.dirname(tmp)
                full_path = os.path.join(dirname,name)
                abspath=os.path.abspath(tmp);
                if name[-1*len(file_type):] == file_type and name in fileList:
                    fpath.append(full_path)
        return fpath

    def doExcel(self,fpath,tester,test_version,test_date):
        """Open each matched workbook over COM and append the result columns.

        For every worksheet from the 4th on: locate the last used row, copy
        the previous result-column group one group to the right, then fill
        the version / date / tester cells for each test row.
        """
        ####################################################
        fpath = self.search_file(fpath,'xlsx')
        file_num=0
        for k in fpath:
            excel = win32.gencache.EnsureDispatch('Excel.Application')
            fname = k
            data = xlrd.open_workbook(fname)   # xlrd for fast reads
            wb = excel.Workbooks.Open(fname)   # COM for writes / copy-paste
            # whether this file is one of the test workbooks
            self.logText.AppendText(k+"\n")
            print k
            for i in range(4,len(wb.Worksheets)+1):
                table = data.sheets()[i-1]
                nrows = table.nrows+1 # the row after the last row containing text
                ncols = table.ncols # the last column containing text
                # Walk upwards to find the true last filled row.
                for j in range(nrows,10,-1):
                    if(wb.Worksheets[i].Cells(j,ncols-5).Value!=None):
                        nrows=j
                        break
                ncols = ncols +1 # start copying into the next column
                self.logText.AppendText(wb.Worksheets[i].Name+"\n")
                self.logText.AppendText(u"行数:"+str(nrows)+"\n")
                print wb.Worksheets[i].Name
                print "ncols:"+str(ncols)
                print "nrows:"+str(nrows)
                #if(nrows >= 685):
                #    wb.Worksheets[i].Range(self.doexcel_row_abc(ncols-6)+'10:'+self.doexcel_row_abc(ncols-1)+'685').Copy()
                #    wb.Worksheets[i].Range(self.doexcel_row_abc(ncols)+'10').PasteSpecial()
                #    wb.Worksheets[i].Range(self.doexcel_row_abc(ncols-6)+'686:'+self.doexcel_row_abc(ncols-1)+str(nrows)).Copy()
                #    wb.Worksheets[i].Range(self.doexcel_row_abc(ncols)+'686').PasteSpecial()
                #else:
                if(nrows<600):
                    if(wb.Worksheets[i].Cells(13,ncols-4).Value!=None):
                        # Bulk-copy the whole previous column group at once.
                        copy_range = self.doexcel_row_abc(ncols-6)+'10:'+self.doexcel_row_abc(ncols-1)+str(nrows)
                        #print u"拷贝区域"+copy_range
                        wb.Worksheets[i].Range(copy_range).Copy()
                        wb.Worksheets[i].Range(self.doexcel_row_abc(ncols)+'10').PasteSpecial()
                        #wb.Worksheets[i].Range(self.doexcel_row_abc(ncols-6)+'9:'+self.doexcel_row_abc(ncols-1)+str(nrows)).Copy()
                        #wb.Worksheets[i].Range(self.doexcel_row_abc(ncols)+'9').PasteSpecial()
                    else:
                        # Fall back to row-by-row copying.
                        for nr in range(9,nrows+1):
                            wb.Worksheets[i].Range(self.doexcel_row_abc(ncols-6)+str(nr)+':'+self.doexcel_row_abc(ncols-1)+str(nr)).Copy()
                            wb.Worksheets[i].Range(self.doexcel_row_abc(ncols)+str(nr)).PasteSpecial()
                # Stamp version / date / tester next to every remaining test row.
                for j in range(12,nrows+1):
                    if(wb.Worksheets[i].Cells(j,ncols-6).Value!=None):
                        wb.Worksheets[i].Cells(j,ncols).Value = test_version
                        wb.Worksheets[i].Cells(j,ncols+1).Value = test_date
                        wb.Worksheets[i].Cells(j,ncols+2).Value = tester
            self.logText.AppendText("OK"+"\n")
            print "OK"
            self.listctrl.SetStringItem( file_num , 2, u"√")
            file_num=file_num+1
            wb.Save()
            wb.Close()
            excel.Application.Quit()
        return 0
class iApp(wx.App):
    """Application class: bootstraps wx and shows the main iForm window."""
    def __init__(self):
        wx.App.__init__(self, 0)
        return None
    def OnInit(self):
        # Called by wxPython at startup; must return True to keep running.
        self.MainFrame = iForm()
        self.MainFrame.Show(True)
        return True
if __name__ == '__main__':
    # Script entry point: build the app and hand control to the wx event loop.
    app = iApp()
    app.MainLoop()
"noreply@github.com"
] | yan-yf.noreply@github.com |
fc6bbf38d636183a0b0603685d52f4c702b13dfc | f4bfc21c1d75e091bc6a59480459ecbcbbcadb3f | /lincRNA-RBP/bin/conservation_intersect.py | 2f2e4e5f16b5a97e9266c3958f47f2af7941ad7d | [
"MIT"
] | permissive | cshukla/biostat297-project | e3ee9bf312e04b2f5f61b3e87532ca4bad14da2d | 0d03d2b65ef4a5fd19da057652e6379a2a9c7e35 | refs/heads/master | 2021-09-01T07:00:40.951057 | 2017-12-25T14:54:37 | 2017-12-25T14:54:37 | 115,344,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,808 | py | #!/usr/bin/env python
from optparse import OptionParser
from bx.intervals.intersection import Interval, IntervalTree
import gzip, glob, os, sys, subprocess
################################################################################
# conservation_intersect.py
#
# Intersect a list of segments (e.g. lincRNAs) in gff format with the multiZ
# blocks and print out the phastCons/PhyloP scores.
#
# Assumes that the gff entries are disjoint which can be accomplished using
# mergeBed.
#
# mergeBed has a little quirk where a 1 bp gff entry will be changed into
# a 2 bp entry, which causes very slight differences between using the '-l'
# option and not.
################################################################################
################################################################################
# main
################################################################################
def main():
    """Parse options, build per-chromosome interval trees from the GFF, and
    print conservation scores for every overlapping wiggle block."""
    usage = 'usage: %prog [options] <gff file>'
    parser = OptionParser(usage)
    parser.add_option('-c', dest='conservation_type', default='phylop', help='Conservation type to use [phastcons|phylop] [Default: %default]')
    (options,args) = parser.parse_args()

    if len(args) != 1:
        parser.error('Must provide gff file to intersect')
    gff_file = args[0]

    cons_dir = '%s/research/common/data/%s' % (os.environ['HOME'],options.conservation_type)
    if not os.path.isdir(cons_dir):
        parser.error('Must specify conservation type as "phylop" or "phastcons"')

    # build interval trees
    print >> sys.stderr, 'Building interval trees ...',
    chr_features = {}
    # Merge overlapping entries first so the trees hold disjoint intervals;
    # the +1 converts mergeBed's 0-based start to 1-based inclusive.
    p = subprocess.Popen('sortBed -i %s | mergeBed -i -' % gff_file, shell=True, stdout=subprocess.PIPE)
    for line in p.stdout:
        a = line.split('\t')
        chr_features.setdefault(a[0], IntervalTree()).insert_interval( Interval(int(a[1])+1,int(a[2])) )
    p.communicate()
    print >> sys.stderr, 'Done'

    # process overlapping chromosome blocks
    for pc_file in glob.glob('%s/chr*' % cons_dir):
        process_file(chr_features, pc_file)
################################################################################
# intersect_scores
#
# Print out block scores overlapping features.
################################################################################
def intersect_scores(features, block_start, block_scores):
block_end = block_start+len(block_scores)-1
for overlap_interval in features.find(block_start, block_start+len(block_scores)):
# block internal to invterval
if overlap_interval.start <= block_start <= block_end <= overlap_interval.end:
start = 0
end = len(block_scores)
# interval internal to block
elif block_start <= overlap_interval.start <= overlap_interval.end <= block_end:
start = overlap_interval.start - block_start
end = start + overlap_interval.end - overlap_interval.start + 1
# left block overlap interval
elif block_start < overlap_interval.start:
start = overlap_interval.start - block_start
end = start + block_end - overlap_interval.start + 1
# right block overlap interval
else:
start = 0
end = overlap_interval.end - block_start + 1
print '\n'.join([str(s) for s in block_scores[start:end]])
################################################################################
# process_file
#
# Process overlapping chromosome blocks in the given file.
################################################################################
def process_file(chr_features, pc_file):
    """Stream one per-chromosome wiggle file and print overlapping scores.

    Accepts a plain file, an explicit '.gz' path, or a base path whose '.gz'
    variant exists.
    """
    # NOTE(review): if none of these three branches match, pc_f is never
    # bound and the readline() below raises NameError.
    if pc_file[-2:] == 'gz':
        pc_f = gzip.open(pc_file)
    elif os.path.isfile(pc_file):
        pc_f = open(pc_file)
    elif os.path.isfile(pc_file+'.gz'):
        pc_f = gzip.open(pc_file+'.gz')

    chrom = os.path.split(pc_file)[1].split('.')[0]
    print >> sys.stderr, 'Processing %s ...' % chrom,

    block_start = 0
    block_scores = []

    line = pc_f.readline()
    while line:
        if line.startswith('fixedStep'):
            # Flush the scores collected so far before starting a new block.
            if block_scores:
                intersect_scores(chr_features.get(chrom, IntervalTree()), block_start, block_scores)
            # Header layout: 'fixedStep chrom=<c> start=<s> ...'.
            a = line.split()
            chrom = a[1][6:]
            block_start = int(a[2][6:])
            block_scores = []
        else:
            block_scores.append(float(line.rstrip()))
        line = pc_f.readline()

    # flush the final block
    intersect_scores(chr_features.get(chrom, IntervalTree()), block_start, block_scores)

    pc_f.close()
    print >> sys.stderr, 'Done'
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
    # Script entry point.
    main()
| [
"cshukla@fas.harvard.edu"
] | cshukla@fas.harvard.edu |
8678308b1d08ffdf9b7474cb19c0ca2bdc0de660 | 7349ad853e12b7c6ff1ab745d37a4def19bb4fc5 | /sd.py | 6ce20a4782875056ab156f7593f7b280cc65b37a | [] | no_license | hjpython/uumnt | 1c70ec6767de7c214720809e373d9cc1530ae902 | 7558a10e63fec8a1dfa00ad7b4b4c76261484622 | refs/heads/master | 2020-04-10T08:53:09.267205 | 2018-03-08T05:01:37 | 2018-03-08T05:01:37 | 124,268,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,055 | py | #!/usr/bin/env python
# coding=utf-8
import urllib.request
from bs4 import BeautifulSoup
import os
import urllib.error
import shutil
import re
from config import dir
def xiazai_uumnt(url):
    """Download every image page of a uumnt gallery, given its first-page URL.

    Images are written into '<dir><title>_<pages>' ('dir' is imported from
    config and shadows the builtin).
    """
    html = urllib.request.urlopen(url).read()
    # Gallery heading; the regexes below assume it ends in a '(x/total)' tag.
    title0 = BeautifulSoup(html,'lxml').find("div",{"class":"bg-white p15 center imgac clearfix"}).find("h1",{"class":"center"}).get_text()
    pattern = re.compile(".*\(")
    title = pattern.findall(title0)
    title = title[0]
    title = title[:-1]  # drop the trailing '(' captured by the pattern
    print(title)
    # Extract the total page count from the heading's suffix.
    page = title0[-6:]
    pattern = re.compile("\/\d*")
    page = pattern.findall(page)[0]
    pattern = re.compile("\d*")
    page = pattern.findall(page)[1]
    print("共"+page+"页")
    # Create the download directory; wipe and recreate it if it exists.
    # NOTE(review): the bare except also swallows unrelated OS errors.
    try:
        os.makedirs(dir + title + '_' + page)
    except:
        shutil.rmtree(dir + title + '_' + page)
        os.makedirs(dir + title + '_' + page)
    # The first image comes from the landing page itself.
    html = urllib.request.urlopen(url).read()
    picurl = BeautifulSoup(html,'lxml').find("div",{"class": "bg-white p15 center imgac clearfix"}).find("img")["src"]
    print(picurl)
    img = urllib.request.urlopen(picurl).read()
    f = open(dir + title + '_' + page + "\\" + "1.jpg", "wb")
    f.write(img)
    f.close()
    # Remaining pages follow the '<base>_<i>.html' URL pattern.
    after = int(page) + 1
    for i in range(2, after):
        try:
            url0 = url[:-5]
            url1 = url0 + '_' + str(i) + '.html'
            html = urllib.request.urlopen(url1).read()
            picurl = BeautifulSoup(html,'lxml').find("div", {"class": "bg-white p15 center imgac clearfix"}).find("img")["src"]
            print(picurl)
            img = urllib.request.urlopen(picurl).read()
            f = open(dir + title + '_' + page + "\\" + str(i) + ".jpg", "wb")
            f.write(img)
            f.close()
        except urllib.error.URLError as e:
            # Skip pages that fail to download and report why.
            if hasattr(e, "code"):
                print(e.code)
                continue
            elif hasattr(e, "reason"):
                print(e.reason)
                continue
if __name__ == '__main__':
    # Repeatedly prompt for gallery URLs until the process is interrupted.
    while True:
        url = input("请输入网址:")
        xiazai_uumnt(url)
| [
"834775954@qq.com"
] | 834775954@qq.com |
58532740544e8db357b9a96eeef8013a0260304c | 3c05d36c15d5f2473ebf8217902fdf38b2ed0f69 | /portfolio/settings.py | b2dfb8ff7781aa3dbb22ce4d34a5b3f637e48936 | [] | no_license | eljihaoui/django_firstProject | 69476457d2f337e346ad50605bce1515d0831950 | ff98894bed4d19ad96caef917fef7a5c3070f67d | refs/heads/master | 2023-06-06T19:40:06.509070 | 2021-07-21T18:34:56 | 2021-07-21T18:34:56 | 387,951,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,987 | py |
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'django-insecure-%5k*wbapl5#c4dan^r%g)077vd#($ajeqtg)47p=qzs_m8nv@b'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []  # with DEBUG off, the served hostnames must be listed here
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'projects.apps.ProjectsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates/'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        # Local SQLite file in the project root -- fine for development.
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"eljihaoui@gmail.com"
] | eljihaoui@gmail.com |
26e8321387d7acb7136a76bcf11db56c990ad589 | 2ff7e53d5e512cd762217ca54317982e07a2bb0c | /notifications/client/controls/notificationScrollContainer.py | 430068214b444982858c2a3b313c78d2ca6a9572 | [] | no_license | nanxijw/Clara-Pretty-One-Dick | 66d3d69426642b79e8fd4cc8e0bec23adeeca6d6 | 50de3488a2140343c364efc2615cf6e67f152be0 | refs/heads/master | 2021-01-19T09:25:07.555284 | 2015-02-17T21:49:33 | 2015-02-17T21:49:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,742 | py | #Embedded file name: notifications/client/controls\notificationScrollContainer.py
from carbonui.control.scrollContainer import ScrollContainer
from carbonui.primitives.base import ReverseScaleDpi, ScaleDpiF
from carbonui.primitives.container import Container
import carbonui.const as uiconst
from carbonui.primitives.frame import Frame
class NotificationScrollContainer(ScrollContainer):
    """Scroll container that lazily loads notification entries.

    Entries are only told to load their content while they intersect the
    visible clip area, keeping long notification lists cheap.
    """
    entryLoadEnabled = True       # when False, visibility scanning is skipped
    contentHeight = 0             # running total height of inserted entries
    mainContTopHeight = (0, 0)    # last observed (top, height) of mainCont

    def ApplyAttributes(self, attributes):
        ScrollContainer.ApplyAttributes(self, attributes)
        # Replace the stock mainCont with one whose resizes we can observe.
        self.mainCont.Close()
        self.mainCont = Container(name='mainCont', parent=self.clipCont, state=uiconst.UI_NORMAL, align=uiconst.TOPLEFT)
        self.mainContTopHeight = (0, 0)
        self.mainCont._OnResize = self._OnMainContResize

    def EnableEntryLoad(self):
        """Resume lazy loading and refresh whatever is currently visible."""
        self.entryLoadEnabled = True
        self.LoadVisibleEntries()

    def DisableEntryLoad(self):
        self.entryLoadEnabled = False

    def _OnMainContResize(self, *args):
        # Only rescan visibility when position or size actually changed.
        newTopHeight = (self.mainCont.top, self.mainCont.height)
        if newTopHeight != self.mainContTopHeight:
            self.mainContTopHeight = newTopHeight
            self.LoadVisibleEntries()

    def LoadVisibleEntries(self):
        if not self.entryLoadEnabled:
            return
        for each in self.mainCont.children:
            self.LoadEntryIfVisible(each)

    def LoadEntryIfVisible(self, entry):
        """Show and load *entry* if it intersects the clip area, else hide it."""
        topOffset = self.mainCont.top
        visibleHeight = ReverseScaleDpi(self.clipCont.displayHeight)
        if topOffset + entry.top + entry.height >= 0 and topOffset + entry.top <= visibleHeight:
            entry.UpdateAlignmentAsRoot()
            entry.LoadContent()
            entry.display = True
        else:
            entry.display = False

    def _OnVerticalScrollBar(self, posFraction):
        # Clamp to [0, 1], then translate the fraction into a negative offset
        # over the scrollable range (content height minus viewport height).
        posFraction = max(0.0, min(posFraction, 1.0))
        self.mainCont.top = -posFraction * (self.mainCont.height - ReverseScaleDpi(self.clipCont.displayHeight))

    def _InsertChild(self, idx, obj):
        """Insert *obj* at child index *idx* and grow the content area."""
        self.mainCont.children.insert(idx, obj)
        contentWidth = ReverseScaleDpi(self.displayWidth)
        minContentHeight = ReverseScaleDpi(self.clipCont.displayHeight)
        self.mainCont.width = contentWidth
        # NOTE(review): the new entry is always positioned at the bottom even
        # when idx is not the end of the child list.
        obj.top = self.contentHeight
        obj.width = contentWidth
        obj.displayY = ScaleDpiF(self.contentHeight)
        obj.displayWidth = ScaleDpiF(contentWidth)
        self.contentHeight += obj.height
        self.mainCont.height = max(minContentHeight, self.contentHeight)
        self._UpdateScrollbars()
        self.LoadEntryIfVisible(obj)

    def Flush(self):
        ScrollContainer.Flush(self)
        self.contentHeight = 0
self.contentHeight = 0
| [
"billchang.e@gmail.com"
] | billchang.e@gmail.com |
8617b2af3fc5d297b7ad8cb3f0f00ce55eeabe2b | cdbcb7fe84f5d794ac1f58203a4d4024e30270dc | /Lista_de_Exercício_07/ex004.py | 5992a3c1351219db66b539fd84e050b3e198de80 | [] | no_license | AdriandersonLira/APE | 13c8bbb9c698d546a3a4dc19501f1b865aa9844d | b96df3817645aa6807c8f30772dbd4dfdd7e8806 | refs/heads/master | 2021-07-23T06:21:36.707886 | 2020-06-14T01:47:39 | 2020-06-14T01:47:39 | 182,209,398 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | vetor = [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 ]
for i in range(20):
vetor.append(i)
print(vetor)
K = int(input('Digite um número presente no vetor: '))
cont = 0
for i in vetor:
cont += 1 if i == K else 0
print(f'A quantidade de vezes do número {K} no vetor é {cont}')
| [
"noreply@github.com"
] | AdriandersonLira.noreply@github.com |
f90347b536b1d2ea8d4c92b4034283fd86ea967b | 34b0eeca063fc91ce72fbc7b147063e2bbc9d5c1 | /porter/utils/trace.py | a62f065a8c81d0a09739cb65e91db231d7d5a21e | [] | no_license | LeadmapRobot/EasyLayout | 9f5131fb10f0fb2684c9fd37c71cc109aa84b993 | bc8eca6416179c9e3194f6ebeb5da62e7f644455 | refs/heads/main | 2023-06-28T12:41:57.463504 | 2021-08-03T01:01:17 | 2021-08-03T01:01:17 | 391,826,271 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: UTF-8 -*-
"""
@time:2020-12-21 16:53
@author:zhang
@file:trace.py
"""
import sys
import traceback
import os
def currentFrame():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
def findCaller(srcfile):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
frame = currentFrame()
# On some versions of IronPython, currentFrame() returns None if
# IronPython isn't run with -X:Frames.
if frame is not None:
frame = frame.f_back
fln = "(unknown file)", 0, "(unknown function)"
while hasattr(frame, "f_code"):
code = frame.f_code
filename = os.path.normcase(code.co_filename)
if filename == srcfile:
frame = frame.f_back
continue
fln = (code.co_filename, frame.f_lineno, code.co_name)
break
return fln
def gettraceback():
'''
获取堆栈
'''
return traceback.format_exc()
def printtraceback():
'''
打印堆栈
'''
traceback.print_exc()
| [
"simoncoming321@163.com"
] | simoncoming321@163.com |
2bb6dc23e539eb0a385056f65a9238fe32bbaf46 | 416c02f975c23b32b8a9b525753fe441fe5ca609 | /getSetu/getSetuFromPoi.py | 94684be83469755c52319a5b54c15cf7e9c802f1 | [] | no_license | Moonenzyw/MyGit | d7b70984d3bffe65b05da6a598034360b37a8490 | fd348dfd8263475d6b68b4c2f6c47e3d28d72646 | refs/heads/master | 2020-05-15T21:57:17.049262 | 2019-04-23T12:58:31 | 2019-04-23T12:58:31 | 182,513,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,888 | py | import requests
import time
import random
import selenium,time
from urllib.request import urlretrieve
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
option = webdriver.ChromeOptions()
option.add_argument(r"user-data-dir=C:\Users\悠唯\AppData\Local\Google\Chrome\User Data");
driver = webdriver.Chrome(options=option);
driver.get("https://poi5.com/index?category=4")
time.sleep(3);
everyday = driver.find_elements_by_xpath("//div[@class='post-list__item clearfix']//a[@target='_blank']");
numlist = []
for ss in everyday:
numlist.append(ss.get_attribute("href"));
print(numlist);
hh = 22;
while hh < len(numlist):
number = numlist[hh];
print(hh,number);
driver.get(number);
time.sleep(5);
urllist = [];
print("111");
preview = driver.find_elements_by_xpath("//div[@class='preview']//img");
i = 0;
for each in preview:
i += 1;
url = each.get_attribute("src");
print(url);
url = url.replace('/t/','/a/');
url = url.replace('png','jpg');
urllist.append(url);
print("end");
count = 0
cookie = '_ga=GA1.2.1203952754.1555931011; _gid=GA1.2.1246918588.1555931011; sentinel=eyJpdiI6ImVCc1lDTzZ2cUozVlJxM1FPenNjV3c9PSIsInZhbHVlIjoiMjRoQkxLZ3NXdlRRUGNRdGgzbGdyaWhLV1wvOUE1Z3FUMkp0S0pLS3hxZTdnK1lFXC9yck5yZVc4c1ordUpnMHJsIiwibWFjIjoiMzUxOGE0YjQ2NzZjZTM1YmM2OTdmY2ZkYjY4Nzk0MmRjN2ZmMjE0YjIzMzc1MjI5ZGUyZDMxMWRmZTc1ODQ0ZiJ9; auth_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOjM1NTgwLCJpc3MiOiJodHRwczovL3BvaTUuY29tL2luZGV4IiwiaWF0IjoxNTU1OTMzNjk2LCJleHAiOjE1NTU5NTUyOTYsIm5iZiI6MTU1NTkzMzY5NiwianRpIjoidW9lQUNQSmFqS295bURndyJ9.Sjgld3LlOyp1CZYDUW3g94CbHqmyXnpsE8o9YTqB0AA; poi_session=eyJpdiI6IitYVWViV1N3bU5hdlRzd2NKeDNBeXc9PSIsInZhbHVlIjoiZGhRZTdxQlNRQ0dZbnZrQUN3VDJOd2hyVWJoXC9KSVlHVEFmQTNobFZqNDFWSUpOREVjMDNoMnZua1gyTTNzS2QiLCJtYWMiOiIzYTcyZDUxNzQzYzJkYzU2MDc3NTgzYzkwYWU2Yzg5ZmUzM2M2ZmVjODJmOGE5NWI0NjlmOGI4MTM2YzE3ZGVlIn0%3D; st=1555936222'
header = {
'authority': 'poi5.com',
'scheme': 'https',
'method' : 'GET',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'cache-control': 'max-age=0',
'cookie': cookie,
'referer': 'https://poi5.com/index',
'upgrade-insecure-requests': '1',
'user-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
}
i = 0;
for url in urllist:
i += 1;
html = requests.get(url,headers=header);
print(html);
with open("img/" + str(hh).zfill(3) + '_' + str(i).zfill(3) + '.jpg', 'wb') as file:
file.write(html.content);
hh += 2;
| [
"903284186@qq.com"
] | 903284186@qq.com |
09d0537ffaab646ff46697a37d17dc689807fc0b | f6b96d2c534f3d7c7f44630290cbcfc678e41dd7 | /base_auth/settings/slack.py | d604d1200875f2f8b70dde98d07fe55b02bd7d8c | [] | no_license | bitclave/base-auth | 081844f1e7bf27208c6988013e9b8605a07a24b6 | 9822f30f49c18796d3d8f85be85c56dae714ba33 | refs/heads/master | 2020-03-08T04:22:39.438898 | 2018-05-03T09:45:12 | 2018-05-03T09:45:12 | 127,919,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | import os
if 'SLACK_INCOMING_WEBHOOK_URL' in os.environ:
SLACK_INCOMING_WEBHOOK_URL = os.environ['SLACK_INCOMING_WEBHOOK_URL']
else:
SLACK_INCOMING_WEBHOOK_URL = None
| [
"rremizov@yandex.ru"
] | rremizov@yandex.ru |
794aec261c5a2a0b22f17b996021749049c4c913 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200321145335.py | eca006aa8c88a49574cbf23f7e9263b1bfe377c0 | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | import os
import subprocess
import re
from datetime import datetime
import time
process = subprocess.Popen(['./algo_tabou.exe', '1000', '1000', '50', 'distances_entre_villes_50.txt'],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
result = st
print(result)
| [
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
45fa1969ec95844186a60ffdbb41878168324e8d | ba4ba997668d5af029dcbe3f41c3f76b0605127e | /LeetCode/code/00000-00500/00234.py | 9d9a474f892aa6890e8db1576c725a8b0756de4a | [] | no_license | qlhai/Algorithms | d928db120c3f3f948292f6ac02ae244c72687f59 | dec5a7fd448892855f2f5b0565ea53c9de147158 | refs/heads/master | 2021-06-06T17:02:33.383846 | 2020-09-23T04:38:51 | 2020-09-23T04:38:51 | 121,399,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
# list
def isPalindrome_1(self, head: ListNode) -> bool:
arr = []
while head:
arr.append(head.val)
head = head.next
i = 0
j = len(arr) - 1
while i <= j:
if arr[i] == arr[j]:
i += 1
j -= 1
else:
return False
return True
# reverse linklist
def isPalindrome_2(self, head: ListNode) -> bool:
if not head:
return True
if not head.next:
return True
slow = head
fast = head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
a = head
b = self.reverse_linklist(slow)
res = True
while res and a and b:
if a.val != b.val:
res = False
a = a.next
b = b.next
return res
@staticmethod
def reverse_linklist(head):
pre = None
cur = head
while cur:
next_node = cur.next
cur.next = pre
pre = cur
cur = next_node
return pre
| [
"qlhai@outlook.com"
] | qlhai@outlook.com |
2b72226cd0f2b4dcb48cd7a8e73d627f3db7c99a | 505f1f4e4cd2ba9190fbdcf347810e2540580b62 | /webscarpingtest/env/bin/pip | d68eb3bd5f47ffe8db5ec8a614242f3937999383 | [] | no_license | shrawanphuyal/jenkinsautobuild | e634e5a5f005137627e59dd0fca2cc4a6f19fa35 | 59fb888a9743afc4b9f93251f1676a34f6cbf4cc | refs/heads/master | 2020-04-29T10:35:29.397545 | 2019-03-17T06:56:30 | 2019-03-17T06:56:30 | 176,066,746 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | #!/home/shrawan/PycharmProjects/webscarpingtest/env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"phuyalshrawan@gmail.com"
] | phuyalshrawan@gmail.com | |
0455b48ff2f31febe500d714c46ce1a8c97ec196 | b9a2fa858dc9e68cd518bef40ab44d3dc78e134a | /myapp/migrations/0004_user_read.py | cddba480735b3052c5b84d1e7cc303941961a315 | [] | no_license | Nyppy/test_project | a796880f23fd937110ced941f219c3f832db113e | 41f78f4c20820d7f149e2e3f7e93d35897f0e45a | refs/heads/master | 2020-07-23T17:39:15.400746 | 2019-09-18T15:55:51 | 2019-09-18T15:55:51 | 206,528,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | # Generated by Django 2.2.3 on 2019-08-13 18:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myapp', '0003_user_auth_one'),
]
operations = [
migrations.CreateModel(
name='User_read',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('email', models.CharField(max_length=100)),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
],
),
]
| [
"Yaroslaw131Dan3"
] | Yaroslaw131Dan3 |
f4aebe99d8570a819efb4c5312fa10917a1b5f3e | 7f796e0d1b6bfebe27efc165d4683bb586fec3bc | /travello/migrations/0002_destination_price.py | c09f6c4c5a94477ba8579e6cba30f2938a82bab3 | [] | no_license | akulacharan/telusko | 1b984272bb33d4787ed73ab0c460608c6ca73f20 | 9404a890eafbaefb836c5c89af155fd0baa8f1b6 | refs/heads/master | 2022-11-08T22:32:36.730400 | 2020-06-28T06:42:36 | 2020-06-28T06:42:36 | 275,524,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.5 on 2020-05-02 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('travello', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='destination',
name='price',
field=models.IntegerField(default=0),
preserve_default=False,
),
]
| [
"akulacharanteja@gmail.com"
] | akulacharanteja@gmail.com |
884688b3adf0caa413f15e029f27f536ba592ce5 | c89ab83cebb52a5160faa2786120604b34e42de5 | /steps/parseconfig.py | a518a18c683415df36be834298a7ecd96b7b0ac7 | [
"LicenseRef-scancode-public-domain",
"CC-BY-3.0"
] | permissive | zymos/nature_emulator | 8f3ddba64fafd8083231d3c54bcb0b77e5870a96 | c79431ad63dc607f8c196ca26f9829b0fc82ed1c | refs/heads/master | 2021-01-20T15:37:05.525860 | 2015-07-05T16:15:25 | 2015-07-05T16:15:25 | 35,963,076 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from ConfigParser import SafeConfigParser
def config_get_true_false(parameter, value, default):
if value == 1 || value == "1" || value == "true" || value == "True" || value == "TRUE":
return True
elif value == 0 || value == "0" || value == "false" || value == "False" || value == "FALSE":
return False
else:
print "Error: Variable \"%s\" is set to \"%s\", it should be set to \"True\" or \"False\", by default setting %s=%s" % (parameter, value, parameter, default)
return default
parser = SafeConfigParser()
parser.read('config.ini')
print parser.get('Basic', 'location_city')
print parser.get('Play_Times', 'start_time_dawn')
print parser.getint('Advanced', 'audio_clip_crossfade_time_ms')
| [
"zymos@users.noreply.github.com"
] | zymos@users.noreply.github.com |
d46ae4af82e91b0bdd0ff6d2df803a5734c56909 | 4776496cfec4a9652cd1e95deda88318e126041f | /manage.py | 3076eb1166bc730642f3c5a0fdf2ccf7cdcee90e | [] | no_license | boblannon/shelter_cms | c597b9cbd0f5079bc7b3a6e957522fef752743bc | 52e187954cb160b4c8bcdbaf343f4cb410c30c38 | refs/heads/master | 2021-01-15T17:09:40.657712 | 2013-07-23T09:00:34 | 2013-07-23T09:00:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sheltr_cms.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"blannon@gmail.com"
] | blannon@gmail.com |
d7c07a67dd247b1d62d0b925177b19405cb961a8 | f308487cb622252e17e39fddac6104177ef0b431 | /Reverse/time/cracker.py | 6574b5ed369571b3c55dc594309a9c78c7aa5e97 | [] | no_license | manwithhonor/CTF | c52448744c754709c3489ffc95cb40d2e9dc2f1f | 51cec7da00439f74cd975fa26eb3bc0b27b72622 | refs/heads/master | 2020-05-27T14:39:23.753336 | 2019-05-22T16:01:29 | 2019-05-22T16:01:29 | 188,662,909 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | with open('source', 'rb') as f1:
with open('cracked', 'wb') as f2:
f2.write(f1.read().replace(b'\x00\xca\x9a\x3b', b'\x00\xca\x9a\x77'))
| [
"nagov-mark@mail.ru"
] | nagov-mark@mail.ru |
17049ed6b3705fb01190a0f5025f74cd714f8f3e | 1b26d0023ad7eb302a9dd7d0d6696ef836c17c05 | /HyeonJinGithub/2020-10-07/12100 2048 (Easy).py | 20832b880e18018074fca0d6cc9aa019aaeb8ae8 | [
"MIT"
] | permissive | Team-NTO/NTO | 93e643ddd3c6cad308f1f984aaa9abc43d9e3bb8 | 133f19e1e15e423589bd7b94b698d2afc76c3ef6 | refs/heads/master | 2023-06-23T06:26:16.374869 | 2021-07-11T06:43:08 | 2021-07-11T06:43:08 | 298,460,899 | 1 | 3 | MIT | 2021-07-11T06:43:09 | 2020-09-25T03:47:51 | Java | UTF-8 | Python | false | false | 2,831 | py | import sys
from copy import deepcopy
def print_max(arr):
global res
for i in range(len(arr)):
for j in range(len(arr[i])):
res = max(res, arr[i][j])
def dfs(tmp_board, n):
if n == 5:
print_max(tmp_board)
return
dfs(move_left(deepcopy(tmp_board)), n + 1)
dfs(move_right(deepcopy(tmp_board)), n + 1)
dfs(move_up(deepcopy(tmp_board)), n + 1)
dfs(move_down(deepcopy(tmp_board)), n + 1)
def move_left(board):
for i in range(N):
p = 0
x = 0
for j in range(N):
if board[i][j] == 0: continue
if x == 0:
x = board[i][j]
else:
if x == board[i][j]:
board[i][p] = x * 2
x = 0
p += 1
else:
board[i][p] = x
x = board[i][j]
p += 1
board[i][j] = 0
if x != 0: board[i][p] = x
return board
def move_right(board):
for i in range(N):
p = N - 1
x = 0
for j in range(N - 1, -1, -1):
if board[i][j] == 0: continue
if x == 0:
x = board[i][j]
else:
if x == board[i][j]:
board[i][p] = x * 2
p -= 1
x = 0
else:
board[i][p] = x
p -= 1
x = board[i][j]
board[i][j] = 0
if x != 0: board[i][p] = x
return board
def move_up(board):
for i in range(N):
p = 0
x = 0
for j in range(N):
if board[j][i] == 0: continue
if x == 0:
x = board[j][i]
else:
if x == board[j][i]:
board[p][i] = x * 2
p += 1
x = 0
else:
board[p][i] = x
p += 1
x = board[j][i]
board[j][i] = 0
if x != 0: board[p][i] = x
return board
def move_down(board):
for i in range(N):
p = N - 1
x = 0
for j in range(N - 1, -1, -1):
if board[j][i] == 0: continue
if x == 0:
x = board[j][i]
else:
if x == board[j][i]:
board[p][i] = x * 2
p -= 1
x = 0
else:
board[p][i] = x
p -= 1
x = board[j][i]
board[j][i] = 0
if x != 0: board[p][i] = x
return board
if __name__ == '__main__':
N = int(input())
a = [[int(x) for x in sys.stdin.readline().split()] for _ in range(N)]
res = 0
dfs(a, 0)
print(res) | [
"skymilk95@naver.com"
] | skymilk95@naver.com |
eab167304f0c65adf4c49183eb68a6f345882f21 | bfd5ff37c1779a1c958ff4aaf95f7abb97627d99 | /blog/models.py | 805c54a67b3a6dd7077fd2749729ff14be96b57b | [] | no_license | NassahMaimunah/my-first-blog | a158eab3ea6fefd24d4e24e3111c856e02cfe8c4 | b82a88478ea5c712ced6bee54be54cccda3d9cd7 | refs/heads/master | 2021-05-03T09:35:06.419261 | 2016-10-29T13:40:19 | 2016-10-29T13:40:19 | 72,284,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from django.db import models
from django.utils import timezone
class Post(models.Model):
author = models.ForeignKey('auth.User')
title = models.CharField(max_length=200)
text = models.TextField()
created_date = models.DateTimeField(
default=timezone.now)
published_date = models.DateTimeField(
blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
| [
"nassahmaimunah@gmail.com"
] | nassahmaimunah@gmail.com |
9f43fb14f4f02ba8751b22c2c72d7895d7e04ad5 | 6f08f6390a584d51a815bb767ea203387bab8db5 | /bin/AlleleFinder | 1ba61f358c14ac55cdf2ee98aa3f71803b77db73 | [] | no_license | longrc/AlleleFinder | 23a2ae34b028023a69a9a1e696e7741c5203855d | d56166ba47a1b88994c93f1d3a2cfdf796bc8ff0 | refs/heads/master | 2023-01-21T15:03:34.420401 | 2020-12-01T09:00:43 | 2020-12-01T09:00:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,817 | #!/usr/bin/env python
import sys
import os
import argparse
import time
import allele_backbone as ab
import allele_gmap as ag
import allele_blast as abl
def time_print(info):
print("\033[32m%s\033[0m %s"%(time.strftime('[%H:%M:%S]',time.localtime(time.time())), info))
def get_opts():
group = argparse.ArgumentParser()
group.add_argument('-m', '--mono', help="reference mono fasta", required=True)
group.add_argument('-d', '--mono_cds', help="CDS fasta of mono", required=True)
group.add_argument('-f', '--mono_gff3', help="GFF3 file of mono", required=True)
group.add_argument('-c', '--cds', help="CDS fasta of polyploid", required=True)
group.add_argument('-g', '--gff3', help="GFF3 file of polyploid", required=True)
group.add_argument('-n', '--num_allele', help="number of allele", type=int, required=True)
group.add_argument('-b', '--blast_count', help="blast count, default: 2", type=int, default=2)
group.add_argument('-i', '--blast_identity', help="threshold of blast identity, default: 80", type=float, default=80)
group.add_argument('-e', '--TE', help="TE gff3 for filtering, default: \"\"", default="")
group.add_argument('-j', '--TE_overlap', help="threshold of TE overlap, default: 0.3, only effect when TE is not NULL", type=float, default=0.3)
group.add_argument('-w', '--workdir', help="workdir, default: wrkdir", default="wrkdir")
group.add_argument('-t', '--threads', help="threads, default: 12", default=12, type=int)
return group.parse_args()
def AlleleFinder(mono, mono_cds, mono_gff3, cds, gff3, na, blast_count, iden_thres, TE, TE_thres, wrkdir, threads):
if not os.path.exists(wrkdir):
os.mkdir(wrkdir)
mono = os.path.abspath(mono)
mono_cds = os.path.abspath(mono_cds)
mono_gff3 = os.path.abspath(mono_gff3)
cds = os.path.abspath(cds)
gff3 = os.path.abspath(gff3)
if TE != "":
TE = os.path.abspath(TE)
script_dir = sys.path[0]
time_print("Entering: %s"%wrkdir)
os.chdir(wrkdir)
time_print("Step1: running MCScanX")
mcs_dir = "01.mcscanx/xyz/xyz.html"
tandem_file = os.path.abspath("01.mcscanx/xyz/xyz.tandem")
hap_blast_file = os.path.abspath("01.mcscanx/xyz/xyz.blast")
if not os.path.exists(mcs_dir):
cmd = "python %s/run_MCScanX.py %s %s 01.mcscanx %d &> /dev/null"%(script_dir, cds, gff3, threads)
time_print("\tRunning: %s"%cmd)
os.system(cmd)
else:
time_print("\tMCScanX result found, skip")
time_print("Step2: running gmap")
gmap_res = "02.gmap/gmap.gff3"
if not os.path.exists(gmap_res):
cmd = "python %s/run_gmap.py %s %s %d 02.gmap %d &> /dev/null"%(script_dir, mono, cds, na, threads)
time_print("\tRunning: %s"%cmd)
os.system(cmd)
else:
time_print("\tGmap result found, skip")
time_print("Step3: Generating first allele table")
if not os.path.exists("backbone.csv"):
time_print("\tLoading MCScanX results")
base_allele = []
for fn in os.listdir(mcs_dir):
full_fn = os.path.join(mcs_dir, fn)
tmp_allele = ab.get_allele_with_mcscanx(full_fn)
base_allele.extend(tmp_allele)
time_print("\tLoading GMAP result")
gff3_db, gene_order = ag.read_gff3(gmap_res)
gff3_allele = ag.allele_gmap(gff3_db, threads)
base_allele.extend(gff3_allele)
time_print("\tWriting allele list backbone")
gff3_db, gene_order = ag.read_gff3(gff3)
base_allele = ab.split_allele(base_allele, gene_order)
base_allele = ab.merge_allele(base_allele)
with open("backbone.csv", 'w') as fout:
for allele in sorted(base_allele):
fout.write("%s\n"%(",".join(sorted(allele))))
else:
time_print("\tallele list backbone found, loading")
base_allele = []
with open("backbone.csv", 'r') as fin:
for line in fin:
base_allele.append(line.strip().split(','))
backbone = os.path.abspath("backbone.csv")
final_allele = base_allele
time_print("Step4: running blast")
if not os.path.exists("03.blast"):
os.mkdir("03.blast")
time_print("\tEntering: blast")
os.chdir("03.blast")
for i in range(0, blast_count):
time_print("\tStarting iteration %02d"%(i+1))
outpre = "iter%02d"%(i+1)
single_fa = outpre+"_single.fa"
multi_fa = outpre+"_multi.fa"
out_blast = outpre+".blast"
if not(os.path.exists(single_fa) and os.path.exists(multi_fa)):
cmd = "python %s/split_fasta_with_allele.py %s %s %s"%(script_dir, cds, backbone, outpre)
time_print("\tRunning command: %s"%cmd)
os.system(cmd)
else:
time_print("\tIter %02d, Fasta file found, skip"%(i+1))
if not os.path.exists(out_blast):
cmd1 = "makeblastdb -in %s -dbtype nucl -out blastdb%02d &> /dev/null"%(multi_fa, i+1)
cmd2 = "blastn -query %s -db blastdb%02d -out %s -evalue 1e-3 -outfmt 6 -num_alignments 1 -num_threads %d &> /dev/null"%(single_fa, i+1, out_blast, threads)
time_print("\tRunning command: %s"%cmd1)
os.system(cmd1)
time_print("\tRunning command: %s"%cmd2)
os.system(cmd2)
else:
time_print("\tIter %02d, blast file found, skip"%(i+1))
final_allele.extend(abl.allele_blast(out_blast, iden_thres))
final_allele = ab.merge_allele(final_allele)
backbone = outpre+".csv"
if not os.path.exists(backbone):
with open(backbone, 'w') as fout:
for allele in sorted(final_allele):
fout.write("%s\n"%(",".join(sorted(allele))))
time_print("\tLeaving: 03.blast")
os.chdir('..')
time_print("Step5: Writing allele table")
with open("allele.csv", 'w') as fout:
for allele in sorted(final_allele):
fout.write("%s\n"%(",".join(sorted(allele))))
time_print("Step6: Adjusting with mono annotation")
if not(os.path.exists("04.mono_adjust")):
os.mkdir("04.mono_adjust")
time_print("\tEntering: 04.mono_adjust")
os.chdir("04.mono_adjust")
mono_fn = mono_cds.split('/')[-1].split('.')[0]
hap_fn = cds.split('/')[-1].split('.')[0]
out_blast = hap_fn+'.vs.'+mono_fn+'.blast'
if not os.path.exists(out_blast):
cmd1 = "makeblastdb -in %s -dbtype nucl -out blastdb &> /dev/null"%(mono_cds)
cmd2 = "blastn -query %s -db blastdb -out %s -evalue 1e-3 -outfmt 6 -num_alignments 1 -num_threads %d &> /dev/null"%(cds, out_blast, threads)
time_print("\tRunning command: %s"%cmd1)
os.system(cmd1)
time_print("\tRunning command: %s"%cmd2)
os.system(cmd2)
else:
time_print("\tBlast file found, skip")
out_blast = os.path.abspath(out_blast)
time_print("Leaving: 04.mono_adjust")
os.chdir("..")
allele_file = "allele.adjusted.txt"
if not os.path.exists(allele_file):
cmd = "python %s/adjust_allele_table_with_mono.py allele.csv %s %s %s %s %f %s %d allele.adjusted.txt &> /dev/null"%(script_dir, mono_gff3, gff3, out_blast, hap_blast_file, iden_thres, tandem_file, na)
time_print("\tRunning command: %s"%cmd)
os.system(cmd)
else:
time_print("\tallele.adjusted.txt found, skip")
step_idx = 7
if TE != "":
time_print("Step7: Filtering with TEs")
allele_file = "allele.adjusted.nonTEs.txt"
if not os.path.exists(allele_file):
cmd = "python %s/filter_with_TE.py allele.adjusted.txt %s %s %f allele.adjusted.nonTEs.txt &> /dev/null"%(script_dir, gff3, TE, TE_thres)
time_print("\tRunning command: %s"%cmd)
os.system(cmd)
step_idx += 1
else:
time_print("\tallele.adjusted.nonTEs.txt found, skip")
time_print("Step%d: Statistics"%step_idx)
stat_pre = '.'.join(allele_file.split('.')[:-1])
cmd = "python %s/stat_allele_info.py %s %s %s &> /dev/null"%(script_dir, allele_file, gff3, stat_pre)
time_print("\tRunning command: %s"%cmd)
os.system(cmd)
time_print("Finished")
if __name__ == "__main__":
opts = get_opts()
mono = opts.mono
mono_cds = opts.mono_cds
mono_gff3 = opts.mono_gff3
cds = opts.cds
gff3 = opts.gff3
na = opts.num_allele
blast_count = opts.blast_count
iden_thres = opts.blast_identity
TE = opts.TE
TE_thres = opts.TE_overlap
wrkdir = opts.workdir
threads = opts.threads
AlleleFinder(mono, mono_cds, mono_gff3, cds, gff3, na, blast_count, iden_thres, TE, TE_thres, wrkdir, threads)
| [
"765550282@qq.com"
] | 765550282@qq.com | |
c547e795cb5f9868e237afc5db898ea2928b517c | d41cf32bd1ff18eca4ca8bfc2908eeb48b0df959 | /ts/model/recurrent_forecast.py | df0a87f85c61bafabed73bb18d12840b8fb9ba75 | [] | no_license | tedlaw09/time_series_forecaster | 1db8c17902bdd790fe9f33f841eb5de204b5672b | 62f6fa0d5e832d2d1786eae729d9462b78d9b459 | refs/heads/main | 2023-09-03T18:47:07.025929 | 2021-10-31T14:51:06 | 2021-10-31T14:51:06 | 423,173,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,918 | py | import pickle
import os
import tensorflow as tf
import numpy as np
from ts.utility import Utility, ForecastDataSequence, SaveCallback
from ts.log import GlobalLogger
class RecurrentForecast:
"""
RNN based forecasting model which allows for a list
of layers to be provided
"""
@staticmethod
def load(modelLoadDir):
"""
Loads the model from the provided directory path
:param modelLoadDir: Directory from where the model should be loaded
:return: model which is loaded from the given path
"""
model = RecurrentForecast(None, None, loadModel=True)
model.model = tf.keras.models.load_model(modelLoadDir)
with open(os.path.join(modelLoadDir, 'hyperparam'), 'rb') as loadFile:
loadDict = pickle.load(loadFile)
model.forecastHorizon = loadDict['forecastHorizon']
model.numTargetVariables = loadDict['numTargetVariables']
model.numExoVariables = loadDict['numExoVariables']
return model
def __init__(
self,
forecastHorizon,
layerList,
numTargetVariables=1,
numExoVariables=0,
loadModel=False
):
"""
Initialize RNN-based Forecasting model using the given parameters
:param forecastHorizon: How much further in the future the model has to
predict the target series variable
:param layerList: list of layers of the recurrent model
:param numTargetVariables: Number of target variables the model takes as input
:param numExoVariables: Number of exogenous variables the model takes as input
:param loadModel: True or False - do not use this parameter !,
this is for internal use only (i.e. it is an implementation detail)
If True, then object is normally created, else object is created
without any member values being created. This is used when model
is created by the static load method
"""
if loadModel:
return
self.forecastHorizon = forecastHorizon
self.numTargetVariables = numTargetVariables
self.numExoVariables = numExoVariables
self.model = None
self.buildModel(layerList)
def train(
self,
trainSequences,
numIterations=1,
optimizer=tf.optimizers.Adam(),
modelSavePath=None,
verboseLevel=1,
returnLosses=True
):
"""
Train the model on the provided data sequences
:param trainSequences: Sequences of data, each seq in this must either
be a numpy array of shape (n + forecastHorizon, d1) or a 2-tuple whose
first element is a numpy array of shape (n + forecastHorizon, d1),
and second element is a numpy array of shape (n + forecastHorizon, d2)
:param numIterations: Number of iterations of training to be performed
:param optimizer: Optimizer using which to train the parameters of the model
:param modelSavePath: If not None, then save the model to this path after
every iteration of training
:param verboseLevel: Verbosity Level, higher value means more information
:param returnLosses: If True, then return losses of every iteration, else
does not return losses
:return: If returnLosses is True, then return list of losses of every
iteration, else None
"""
logger = GlobalLogger.getLogger()
logger.log('Compiling Model', 1, self.train.__name__)
self.model.compile(
optimizer=optimizer, loss=tf.keras.losses.MeanSquaredError()
)
callbacks = None
if modelSavePath is not None:
callbacks = [SaveCallback(
self,
modelSavePath
)]
logger.log('Begin Training Model', 1, self.train.__name__)
history = self.model.fit(
ForecastDataSequence(
trainSequences,
self.forecastHorizon,
self.numTargetVariables,
self.numExoVariables
),
epochs=numIterations,
verbose=verboseLevel,
callbacks=callbacks
)
if returnLosses:
return history.history['loss']
def predict(
self,
targetSeries,
exogenousSeries=None,
):
"""
Forecast using the model parameters on the provided input data
:param targetSeries: Series of the Target Variable, it
should be a numpy array of shape (n, numTargetVariables)
:param exogenousSeries: Series of exogenous Variables, it should be a
numpy array of shape (n, numExoVariables), it can be None only if
numExoVariables is 0 in which case the exogenous variables are not
considered
:return: Forecast targets predicted by the model, it has shape (n,), the
horizon of the targets is the same as self.forecastHorizon
"""
logger = GlobalLogger.getLogger()
logger.log(f'Target Series Shape: {targetSeries.shape}', 2, self.predict.__name__)
if exogenousSeries is not None:
logger.log(
f'Exogenous Series Shape: {exogenousSeries.shape}', 2, self.predict.__name__
)
logger.log('Prepare Data', 1, self.predict.__name__)
assert targetSeries.shape[1] == self.numTargetVariables
assert (Utility.isExoShapeValid(exogenousSeries, self.numExoVariables))
X = Utility.prepareDataPred(targetSeries, exogenousSeries)
logger.log('Begin Prediction', 1, self.predict.__name__)
return tf.squeeze(self.model.predict(np.expand_dims(X, axis=0), verbose=0), axis=0)
def evaluate(
self,
targetSeries,
exogenousSeries=None,
returnPred=False
):
"""
Forecast using the model parameters on the provided data, evaluates
the forecast result using the loss and returns it
:param targetSeries: Series of the Target Variable, it
should be a numpy array of shape
(numTimesteps + self.forecastHorizon, numTargetVariables).
numTimesteps is the number of timesteps on which our model must predict,
the values ahead are for evaluating the predicted results with respect
to them (i.e. they are true targets for our prediction)
:param exogenousSeries: Series of exogenous Variables, it should be a
numpy array of shape (numTimesteps, numExoVariables), it can be None
only if numExoVariables is 0 in which case the exogenous variables
are not considered
:param returnPred: If True, then return predictions along with loss, else
return on loss
:return: If True, then return predictions along with loss of the predicted
and true targets, else return only loss
"""
logger = GlobalLogger.getLogger()
logger.log(f'Target Series Shape: {targetSeries.shape}', 2, self.evaluate.__name__)
if exogenousSeries is not None:
logger.log(
f'Exogenous Series Shape: {exogenousSeries.shape}', 2, self.evaluate.__name__
)
logger.log('Prepare Data', 1, self.evaluate.__name__)
assert targetSeries.shape[1] == self.numTargetVariables
assert Utility.isExoShapeValid(exogenousSeries, self.numExoVariables)
X, Ytrue = Utility.prepareDataTrain(targetSeries, exogenousSeries, self.forecastHorizon)
logger.log('Begin Evaluation', 1, self.predict.__name__)
Ypred = tf.squeeze(self.model.predict(np.expand_dims(X, axis=0), verbose=0), axis=0)
loss = tf.keras.losses.MeanSquaredError()(
Ytrue,
Ypred
)
if returnPred:
return loss, Ypred
else:
return loss
def buildModel(self, layerList):
""" Builds Model Architecture """
GlobalLogger.getLogger().log(
'Building Model Architecture',
1,
self.buildModel.__name__
)
self.model = tf.keras.Sequential(layers=layerList)
inputDimension = self.numTargetVariables + self.numExoVariables
self.model.build(input_shape=(None, None, inputDimension))
def save(self, modelSaveDir):
"""
Saves the model at the provided directory path
:param modelSaveDir: Directory where the model should be saved
"""
self.model.save(modelSaveDir)
saveDict = {
'forecastHorizon': self.forecastHorizon,
'numTargetVariables': self.numTargetVariables,
'numExoVariables': self.numExoVariables
}
saveFile = open(os.path.join(modelSaveDir, 'hyperparam'), 'wb')
pickle.dump(saveDict, saveFile)
saveFile.close()
| [
"tedlaw@126.com"
] | tedlaw@126.com |
5134ad356b8187c5c41b7a78e471e1daaae04a22 | 864655ffd9b273c23b060d98dd3b52086658c26c | /tests/test_referenceless.py | d72199c88ef266c08f124bdd5c04dca01a9f2791 | [
"MIT"
] | permissive | tuetschek/GEM-metrics | c0be7dc96dc33c7fe0b1ff109005766a8657d76b | 8e17f54104dc2305c1ac6ae65f0ec2124d922134 | refs/heads/main | 2023-05-05T09:27:03.066915 | 2021-05-13T14:59:42 | 2021-05-13T14:59:42 | 363,098,615 | 2 | 0 | MIT | 2021-04-30T09:59:08 | 2021-04-30T09:57:17 | null | UTF-8 | Python | false | false | 172 | py | """Test class for metrics that don't use a reference.
"""
import unittest
class TestReferenceLessMetric(object):
pass
if __name__ == '__main__':
unittest.main()
| [
"amadaan@cs.cmu.edu"
] | amadaan@cs.cmu.edu |
bde5b82989102ae8dbcb6355fc1544fee00f6b16 | d0925805c58627911422fe9e5574bbe163b78344 | /rooms/management/commands/seed_amenities.py | 7756185663ff553d925ecea3abf2508f280df9de | [] | no_license | hs04111/airbnb-clone | 52fc3a6e4573cbfc611386c8fcd6cc3d53bb1ddf | 6e75494f8801e1149f840345d6613d5036c04018 | refs/heads/master | 2023-02-20T03:24:21.489785 | 2021-01-24T14:49:45 | 2021-01-24T14:49:45 | 307,663,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | from django.core.management.base import BaseCommand
from rooms.models import Amenity
class Command(BaseCommand):
help = "This command creates amenities"
def handle(self, *args, **options):
amenities = [
"Kitchen",
"Heating",
"Washer",
"Wifi",
"Indoor fireplace",
"Iron",
"Laptop friendly workspace",
"Crib",
"Self check-in",
"Carbon monoxide detector",
"Shampoo",
"Air conditioning",
"Dryer",
"Breakfast",
"Hangers",
"Hair dryer",
"TV",
"High chair",
"Smoke detector",
"Private bathroom",
]
for a in amenities:
Amenity.objects.create(name=a)
self.stdout.write(self.style.SUCCESS("Amenities created"))
| [
"hs04111@naver.com"
] | hs04111@naver.com |
b2e11180449c5cbd4123a1f6c4e49af8b9b06064 | ba3c06f9ae89479fa4987fe841ac09b5b5d71383 | /python_for_kids/book/Examples/mean.py | 1d733629a2eb40a1124f5270aca68952c682f52e | [] | no_license | mary-tano/python-programming | 6d806e25011e770a04a0922d0b71bf38c222d026 | 829654a3274be939fa529ed94ea568c12f7f1a27 | refs/heads/master | 2021-05-17T15:30:32.710838 | 2020-04-01T13:37:18 | 2020-04-01T13:37:18 | 250,846,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # Среднее значение
print("Введи число: ", end="")
Sum = 0
Value = int(input())
for Number in range(1,Value+1) :
Sum += Number
Mean = Sum / Value
print("Результат: " + str(Mean))
| [
"masha.mary.tano@gmail.com"
] | masha.mary.tano@gmail.com |
5708fc756fef49b594066cdc7e47e06500d47585 | 08b4e2c766724454059f1f72dc783b902ae14788 | /netbox_api/model/ipam_role.py | 0abe9b1b1cdde34a397ac756c73defee0888bc1c | [
"MIT"
] | permissive | zinic/netbox_api | 34d4e699747a5012c2e52c49e9f41e6375259fb3 | ace6cb2b60edd93f4a37f7a29e8d262a1c8e1fc4 | refs/heads/master | 2021-01-01T18:26:36.542405 | 2017-08-14T20:52:48 | 2017-08-14T20:52:48 | 98,341,745 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | class PrefixRole(object):
def __init__(self, id=None, name=None, slug=None, weight=None):
self.id = id
self.name = name
self.slug = slug
self.weight = weight
@classmethod
def from_dict(cls, contents):
if contents is None:
return cls()
return cls(**contents)
| [
"john.hopper@jpserver.net"
] | john.hopper@jpserver.net |
2fd6affedb77774a05cf4ecb2bb692bed8d3b69b | e4520cdf1328b2df38105f15e0f64e0836cce0dc | /seg3Dlow_inference.py | 25a1902afeaaa45759b39fc8d6210c76002aeed7 | [] | no_license | inqlee0704/lung_segmentation | 105efd078033021cdfbfacd99ccf1d74138a719e | b1d81950fbf3bab28b08671473b49b54304d3c19 | refs/heads/main | 2023-07-12T01:17:43.113532 | 2021-08-23T03:56:46 | 2021-08-23T03:56:46 | 397,806,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,322 | py | import os
from dotenv import load_dotenv
import time
import random
import wandb
from RecursiveUNet3D import UNet3D
from Seg3D import Seg3D
from medpy.io import load
from engine import Segmentor
from dataloader import LungDataset_3D_low_res
from torch.utils.data import Dataset, DataLoader
import numpy as np
import pandas as pd
from torch import nn
from torch.cuda import amp
import torch
from torchsummary import summary
from sklearn import model_selection
import SimpleITK as sitk
sitk.ProcessObject_SetGlobalWarningDisplay(False)
def seed_everything(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def wandb_config():
config = wandb.config
# ENV
config.data_path = '/data4/inqlee0704'
# config.data_path = os.getenv('VIDA_PATH')
config.in_file = 'ENV18PM_ProjSubjList_cleaned_IN.in'
config.test_results_dir = "RESULTS"
config.name = 'Seg3D_n_case128'
config.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config.low_parameter_path = ''
config.model = 'Seg3D_low'
config.activation = 'relu'
config.optimizer = 'adam'
# config.scheduler = 'CosineAnnealingWarmRestarts'
config.loss = 'BCE'
# config.bce_weight = 0.5
# config.pos_weight = 1
config.learning_rate = 0.0005
config.train_bs = 2
config.valid_bs = 2
config.aug = False
config.save = False
config.debug = True
if config.debug:
config.epochs = 1
config.project = 'debug'
config.n_case = 128
else:
config.epochs = 20
config.project = 'lung'
config.n_case = 128
return config
def resample(img, hdr, new_spacing=[1,1,1], new_shape=None):
# new_shape = (64,64,64)
if new_shape is None:
spacing = np.array(hdr.spacing, dtype=np.float32)
resize_factor = spacing / new_spacing
new_real_shape = img.shape * resize_factor
new_shape = np.round(new_real_shape)
real_resize_factor = np.array(new_shape) / img.shape
img = scipy.ndimage.interpolation.zoom(img,real_resize_factor, mode='nearest')
return img
if __name__ == "__main__":
parameter_path = '/data1/inqlee0704/lung_segmentation/RESULTS/Seg3D_n_case128_20210819/lung_Seg3D_low.pth'
load_dotenv()
seed_everything()
config = wandb_config()
# Data
df_subjlist = pd.read_csv(os.path.join(config.data_path,config.in_file),sep='\t')
df_train, df_valid = model_selection.train_test_split(
df_subjlist[:config.n_case],
test_size=0.2,
random_state=42,
stratify=None)
train_ds = LungDataset_3D_low_res(df_train.reset_index(drop=True))
train_loader = DataLoader(train_ds,
batch_size=config.train_bs,
shuffle=True,
num_workers=0)
model = Seg3D(num_classes=1)
model.load_state_dict(torch.load(parameter_path))
model.to(config.device)
model.eval()
eng = Segmentor(model, device=config.device)
batch_preds = eng.predict(train_loader)
print(batch_preds)
print(batch_preds[0].shape)
| [
"inqlee0704@gmail.com"
] | inqlee0704@gmail.com |
720a812fe7cfb5dcb8dd3faceb9c494944c59585 | 8599fe6390fb2ee6c545e82948ae793e63af481d | /Data preprocessing/getword.py | 09c336103722314d51d707e4cbd816ebf7e78bd8 | [] | no_license | csmaterial/kaggle | 5c83a427018ff75754d314cc924eabcd13e20679 | 44da2b3de9dfeb2d7218259bfb4c779c503cd144 | refs/heads/master | 2021-09-03T07:41:08.212836 | 2018-01-07T06:36:14 | 2018-01-07T06:36:14 | 116,449,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 5 14:50:07 2018
@author: zhang
"""
import re
import numpy as np
import pandas as pd
filename = 'C:\\Users\\zhang\\Desktop\\Aspects\\tripword.csv'
word = pd.read_csv(filename)
word1 = word['word']
diction = []
for i in word1:
#while(1):
for j in range(100):
start = i.find('(')
end = i.find(')')
if start == -1:
break
tempw = i[start+1:end]
if tempw not in diction:
diction.append(i[start+1:end])
else:
1
#print(i[start+1:end])
i = i[end+1:]
| [
"noreply@github.com"
] | csmaterial.noreply@github.com |
def787d7c8addea114fbbb69f045cf9a24fdce1f | 393c49bd5e993a03255dc538cfa824ea0a9bd079 | /tools/warn/make_warn_patterns.py | 4b20493ba13d5a89d45800454c0edfff3531fbd2 | [
"Apache-2.0"
] | permissive | AOSP-whatever/platform_build_make | 0959a17692c88f61f8b8af83c04d6300fbb4af6b | ca84657ad49890763b0fd9fccc3e227fac702492 | refs/heads/master | 2022-01-01T04:35:28.240285 | 2014-11-21T06:46:52 | 2020-10-10T12:02:40 | 142,779,041 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,048 | py | # python3
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Warning patterns for build make tools."""
# pylint:disable=relative-beyond-top-level
# pylint:disable=g-importing-member
from .cpp_warn_patterns import compile_patterns
from .severity import Severity
warn_patterns = [
# pylint:disable=line-too-long,g-inconsistent-quotes
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'make: overriding commands/ignoring old commands',
'patterns': [r".*: warning: overriding commands for target .+",
r".*: warning: ignoring old commands for target .+"]},
{'category': 'make', 'severity': Severity.HIGH,
'description': 'make: LOCAL_CLANG is false',
'patterns': [r".*: warning: LOCAL_CLANG is set to false"]},
{'category': 'make', 'severity': Severity.HIGH,
'description': 'SDK App using platform shared library',
'patterns': [r".*: warning: .+ \(.*app:sdk.*\) should not link to .+ \(native:platform\)"]},
{'category': 'make', 'severity': Severity.HIGH,
'description': 'System module linking to a vendor module',
'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(partition:.+\)"]},
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'Invalid SDK/NDK linking',
'patterns': [r".*: warning: .+ \(.+\) should not link to .+ \(.+\)"]},
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'Duplicate header copy',
'patterns': [r".*: warning: Duplicate header copy: .+"]},
{'category': 'FindEmulator', 'severity': Severity.HARMLESS,
'description': 'FindEmulator: No such file or directory',
'patterns': [r".*: warning: FindEmulator: .* No such file or directory"]},
{'category': 'make', 'severity': Severity.HARMLESS,
'description': 'make: unknown installed file',
'patterns': [r".*: warning: .*_tests: Unknown installed file for module"]},
{'category': 'make', 'severity': Severity.HARMLESS,
'description': 'unusual tags debug eng',
'patterns': [r".*: warning: .*: unusual tags debug eng"]},
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'make: please convert to soong',
'patterns': [r".*: warning: .* has been deprecated. Please convert to Soong."]},
{'category': 'make', 'severity': Severity.MEDIUM,
'description': 'make: deprecated macros',
'patterns': [r".*\.mk:.* warning:.* [A-Z_]+ (is|has been) deprecated."]},
]
compile_patterns(warn_patterns)
| [
"chh@google.com"
] | chh@google.com |
4601426557810995845176aedda71b23bf1ddc00 | d0d3e29713afe1a8874bb2c9358f64133bf5be5f | /issues/migrations/0020_auto_20200320_1437.py | 90136dbadfc48dca6ea16222121d8b58a9cd1031 | [] | no_license | mickh14/unicorn-quality | 6658f0328a5e6529249dfb555fe542a8dbd5da12 | b422014b61b368720e00a4ead7010e3b35f97252 | refs/heads/master | 2022-12-10T20:59:53.040267 | 2020-03-26T23:34:48 | 2020-03-26T23:34:48 | 243,999,129 | 0 | 1 | null | 2022-11-22T05:23:35 | 2020-02-29T16:07:51 | Python | UTF-8 | Python | false | false | 421 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-03-20 14:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('issues', '0019_comment_name'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='Issue',
new_name='issue',
),
]
| [
"michael.hurley@expleogroup.com"
] | michael.hurley@expleogroup.com |
557c3b3eff50eb4bb604fd4f42bd65dbd504613f | 33054c08e42d6972caa697da4645e2909241dbdb | /StateMachine.py | a888d57a9e967a5fabfcd62cf0f87c13bdeba69d | [] | no_license | unixnut/Python-Roman-numerals | d41da12914f47befad21263030e051c42973b3cd | 4c98bcad65882649493c586d3a92c38bc8ce9282 | refs/heads/master | 2021-05-04T20:35:36.061509 | 2018-02-01T11:52:46 | 2018-02-01T11:52:46 | 119,829,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | class StateMachine:
'''state can be one of the following:
- None: initialising
- prefix: values that will subtract from the result
- remainder: values that add to the result
'''
prefix = 1
remainder = 2
def __init__(self, d):
self.value = 0
self.previous = None
self.digits = d
## self.current_string = ""
def char(self, c):
if self.previous and self.digits[c] > self.digits[self.previous]:
# digit is larger, so previous character is now the prefix
# (subtract twice the digit's value from the total because it's a
# prefix digit not a remainder digit, so undo the previous addition as well)
self.value -= self.digits[self.previous] * 2
self.value += self.digits[c]
self.previous = c
def output(self):
return self.value
| [
"alastair@plug.org.au"
] | alastair@plug.org.au |
018ccabaec4b50ce01fc783963e90d9fafeab073 | 67aa2aaa7331836ac3ea6b03c9e6e733b2c80f65 | /philia-service/wit-service/prompt_toolkit/input/win32_pipe.py | 892af2d153b8606b78eb15e8e328416f8f0137a9 | [
"MIT"
] | permissive | BuildForSDGCohort2/masta-backend | c0b9ab6e40b787885b17a86e10344d14afba4d5f | 08c20fe910f8ab953714ac72f34cdead7a307bd3 | refs/heads/master | 2022-12-28T02:46:32.652289 | 2020-10-10T21:41:17 | 2020-10-10T21:41:17 | 291,690,682 | 1 | 1 | MIT | 2020-08-31T15:36:19 | 2020-08-31T10:58:32 | null | UTF-8 | Python | false | false | 4,078 | py | from ctypes import windll
from typing import Callable, ContextManager, List
from prompt_toolkit.eventloop.win32 import create_win32_event
from ..key_binding import KeyPress
from ..utils import DummyContext
from .base import PipeInput
from .vt100_parser import Vt100Parser
from .win32 import _Win32InputBase, attach_win32_input, detach_win32_input
__all__ = ["Win32PipeInput"]
class Win32PipeInput(_Win32InputBase, PipeInput):
"""
This is an input pipe that works on Windows.
Text or bytes can be feed into the pipe, and key strokes can be read from
the pipe. This is useful if we want to send the input programmatically into
the application. Mostly useful for unit testing.
Notice that even though it's Windows, we use vt100 escape sequences over
the pipe.
Usage::
input = Win32PipeInput()
input.send_text('inputdata')
"""
_id = 0
def __init__(self) -> None:
super().__init__()
# Event (handle) for registering this input in the event loop.
# This event is set when there is data available to read from the pipe.
# Note: We use this approach instead of using a regular pipe, like
# returned from `os.pipe()`, because making such a regular pipe
# non-blocking is tricky and this works really well.
self._event = create_win32_event()
self._closed = False
# Parser for incoming keys.
self._buffer: List[KeyPress] = [] # Buffer to collect the Key objects.
self.vt100_parser = Vt100Parser(lambda key: self._buffer.append(key))
# Identifier for every PipeInput for the hash.
self.__class__._id += 1
self._id = self.__class__._id
@property
def closed(self) -> bool:
return self._closed
def fileno(self):
"""
The windows pipe doesn't depend on the file handle.
"""
raise NotImplementedError
@property
def handle(self):
" The handle used for registering this pipe in the event loop. "
return self._event
def attach(self, input_ready_callback: Callable) -> ContextManager[None]:
"""
Return a context manager that makes this input active in the current
event loop.
"""
return attach_win32_input(self, input_ready_callback)
def detach(self) -> ContextManager[None]:
"""
Return a context manager that makes sure that this input is not active
in the current event loop.
"""
return detach_win32_input(self)
def read_keys(self) -> List[KeyPress]:
" Read list of KeyPress. "
# Return result.
result = self._buffer
self._buffer = []
# Reset event.
windll.kernel32.ResetEvent(self._event)
return result
def flush_keys(self) -> List[KeyPress]:
"""
Flush pending keys and return them.
(Used for flushing the 'escape' key.)
"""
# Flush all pending keys. (This is most important to flush the vt100
# 'Escape' key early when nothing else follows.)
self.vt100_parser.flush()
# Return result.
result = self._buffer
self._buffer = []
return result
def send_bytes(self, data: bytes) -> None:
" Send bytes to the input. "
self.send_text(data.decode("utf-8", "ignore"))
def send_text(self, text: str) -> None:
" Send text to the input. "
# Pass it through our vt100 parser.
self.vt100_parser.feed(text)
# Set event.
windll.kernel32.SetEvent(self._event)
def raw_mode(self) -> ContextManager[None]:
return DummyContext()
def cooked_mode(self) -> ContextManager[None]:
return DummyContext()
def close(self) -> None:
" Close pipe handles. "
windll.kernel32.CloseHandle(self._event)
self._closed = True
def typeahead_hash(self) -> str:
"""
This needs to be unique for every `PipeInput`.
"""
return "pipe-input-%s" % (self._id,)
| [
"phil.odinga@astechcongo.com"
] | phil.odinga@astechcongo.com |
8f4bd521b9a046c66c206d8916290b99234b9ed4 | a5f93bb346f55fe5351f8924deacd00b7b98a597 | /module4-acid-and-database-scalability-tradeoffs/app/rpg_mongo.py | 35e2afca48fdb8de825bfccc3101a4d73687f0e0 | [
"MIT"
] | permissive | anitashar/DS-Unit-3-Sprint-2-SQL-and-Databases | 33cabb92ee06aafb0ab9efe0290dfdb8c86cf236 | caa9ca7ac454d9fb7dab82ce378433e5fb9465e2 | refs/heads/master | 2021-05-27T08:01:26.223684 | 2020-04-21T00:46:29 | 2020-04-21T00:46:29 | 254,240,808 | 0 | 0 | null | 2020-04-09T01:27:05 | 2020-04-09T01:27:04 | null | UTF-8 | Python | false | false | 404 | py |
# How many total Characters are there?
# How many of each specific subclass?
# How many total Items?
# How many of the Items are weapons? How many are not?
# How many Items does each character have? (Return first 20 rows)
# How many Weapons does each character have? (Return first 20 rows)
# On average, how many Items does each Character have?
# On average, how many Weapons does each character have?
| [
"anitashar2006@gmail.com"
] | anitashar2006@gmail.com |
5fb551cfbd25ef543c12294b5e6b5b34c8fe8f14 | 2e92ec142eac6a47f955f5e063ebb27422af67c4 | /zmq_trainer/zmqsimulator.py | d30a7e2c57f821aa24e4c1672277c1d94c3d7ecc | [] | no_license | YhsCandy/HouseNavAgent | 3e6785c97125eee613028c13ec082f04b5f6c1b5 | f1c28d9c135d98e9b0dca5f530c7d2186b8ef700 | refs/heads/master | 2022-02-26T06:57:52.677336 | 2019-09-13T21:28:44 | 2019-09-13T21:28:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,127 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: zmqsimulator.py
import multiprocessing as mp
import threading
import atexit
import os, sys
from abc import abstractmethod, ABCMeta
from six.moves import queue
import weakref
import zmq
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
def dumps(obj):
return msgpack.dumps(obj, use_bin_type=True)
def loads(buf):
return msgpack.loads(buf)
def ensure_proc_terminate(proc):
if isinstance(proc, list):
for p in proc:
ensure_proc_terminate(p)
return
def stop_proc_by_weak_ref(ref):
proc = ref()
if proc is None:
return
if not proc.is_alive():
return
proc.terminate()
proc.join()
assert isinstance(proc, mp.Process)
atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))
class SimulatorProcess(mp.Process):
def __init__(self, idx, pipe_c2s, pipe_s2c, config=None):
super(SimulatorProcess, self).__init__()
self.idx = int(idx)
self.name = u'simulator-{}'.format(self.idx)
self.identity = self.name.encode('utf-8')
self.c2s = pipe_c2s
self.s2c = pipe_s2c
self.config = config
@abstractmethod
def _build_player(self):
pass
def run(self):
try:
player = self._build_player()
assert player is not None
except Exception as e:
print('[ERROR] <ZMQSimulator> Fail to create player for <{}>, Msg = {}'.format(self.identity, e), file=sys.stderr)
raise e
context = zmq.Context()
c2s_socket = context.socket(zmq.PUSH)
c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
c2s_socket.set_hwm(2)
c2s_socket.connect(self.c2s)
s2c_socket = context.socket(zmq.DEALER)
s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
# s2c_socket.set_hwm(5)
s2c_socket.connect(self.s2c)
state = player.current_state()
reward, isOver = 0, False
while True:
c2s_socket.send(dumps(
(self.identity, state, reward, isOver)),
copy=False)
action = loads(s2c_socket.recv(copy=False).bytes)
reward, isOver = player.action(action)
state = player.current_state()
class SimulatorMaster(object):
def __init__(self, pipe_c2s, pipe_s2c):
super(SimulatorMaster, self).__init__()
assert os.name != 'nt', "Doesn't support windows!"
self.name = 'SimulatorMaster'
self.context = zmq.Context()
self.c2s_socket = self.context.socket(zmq.PULL)
self.c2s_socket.bind(pipe_c2s)
self.c2s_socket.set_hwm(10)
self.s2c_socket = self.context.socket(zmq.ROUTER)
self.s2c_socket.bind(pipe_s2c)
self.s2c_socket.set_hwm(10)
# queueing messages to client
self.send_queue = queue.Queue(maxsize=100)
# make sure socket get closed at the end
def clean_context(soks, context):
for s in soks:
s.close()
context.term()
atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)
def send_loop(self):
while True:
msg = self.send_queue.get()
self.s2c_socket.send_multipart(msg, copy=False)
def recv_loop(self):
try:
while True:
msg = loads(self.c2s_socket.recv(copy=False).bytes)
ident, state, reward, isOver = msg
self.recv_message(ident, state, reward, isOver)
except zmq.ContextTerminated:
print("[Simulator] Context was terminated.", file=sys.stderr)
def __del__(self):
self.context.destroy(linger=0)
@abstractmethod
def recv_message(self, ident, state, reward, isOver):
"""
Do something about the agent named "ident" after getting its output.
"""
pass
def send_message(self, ident, action):
"""
Send action to the agent named "ident".
"""
self.send_queue.put([ident, dumps(action)])
| [
"jxwuyi@gmail.com"
] | jxwuyi@gmail.com |
358893e35bbd56734f3c0df20f6129c87583d727 | abf4757a51e38b3cde6fc55b0251e77652521a2d | /models.py | 2e4f0d45edd16ef57cc997df440fe3bae2aa42ca | [] | no_license | stephenroller/relationstest | fd5284e6035682e0bfe1b13ff3c51dfec8e6f5ab | 92b66e3dd800107b5489f662264f87d5d178af61 | refs/heads/master | 2021-01-21T11:45:20.095298 | 2016-01-26T23:24:21 | 2016-01-26T23:24:21 | 37,383,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,369 | py | #!/usr/bin/env python
import numpy as np
from sklearn import svm, linear_model
from custom_classifiers import ThresholdClassifier
SETUPS = {
# baseline "unsupervised"
'cosine': ('threshold', 'cosine'),
# baseline memorizations
'lhs': ('linear', 'lhs'),
'rhs': ('linear', 'rhs'),
'concat': ('linear', 'concat'),
# asym models
'diff': ('linear', 'diff'),
'diffsq': ('linear', 'diffsq'),
# rb models
'diffrbf': ('rbf', 'diff'),
'concatrbf': ('rbf', 'concat'),
# others I dont want now
#('lhs', 'lr1', 'lhs'),
#('rhs', 'lr1', 'rhs'),
#('concat', 'lr1', 'concat'),
#('diff', 'lr1', 'diff'),
#('diffsq', 'lr1', 'diffsq'),
#('lhs', 'lr2', 'lhs'),
#('rhs', 'lr2', 'rhs'),
#('concat', 'lr2', 'concat'),
#('diff', 'lr2', 'diff'),
#('diffsq', 'lr2', 'diffsq'),
#('diffpoly', 'poly2', 'diff'),
}
def words2matrix(dataseries, space):
return np.array(list(dataseries.apply(lambda x: space[x])))
def generate_cosine_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
return np.array([np.sum(np.multiply(lhs, rhs), axis=1)]).T
def generate_diff_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
# difference vector
diff = rhs - lhs
return diff
def generate_diffsq_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
# difference vector
diff = rhs - lhs
# element wise squared diffs
diff_sq = np.power(diff, 2)
X = np.concatenate([diff, diff_sq], axis=1)
return X
def generate_concat_matrix(data, space):
lhs = words2matrix(data.word1, space)
rhs = words2matrix(data.word2, space)
X = np.concatenate([lhs, rhs], axis=1)
return X
def generate_lhs_matrix(data, space):
lhs = words2matrix(data.word2, space)
return lhs
def generate_rhs_matrix(data, space):
rhs = words2matrix(data.word2, space)
return rhs
def generate_feature_matrix(data, space, features):
if features == 'cosine':
X = generate_cosine_matrix(data, space)
elif features == 'lhs':
X = generate_lhs_matrix(data, space)
elif features == 'rhs':
X = generate_rhs_matrix(data, space)
elif features == 'concat':
X = generate_concat_matrix(data, space)
elif features == 'diff':
X = generate_diff_matrix(data, space)
elif features == 'diffsq':
X = generate_diffsq_matrix(data, space)
else:
raise ValueError("Can't generate %s features" % features)
y = data.entails.as_matrix()
return X, y
def classifier_factory(name):
if name == 'linear':
return svm.LinearSVC()
elif name == 'poly2':
return svm.SVC(kernel='poly', degree=2)
elif name == 'threshold':
return ThresholdClassifier()
elif name == 'rbf':
return svm.SVC(kernel='rbf')
elif name == 'lr2':
return linear_model.LogisticRegression(penalty='l2')
elif name == 'lr1':
return linear_model.LogisticRegression(penalty='l1')
elif name == 'levy':
# todo this
return None
else:
raise ValueError("Don't know about %s models." % name)
def load_setup(setupname):
kl, fe = SETUPS[setupname]
return classifier_factory(kl), fe
| [
"roller@cs.utexas.edu"
] | roller@cs.utexas.edu |
599c16f31a7bdd1841ee6346e94922f0cb6d2c86 | 3ec50b750e788d018ff0f6afd3528350a8956f6a | /ch_04/tests/test_classifier.py | 680a46568379375ea3e6dcaf7eb588014dcb498b | [
"MIT"
] | permissive | jegarciaor/Python-Object-Oriented-Programming---4th-edition | 45fb68f04c905a27865c40a48705da803fbdc27a | 2f3d6b09326dab6a0488c72c96d7368bee28fef4 | refs/heads/main | 2023-06-05T05:02:54.428716 | 2021-06-26T15:23:11 | 2021-06-26T15:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,536 | py | """
Python 3 Object-Oriented Programming Case Study
Chapter 4, Expecting the Unexpected
"""
import base64
import csv
from pathlib import Path
from pytest import *
import classifier
@fixture(scope="module")
def app_client():
test_users = [
classifier.User(
username='noriko',
email='noriko@example.com',
real_name='Noriko K. L.',
role=classifier.Role.BOTANIST,
password='md5$H5W30kno$10a2327b2fce08c1ad0f65a12d40552f'
),
classifier.User(
username='emma',
email='emma@example.com',
real_name='Emma K.',
role=classifier.Role.RESEARCHER,
password='md5$F8ZVxsuE$ebf71d15067ed7c887c0408550b671e2'
)
]
with classifier.app.app_context():
classifier.app.config['TESTING'] = True
classifier.app.config['USER_FILE'] = Path.cwd()/"test_data"
for u in test_users:
classifier.users.add_user(u)
yield classifier.app.test_client()
def test_health_check(app_client):
result = app_client.get("health")
assert result.status_code == 200
assert result.json == {
"status": "OK",
"user_count": 2,
"users": [
{
'email': 'noriko@example.com',
'role': 'botanist',
'password': 'md5$H5W30kno$10a2327b2fce08c1ad0f65a12d40552f',
'real_name': 'Noriko K. L.',
'username': 'noriko'
},
{
'email': 'emma@example.com',
'role': 'researcher',
'password': 'md5$F8ZVxsuE$ebf71d15067ed7c887c0408550b671e2',
'real_name': 'Emma K.',
'username': 'emma'
},
]
}
def test_whoami_good(app_client):
credentials = base64.b64encode("noriko:Hunter2".encode("utf-8"))
result = app_client.get(
"whoami",
headers={
"Authorization": f"BASIC {credentials.decode('ASCII')}"
}
)
assert result.status_code == 200
print(result.json)
assert result.json["status"] == "OK"
def test_whoami_bad(app_client):
    """An incorrect password is rejected by /whoami with 401 and 'Unknown User'."""
    # Any wrong password works here; the literal (typo and all) is kept verbatim.
    raw = "noriko:not my passowrd".encode("utf-8")
    token = base64.b64encode(raw)
    result = app_client.get(
        "whoami",
        headers={
            "Authorization": f"BASIC {token.decode('ASCII')}"
        },
    )
    assert result.status_code == 401
    print(result.json)
    assert result.json["message"] == "Unknown User"
| [
"slott56@gmail.com"
] | slott56@gmail.com |
8309d17602e9b9cfb5136fa53a01bbec3edfafc4 | b886ca4e9c8e0d179c8034eaaa1e99cf59a31f70 | /01_jumptopy/chap02/str_q1.py | dd40f961683105d80fe78a12575409cac984aa65 | [] | no_license | seoseungkyo/openBigData | 80e43e7b89beba57bd16c040b1602e4789e45675 | 1da7dfd4ae7118f61440af02519fb94f411115fe | refs/heads/master | 2021-09-10T13:49:04.986210 | 2018-03-27T08:18:27 | 2018-03-27T08:18:27 | 125,938,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | pin = "881020-1068234"
# Split the resident-ID string "YYMMDD-NNNNNNN" around the hyphen:
# birth date on the left, the 7-digit serial on the right.
yyyymmdd, num = pin.split("-")
print(yyyymmdd)
print(num)
| [
"sskyo0517@gmail.com"
] | sskyo0517@gmail.com |
828706382369f44bfdb5af2eea5bfb46a468be2f | 118413c6358a829dd83da0679c8f7d3e5ff762e9 | /backend/backend/settings/dev.py | c5e5f7882c5df15cc1fe4e7681eea8e9921d74a2 | [] | no_license | jinni-ni/Klaybee | 136959f013af44c37675d2a8f07aee9e3a8f6092 | 98111d85e162dd532f8a3742ee09f6f9357e53c7 | refs/heads/master | 2023-08-22T21:06:18.871622 | 2021-10-11T04:32:49 | 2021-10-11T04:32:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from .common import *
# Development-only settings: debug mode on, debug-toolbar enabled.
DEBUG = True
# Extend the base app list (imported from .common) with the debug toolbar.
INSTALLED_APPS += [
    'debug_toolbar',
]
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Local SQLite file next to the project; fine for development, not production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# The toolbar middleware must come as early as possible, hence prepended.
MIDDLEWARE = [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
] + MIDDLEWARE
# debug_toolbar only renders for requests originating from these IPs.
INTERNAL_IPS = ['127.0.0.1']
| [
"sjkim910115@gmail.com"
] | sjkim910115@gmail.com |
0551c05c3a0fcbffde3afd42eec059f9cc7d51a4 | 4d675034878c4b6510e1b45b856cc0a71af7f886 | /configs/gcnet/mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py | 7fb8e82ece225ab6f88f1f4f83bea56a42cf1a57 | [
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
] | permissive | shinya7y/UniverseNet | 101ebc2ad8f15482ee45ea8d6561aa338a0fa49e | 3652b18c7ce68122dae7a32670624727d50e0914 | refs/heads/master | 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 | Apache-2.0 | 2023-01-27T01:13:31 | 2020-05-13T07:23:43 | Python | UTF-8 | Python | false | false | 376 | py | _base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'
# Override the base Mask R-CNN config: train backbone BN statistics with
# SyncBN and insert GCNet ContextBlock plugins (ratio 1/16) after conv3 of
# backbone stages 2-4 (i.e. c3-c5; stage c2 is disabled).
model = dict(
    backbone=dict(
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,  # keep BN layers updating during training
        plugins=[
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True, True),
                position='after_conv3')
        ]))
| [
"noreply@github.com"
] | shinya7y.noreply@github.com |
adaef7b4c22f7d588cbecc487971f66d2e877e6a | 22ac18caaf9b4e2a143e9142bbe0c2a1a69f0233 | /plushkin/core/api/urls.py | ab41976f4efeef96d1f67f83af1fa6c0a6b47f1a | [] | no_license | buzzon/plushkin-server | e850df872fa0fc29532dd61b1637b1e5d0be37f7 | 03602685c631152180b99f337ffad969a82e2d0b | refs/heads/main | 2023-05-05T03:25:59.575758 | 2021-05-14T05:35:14 | 2021-05-14T05:35:14 | 302,861,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | from django.conf.urls import url
from rest_framework.authtoken import views as rest_views
from . import views
from .views import UserViewSet, BookmarkViewSet, BookmarkLinkViewSet
# URL namespace for reversing, e.g. reverse('core:user').
app_name = 'core'
# NOTE(review): these regexes are not anchored with '^'; Django's url() matches
# anywhere a prefix allows, so consider prefixing each pattern with '^' — confirm
# against the project's root URLconf before changing.
urlpatterns = [
    # Account lifecycle endpoints.
    url(r'user_registration/$', views.UserCreate.as_view(), name="user_registration"),
    url(r'user/$', views.get_user, name="user"),
    url(r'user_update/$', views.user_update, name="user_update"),
    url(r'user_remove/$', views.user_remove, name="user_remove"),
    # DRF token authentication endpoint.
    url(r'auth_token/$', rest_views.obtain_auth_token, name="get_token"),
    # Read-only user listing.
    url(r'users/$', UserViewSet.as_view({'get': 'list'}), name='genres_list'),
    # Bookmark CRUD plus a link-specific retrieve/update route.
    url(r'bookmarks/$', BookmarkViewSet.as_view({'get': 'list', 'post': 'create'}), name='account-list'),
    url(r'bookmarks/(?P<pk>[^/]+)$', BookmarkViewSet.as_view({'get': 'retrieve', 'post': 'update', 'delete': 'destroy'}), name='account-retrieve'),
    url(r'bookmarks/link/(?P<pk>[^/]+)$', BookmarkLinkViewSet.as_view({'get': 'retrieve', 'post': 'update'}), name='account-retrieve'),
]
| [
"buzzondev@gmail.com"
] | buzzondev@gmail.com |
1666841f001a283c34cf8f4be614a2ce5db233af | 6583b7f11175c40106fb7cc0037578abae125f42 | /ucc/gui/controls/bottom_text_ctrl.py | eb4944c37fa38a615ee1bd2c3465346c7c8e9757 | [] | no_license | FREDY1969/tampa-bay-python-avr | 02f913ee8373bfab4ef88902844476080b560226 | e0311815ebf81b5e1b128f621bf1f15b4fa28289 | refs/heads/master | 2020-04-24T17:45:03.787951 | 2011-10-23T17:58:09 | 2011-10-23T17:58:09 | 40,279,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | # bottom_text_ctrl.py
r'''Text control for editing word code.'''
import wx, wx.py
from ucc.gui import registry, debug
class BottomTextCtrl(wx.py.editwindow.EditWindow):
    """Source-code editor pane that mirrors its text into the current word.

    Every keystroke and every file load pushes the editor's text into
    ``registry.currentWord.source_text`` so the rest of the GUI sees it.
    """
    def __init__(self, *args, **kwargs):
        super(BottomTextCtrl, self).__init__(*args, **kwargs)
        self.setDisplayLineNumbers(True)
        # Route raw key events through our handler before default processing.
        self.Bind(wx.EVT_KEY_DOWN, self.onBottomTextChange)
    def onBottomTextChange(self, event):
        """Key handler: sync edited text to the current word and mark it dirty."""
        debug.notice('BottomTextCtrlL: Update event!')
        registry.currentWord.source_text = self.GetText()
        registry.currentWord.set_save_state(False)  # flag unsaved changes
        event.Skip() # CLEAN not sure if this is needed
    def LoadFile(self, source_filename):
        """Load a file into the editor and sync its text to the current word."""
        super(BottomTextCtrl, self).LoadFile(source_filename)
        registry.currentWord.source_text = self.GetText()
"dangyogi@gmail.com"
] | dangyogi@gmail.com |
235ba9837b2f58a3c0bd07de1ae82650697f68b4 | e76ed84b9801d070faef03e6feeac8782de8c30d | /testcases/begin_dev/get_access_token_cases.py | c3c1d59d628afaffe8f4c32fe8b223d369b2ee16 | [] | no_license | chaoabc/API_CHAO | 88fc5620f18d3447d4dbb4d58afdb85361758217 | 3d301445ef689d368b790f974b1000107ecbf3ca | refs/heads/master | 2022-11-24T05:38:39.853381 | 2020-07-25T14:51:20 | 2020-07-25T14:51:20 | 277,857,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | #!/usr/bin/env python
# encoding: utf-8
# @author: miaoxiaochao
# @file: get_access_token_cases.py
# @time: 2020/7/7 13:41 下午
import requests
import unittest
from common.localconfig_utils import local_config
from common.log_utils import logger
class GetAccessTokenCases(unittest.TestCase):
    """Test cases for the WeChat /cgi-bin/token access-token endpoint.

    NOTE(review): these tests hit the live API configured in local_config.URL
    and embed a real-looking appid/secret pair in source — move credentials
    into configuration before sharing this file.
    """
    def setUp(self) -> None:
        # Fresh session per test; base URL comes from the local config file.
        self.hosts = local_config.URL
        self.session = requests.session()
    def tearDown(self) -> None:
        pass
    def test_get_access_token(self):
        """[case01] Fetch access_token with valid credentials."""
        logger.info('[case01] 正常获取access_token值测试')
        params = {
            'grant_type': 'client_credential',
            'appid': 'wx55614004f367f8ca',
            'secret': '65515b46dd758dfdb09420bb7db2c67f'
        }
        actual_result = self.session.get(url=self.hosts + '/cgi-bin/token',
                                         params=params
                                         )
        # A successful grant always carries a 7200-second expiry.
        self.assertEqual(actual_result.json()['expires_in'],7200)
    def test_appid_error(self):
        # Description is set at runtime instead of via a docstring.
        self._testMethodDoc = '[case02] appid错误时测试'
        logger.info('[case02] appid错误时测试')
        params = {
            'grant_type': 'client_credential',
            'appid': 'wx55614004f367f8',  # deliberately truncated appid
            'secret': '65515b46dd758dfdb09420bb7db2c67f'
        }
        actual_result = self.session.get(url=self.hosts + '/cgi-bin/token',
                                         params=params
                                         )
        # 40013 is WeChat's "invalid appid" error code.
        self.assertEqual(actual_result.json()['errcode'],40013)
# Allow the test module to be run directly as a script.
if __name__=="__main__":
    unittest.main()
| [
"1312998662@qq.com"
] | 1312998662@qq.com |
1799efdff1eb4bb6b7e8ba832d454375c9017ab7 | e40091711a9900350939556374cee5f3e41c2c3c | /tourism/ratting/models.py | 129148c4edcd9dd3177d7a74515f01a81254f5d7 | [] | no_license | rg3915/drf-tourism | 951249f64450b6b710bb971aa52ed4d2efe2a85d | c6648a42eed77ab82cf10af242ffb20690404fc0 | refs/heads/main | 2023-03-25T21:15:08.185176 | 2021-03-24T08:49:28 | 2021-03-24T08:49:28 | 347,816,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from django.contrib.auth.models import User
from django.db import models
class Ratting(models.Model):
    """A user's rating of a tourism item: a unique comment plus a numeric note."""
    # Unique free-text comment (verbose name in Portuguese: "comentário").
    comment = models.CharField('comentário', max_length=100, unique=True)
    # Optional author; deleting the user cascades to their ratings.
    user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        verbose_name='usuário',
        related_name='ratting_users',
        null=True,
        blank=True
    )
    # Score with two decimal places, e.g. 4.75 (max 9.99 given max_digits=3).
    note = models.DecimalField('nota', max_digits=3, decimal_places=2)
    # Creation timestamp, set once on insert.
    created = models.DateTimeField(
        'criado em',
        auto_now_add=True,
        auto_now=False
    )
    class Meta:
        ordering = ('comment',)
        verbose_name = 'avaliação'
        verbose_name_plural = 'avaliações'
    def __str__(self):
        return self.comment
| [
"regis42santos@gmail.com"
] | regis42santos@gmail.com |
30841bd3f6a6a979eeeab80457b83222c00be2d3 | 3330ed9c8f0aed91638b3a07ad697668346db930 | /meiduo_mall/meiduo_mall/apps/payment/views.py | efbe83168b0fcbd31e9b45953a65fa112fb21d9b | [] | no_license | chengong825/meiduo_mall_django | fcee163bb6256672cbc5dcbd649aad2605bfb5e6 | e3ca5e48f0c043d3f3f6d24a198ac2812df6d719 | refs/heads/master | 2020-03-29T09:48:09.189141 | 2018-09-21T14:33:43 | 2018-09-21T14:33:43 | 149,774,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,574 | py | import os
from alipay import AliPay
from django.conf import settings
from django.shortcuts import render
# Create your views here.
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from orders.models import OrderInfo
from payment.models import Payment
class PaymentView(APIView):
    """
    Payment: build an Alipay payment URL for one of the user's unpaid orders.
    """
    permission_classes = (IsAuthenticated,)
    def get(self, request, order_id):
        """
        Return an Alipay redirect URL for the given order.
        """
        # Validate the order: it must belong to the requester, use the Alipay
        # pay method, and still be unpaid.
        try:
            order = OrderInfo.objects.get(order_id=order_id, user=request.user,
                                          pay_method=OrderInfo.PAY_METHODS_ENUM["ALIPAY"],
                                          status=OrderInfo.ORDER_STATUS_ENUM["UNPAID"])
        except OrderInfo.DoesNotExist:
            return Response({'message': '订单信息有误'}, status=status.HTTP_400_BAD_REQUEST)
        # Build the Alipay client used to sign the payment request.
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None, # default callback url
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/alipay_public_key.pem"), # Alipay's public key, used to verify messages coming back from Alipay — not our own public key
            sign_type="RSA2", # RSA or RSA2
            debug=settings.ALIPAY_DEBUG # default False
        )
        # Signed query string for the desktop web payment page.
        order_string = alipay.api_alipay_trade_page_pay(
            out_trade_no=order_id,
            total_amount=str(order.total_amount),
            subject="美多商城%s" % order_id,
            return_url="http://www.meiduo.site:8080/pay_success.html",
        )
        # The client must be redirected to https://openapi.alipay.com/gateway.do? + order_string;
        # assemble the full link and return it to the frontend.
        alipay_url = settings.ALIPAY_URL + "?" + order_string
        return Response({'alipay_url': alipay_url})
class PaymentStatusView(APIView):
    """
    Payment result: verify Alipay's return parameters and record the payment.
    """
    def put(self, request):
        # Alipay sends its result in the query string; pop the signature out
        # so the remaining params can be verified against it.
        data = request.query_params.dict()
        signature = data.pop("sign")
        alipay = AliPay(
            appid=settings.ALIPAY_APPID,
            app_notify_url=None, # default callback url
            app_private_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "keys/app_private_key.pem"),
            alipay_public_key_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                                "keys/alipay_public_key.pem"), # Alipay's public key, used to verify messages coming back from Alipay — not our own public key
            sign_type="RSA2", # RSA or RSA2
            debug=settings.ALIPAY_DEBUG # default False
        )
        success = alipay.verify(data, signature)
        if success:
            # our order number
            order_id = data.get('out_trade_no')
            # Alipay's transaction serial number
            trade_id = data.get('trade_no')
            Payment.objects.create(
                order_id=order_id,
                trade_id=trade_id
            )
            # Move the order from UNPAID to UNCOMMENT (paid, awaiting review).
            OrderInfo.objects.filter(order_id=order_id, status=OrderInfo.ORDER_STATUS_ENUM['UNPAID']).update(status=OrderInfo.ORDER_STATUS_ENUM["UNCOMMENT"])
            return Response({'trade_id': trade_id})
        else:
            # Signature check failed: treat as a forged/illegal request.
            return Response({'message': '非法请求'}, status=status.HTTP_403_FORBIDDEN)
"252918372@qq.com"
] | 252918372@qq.com |
beca7f3ac7e3309bc77727b50b4db51149f040ec | 9d1833ae35f9f35ecbb665024df93573dc59870d | /Distributed Systems Failure Detection/server.py | 1dd4145b2be1c409ca77cc7497f9a1ffe6a384a5 | [] | no_license | ishaandatta/All_Projects | 2a1788e7c0e9dea7e9775d28ac7cd33d9b7701f2 | ff8fd437b60e613407431fbbc32766c4ff748ce5 | refs/heads/main | 2023-05-05T20:15:27.908781 | 2021-05-24T11:42:20 | 2021-05-24T11:42:20 | 370,076,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | import sys
import os
from threading import Thread
from node import Node, Mode
import time

# Create the server node and run its three services on daemon threads so the
# process can be terminated without waiting for them to finish.
node = Node(role='server', mode=Mode.GOSSIP)
t1 = Thread(target=node.listener)
t2 = Thread(target=node.heartbeater)
t3 = Thread(target=node.commander)
t1.daemon = True
t2.daemon = True
t3.daemon = True
t1.start()
t2.start()
t3.start()
# Keep the main thread alive. The original `while True: pass` busy-wait spins
# one CPU core at 100%; sleeping yields the CPU while keeping the daemon
# threads running.
while True:
    time.sleep(1)
| [
"noreply@github.com"
] | ishaandatta.noreply@github.com |
6c3277c63fb51501c6203e331198a172cb4b86fa | 7c1a5eb4ab7382d20b56f88da8ddd98e56cd80f3 | /pals/loader.py | db399c4c5303d3f93a836cf27806da88b03d4ba4 | [
"MIT"
] | permissive | glasgowcompbio/PALS | 3680f0e9b42a4566b5a77f796563b9761b3ad431 | 3996190ed1436d469a4958299f32dc861dfcd3ba | refs/heads/master | 2022-08-25T16:53:28.475869 | 2022-05-02T16:11:31 | 2022-05-02T16:11:31 | 208,288,211 | 2 | 0 | MIT | 2022-08-06T06:10:45 | 2019-09-13T15:03:26 | Jupyter Notebook | UTF-8 | Python | false | false | 21,130 | py | import os
import zipfile
from collections import defaultdict, Counter
from io import BytesIO
import numpy as np
import pandas as pd
import requests
from loguru import logger
from tqdm import tqdm
from .common import DATABASE_PIMP_KEGG, load_json, DATA_DIR, GNPS_DOWNLOAD_CYTOSCAPE_DATA_VIEW, \
GNPS_VIEW_ALL_MOTIFS_VIEW, \
DATABASE_GNPS_MS2LDA, DATABASE_GNPS_MOLECULAR_FAMILY
from .reactome import get_pathway_dict, get_compound_mapping_dict, load_entity_dict, get_protein_entity_dict, \
get_protein_mapping_dict, get_gene_entity_dict, get_gene_mapping_dict
class Database(object):
    """Bundle of pathway, entity and entity→pathway mapping data for one database.

    extra_data optionally carries measurement/annotation dataframes and other
    loader-specific payloads.
    """
    def __init__(self, database_name, pathway_dict, entity_dict, mapping_dict, extra_data=None):
        self.database_name = database_name
        self.pathway_dict = pathway_dict
        self.entity_dict = entity_dict
        self.mapping_dict = mapping_dict
        self.extra_data = extra_data
    def __repr__(self):
        # A Database is identified purely by its name.
        return self.database_name
class Loader(object):
    """Abstract base for loaders; subclasses build and return a Database."""
    def load_data(self):
        """Construct and return a Database instance. Must be overridden."""
        raise NotImplementedError()
class PiMP_KEGG_Loader(Loader):
    """Loads the bundled PiMP/KEGG pathway database shipped inside DATA_DIR."""
    def __init__(self, database_name):
        self.database_name = database_name
    def load_data(self):
        """Read the zipped JSON dump from disk and wrap it in a Database."""
        filename = '%s.json.zip' % DATABASE_PIMP_KEGG
        json_file = os.path.abspath(os.path.join(DATA_DIR, filename))
        logger.debug('Loading %s' % json_file)
        data = load_json(json_file, compressed=True)
        return Database(self.database_name, data['pathway_dict'],
                        data['entity_dict'], data['mapping_dict'])
class CompoundOnlineLoader(Loader):
    """Fetches compound pathway data live from a Reactome server."""
    def __init__(self, database_name, reactome_species, mp_only):
        self.database_name = database_name
        self.reactome_species = reactome_species
        self.mp_only = mp_only
    def load_data(self):
        """Query Reactome for pathways, compound mappings and entities."""
        logger.debug('Retrieving data for %s from Reactome %s metabolic_pathway_only=%s' %
                     (self.reactome_species, self.database_name, self.mp_only))
        pathways = get_pathway_dict(self.reactome_species,
                                    metabolic_pathway_only=self.mp_only)
        mappings = get_compound_mapping_dict(self.reactome_species, self.database_name,
                                             metabolic_pathway_only=self.mp_only)
        entities = load_entity_dict(self.database_name)
        return Database(self.database_name, pathways, entities, mappings)
class CompoundOfflineLoader(Loader):
    """Loads compound pathway data from the JSON dumps bundled on disk."""
    def __init__(self, database_name, reactome_species, mp_only):
        self.database_name = database_name
        self.reactome_species = reactome_species
        self.mp_only = mp_only
    def load_data(self):
        """Read the per-species dump for this database and wrap it in a Database.

        Only metabolic-pathway dumps are shipped offline, so mp_only=False
        raises ValueError.
        """
        if not self.mp_only:
            raise ValueError(
                'Pathway information is not available. Please use live reactome query with --connect_to_reactome_server.')
        subdir = 'metabolic_pathways' if self.mp_only else 'all_pathways'
        json_file = os.path.join(DATA_DIR, 'reactome', subdir, self.database_name,
                                 '%s.json.zip' % self.reactome_species)
        logger.debug('Loading %s' % json_file)
        data = load_json(json_file, compressed=True)
        return Database(self.database_name, data['pathway_dict'],
                        data['entity_dict'], data['mapping_dict'])
class UniProtLoader(Loader):
    """Fetches protein (UniProt) pathway data live from a Reactome server."""
    def __init__(self, database_name, reactome_species, mp_only):
        self.database_name = database_name
        self.reactome_species = reactome_species
        self.mp_only = mp_only
    def load_data(self):
        """Query Reactome for pathways, protein entities and their mapping."""
        pathways = get_pathway_dict(self.reactome_species,
                                    metabolic_pathway_only=self.mp_only)
        entities = get_protein_entity_dict(self.reactome_species, self.database_name)
        mappings = get_protein_mapping_dict(self.reactome_species, self.database_name,
                                            metabolic_pathway_only=self.mp_only)
        return Database(self.database_name, pathways, entities, mappings)
class EnsemblLoader(Loader):
    """Fetches gene (Ensembl) pathway data live from a Reactome server.

    Mirrors UniProtLoader but uses the gene entity/mapping queries.
    """
    def __init__(self, database_name, reactome_species, mp_only):
        self.database_name = database_name
        self.reactome_species = reactome_species
        self.mp_only = mp_only
    def load_data(self):
        """Query Reactome for pathways, gene entities and their mapping."""
        pathway_dict = get_pathway_dict(self.reactome_species, metabolic_pathway_only=self.mp_only)
        entity_dict = get_gene_entity_dict(self.reactome_species, self.database_name)
        mapping_dict = get_gene_mapping_dict(self.reactome_species, self.database_name,
                                             metabolic_pathway_only=self.mp_only)
        data = {
            'pathway_dict': pathway_dict,
            'entity_dict': entity_dict,
            'mapping_dict': mapping_dict
        }
        database = Database(self.database_name, data['pathway_dict'], data['entity_dict'], data['mapping_dict'])
        return database
class GNPSLoader(Loader):
    """Builds a PALS Database from GNPS results (molecular families or MS2LDA motifs).

    Measurements come either from a user-supplied peak table or are downloaded
    from a GNPS task; grouping information ('pathways') comes from molecular
    family clustering or from Mass2Motifs, depending on database_name.
    """
    def __init__(self, database_name, gnps_url, metadata_df, comparisons, gnps_ms2lda_url=None, peak_table_df=None):
        # metadata_df must have two columns: 'sample' and 'group'.
        self.database_name = database_name
        self.gnps_url = gnps_url
        self.metadata_df = metadata_df
        self.comparisons = comparisons
        self.int_df = None
        self.annotation_df = None
        self.gnps_ms2lda_url = gnps_ms2lda_url
        self.peak_table_df = peak_table_df
        # MS2LDA mode requires a second GNPS task URL for the motif results.
        if self.database_name == DATABASE_GNPS_MS2LDA:
            assert self.gnps_ms2lda_url is not None
    def load_data(self):
        """Assemble measurement, annotation and grouping data into a Database.

        NOTE(review): if database_name is neither molecular-family nor MS2LDA
        the final `return database` raises NameError; likewise the molecular-
        family branch references `filtered_clustering_df`, which is only bound
        in the GNPS-download branch — confirm peak-table mode is never combined
        with the molecular-family database.
        """
        if self.peak_table_df is not None:  # load measurements from a peak table
            logger.info('Processing peak table')
            logger.debug(self.peak_table_df)
            # drop the first (m/z) and second (RT) columns to get the measurement df
            cols = [0, 1]
            measurement_df = self.peak_table_df.drop(self.peak_table_df.columns[cols], axis=1)
            measurement_df.index.rename('peak_id', inplace=True)
            measurement_df.index = measurement_df.index.astype('str')
            # FIXME: really shouldn't be called this
            clustering_df = self.peak_table_df[self.peak_table_df.columns[cols]]
            # create annotation dataframe
            annotation_df = pd.DataFrame(index=measurement_df.index)
            annotation_df.index.rename('peak_id', inplace=True)
            annotation_df['entity_id'] = measurement_df.index
            annotation_df['entity_id'] = annotation_df['entity_id'].astype(str)
            annotation_df.index = annotation_df.index.astype('str')
        else:  # load measurements from GNPS
            logger.info('Retrieving clustering and quantification information from GNPS')
            logger.debug(self.gnps_url)
            results = self._download_gnps(self.gnps_url, GNPS_DOWNLOAD_CYTOSCAPE_DATA_VIEW)
            assert results is not None
            quantification_df = results['quantification_df']
            clustering_df = results['clustering_df']
            filtered_clustering_df = clustering_df[
                clustering_df['componentindex'] != -1]  # drop all the singleton components
            # keep only columns containing 'Peak area', and remove 'Peak area' from column names
            measurement_df = quantification_df.filter(regex='Peak area')
            # NOTE(review): str.rstrip strips a *character set*, not a suffix —
            # sample names ending in any of 'P','e','a','k',' ','r' would be
            # over-trimmed; str.removesuffix(' Peak area') would be safer. Confirm.
            measurement_df.columns = measurement_df.columns.str.rstrip('Peak area')
            measurement_df.index.rename('peak_id', inplace=True)
            measurement_df.index = measurement_df.index.astype('str')
            # create annotation dataframe
            annotation_df = pd.DataFrame(index=filtered_clustering_df.index)
            annotation_df.index.rename('peak_id', inplace=True)
            annotation_df['entity_id'] = filtered_clustering_df.index
            annotation_df['entity_id'] = annotation_df['entity_id'].astype(str)
            annotation_df.index = annotation_df.index.astype('str')
        # filter dataframes
        # assume metadata_df has two columns: 'sample' and 'group'
        # check that the metadata sample names provided by users match the column names in measurement df
        num_matching_samples = np.sum(self.metadata_df['sample'].isin(measurement_df.columns.values))
        assert num_matching_samples > 0, 'None of the sample names in the metadata %s matches the columns names in ' \
                                         'the measurements %s. Please check the names again.' % (
                                             self.metadata_df['sample'].values,
                                             measurement_df.columns.values)
        # remove rows with sample id that can't be found in the columns of int_df
        metadata_df = self.metadata_df[self.metadata_df['sample'].isin(measurement_df.columns.values)]
        # keep only columns in int_df that have group information
        measurement_df = measurement_df[metadata_df['sample']]
        # create experimental design dictionary
        groups = {}
        for k, v in metadata_df.groupby('group'):
            groups[k] = v['sample'].values.tolist()
        experimental_design = {
            'comparisons': self.comparisons,
            'groups': groups
        }
        # combine all above into the extra_data dictionary for a Database
        extra_data = {
            'measurement_df': measurement_df,
            'annotation_df': annotation_df,
            'experimental_design': experimental_design
        }
        # Turn grouping information into PALS database object
        # If it's a standard FBMN-GNPS result, then use the clustering as the groups
        # otherwise if it is GNPS-MS2LDA result, then download the MS2LDA results from GNPS and use motifs as groups
        if self.database_name == DATABASE_GNPS_MOLECULAR_FAMILY:
            filtered_clustering_df = filtered_clustering_df.rename(columns={
                'precursor mass': 'mass',
                'RTConsensus': 'RT'
            })
            database = self._molfam_to_database(filtered_clustering_df, extra_data)
        elif self.database_name == DATABASE_GNPS_MS2LDA:
            logger.info('Retrieving motif information from GNPS')
            logger.debug(self.gnps_ms2lda_url)
            results = self._download_gnps(self.gnps_ms2lda_url, GNPS_VIEW_ALL_MOTIFS_VIEW)
            motif_df = results['motif_df']
            # select some useful columns to display later
            # need to include all peaks, so we select the columns from clustering_df (instead of filtered_clustering_df)
            try:
                peak_info_df = clustering_df[['parent mass', 'LibraryID', 'GNPSLinkout_Network', 'number of spectra',
                                              'RTConsensus', 'precursor mass', 'SumPeakIntensity', 'componentindex']]
                peak_info_df = peak_info_df.rename(columns={
                    'precursor mass': 'mass',
                    'RTConsensus': 'RT'
                })
            except KeyError:
                # Peak-table mode (or older exports) lacks those columns.
                peak_info_df = clustering_df[['mass', 'RT']]
            database = self._motif_to_database(peak_info_df, motif_df, extra_data)
        return database
    def _molfam_to_database(self, clustering_df, extra_data):
        """
        Creates a user-defined database from GNPS Molecular Family clustering
        :param clustering_df: a dataframe of GNPS clustering information
        :param extra_data: additional information to include in the database
        :return: a Database object from GNPS Molecular Family clustering
        """
        # Create 'pathway' dictionary. In this case, 'pathway' is a GNPS molecular family
        pathway_dict = {}
        for comp in clustering_df['componentindex'].values:
            key = str(comp)
            pathway_dict[key] = {'display_name': 'Molecular Family #%d' % comp}
        # Create entity dictionary. An 'entity' is a MS1 peak (GNPS consensus cluster)
        entity_dict = self._get_entity_dict(clustering_df)
        # Create mapping dictionary that maps entities to pathways
        mapping_dict = {}
        for peak_id in entity_dict:
            component_index = str(entity_dict[peak_id]['componentindex'])
            mapping_dict[peak_id] = [component_index]
        # put everything together in a Database object
        database = Database(self.database_name, pathway_dict, entity_dict, mapping_dict, extra_data=extra_data)
        return database
    def _motif_to_database(self, peak_info_df, motif_df, extra_data):
        """
        Creates a user-defined database from GNPS-MS2LDA results
        :param peak_info_df: a dataframe of additional information for peaks
        :param motif_df: a dataframe of LDA analysis from GNPS-MS2LDA
        :param extra_data: additional information to include in the database
        :return: a Database object from GNPS-MS2LDA results
        """
        # find singleton motifs (those that occur on only one spectrum)
        c = Counter()
        for idx, row in motif_df.iterrows():
            motif = row['motif']
            c[motif] += 1
        motifs = motif_df['motif'].unique()
        singletons = [motif for motif in motifs if c[motif] == 1]
        # Create 'pathway' dictionary. In this case, 'pathway' is a GNPS-MS2LDA motif
        pathway_dict = {}
        motifdb_urls = {}
        for idx, row in motif_df.iterrows():
            key = row['motif']
            if key in singletons:
                continue
            motifdb_url = row['motifdb_url']
            motifdb_annotation = row['motifdb_annotation']
            # Try to cast motifdb_annotation to float. If success, then it contains NaN, which we can ignore
            # otherwise add motifdb_annotation to the display name
            try:
                float(motifdb_annotation)  # will throw ValueError if this contains an annotation string
                display_name = key
            except ValueError:
                display_name = '%s [%s]' % (key, motifdb_annotation)
            pathway_dict[key] = {
                'display_name': '%s' % display_name,
                'motifdb_url': motifdb_url,
                'motifdb_annotation': motifdb_annotation
            }
            motifdb_urls[display_name] = motifdb_url
        # Create entity dictionary. An 'entity' is a MS1 peak (GNPS consensus cluster)
        entity_dict = self._get_entity_dict(peak_info_df)
        # Create mapping dictionary that maps entities to pathways
        mapping_dict = defaultdict(list)
        for idx, row in motif_df.iterrows():
            peak_id = str(row['scan'])
            motif = row['motif']
            if motif in singletons:
                continue
            mapping_dict[peak_id].append(motif)
        mapping_dict = dict(mapping_dict)
        # put everything together in a Database object
        extra_data['motifdb_urls'] = motifdb_urls
        database = Database(self.database_name, pathway_dict, entity_dict, mapping_dict, extra_data=extra_data)
        return database
    def _get_entity_dict(self, peak_info_df):
        """Convert a peak-info dataframe into a per-peak entity dictionary.

        NOTE(review): mutates the caller's dataframe index (astype('str')).
        """
        # First turn the peak info dataframe to dictionary, with peak id as the key
        peak_info_df.index = peak_info_df.index.astype('str')
        temp = peak_info_df.to_dict(orient='index')
        # Extract entity information from temp
        # temp contains a lot of stuff we don't want, so copy selected values to entity_dict
        entity_dict = {}
        for peak_id in temp:
            entity_dict[peak_id] = {}
            entity_dict[peak_id]['unique_id'] = peak_id
            entity_dict[peak_id]['mass'] = temp[peak_id]['mass']
            entity_dict[peak_id]['RT'] = temp[peak_id]['RT']
            # The richer GNPS columns are optional; silently skip when absent.
            try:
                entity_dict[peak_id]['display_name'] = temp[peak_id]['parent mass']
                entity_dict[peak_id]['LibraryID'] = temp[peak_id]['LibraryID']
                entity_dict[peak_id]['GNPSLinkout_Network'] = temp[peak_id]['GNPSLinkout_Network']
                entity_dict[peak_id]['number of spectra'] = temp[peak_id]['number of spectra']
                entity_dict[peak_id]['SumPeakIntensity'] = temp[peak_id]['SumPeakIntensity']
                entity_dict[peak_id]['componentindex'] = temp[peak_id]['componentindex']
            except KeyError:
                pass
        return entity_dict
    def _download_gnps(self, gnps_url, view):
        """
        Downloads the zipped cytoscape data from GNPS and extract clustering and quantification dataframes from it.
        :param gnps_url: the url to the GNPS experiment, e.g.
        https://gnps.ucsd.edu/ProteoSAFe/status.jsp?task=0a8432b5891a48d7ad8459ba4a89969f
        :return: clustering and quantification dataframes from that GNPS result
        """
        # extract task id from the url
        tokens = gnps_url.split('task=')
        task = tokens[1]
        logger.info('Found GNPS task %s' % task)
        # send a post request to GNPS
        data = {
            'task': task,
            'view': view
        }
        api_endpoint = 'https://gnps.ucsd.edu/ProteoSAFe/DownloadResult'
        r = requests.post(url=api_endpoint, data=data, stream=True)
        # extract clustering and quantification tables
        # https://stackoverflow.com/questions/37573483/progress-bar-while-download-file-over-http-with-requests
        total_size = int(r.headers.get('content-length', 0))
        block_size = 1024
        results = None
        # NOTE(review): the loop variable below reuses (and shadows) the POST
        # payload name `data`; harmless here since the payload is no longer
        # needed, but worth renaming.
        with BytesIO() as f, tqdm(total=total_size, unit='iB', unit_scale=True) as t:
            for data in r.iter_content(block_size):
                t.update(len(data))
                f.write(data)
            if view == GNPS_DOWNLOAD_CYTOSCAPE_DATA_VIEW:
                clustering_df, quantification_df = self._parse_gnps_molfam(f)
                results = {
                    'clustering_df': clustering_df,
                    'quantification_df': quantification_df
                }
            elif view == GNPS_VIEW_ALL_MOTIFS_VIEW:
                motif_df = self._parse_ms2lda_motifs(f)
                results = {
                    'motif_df': motif_df
                }
        return results
    def _parse_gnps_molfam(self, input_stream):
        """
        Parses a zipped GNPS input stream, and extract clustering and quantification tables
        :param input_stream: a zipped input of GNPS results
        :return: clustering and quantification tables from the zip file
        """
        clustering_df = None
        quantification_df = None
        with zipfile.ZipFile(input_stream) as z:
            # find the right files containing the clustering info and quantification table
            clusterinfo_filename = None
            quantification_filename = None
            # we sort z.namelist() to make sure files appear alphabetically
            # It seems that depending on the format, there could be 1 or 2 quantification tables in the zip file
            # If there is only one, then it is quantification_table/quantification_table-00000.txt
            # Otherwise they are:
            # - quantification_table/quantification_table-00000.txt
            # - quantification_table_reformatted/xxx.csv
            # In this case, the reformatted one is what we want, so sorting the namelist and looping over them
            # will get the second (correct) one.
            for filename in sorted(z.namelist()):
                logger.debug('\t%s' % filename)
                if filename.startswith('clusterinfo_summary'):
                    clusterinfo_filename = filename
                    logger.debug('Found cluster info: %s' % clusterinfo_filename)
                if filename.startswith('quantification_table'):
                    quantification_filename = filename
                    logger.debug('Found quantification table: %s' % quantification_filename)
            assert clusterinfo_filename is not None
            assert quantification_filename is not None
            # read clustering and quantification information
            logger.debug('Loading clusterinfo %s' % clusterinfo_filename)
            clustering_df = pd.read_csv(z.open(clusterinfo_filename), sep='\t', index_col='cluster index')
            logger.debug('Loading quantification table %s' % quantification_filename)
            quantification_df = pd.read_csv(z.open(quantification_filename), sep=',').set_index('row ID')
        return clustering_df, quantification_df
    def _parse_ms2lda_motifs(self, input_stream):
        """Extract the 'view_all_motifs' table from a zipped GNPS-MS2LDA result."""
        motif_df = None
        with zipfile.ZipFile(input_stream) as z:
            for filename in z.namelist():
                if 'view_all_motifs' in filename:
                    logger.debug('Found motif table: %s' % filename)
                    motif_df = pd.read_csv(z.open(filename), sep='\t')
        return motif_df
| [
"joewandy@gmail.com"
] | joewandy@gmail.com |
516f5f7f22dbf9ab9646fea57a52c15ec339f6f6 | 3152ce935d7a1712b596af1979da0b6b655ad790 | /chapter05/aiomysql_test.py | f488a16c1a2c28fd6e594d4411107acedfba524c | [] | no_license | tacyi/tornado_overview | 7b95743be307270d7b6270a257d22e7fd2d0d08f | 499df936df7f50a7639613b088882d0b1eba1566 | refs/heads/master | 2020-12-09T19:47:23.499461 | 2019-12-30T05:52:47 | 2019-12-30T05:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | import asyncio
import aiomysql
import tornado.ioloop
async def test_example():
pool = await aiomysql.create_pool(host='cdh3', port=3306,
user='sunmengzi', password='sunmengzi',
db='data_webserver', charset="utf8")
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT name from message;")
print(cur.description)
(msg,) = await cur.fetchone()
print(msg)
# assert r == 42
pool.close()
await pool.wait_closed()
if __name__ == '__main__':
    # loop = asyncio.get_event_loop()
    # loop.run_until_complete(test_example(loop))
    # Drive the coroutine via Tornado's IOLoop instead of plain asyncio.
    io_loop = tornado.ioloop.IOLoop.current()
    io_loop.run_sync(test_example)
| [
"33619913+YeBax@users.noreply.github.com"
] | 33619913+YeBax@users.noreply.github.com |
428e40e7b1f7335489c1bf70e37fed876f14c42f | be5564a85d2a5d58b961603e915aac77bf458236 | /P1/ex02/main.py | 63d793e4e8cc9a83fed6d70447c60df8b5752d8f | [] | no_license | lisasystaliuk/OOP | f41b87ce890e5d5a1b5529e441f989fbd9d5a4c5 | 76bfb6d2eb0415edf0415ecff49d50ee7e0585c6 | refs/heads/master | 2021-09-20T02:37:29.947023 | 2018-08-02T10:36:59 | 2018-08-02T10:36:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | #!/usr/bin/env python3
from animal import Animal
from dog import Dog
from cat import Cat
def main():
    """Demonstrate the Animal hierarchy: instantiate each class and call its methods."""
    a = Animal()
    d = Dog()
    c = Cat()
    labels = ["An Animal", "A Dog", "A Cat"]
    # Iterate the labels directly rather than via range(len(...)).
    # NOTE(review): every iteration makes *all three* animals speak and sleep,
    # not just the one named by the printed label — behavior preserved here,
    # but confirm that pairing label i with animal i wasn't the intent.
    for label in labels:
        print("Making " + label)
        a.speak()
        d.speak()
        c.speak()
        a.sleep()
        d.sleep()
        c.sleep()
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | lisasystaliuk.noreply@github.com |
ffdbe021b61f0b17c5776c8d9ba4abfd981a7053 | c18ed49471a64a1043bd74424d997ba2f8fcd164 | /Anproject/customer/forms.py | 4257597dac8d75ea07f1f72119a5cabc82a2270e | [] | no_license | nkchirpy/amn | e4c2f1574ff1a90ab4030bf618f8b577b83a7625 | 44624a4bf570ec239a9f6a2bb029f0730f65a290 | refs/heads/master | 2021-09-02T06:49:54.078214 | 2017-12-31T06:22:53 | 2017-12-31T06:22:53 | 106,140,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | from django import forms
from .models import Contactform
class Contact_form(forms.ModelForm):
    """ModelForm for Contactform with Bootstrap-styled widgets for each field."""
    class Meta:
        model = Contactform
        fields = ['name','email_address','organization_name','contact_number','comments']
        # NOTE(review): 'type':'None' on contact_number renders a literal
        # type="None" attribute in HTML — probably meant 'tel' or should be
        # omitted; confirm against the template.
        widgets = {
            'name':forms.TextInput(attrs={'class':'form-control is-valid oval_border','placeholder':'Enter your name','id':'form-name','name':'person_name'}),
            'email_address':forms.TextInput(attrs={'class':'form-control is-valid oval_border','type':'email','placeholder':'example@domain.com','id':'form-email'}),
            'organization_name':forms.TextInput(attrs={'class':'form-control is-valid oval_border','placeholder':'Enter your organization name','id':'form-organize'}),
            'contact_number':forms.TextInput(attrs={'class':'form-control is-valid oval_border','type':'None','id':'form-contact'}),
            'comments':forms.Textarea(attrs={'class':'form-control is-valid oval_border1','id':'form-comments',}),
        }
| [
"krishsnk.s@gmail.com"
] | krishsnk.s@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.