| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# Finding coefficients in a ranking algorithm by comparing its results to the actual ranking
# Improvements to make
# 1) use enumerate to avoid passing redundant index arguments to functions.
# 2) *Consider exercises that were completed more often to be easier and others more difficult (a lot of error here).*
# 3) Implement Householder QR decomposition for the regression.
# Coefficients to find
# 1) 'punishment' for not participating (1 unknown - a non-positive score assigned to a missed exercise)
# 2) weight of each exercise (10 unknowns - one real number in [0,1] per exercise; the unknowns sum to 1)
# 3) weight of avg/min/max per exercise (3 unknowns - one real number in [0,1] for each of [avg,min,max]; the unknowns sum to 1)
import math # needed by score_by_normal below
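# Hedged sketch (editor's illustration, not part of the original pipeline): how the
# three coefficient groups above combine into one overall score. All names here
# (stat_scores, w_exercises, w_stats, deduction) are illustrative assumptions.
def _overall_score_sketch(stat_scores, participated, w_exercises, w_stats, deduction):
# stat_scores[e] is the [avg,min,max] score triple for exercise e; a missed
# exercise contributes -deduction in place of its weighted triple.
total = 0.0
for e in range(len(stat_scores)):
if participated[e]:
blended = w_stats[0]*stat_scores[e][0] + w_stats[1]*stat_scores[e][1] + w_stats[2]*stat_scores[e][2]
total += w_exercises[e]*blended
else:
total += w_exercises[e]*(-deduction)
return total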
actual_ranking = [1,6,7,5,9,10,4,8,2,3] # actual_ranking[i] = number of the patient holding rank i+1
actual_ranking1 = [1,9,10,7,4,2,3,8,5,6] # actual_ranking1[i] = rank of patient number i+1
exercises = [1,2,3,4,5,6,7,8,9,10,12] # 11 exercise ids; id 12 maps to index 10 (see get_exercise_index)
patients = [1,2,3,4,5,6,7,8,9,10]
class Patient(object): # renamed from `patients` to avoid shadowing the list above
def __init__(self,number,scores,stats,participated,rank,actual_rank,overall):
self.number = number # integer from 1-10. patient number.
self.scores = scores # list of floats element of [0,10]. Floats are scores for a given exercise.
self.stats = stats # for n exercises this is a list containing n lists of the form [avg,min,max], where avg is the patient's avg br for a given exercise, min is the patient's min br for a given exercise... etc.
self.participated = participated # list of booleans. True if the patient participated in a given exercise, False otherwise.
self.rank = rank
self.actual_rank = actual_rank
self.overall = overall # float element of [0,10]. Overall score of patient.
def get_exercise_index(exercise):
if exercise == 12:
exercise_index = 10
else:
exercise_index = exercise - 1
return exercise_index
# gets data from CSVs
def get_data():
data = []
output = []
for i in range(1,11):
if i == 10:
patient = "patient10.csv"
else:
patient = "patient" + "0" + str(i) + ".csv"
f = open(patient)
data1 = f.read()
f.close()
data.append([])
lines = data1.split("\r")
lines.pop(0)
output1 = []
prevact = None
prevbr = None
for line in lines:
token = line.split()
act = int(token[1])
br = float(token[0].strip())
token = [br, act]
if prevact != act or prevbr != br:
output1.append(token)
prevact = act
prevbr = br
data[i-1].append(token)
output.append(output1)
return output
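# Each CSV line is assumed to hold "<breathing rate> <activity id>" after a header
# row; get_data keeps a reading only when the activity or the rate changes, which
# deduplicates long runs of identical sensor samples.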
def initialize_patients(exercises, patients, data):
patients_list = []
# length = len(patients)
for i in range(0,10):
patients_list.append(0)
patients_list[i] = Patient(i+1,[],[],[],0,0,0)
patients_list[i].actual_rank = actual_ranking1[i]
for patient in patients_list:
for exercise in exercises:
patient.scores.append(0)
sum = 0
count = 0
max = 0
min = 1000
for tuple in data[patient.number - 1]:
tuple_exercise = tuple[1]
tuple_br = tuple[0]
if tuple_exercise == exercise:
sum += tuple_br
count += 1
if tuple_br < min:
min = tuple_br
if tuple_br > max:
max = tuple_br
if count == 0 or sum == 0:
patient.participated.append(False)
patient.stats.append(['N/A','N/A','N/A'])
else:
patient.participated.append(True)
avg = sum/count
patient.stats.append([avg,min,max])
return patients_list
# returns the average of avg,min,max
def get_avg_stats(patients_list, exercise):
exercise_index = get_exercise_index(exercise)
count = 0
sum = [0,0,0] # avg,min,max
for patient in patients_list:
if patient.participated[exercise_index]:
count += 1
for i in range(0,3):
sum[i] += patient.stats[exercise_index][i]
averages = [0,0,0] # avg, min, max
for i in range(0,3):
averages[i] = sum[i] / count
return averages
# returns the variance of avg,min,max
def get_var_stats(patients_list, exercise):
exercise_index = get_exercise_index(exercise)
Var = [0,0,0] # AvgVar, MinVar, MaxVar
Avg = get_avg_stats(patients_list, exercise)
count = 0
for patient in patients_list:
if patient.participated[exercise_index]:
count += 1
for i in range(0,3):
Var[i] += (patient.stats[exercise_index][i] - Avg[i])**2
for i in range(0,3):
Var[i] = Var[i] / count
return Var
# returns the minimum avg,min,max
def get_min_stats(patients_list, exercise):
exercise_index = get_exercise_index(exercise)
min = [1000,1000,1000]
for patient in patients_list:
if patient.participated[exercise_index]:
for i in range(0,3):
if patient.stats[exercise_index][i] < min[i]:
min[i] = patient.stats[exercise_index][i]
return min
# returns the maximum avg,min,max
def get_max_stats(patients_list, exercise):
exercise_index = get_exercise_index(exercise)
max = [0,0,0]
for patient in patients_list:
if patient.participated[exercise_index]:
for i in range(0,3):
if patient.stats[exercise_index][i] > max[i]:
max[i] = patient.stats[exercise_index][i]
return max
# sets each participating patient's score for the exercise to [avg,min,max], where each entry is a score out of 10 (scaled by span).
def score_by_span(patients_list,exercise):
exercise_index = get_exercise_index(exercise)
max = get_max_stats(patients_list, exercise)
min = get_min_stats(patients_list,exercise)
for patient in patients_list:
if patient.participated[exercise_index]:
scores = [0,0,0]
for i in range(0,3):
scores[i] = 10.0 if max[i] == min[i] else 10*(1 - (patient.stats[exercise_index][i] - min[i])/(max[i] - min[i])) # guard against a zero span
patient.scores[exercise_index] = scores
else:
patient.scores[exercise_index] = 'N/A'
return patients_list
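# score_by_span maps each statistic linearly onto [0,10]:
# score = 10*(1 - (x - min)/(max - min))
# so the lowest value of a statistic across the cohort scores 10 and the highest
# scores 0 (a lower breathing rate is treated as better throughout).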
# scores via the normal CDF, implemented with math.erf below.
def score_by_normal(patients_list, exercise):
exercise_index = get_exercise_index(exercise)
mean = get_avg_stats(patients_list, exercise)
stdev = get_var_stats(patients_list,exercise)
stdev = [v**0.5 for v in stdev] # variances -> standard deviations (the old loop never modified the list)
for patient in patients_list:
if patient.participated[exercise_index]:
scores = [0,0,0]
for i in range(0,3):
x = (patient.stats[exercise_index][i] - mean[i])/stdev[i]
Phi = 0.5*(1 + math.erf(x/math.sqrt(2))) # standard normal CDF
scores[i] = (1 - Phi)*10 # equals the previously commented branches: Normal(-x)*10 when x < 0
patient.scores[exercise_index] = scores
else:
patient.scores[exercise_index] = 'N/A'
return patients_list
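# score_by_normal standardizes each statistic as z = (x - mean)/stdev and scores it
# with the normal survival function, score = 10*(1 - Phi(z)), so below-average
# values again score above 5 under the same lower-is-better convention.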
# for manual testing.
def manual_score(participation_deduction, Wstats, exercises, Wexercises, patients_list):
# getting [avg,min,max] scores for each exercise
for exercise in exercises:
exercise_index = get_exercise_index(exercise)
score_by_span(patients_list,exercise)
# turning [avg,min,max] scores into scores element of [0,10]
for patient in patients_list:
if patient.participated[exercise_index]:
score = 0
for i in range(0,3):
score += patient.scores[exercise_index][i]*Wstats[i]
patient.scores[exercise_index] = score
else:
patient.scores[exercise_index] = -participation_deduction
for patient in patients_list:
for exercise in exercises:
exercise_index = get_exercise_index(exercise)
patient.overall += Wexercises[exercise_index]*patient.scores[exercise_index]
return patients_list
# just played around a little. Will remove from code.
def experiment():
experiment = [0,0.2,0.4,0.6,0.8,1] # considered values for Wexercises
Wexercises = [0.1,0.1,0.1,0.05,0.05,0.1,0.1,0.1,0.1,0.1,0.1] # starting point. Note: all values are altered below.
Wstats = [0.5,0.25,0.25] # kept constant for this experiment.
participation_deduction = 0.5 # kept constant for this experiment.
patients_list = initialize_patients(exercises, patients, get_data()) # getting data as usual.
# getting [avg,min,max] scores for each exercise and then turning them into scores out of 10
for exercise in exercises:
exercise_index = get_exercise_index(exercise)
score_by_span(patients_list,exercise) # would like to replace this with score_by_normal
# turning [avg,min,max] scores into scores element of [0,10]
for patient in patients_list:
if patient.participated[exercise_index]:
score = 0
for i in range(0,3):
score += patient.scores[exercise_index][i]*Wstats[i]
patient.scores[exercise_index] = score
else:
patient.scores[exercise_index] = -participation_deduction
# trying 6^9 solutions for the weights of exercises. NOTE: None worked.
for a in experiment:
Wexercises[0] = a
for b in experiment:
Wexercises[1] = b
for c in experiment:
Wexercises[2] = c
for d in experiment:
Wexercises[3] = d
for e in experiment:
Wexercises[4] = e
for f in experiment:
Wexercises[5] = f
for g in experiment:
Wexercises[6] = g
for h in experiment:
Wexercises[7] = h
for j in experiment:
Wexercises[8] = j
Wexercises[9] = 1
for W in Wexercises[:-1]:
Wexercises[9] -= W
valid_solution = False
if Wexercises[9] >= 0: # Wexercises[9] = 1 - sum(of the others), so this checks that the weights form a valid convex combination; it could be hoisted higher up the nest for speed.
for patient in patients_list:
for exercise in exercises:
exercise_index = get_exercise_index(exercise)
patient.overall += Wexercises[exercise_index]*patient.scores[exercise_index]
valid_solution = True
ranked = sorted(patients_list, key = lambda x: x.overall, reverse = True) # renamed from `patients` to avoid shadowing the global
for n in range(0,10):
if ranked[n].number != actual_ranking[n]:
valid_solution = False
break
if valid_solution:
print Wexercises
def apply_weights(patients_list,Wavg,Wmin,Wmax):
for patient in patients_list:
for exercise_index,exercise_score in enumerate(patient.scores):
if patient.participated[exercise_index]:
patient.scores[exercise_index] = exercise_score[0]*Wavg + exercise_score[1]*Wmin + exercise_score[2]*Wmax
return patients_list
def exercise_scores(patients_list, exercises):
for exercise in exercises:
score_by_span(patients_list,exercise)
apply_weights(patients_list,0.5,0.25,0.25)
return patients_list
def get_X(patients_list):
X = []
for patient in patients_list:
for i in range(0,len(patient.scores)):
if patient.participated[i] == False:
patient.scores[i] = 0
X.append(patient.scores)
return X
# Doolittle LU decomposition without pivoting: fails when U[j][j] == 0 (P is reserved for future row swaps).
def LU_decompose(X):
dimension = len(X)
# creating two matrices; L and U.
L = [] # to be lower triangular
U = [] # to be upper triangular
P = [] # pivot matrix. keeps track of row swaps.
for i in range(0,dimension):
L.append([0]*dimension) # a fresh row per matrix; appending one shared `row` list would alias every row
U.append([0]*dimension)
P.append([0]*dimension)
# assigning values to U[i][j] (upper triangle) and L[i][j] (lower triangle), Doolittle style
for j in range(0,dimension):
for i in range(0,j+1):
U[i][j] = X[i][j]
for k in range(0,i):
U[i][j] -= L[i][k]*U[k][j]
for i in range(j,dimension):
if i == j:
L[i][j] = 1
else:
L[i][j] = X[i][j]
for k in range(0,j):
L[i][j] -= L[i][k]*U[k][j]
L[i][j] = L[i][j]/float(U[j][j])
return [L,U]
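# Illustrative check (editor's sketch): for X = [[4.0,3.0],[6.0,3.0]],
# LU_decompose(X) gives L = [[1,0],[1.5,1]] and U = [[4.0,3.0],[0,-1.5]],
# and multiply(L,U) reproduces X.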
def list_to_int(b):
for i in range(0,len(b)):
b[i] = b[i][0]
return b
def int_to_list(b):
for i in range(0,len(b)):
b[i] = [b[i]]
return b
# swap rows i and j of matrix A.
def row_swap(A,i,j):
row_i = A[i]
A[i] = A[j]
A[j] = row_i
return A
# In matrix A, add factor*row_i to row j.
def row_add(A,i,j,factor):
dim_col = len(A[0])
for k in range(0,dim_col):
A[j][k] = A[j][k]+ factor*A[i][k]
return A
# takes an upper-triangular matrix T and solves Tx = y by back substitution
def backsub(T,y):
y = list_to_int(y)
dim = len(T)
x = []
for i in range(0,dim):
x.append(0)
x[dim-1] = y[dim-1]/float(T[dim-1][dim-1])
rows = reversed(range(0,dim-1))
for i in rows:
x[i] = float(y[i])
for j in range(i+1,dim):
x[i] -= T[i][j]*x[j]
x[i] = x[i]/T[i][i]
return x
# Reduces [A|b] to upper-triangular form by Gaussian elimination (rows are swapped when a pivot is zero)
def Gaussian(A,b):
dim = len(A)
for i in range(0,dim):
if A[i][i] == 0:
count = 0
while A[i+count][i] == 0:
count += 1
if i+count >= dim: # bound check must come before the next subscript
return "failure" # no nonzero pivot exists in this column
row_swap(A,i,i+count)
row_swap(b,i,i+count)
for j in range(i+1,dim):
row_add(b,i,j,-A[j][i]/A[i][i])
row_add(A,i,j,-A[j][i]/A[i][i])
return [A,b]
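# Illustrative check (editor's sketch): solving a small nonsingular system with
# Gaussian() + backsub(). Uncomment to run:
# A = [[2.0,1.0],[1.0,3.0]]
# y = [[5.0],[10.0]]
# T, b = Gaussian(A, y)
# x = backsub(T, b) # expected: [1.0, 3.0]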
# returns n by m matrix of zeros
def zeros(n,m):
output = []
for i in range(0,n):
output.append([])
for j in range(0,m):
output[i].append(0)
return output
# Takes two matrices and multiplies them.
def multiply(A,B):
row_dim = len(A)
col_dim = len(B[0])
sum_length = len(A[0])
AB = zeros(row_dim,col_dim)
for i in range(0,row_dim):
for j in range(0,col_dim):
for k in range(0,sum_length):
AB[i][j] = AB[i][j] + A[i][k]*B[k][j]
return AB
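# Editor's note (assumption from the shapes above): X from get_X is 10 patients x 11
# exercise scores, so Gaussian()/backsub() below solve only the leading 10x10 block
# and the 11th exercise weight is never fitted; the Householder QR item in the
# improvements list would replace this with a true least-squares solve.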
def regression(patients_list,exercises,target_scores):
exercise_scores(patients_list,exercises)
y = int_to_list(list(target_scores)) # copy first so the caller's ranking list is not mutated in place
X = get_X(patients_list)
Tb = Gaussian(X,y)
T = Tb[0]
b = Tb[1]
B = backsub(T,b)
return B
data = get_data()
patients_list = initialize_patients(exercises, patients,data)
print regression(patients_list, exercises,actual_ranking1)
# ints = [1,2,556,34]
# for idx, val in enumerate(ints):
# print idx, val
| easyCZ/SLIP-A-2015 | respiratory/Processed Datasets/Processed Datasets/uptoweek9.py | Python | mit | 13,161 | ["Gaussian"] | b62db48e54077b58daec42476ea29b56d18690287e6c7082a2889e4f28891d03 |
import warnings
import matplotlib.pyplot as plt
import numpy as np
class RandomWalk2D:
''' Tabular SARSA agent on a 2D grid-world random walk '''
def __init__(self, grid_size=3, end_states=[(0,0)], rewards=[1], \
exploration=.1, move_cost=0, alpha=.3, gamma=.9):
self.n = grid_size
self.alpha = alpha
self.gamma = gamma
self.end_states = end_states
self.move_cost = move_cost
self.rewards = rewards
self.e = exploration
self.actions = [(-1,0), (1,0), (0,-1), (0,1)]
# history for plot
self.h = np.zeros((self.n, self.n, len(self.actions)), dtype=int)
# invalid-move penalty so argmax never selects an off-grid action
self.q = np.ones((self.n, self.n, len(self.actions))) * -99999
for y in range(self.n):
for x in range(self.n):
for move in self.valid_moves((y,x)):
self.q[y, x, move] = 0
def valid_moves(self, state):
moves = []
if state[0] != 0:
moves.append(0)
if state[0] != self.n - 1:
moves.append(1)
if state[1] != 0:
moves.append(2)
if state[1] != self.n - 1:
moves.append(3)
return moves
def choose_action(self, state):
if np.random.uniform() < self.e:
return np.random.choice(self.valid_moves(state))
return np.argmax(self.q[state[0], state[1], :])
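# choose_action is epsilon-greedy: with probability e it explores a uniformly random
# valid move, otherwise it exploits argmax_a Q(s, a); the -99999 initialization in
# __init__ keeps off-grid actions out of the argmax.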
def move(self, state, action):
return tuple([x+y for x,y in zip(state, self.actions[action])])
def episode(self):
old_q = np.copy(self.q)
state = self.end_states[0]
while state in self.end_states:
state = tuple(np.random.randint(0, self.n, size=2)) # exclusive upper bound; replaces the deprecated random_integers
action = self.choose_action(state)
reward, end = 0, False
while not end:
reward -= self.move_cost
state1 = self.move(state, action)
if state1 in self.end_states:
reward += self.rewards[self.end_states.index(state1)]
end = True
action1 = self.choose_action(state1)
s_a = (state[0], state[1], action)
s1_a1 = (state1[0], state1[1], action1)
self.q[s_a] += self.alpha*(reward + self.gamma*self.q[s1_a1] - self.q[s_a])
self.h[s_a] += 1
state, action = state1, action1
return np.amax(np.abs(self.q - old_q))
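# episode() above implements the tabular SARSA update
#     Q(s,a) <- Q(s,a) + alpha*(r + gamma*Q(s',a') - Q(s,a)),
# an on-policy TD(0) rule: the bootstrap target uses the action a' that the
# epsilon-greedy policy actually takes next, unlike Q-learning's max over actions.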
def policy(self):
policy = np.zeros((self.n,self.n), dtype=int)
for y in range(self.n):
for x in range(self.n):
policy[y,x] = np.argmax(self.q[y, x, :])
return policy
def arrow(self, m):
dx, dy = 0, 0
if m == 0:
dx, dy = 0, -.3
elif m == 1:
dx, dy = 0, .3
elif m == 2:
dx, dy = -.3, 0
elif m == 3:
dx, dy = .3, 0
return dx, dy
def plot(self, axis):
policy = self.policy()
maxh = np.amax(self.h)*.2
for y in range(self.n):
for x in range(self.n):
cx, cy = x + .5, y + .5
if (y,x) in self.end_states:
v = self.rewards[self.end_states.index((y,x))]
c = 'coral' if v < 0 else 'lime'
axis.add_artist(plt.Circle((cx, cy), .3, color=c))
axis.text(cx, cy, str(v), fontsize=15, horizontalalignment='center', verticalalignment='center')
else:
#q-value
#moves = np.copy(self.q[y, x, :])
#for m, v in np.ndenumerate(moves):
# v = (1 - min(2, max(-2, v)) / 2) / 2
# dx, dy = self.arrow(m[0])
# c = '#{0:02x}{0:02x}ff'.format(int(v*255))
# plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc=c, ec=c)
moves = np.copy(self.h[y, x, :])
for m, v in np.ndenumerate(moves):
v = 1 - min(1, v / maxh)
dx, dy = self.arrow(m[0])
c = '#{0:02x}{0:02x}ff'.format(int(v*255))
plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc=c, ec=c)
dx, dy = self.arrow(policy[(y,x)])
plt.arrow(cx, cy, dx, dy, head_width=.2, head_length=.2, fc='k', ec='k')
self.h //= 10
def main():
exp = RandomWalk2D(grid_size=5, exploration=.1, move_cost=.01, \
end_states=[(0,0), (4,4), (1,2), (2,1)], \
alpha=.1, gamma=1, rewards=[2,1,-2,-1])
display_interval = 100
figure, axis = plt.subplots()
figure.canvas.set_window_title('SARSA')
axis.set_xlim([0,exp.n])
axis.xaxis.tick_top()
axis.set_ylim([exp.n, 0])
for iter in range(1000):
delta = 0
for sub_iter in range(display_interval):
delta = exp.episode()
# print(exp.q)
axis.cla()
exp.plot(axis)
plt.title('Policy Iteration: {0}, delta = {1:.7f}' \
.format((iter+1)*display_interval, delta), y=1.08)
plt.xlabel('Blue: visit-frequency, Black: optimal-policy')
axis.set_aspect('equal')
plt.draw()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.pause(.001)
plt.show()
if __name__ == '__main__':
main()
| rahulsrma26/code-gems | RL/randomWalk/sarsa.py | Python | mit | 5,442 | ["VisIt"] | 1d29b08a797af1239080e3264ca282227e86600344b24302bcca8ee95c80860e |
|
""" The Request Task Agent takes request tasks created in the transformation database
and submits to the request management system
"""
from DIRAC import S_OK
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Agent.TaskManagerAgentBase import TaskManagerAgentBase
from DIRAC.TransformationSystem.Client.TaskManager import RequestTasks
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/RequestTaskAgent'
class RequestTaskAgent( TaskManagerAgentBase ):
""" An AgentModule to submit requests tasks
"""
def __init__( self, *args, **kwargs ):
""" c'tor
"""
TaskManagerAgentBase.__init__( self, *args, **kwargs )
self.transType = []
def initialize( self ):
""" Standard initialize method
"""
res = TaskManagerAgentBase.initialize( self )
if not res['OK']:
return res
self.am_setOption( 'shifterProxy', 'DataManager' )
# clients
self.taskManager = RequestTasks( transClient = self.transClient )
agentTSTypes = self.am_getOption( 'TransType', [] )
if agentTSTypes:
self.transType = agentTSTypes
else:
self.transType = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] )
return S_OK()
def _getClients( self ):
""" Here the taskManager becomes a RequestTasks object
"""
res = TaskManagerAgentBase._getClients( self )
threadTaskManager = RequestTasks()
res.update( {'TaskManager': threadTaskManager} )
return res
| Sbalbp/DIRAC | TransformationSystem/Agent/RequestTaskAgent.py | Python | gpl-3.0 | 1,537 | ["DIRAC"] | d3436ae93b341a546b041b36edc96e5dae7015dd688f1a8c92881ee7e2e04af5 |
|
# -*- coding: utf-8 -*-
import urllib2
import httplib
import random
import json
import sys
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding("utf-8")
# Some User Agents
hds = [{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}, \
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.12 Safari/535.11'}, \
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)'}, \
{'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0'}, \
{'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'}, \
{'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'}, \
{'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'}, \
{'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0'}, \
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'}, \
{'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1'}, \
{'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'}, \
{'User-Agent': 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11'}, \
{'User-Agent': 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11'}]
# List of Hangzhou districts
regions = [u"xihu", u"xiacheng", u"jianggan", u"gongshu", u"shangcheng", u"binjiang", u"yuhang", u"xiaoshan"]
dict = {"xihu": "西湖", "xiacheng": "下城", "jianggan": "江干", "gongshu": "拱墅", "shangcheng": "上城",
"binjiang": "滨江", "yuhang": "余杭", "xiaoshan": "萧山"}
def one_record_spider(ershoufang):
"""
Scrape all the information for one second-hand listing and record it in a dict
"""
info_dict = {}
houseTitle = ershoufang.find("div", {"class": "title"})
href = houseTitle.a.get("href")
info_dict['链接'] = href
total_price = ershoufang.find("div", {"class": "totalPrice"}).text
info_dict['价格'] = total_price
house_info = ershoufang.find("div", {"class": "houseInfo"}).get_text().strip()
info = house_info.split("|")
if info:
info_dict['小区名称'] = info[0]
info_dict['户型'] = info[1].strip()
info_dict['面积'] = info[2].strip()
info_dict['朝向'] = info[3].strip()
info_dict['装修'] = info[4].strip()
info_dict['电梯'] = info[-1].strip()
position_info = ershoufang.find("div", {"class": "positionInfo"}).text
info = position_info.strip().split(")")
floor= info[0].strip()+")"
buildtime = info[-1].split("-")[0].strip()
info_dict['楼层'] = floor
info_dict['建造时间'] = buildtime
bankuai = info[-1].split("-")[-1]
info_dict['板块'] = bankuai
follow_info = ershoufang.find("div", {"class": "followInfo"}).get_text().split("/")
concerned = follow_info[0].strip()
visit = follow_info[1].strip()
date = follow_info[2].strip()
info_dict['关注人数'] = concerned
info_dict['看房次数'] = visit
info_dict['发布时间'] = date
unitPrice = ershoufang.find("div", {"class": "unitPrice"}).find("span").text
info_dict['均价'] = unitPrice
detail = ershoufang.find("div", {"class": "tag"}).text
if detail:
info_dict['备注'] = detail
print info_dict
return info_dict
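# Illustrative shape of the returned dict (editor's sketch; the values are made-up
# examples, the keys are the Chinese labels used throughout this script):
# {'链接': 'http://hz.lianjia.com/ershoufang/...', '价格': '300万', '小区名称': '...',
#  '户型': '3室1厅', '面积': '89平米', '楼层': '中楼层(共18层)', '均价': '33000元/平米', ...}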
def ershoufang_spider(url_page):
"""
Scrape the second-hand listing info from the given page URL
"""
try:
req = urllib2.Request(url_page, headers=hds[random.randint(0, len(hds) - 1)])
source_code = urllib2.urlopen(req, timeout=10).read()
plain_text = unicode(source_code)
soup = BeautifulSoup(plain_text, "html.parser")
except (urllib2.HTTPError, urllib2.URLError), e:
print e
return
info_list = []
ershoufang_list = soup.findAll("div", {"class": "info clear"})
for ershoufang in ershoufang_list:
info_dict = one_record_spider(ershoufang)
info_list.append(info_dict)
# send info_list out via an HTTP POST request
httpClient = None
try:
headers = {"Content-type": "application/json; charset = UTF-8", "Accept": "application/json"}
httpClient = httplib.HTTPConnection("10.242.109.29", 80, timeout=30)
httpClient.request("POST", "/ajax/open/result/record/housedeal", json.dumps(info_list, encoding = "utf-8", ensure_ascii=False), headers)
response = httpClient.getresponse()
print response.status
print response.getheaders()
except Exception, e:
print e
finally:
if httpClient:
httpClient.close()
def region_ershoufang_spider(region):
"""
Scrape the transaction records for one district
"""
url = u"http://hz.lianjia.com/ershoufang/" + region + "/"
try:
req = urllib2.Request(url, headers=hds[random.randint(0, len(hds) - 1)])
source_code = urllib2.urlopen(req, timeout=10).read()
plain_text = unicode(source_code)
soup = BeautifulSoup(plain_text, "html.parser")
except (urllib2.HTTPError, urllib2.URLError), e:
print e
return
total_pages = 1
try:
page_info = soup.find("div", {"class": "page-box house-lst-page-box"})
except AttributeError as e:
page_info = None
if page_info is None:
return None
page_info_str = page_info.get("page-data").split(",")[0].split(":")[1]
total_pages = int(page_info_str)
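# The split chain above assumes a page-data attribute shaped like
# {"totalPage":42,"curPage":1} (format assumption from the observed markup), from
# which split(",")[0].split(":")[1] extracts the total page count.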
# loop over every result page
for i in range(total_pages):
url_page = u"http://hz.lianjia.com/ershoufang/%s/pg%d/" % (region,i + 1)
ershoufang_spider(url_page)
def recent_ershoufang_spider(href):
"""
Scrape the most recent transaction records, stopping once the last-seen listing is reached
"""
total_pages = 100 # upper bound set by crawl frequency; usually no more than 10 pages are actually new
flag = 0
# loop over every result page
for i in range(total_pages):
url_page = u"http://hz.lianjia.com/ershoufang/pg%d/" % (i + 1)
try:
req = urllib2.Request(url_page, headers=hds[random.randint(0, len(hds) - 1)])
source_code = urllib2.urlopen(req, timeout=10).read()
plain_text = unicode(source_code)
soup = BeautifulSoup(plain_text, "html.parser")
except (urllib2.HTTPError, urllib2.URLError), e:
print e
return
info_list = []
ershoufang_list = soup.findAll("div", {"class": "info clear"})
for ershoufang in ershoufang_list:
info_dict = one_record_spider(ershoufang)
info_list.append(info_dict)
# reached the record seen on the previous crawl
if (info_dict["链接"]) == href:
flag = 1
# send info_list out via an HTTP POST request
httpClient = None
try:
headers = {"Content-Type": "application/json; charset = UTF-8", "Accept": "application/json"}
httpClient = httplib.HTTPConnection("10.242.109.29", 80, timeout=30)
httpClient.request("POST", "/ajax/open/result/record/housedeal", json.dumps(info_list, encoding = "utf-8", ensure_ascii=False), headers)
response = httpClient.getresponse()
print response.status
print response.getheaders()
except Exception, e:
print e
finally:
if httpClient:
httpClient.close()
if flag == 1:
break
def regions_ershoufang_spider():
"""
Scrape the transaction records for every district
"""
for region in regions:
region_ershoufang_spider(region)
print 'Finished scraping transaction records for the %s district' % region_names[region]
print 'done'
if __name__ == "__main__":
"""
The argument is the listing link of the newest record from the previous crawl, or 0. Required.
"""
href = sys.argv[1]
if href != '0': # argv entries are strings; plain truthiness would treat '0' as a real link
# scrape only the newest transaction records
recent_ershoufang_spider(href)
else:
# scrape the transaction info for all districts
regions_ershoufang_spider()
| autumnz613/HangzhoulLianjia | server_ershoufang.py | Python | mit | 8,609 | ["VisIt"] | 8d2391d3e46d6b55a1734940b1c497b3a0f8677e362ee4f08c016e71a913ace6 |
|
"""
Tests for discussion pages
"""
import datetime
from unittest import skip
from uuid import uuid4
from flaky import flaky
from nose.plugins.attrib import attr
from pytz import UTC
from .helpers import BaseDiscussionTestCase
from ..helpers import UniqueCourseTest
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ...pages.lms.learner_profile import LearnerProfilePage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
MultipleThreadFixture)
from .helpers import BaseDiscussionMixin
THREAD_CONTENT_WITH_LATEX = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n
**(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} =
\\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$
$= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0}
+\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2)
$H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $.
$A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and
$H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n
**Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
\n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$,
Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no
$(cos\\omega - cos\\theta)$ factor.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
$P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$,
dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta|
< \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim
veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in
reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.
"""
class DiscussionResponsePaginationTestMixin(BaseDiscussionMixin):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
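# Pagination behaviour these tests exercise (as encoded in the assertions above):
# the first page shows up to 25 responses; while 100 or more remain the button reads
# "Load next 100 responses", and "Load all responses" once fewer than 100 are left.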
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_2')
class DiscussionHomePageTest(UniqueCourseTest):
"""
Tests for the discussion home page.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionHomePageTest, self).setUp()
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def test_new_post_button(self):
"""
Scenario: I can create new posts from the Discussion home page.
Given that I am on the Discussion home page
When I click on the 'New Post' button
Then I should be shown the new post form
"""
self.assertIsNotNone(self.page.new_post_button)
self.page.click_new_post_button()
self.assertIsNotNone(self.page.new_post_form)
@attr('shard_2')
class DiscussionTabSingleThreadTest(BaseDiscussionTestCase, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = self.create_single_thread_page(thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.visit()
def test_mathjax_rendering(self):
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=THREAD_CONTENT_WITH_LATEX,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertTrue(self.thread_page.is_discussion_body_visible())
self.thread_page.verify_mathjax_preview_available()
self.thread_page.verify_mathjax_rendered()
def test_markdown_reference_link(self):
"""
Check that the markdown editor renders a reference link correctly
and that the colon (:) in the reference link is not converted to %3a
"""
sample_link = "http://example.com/colon:test"
thread_content = """[enter link description here][1]\n[1]: http://example.com/colon:test"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(
id=thread_id,
body=thread_content,
commentable_id=self.discussion_id,
thread_type="discussion"
)
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertEqual(self.thread_page.get_link_href(), sample_link)
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_2')
class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase):
"""
Tests for the discussion page with multiple threads
"""
def setUp(self):
super(DiscussionTabMultipleThreadTest, self).setUp()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.thread_count = 2
self.thread_ids = []
self.setup_multiple_threads(thread_count=self.thread_count)
self.thread_page_1 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[0]
)
self.thread_page_2 = DiscussionTabSingleThreadPage(
self.browser,
self.course_id,
self.discussion_id,
self.thread_ids[1]
)
self.thread_page_1.visit()
def setup_multiple_threads(self, thread_count):
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
thread_body = "Dummy Long text body." * 50
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id, body=thread_body),
)
self.thread_ids.append(thread_id)
view = MultipleThreadFixture(threads)
view.push()
def test_page_scroll_on_thread_change_view(self):
"""
Check switching between threads changes the page focus
"""
# verify threads are rendered on the page
self.assertTrue(
self.thread_page_1.check_threads_rendered_successfully(thread_count=self.thread_count)
)
# From the thread_page_1 open & verify next thread
self.thread_page_1.click_and_open_thread(thread_id=self.thread_ids[1])
self.assertTrue(self.thread_page_2.is_browser_on_page())
# Verify that the focus is changed
self.thread_page_2.check_focus_is_set(selector=".discussion-article")
@attr('shard_2')
class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase):
"""
Tests for checking the display of attributes on open and closed threads
"""
def setUp(self):
super(DiscussionOpenClosedThreadTest, self).setUp()
self.thread_id = "test_thread_{}".format(uuid4().hex)
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self, **thread_kwargs):
thread_kwargs.update({'commentable_id': self.discussion_id})
view = SingleThreadViewFixture(
Thread(id=self.thread_id, **thread_kwargs)
)
view.addResponse(Response(id="response1"))
view.push()
def setup_openclosed_thread_page(self, closed=False):
self.setup_user(roles=['Moderator'])
if closed:
self.setup_view(closed=True)
else:
self.setup_view()
page = self.create_single_thread_page(self.thread_id)
page.visit()
page.close_open_thread()
return page
def test_originally_open_thread_vote_display(self):
page = self.setup_openclosed_thread_page()
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .action-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .display-vote'))
def test_originally_closed_thread_vote_display(self):
page = self.setup_openclosed_thread_page(True)
self.assertTrue(page._is_element_visible('.forum-thread-main-wrapper .action-vote'))
self.assertFalse(page._is_element_visible('.forum-thread-main-wrapper .display-vote'))
self.assertTrue(page._is_element_visible('.response_response1 .action-vote'))
self.assertFalse(page._is_element_visible('.response_response1 .display-vote'))
@attr('shard_2')
class DiscussionCommentDeletionTest(BaseDiscussionTestCase):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"), [
Comment(id="comment_other_author"),
Comment(id="comment_self_author", user_id=self.user_id, thread_id="comment_deletion_test_thread")
]
)
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = self.create_single_thread_page("comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_2')
class DiscussionResponseEditTest(BaseDiscussionTestCase):
"""
Tests for editing responses displayed beneath thread in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"),
)
view.addResponse(
Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"),
)
view.push()
def edit_response(self, page, response_id):
self.assertTrue(page.is_response_editable(response_id))
page.start_response_edit(response_id)
new_response = "edited body"
page.set_response_editor_value(response_id, new_response)
page.submit_response_edit(response_id, new_response)
def test_edit_response_add_link(self):
"""
Scenario: User submits valid input to the 'add link' form
Given I am editing a response on a discussion page
When I click the 'add link' icon in the editor toolbar
And enter a valid url to the URL input field
And enter a valid string in the Description input field
And click the 'OK' button
Then the edited response should contain the new link
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://example.com"
description = "example"
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "")
page.add_content_via_editor_button(
"link", response_id, url, description)
page.submit_response_edit(response_id, description)
expected_response_html = (
'<p><a href="{}">{}</a></p>'.format(url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_image(self):
"""
Scenario: User submits valid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter a valid string in the Description input field
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = "image from example.com"
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "")
page.add_content_via_editor_button(
"image", response_id, url, description)
page.submit_response_edit(response_id, '')
expected_response_html = (
'<p><img src="{}" alt="{}" title=""></p>'.format(url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_image_error_msg(self):
"""
Scenario: User submits invalid input to the 'add image' form
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"image", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
def test_edit_response_add_decorative_image(self):
"""
Scenario: User submits the 'add image' form with a valid URL, an empty description, and the 'decorative' checkbox checked
Given I am editing a response on a discussion page
When I click the 'add image' icon in the editor toolbar
And enter a valid url to the URL input field
And enter an empty string in the Description input field
And I check the 'image is decorative' checkbox
And click the 'OK' button
Then the edited response should contain the new image
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
response_id = "response_self_author"
url = "http://www.example.com/something.png"
description = ""
page.start_response_edit(response_id)
page.set_response_editor_value(response_id, "Some content")
page.add_content_via_editor_button(
"image", response_id, url, description, is_decorative=True)
page.submit_response_edit(response_id, "Some content")
expected_response_html = (
'<p>Some content<img src="{}" alt="{}" title=""></p>'.format(
url, description)
)
actual_response_html = page.q(
css=".response_{} .response-body".format(response_id)
).html[0]
self.assertEqual(expected_response_html, actual_response_html)
def test_edit_response_add_link_error_msg(self):
"""
Scenario: User submits invalid input to the 'add link' form
Given I am editing a response on a discussion page
When I click the 'add link' icon in the editor toolbar
And enter an invalid url to the URL input field
And enter an empty string in the Description input field
And click the 'OK' button
Then I should be shown 2 error messages
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
page.start_response_edit("response_self_author")
page.add_content_via_editor_button(
"link", "response_self_author", '', '')
page.verify_link_editor_error_messages_shown()
def test_edit_response_as_student(self):
"""
Scenario: Students should be able to edit the responses they created, but not the responses of other users
Given that I am on a discussion page with a student logged in
When I try to edit the response created by the student
Then the response should be edited and rendered successfully
And responses from other users should be shown there
And the student should not be able to edit the responses of other people
"""
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.assertTrue(page.is_response_visible("response_other_author"))
self.assertFalse(page.is_response_editable("response_other_author"))
self.edit_response(page, "response_self_author")
def test_edit_response_as_moderator(self):
"""
Scenario: Moderator should be able to edit the response they created and responses of other users
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
def test_vote_report_endorse_after_edit(self):
"""
Scenario: Moderator should be able to vote, report or endorse after editing the response.
Given that I am on discussion page with moderator logged in
When I try to edit the response created by moderator
Then the response should be edited and rendered successfully
And I try to edit the response created by other users
Then the response should be edited and rendered successfully
And I try to vote the response created by moderator
Then the response should be voted successfully
And I try to vote the response created by other users
Then the response should be voted successfully
And I try to report the response created by moderator
Then the response should be reported successfully
And I try to report the response created by other users
Then the response should be reported successfully
And I try to endorse the response created by moderator
Then the response should be endorsed successfully
And I try to endorse the response created by other users
Then the response should be endorsed successfully
"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("response_edit_test_thread")
page.visit()
self.edit_response(page, "response_self_author")
self.edit_response(page, "response_other_author")
page.vote_response('response_self_author')
page.vote_response('response_other_author')
page.report_response('response_self_author')
page.report_response('response_other_author')
page.endorse_response('response_self_author')
page.endorse_response('response_other_author')
@attr('shard_2')
class DiscussionCommentEditTest(BaseDiscussionTestCase):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
@flaky # TODO: TNL-4057
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = self.create_single_thread_page("comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_2')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.thread_ids = []
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.additional_discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
),
XBlockFixtureDesc(
"discussion",
"Test Discussion 1",
metadata={"discussion_id": self.additional_discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
self.additional_discussion_page = InlineDiscussionPage(self.browser, self.additional_discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint: disable=attribute-defined-outside-init
self.thread_page.expand()
def setup_multiple_inline_threads(self, thread_count):
"""
Set up multiple threads on the page by passing 'thread_count'
"""
threads = []
for i in range(thread_count):
thread_id = "test_thread_{}_{}".format(i, uuid4().hex)
threads.append(
Thread(id=thread_id, commentable_id=self.discussion_id),
)
self.thread_ids.append(thread_id)
thread_fixture = MultipleThreadFixture(threads)
thread_fixture.add_response(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)],
threads[0]
)
thread_fixture.push()
def test_page_while_expanding_inline_discussion(self):
"""
Tests for the Inline Discussion page with multiple threads. The page should not focus 'thread-wrapper'
after loading responses.
"""
self.setup_multiple_inline_threads(thread_count=3)
self.discussion_page.expand_discussion()
thread_page = InlineDiscussionThreadPage(self.browser, self.thread_ids[0])
thread_page.expand()
# Check if 'thread-wrapper' is focused after expanding thread
self.assertFalse(thread_page.check_if_selector_is_focused(selector='.thread-wrapper'))
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
def test_dual_discussion_module(self):
"""
Scenario: Two discussion modules in one unit shouldn't override each other's actions
Given that I'm on a courseware page that contains two inline discussions
When I click on the first discussion module's new post button
Then it should add that module's new post form to the DOM
And I should be shown that module's new post form
And I shouldn't be shown the second discussion module's new post form
When I click on the second discussion module's new post button
Then it should add the second module's new post form to the DOM
And I should be shown the second discussion module's new post form
And I shouldn't be shown the first discussion module's new post form
And I should have two new post forms in the DOM
When I click back on the first module's new post button
Then I should be shown the first module's new post form
And I shouldn't be shown the second discussion module's new post form
"""
self.discussion_page.wait_for_page()
self.additional_discussion_page.wait_for_page()
self.discussion_page.click_new_post_button()
with self.discussion_page.handle_alert():
self.discussion_page.click_cancel_new_post()
self.additional_discussion_page.click_new_post_button()
self.assertFalse(self.discussion_page._is_element_visible(".new-post-article"))
with self.additional_discussion_page.handle_alert():
self.additional_discussion_page.click_cancel_new_post()
self.discussion_page.click_new_post_button()
self.assertFalse(self.additional_discussion_page._is_element_visible(".new-post-article"))
@attr('shard_2')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
# set up the stub server to return the desired amount of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
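# (the line above computes ceil(num_threads / PAGE_SIZE) via integer division, with a minimum of one page)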
all_pages = range(1, total_pages + 1)
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
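# (a window of pages within +/-2 of the current page, plus shortcuts to the first and last page)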
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
current_page -= 1
return page
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
def test_pagination_window_reposition(self):
page = self.check_pages(50)
page.click_next_page()
page.wait_for_ajax()
self.assertTrue(page.is_window_on_top())
def test_redirects_to_learner_profile(self):
"""
Scenario: Verify that the learner-profile link is present on the forum discussion page and that we can navigate to it.
Given that I am on a discussion forum user's profile page.
And I can see a username on the left sidebar
When I click on my username.
Then I will be navigated to the Learner Profile page.
And I can see my username on the Learner Profile page
"""
learner_profile_page = LearnerProfilePage(self.browser, self.PROFILED_USERNAME)
page = self.check_pages(1)
page.click_on_sidebar_username()
learner_profile_page.wait_for_page()
self.assertTrue(learner_profile_page.field_is_visible('username'))
@attr('shard_2')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_2')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the discussion sort preference page.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
Test the user's default sorting preference (default = "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
Test that the user's last sorting preference is saved.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
|
franosincic/edx-platform
|
common/test/acceptance/tests/discussion/test_discussion.py
|
Python
|
agpl-3.0
| 52,729
|
[
"VisIt"
] |
fad9a8b8598391491dab17ce076a08d3ae96bb76896676ac3fa3216565ca9121
|
#!/usr/bin/env python
from Asap import *
from Asap.Setup.Lattice.Cubic import FaceCenteredCubic
from Asap.Setup.Dislocation import Dislocation
from Asap.Filters.TimeAveragedPositions import TimeAveragedPositions
from Asap.Filters.RestrictedCNA import RestrictedCNA
from Asap.Dynamics.Langevin import Langevin
from Asap.Trajectories import NetCDFTrajectory
from ASE.Visualization.PrimiPlotter import *
from Numeric import *
PrintVersion(1)
#size = (50, 88, 35)
size = (30, 25, 7)
n_avg = 100
n_output = 5000
n_avg=50
n_output=100
n_total = 5 * n_output
outfile = "cna_atoms.nc"
atoms = FaceCenteredCubic(directions=((1,1,-2), (-1,1,0), (1,1,1)),
size=size, element="Au", debug=0, periodic=(0,0,1))
basis = atoms.GetUnitCell()
center = 0.5 * array([basis[0,0], basis[1,1], basis[2,2]]) + array([0.1, 0.1, 0.1])
disl = Dislocation(center, atoms.MillerToDirection((1,1,0)),
atoms.MillerToDirection((1,1,0))/2.0)
#atoms = ListOfAtoms(slab)
disl.ApplyTo(atoms)
atoms.SetCalculator(EMT(EMTRasmussenParameters()))
# Constant temperature dynamics
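# (the arguments are assumed to be timestep, temperature in energy units, and friction, per the old Asap Langevin interface)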
dyn = Langevin(atoms, 5 * femtosecond, 300 * kB, 0.002)
# Run Common Neighbor Analysis every n_output timesteps, using the
# positions averaged over the last n_avg timesteps, and writing the
# result to the output file.
avg = TimeAveragedPositions(atoms, n_avg, n_output)
dyn.Attach(avg)
cna = RestrictedCNA(avg, verbose=1)
avg.Attach(cna)
# We output and plot the instantaneous positions. Replace atoms with
# avg to output and plot the averaged positions instead.
writer = NetCDFTrajectory(outfile, atoms, interval=1, mode="w")
cna.Attach(writer)
plotter = PrimiPlotter(atoms)
plotter.SetOutput(GifFile("cna_plot"))
# Only plot atoms in the dislocation.
def invis(a):
c = a.GetTags()
return equal(c, 0)
plotter.SetInvisibilityFunction(invis)
plotter.SetColors({0:"red", 1:"yellow", 2:"blue"})
cna.Attach(plotter)
# Run the simulation
dyn.Run(n_total)
# Close the output file (better safe than sorry)
writer.Close()
|
auag92/n2dm
|
Asap-3.8.4/Examples/CNA.py
|
Python
|
mit
| 2,024
|
[
"ASE"
] |
9068b32a5b2c116b5b3e39a3c8f11894834183d3d57014c0ebb7b2c3f58b1fc9
|
#!/usr/bin/python
# Reads an ACGTrie and dumps it into value file format. In value file format
# each record is stored in 32 bits, and the value of a record represents the
# frequency of the chain in the DNA. We use bijective base-4 notation to map
# from index to chain. This system has only four digits, A < C < G < T; there
# are no zeros in this notation. The following illustrates how to map from index to chain:
#
# Index | Chain
# 1 A
# 2 C
# 3 G
# 4 T
# 5 AA
# 6 AC
# 7 AG
# 8 AT
# .....
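#
# Worked example (illustrative, not part of the original script): for index 6,
# q1 = ceil(6/4) - 1 = 1 and the digit 6 - 4*1 = 2 selects 'C'; then for q1 = 1
# the digit 1 - 4*0 = 1 selects 'A'. Reversing the digits yields the chain "AC".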
import numpy
import math
import struct
LEVELS_TO_DUMP = 5
OUT_FILE_NAME = 'out.count.' + str(LEVELS_TO_DUMP)
A = numpy.memmap('ngraph.nohead.ACGTrie.A', dtype='uint32')
C = numpy.memmap('ngraph.nohead.ACGTrie.C', dtype='uint32')
G = numpy.memmap('ngraph.nohead.ACGTrie.G', dtype='uint32')
T = numpy.memmap('ngraph.nohead.ACGTrie.T', dtype='uint32')
COUNT = numpy.memmap('ngraph.nohead.ACGTrie.COUNT', dtype='uint32')
SEQ = numpy.memmap('ngraph.nohead.ACGTrie.SEQ', dtype='int64')
## This function turns the up2bit number in the SEQ column of the trie into a list of 2-bit values.
def up2bit_list(value):
value = int(value) # Numbers in cffi arrays don't have the .bit_length property, so we convert to int.
return [((value >> x) & 3) for x in range(0,value.bit_length()-1,2)] # This uses bit-shifting to get the pairs of 2 bits at a time.
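# Usage sketch (illustrative, not in the original): the up2bit format keeps a
# leading 1 bit above the packed 2-bit digits, so up2bit_list(0b11000) == [0, 2],
# i.e. two digits, 0b00 and 0b10.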
def getScore(string):
row = 0 ## We always start on row 0
counts = [] ## We will return a list of counts for each row we visit
seq = [('A','C','T','G').index(char) for char in string] ## Turn the string into a list of 2bit numbers
while True:
rowCount = COUNT[row]
counts.append(rowCount) ## Straight away we add this row's count value to our counts list
seqLen = len(seq)
if seqLen == 0: break ## If we have no more DNA in our input string, we're done :)
up2bit = up2bit_list(SEQ[row]) ## Else we get some more data..
up2bitLen = len(up2bit)
nextPipe = (A,C,T,G)[seq[0]][row]
if nextPipe and up2bitLen == 0: ## If we have no more DNA in this row's Seq,
row = nextPipe ## take the warp pipe to the next row.
seq = seq[1:]
continue
elif seqLen <= up2bitLen: ## If we have more DNA in our row than our string...
if up2bit[:seqLen] == seq: counts += [rowCount] * seqLen ## 1) Check they match up. If so add this row's count N times.
else:
for x,y in enumerate(seq): ## 2) If they dont match up, find where they diverge
if y != up2bit[x]: break ## and just add this row's count for that many times
counts += [rowCount] * x
else: ## Finally, to be here we must have more DNA in our string
if seq[:up2bitLen] == up2bit: ## than in our row. Thus as before we see if they match.
counts += [rowCount] * up2bitLen ## If they do, add N counts to the counts list, and take
row = (A,C,T,G)[seq[up2bitLen]][row] ## the next warp pipe out (if available)
if row != 0:
seq = seq[up2bitLen+1:]
continue
else: ## Otherwise find out where the two sequences diverge and
for x,y in enumerate(up2bit): ## just add the appropriate number of counts to the counts
if y != seq[x]: break ## list before bottoming out of the while loop and breaking.
counts += [rowCount] * x
break
return counts[-1]
def row(x):
print A[x],C[x],T[x],G[x],COUNT[x],SEQ[x]
ALPHABET = list("ACGT")
def bijective_encode(i):
if i == 0: return ''
s = ''
base = len(ALPHABET)
q0 = int(i)
while q0 > 0:
q1 = int(math.ceil(float(q0)/base) - 1)
s += ALPHABET[( q0 - base * q1 ) - 1]
q0 = q1
return s[::-1] # reverse string
def totalItems(maxNested):
sum = 0
while maxNested > 0:
sum += math.pow(4, maxNested)
maxNested -= 1
return int(sum + 1)
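# Sanity check (illustrative, not in the original): totalItems(n) is the
# geometric sum 4 + 4**2 + ... + 4**n plus 1, i.e. (4**(n+1) - 4)/3 + 1; for
# LEVELS_TO_DUMP = 5 that is 1365, so indices 1..1364 are dumped below.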
fo = open(OUT_FILE_NAME, 'wb')
totalItemsToProcess = totalItems(LEVELS_TO_DUMP)
print "Saving frequencies..."
for x in range(1, totalItemsToProcess):
sequence = bijective_encode(x)
score = getScore(sequence)
# print sequence, ' - ', score
fo.write(struct.pack('<i', score))
fo.close()
print "Saved " + str(totalItemsToProcess) + " records to " + OUT_FILE_NAME
|
anvaka/actg
|
utils/dumpValueFile.py
|
Python
|
mit
| 5,133
|
[
"VisIt"
] |
b1656e41fbb2a200c5822cafe9ba3da89fef14ea1cdef3990918fa4c2726dc99
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def link_incompatible_error():
print("Reading in original prostate data.")
prostate = h2o.import_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
print("Throw error when trying to create model with incompatible logit link.")
try:
h2o.model = h2o.glm(x=prostate[1:8], y=prostate[8], family="gaussian", link="logit")
assert False, "expected an error"
except EnvironmentError:
assert True
try:
h2o.model = h2o.glm(x=prostate[1:8], y=prostate[8], family="tweedie", link="log")
assert False, "expected an error"
except EnvironmentError:
assert True
try:
h2o.model = h2o.glm(x=prostate[2:9], y=prostate[1], family="binomial", link="inverse")
assert False, "expected an error"
except EnvironmentError:
assert True
if __name__ == "__main__":
tests.run_test(sys.argv, link_incompatible_error)
|
brightchen/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_link_incompatible_errorGLM.py
|
Python
|
apache-2.0
| 987
|
[
"Gaussian"
] |
7e421f8c4b0a5f5402543f3fb708fcdbb2a7983a33d5cade9a43e43e7a1a6d9b
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010-2016 (ita)
"""
Classes and functions required for waf commands
"""
import os, re, imp, sys
from waflib import Utils, Errors, Logs
import waflib.Node
# the following 3 constants are updated on each new release (do not touch)
HEXVERSION=0x1081300
"""Constant updated on new releases"""
WAFVERSION="1.8.19"
"""Constant updated on new releases"""
WAFREVISION="f14a6d43092d3419d90c1ce16b9d3c700309d7b3"
"""Git revision when the waf version is updated"""
ABI = 98
"""Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)"""
DBFILE = '.wafpickle-%s-%d-%d' % (sys.platform, sys.hexversion, ABI)
"""Name of the pickle file for storing the build data"""
APPNAME = 'APPNAME'
"""Default application name (used by ``waf dist``)"""
VERSION = 'VERSION'
"""Default application version (used by ``waf dist``)"""
TOP = 'top'
"""The variable name for the top-level directory in wscript files"""
OUT = 'out'
"""The variable name for the output directory in wscript files"""
WSCRIPT_FILE = 'wscript'
"""Name of the waf script files"""
launch_dir = ''
"""Directory from which waf has been called"""
run_dir = ''
"""Location of the wscript file to use as the entry point"""
top_dir = ''
"""Location of the project directory (top), if the project was configured"""
out_dir = ''
"""Location of the build directory (out), if the project was configured"""
waf_dir = ''
"""Directory containing the waf modules"""
local_repo = ''
"""Local repository containing additional Waf tools (plugins)"""
remote_repo = 'https://raw.githubusercontent.com/waf-project/waf/master/'
"""
Remote directory containing downloadable waf tools. The missing tools can be downloaded by using::
$ waf configure --download
"""
remote_locs = ['waflib/extras', 'waflib/Tools']
"""
Remote directories for use with :py:const:`waflib.Context.remote_repo`
"""
g_module = None
"""
Module representing the main wscript file (see :py:const:`waflib.Context.run_dir`)
"""
STDOUT = 1
STDERR = -1
BOTH = 0
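# output selection flags for cmd_and_log() (return stdout, stderr, or both)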
classes = []
"""
List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes
are added automatically by a metaclass.
"""
def create_context(cmd_name, *k, **kw):
"""
Create a new :py:class:`waflib.Context.Context` instance corresponding to the given command.
Used in particular by :py:func:`waflib.Scripting.run_command`
:param cmd_name: command
:type cmd_name: string
:param k: arguments to give to the context class initializer
:type k: list
:param kw: keyword arguments to give to the context class initializer
:type kw: dict
"""
global classes
for x in classes:
if x.cmd == cmd_name:
return x(*k, **kw)
ctx = Context(*k, **kw)
ctx.fun = cmd_name
return ctx
class store_context(type):
"""
Metaclass for storing the command classes into the list :py:const:`waflib.Context.classes`
Context classes must provide an attribute 'cmd' representing the command to execute
"""
def __init__(cls, name, bases, dict):
super(store_context, cls).__init__(name, bases, dict)
name = cls.__name__
if name == 'ctx' or name == 'Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)' % name)
if not getattr(cls, 'fun', None):
cls.fun = cls.cmd
global classes
classes.insert(0, cls)
ctx = store_context('ctx', (object,), {})
"""Base class for the :py:class:`waflib.Context.Context` classes"""
class Context(ctx):
"""
Default context for waf commands, and base class for new command contexts.
Context objects are passed to top-level functions::
def foo(ctx):
print(ctx.__class__.__name__) # waflib.Context.Context
Subclasses must define the attribute 'cmd':
:param cmd: command to execute as in ``waf cmd``
:type cmd: string
:param fun: function name to execute when the command is called
:type fun: string
.. inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext
"""
errors = Errors
"""
Shortcut to :py:mod:`waflib.Errors` provided for convenience
"""
tools = {}
"""
A cache for modules (wscript files) read by :py:meth:`Context.Context.load`
"""
def __init__(self, **kw):
try:
rd = kw['run_dir']
except KeyError:
global run_dir
rd = run_dir
# binds the context to the nodes in use to avoid a context singleton
self.node_class = type("Nod3", (waflib.Node.Node,), {})
self.node_class.__module__ = "waflib.Node"
self.node_class.ctx = self
self.root = self.node_class('', None)
self.cur_script = None
self.path = self.root.find_dir(rd)
self.stack_path = []
self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self}
self.logger = None
def __hash__(self):
"""
Return a hash value for storing context objects in dicts or sets. The value is not persistent.
:return: hash value
:rtype: int
"""
return id(self)
def finalize(self):
"""
Use to free resources such as open files potentially held by the logger
"""
try:
logger = self.logger
except AttributeError:
pass
else:
Logs.free_logger(logger)
delattr(self, 'logger')
def load(self, tool_list, *k, **kw):
"""
Load a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` from it.
A ``tooldir`` value may be provided as a list of module paths.
:type tool_list: list of string or space-separated string
:param tool_list: list of Waf tools to use
"""
tools = Utils.to_list(tool_list)
path = Utils.to_list(kw.get('tooldir', ''))
with_sys_path = kw.get('with_sys_path', True)
for t in tools:
module = load_tool(t, path, with_sys_path=with_sys_path)
fun = getattr(module, kw.get('name', self.fun), None)
if fun:
fun(self)
def execute(self):
"""
Execute the command. Redefine this method in subclasses.
"""
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self, node):
"""
Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. The node given is set
as an attribute ``self.cur_script``, and as the current path ``self.path``
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.stack_path.append(self.cur_script)
self.cur_script = node
self.path = node.parent
def post_recurse(self, node):
"""
Restore ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates.
:param node: script
:type node: :py:class:`waflib.Node.Node`
"""
self.cur_script = self.stack_path.pop()
if self.cur_script:
self.path = self.cur_script.parent
def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None):
"""
Run user code from the supplied list of directories.
The directories can be either absolute, or relative to the directory
of the wscript file. The methods :py:meth:`waflib.Context.Context.pre_recurse` and :py:meth:`waflib.Context.Context.post_recurse`
are called immediately before and after a script has been executed.
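Example (a usage sketch following the wscript conventions above)::
def build(bld):
bld.recurse('src')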
:param dirs: List of directories to visit
:type dirs: list of string or space-separated string
:param name: Name of function to invoke from the wscript
:type name: string
:param mandatory: whether sub wscript files are required to exist
:type mandatory: bool
:param once: read the script file once for a particular context
:type once: bool
"""
try:
cache = self.recurse_cache
except AttributeError:
cache = self.recurse_cache = {}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
# make relative paths absolute
d = os.path.join(self.path.abspath(), d)
WSCRIPT = os.path.join(d, WSCRIPT_FILE)
WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun)
node = self.root.find_node(WSCRIPT_FUN)
if node and (not once or node not in cache):
cache[node] = True
self.pre_recurse(node)
try:
function_code = node.read('rU', encoding)
exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node = self.root.find_node(WSCRIPT)
tup = (node, name or self.fun)
if node and (not once or tup not in cache):
cache[tup] = True
self.pre_recurse(node)
try:
wscript_module = load_module(node.abspath(), encoding=encoding)
user_function = getattr(wscript_module, (name or self.fun), None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s' % (name or self.fun, node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
try:
os.listdir(d)
except OSError:
raise Errors.WafError('Cannot read the folder %r' % d)
raise Errors.WafError('No wscript file in directory %s' % d)
def exec_command(self, cmd, **kw):
"""
Execute a command and return the exit status. If the context has the attribute 'log',
capture and log the process stderr/stdout for logging purposes::
def run(tsk):
ret = tsk.generator.bld.exec_command('touch foo.txt')
return ret
This method captures the standard/error outputs (Issue 1101), but it does not return the values
unlike :py:meth:`waflib.Context.Context.cmd_and_log`
:param cmd: command argument for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate.
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % (cmd,))
Logs.debug('runner_env: kw=%s' % kw)
if self.logger:
self.logger.info(cmd)
if 'stdout' not in kw:
kw['stdout'] = subprocess.PIPE
if 'stderr' not in kw:
kw['stderr'] = subprocess.PIPE
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError("Program %s not found!" % cmd[0])
wargs = {}
if 'timeout' in kw:
if kw['timeout'] is not None:
wargs['timeout'] = kw['timeout']
del kw['timeout']
if 'input' in kw:
if kw['input']:
wargs['input'] = kw['input']
kw['stdin'] = Utils.subprocess.PIPE
del kw['input']
try:
if kw['stdout'] or kw['stderr']:
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate(**wargs)
ret = p.returncode
else:
out, err = (None, None)
ret = subprocess.Popen(cmd, **kw).wait(**wargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if out:
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding or 'iso8859-1')
if self.logger:
self.logger.debug('out: %s' % out)
else:
Logs.info(out, extra={'stream':sys.stdout, 'c1': ''})
if err:
if not isinstance(err, str):
err = err.decode(sys.stdout.encoding or 'iso8859-1')
if self.logger:
self.logger.error('err: %s' % err)
else:
Logs.info(err, extra={'stream':sys.stderr, 'c1': ''})
return ret
def cmd_and_log(self, cmd, **kw):
"""
Execute a command and return stdout/stderr if the execution is successful.
An exception is thrown when the exit status is non-0. In that case, both stderr and stdout
will be bound to the WafError object::
def configure(conf):
out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH)
(out, err) = conf.cmd_and_log(cmd, input='\\n'.encode(), output=waflib.Context.STDOUT)
try:
conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH)
except Exception as e:
print(e.stdout, e.stderr)
:param cmd: args for subprocess.Popen
:param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate.
"""
subprocess = Utils.subprocess
kw['shell'] = isinstance(cmd, str)
Logs.debug('runner: %r' % (cmd,))
if 'quiet' in kw:
quiet = kw['quiet']
del kw['quiet']
else:
quiet = None
if 'output' in kw:
to_ret = kw['output']
del kw['output']
else:
to_ret = STDOUT
if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]):
raise Errors.WafError("Program %s not found!" % cmd[0])
kw['stdout'] = kw['stderr'] = subprocess.PIPE
if quiet is None:
self.to_log(cmd)
wargs = {}
if 'timeout' in kw:
if kw['timeout'] is not None:
wargs['timeout'] = kw['timeout']
del kw['timeout']
if 'input' in kw:
if kw['input']:
wargs['input'] = kw['input']
kw['stdin'] = Utils.subprocess.PIPE
del kw['input']
try:
p = subprocess.Popen(cmd, **kw)
(out, err) = p.communicate(**wargs)
except Exception as e:
raise Errors.WafError('Execution failure: %s' % str(e), ex=e)
if not isinstance(out, str):
out = out.decode(sys.stdout.encoding or 'iso8859-1')
if not isinstance(err, str):
err = err.decode(sys.stdout.encoding or 'iso8859-1')
if out and quiet != STDOUT and quiet != BOTH:
self.to_log('out: %s' % out)
if err and quiet != STDERR and quiet != BOTH:
self.to_log('err: %s' % err)
if p.returncode:
e = Errors.WafError('Command %r returned %r' % (cmd, p.returncode))
e.returncode = p.returncode
e.stderr = err
e.stdout = out
raise e
if to_ret == BOTH:
return (out, err)
elif to_ret == STDERR:
return err
return out
def fatal(self, msg, ex=None):
"""
Raise a configuration error to interrupt the execution immediately::
def configure(conf):
conf.fatal('a requirement is missing')
:param msg: message to display
:type msg: string
:param ex: optional exception object
:type ex: exception
"""
if self.logger:
self.logger.info('from %s: %s' % (self.path.abspath(), msg))
try:
msg = '%s\n(complete log in %s)' % (msg, self.logger.handlers[0].baseFilename)
except Exception:
pass
raise self.errors.ConfigurationError(msg, ex=ex)
def to_log(self, msg):
"""
Log some information to the logger (if present), or to stderr. If the message is empty,
it is not printed::
def build(bld):
bld.to_log('starting the build')
When in doubt, override this method, or provide a logger on the context class.
:param msg: message
:type msg: string
"""
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self, *k, **kw):
"""
Print a configuration message of the form ``msg: result``.
The second part of the message will be in colors. The output
can be disabled easily by setting ``in_msg`` to a positive value::
def configure(conf):
self.in_msg = 1
conf.msg('Checking for library foo', 'ok')
# no output
:param msg: message to display to the user
:type msg: string
:param result: result to display
:type result: string or boolean
:param color: color to use, see :py:const:`waflib.Logs.colors_lst`
:type color: string
"""
try:
msg = kw['msg']
except KeyError:
msg = k[0]
self.start_msg(msg, **kw)
try:
result = kw['result']
except KeyError:
result = k[1]
color = kw.get('color', None)
if not isinstance(color, str):
color = result and 'GREEN' or 'YELLOW'
self.end_msg(result, color, **kw)
def start_msg(self, *k, **kw):
"""
Print the beginning of a 'Checking for xxx' message. See :py:meth:`waflib.Context.Context.msg`
"""
if kw.get('quiet', None):
return
msg = kw.get('msg', None) or k[0]
try:
if self.in_msg:
self.in_msg += 1
return
except AttributeError:
self.in_msg = 0
self.in_msg += 1
try:
self.line_just = max(self.line_just, len(msg))
except AttributeError:
self.line_just = max(40, len(msg))
for x in (self.line_just * '-', msg):
self.to_log(x)
Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='')
def end_msg(self, *k, **kw):
"""Print the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`"""
if kw.get('quiet', None):
return
self.in_msg -= 1
if self.in_msg:
return
result = kw.get('result', None) or k[0]
defcolor = 'GREEN'
if result == True:
msg = 'ok'
elif result == False:
msg = 'not found'
defcolor = 'YELLOW'
else:
msg = str(result)
self.to_log(msg)
try:
color = kw['color']
except KeyError:
if len(k) > 1 and k[1] in Logs.colors_lst:
# compatibility waf 1.7
color = k[1]
else:
color = defcolor
Logs.pprint(color, msg)
def load_special_tools(self, var, ban=[]):
global waf_dir
if os.path.isdir(waf_dir):
lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py', ''))
else:
from zipfile import PyZipFile
waflibs = PyZipFile(waf_dir)
lst = waflibs.namelist()
for x in lst:
if not re.match("waflib/extras/%s" % var.replace("*", ".*"), x):
continue
f = os.path.basename(x)
doban = False
for b in ban:
r = b.replace("*", ".*")
if re.match(r, f):
doban = True
if not doban:
f = f.replace('.py', '')
load_tool(f)
cache_modules = {}
"""
Dictionary holding already loaded modules, keyed by their absolute path.
The modules are added automatically by :py:func:`waflib.Context.load_module`
"""
def load_module(path, encoding=None):
"""
Load a source file as a python module.
:param path: file path
:type path: string
:return: Loaded Python module
:rtype: module
"""
try:
return cache_modules[path]
except KeyError:
pass
module = imp.new_module(WSCRIPT_FILE)
try:
code = Utils.readf(path, m='rU', encoding=encoding)
except EnvironmentError:
raise Errors.WafError('Could not read the file %r' % path)
module_dir = os.path.dirname(path)
sys.path.insert(0, module_dir)
try : exec(compile(code, path, 'exec'), module.__dict__)
finally: sys.path.remove(module_dir)
cache_modules[path] = module
return module
def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True):
"""
Import a Waf tool (python module), and store it in the dict :py:const:`waflib.Context.Context.tools`
:type tool: string
:param tool: Name of the tool
:type tooldir: list
:param tooldir: List of directories to search for the tool module
:type with_sys_path: boolean
:param with_sys_path: whether or not to search the regular sys.path, besides waf_dir and potentially given tooldirs
"""
if tool == 'java':
tool = 'javaw' # jython
else:
tool = tool.replace('++', 'xx')
origSysPath = sys.path
if not with_sys_path: sys.path = []
try:
if tooldir:
assert isinstance(tooldir, list)
sys.path = tooldir + sys.path
try:
__import__(tool)
finally:
for d in tooldir:
sys.path.remove(d)
ret = sys.modules[tool]
Context.tools[tool] = ret
return ret
else:
if not with_sys_path: sys.path.insert(0, waf_dir)
try:
for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'):
try:
__import__(x % tool)
break
except ImportError:
x = None
if x is None: # raise an exception
__import__(tool)
finally:
if not with_sys_path: sys.path.remove(waf_dir)
ret = sys.modules[x % tool]
Context.tools[tool] = ret
return ret
finally:
if not with_sys_path: sys.path += origSysPath
|
mattaw/SoCFoundationFlow
|
admin/waf/waf-1.8.19/waflib/Context.py
|
Python
|
apache-2.0
| 19,546
|
[
"VisIt"
] |
e481c37d329591134350db55ffeeabe75b159bf53851d9afc33f40ada26d2ade
|
import sys
sys.path.append('..')
import numpy as np
from sklearn.svm import SVC
import sklearn.preprocessing as preprocessing
from sklearn.metrics import recall_score, accuracy_score, confusion_matrix, classification_report
import os
import sys
import utils.dataset_manupulation as dm
import utils.utils as utl
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
featureset = 'PNCC'
filetype = 'htk'
#path setup
root_dir = os.path.realpath('/media/fabio/DATA/Work/Snoring/Snore_dist')
targePath = os.path.join(root_dir, 'gmmUbmSvm','snoring_class')
listPath = os.path.join(root_dir, 'dataset')
featPath = os.path.join(root_dir, 'dataset', featureset)
ubmsPath = os.path.join(targePath, featureset, "ubms")
ivecPath = os.path.join(targePath, featureset, "ivectors")
scoresPath = os.path.join(targePath, featureset, "score_ivec")
snoreClassPath =os.path.join(targePath, featureset, "score_ivec","final_score.csv")#used for save best c-best gamma-best nmix so that extract_supervector_test.py and test.py can read it
sys.stdout = open(os.path.join(scoresPath,'gridsearch_ivec.txt'), 'w') #log to a file
print "experiment: "+targePath #to have the reference to experiments in text files
sys.stderr = open(os.path.join(scoresPath,'gridsearch_err_ivec.txt'), 'w') #log to a file
# variable initialization
vec_lengths = [250, 400]
C_range = 2.0 ** np.arange(-5, 15+2, 2) # libsvm range
gamma_range = 2.0 ** np.arange(-15, 3+2, 2) # libsvm range
mixtures = 2**np.arange(0, 7, 1)
mixtures = 2**np.arange(5, 6, 1)
nFolds = 1
scores = np.zeros((mixtures.shape[0], len(vec_lengths)))
cBestValues = np.zeros((mixtures.shape[0], len(vec_lengths)))
gBestValues = np.zeros((mixtures.shape[0], len(vec_lengths)))
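# each (mixture, i-vector length) cell will hold the best dev-set UAR and the C/gamma pair that achieved it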
#LOAD DATASET
snoring_dataset = dm.load_ComParE2017(featPath, filetype) # load dataset
trainset, develset, testset = dm.split_ComParE2017_simple(snoring_dataset) # build the training set used to compute the mean and variance needed for normalization
labels = dm.label_loading(os.path.join(root_dir,'lab','ComParE2017_Snore.tsv'))
trainset_l, develset_l, _ = dm.split_ComParE2017_simple(labels)
del snoring_dataset
y = []
for seq in trainset:
y.append(seq[0])
yd = []
for seq in develset:
yd.append(seq[0])
y_train, y_train_lab = dm.label_organize(trainset_l, y)
y_devel, y_devel_lab = dm.label_organize(develset_l, yd)
##EXTEND TRAINSET
#y_train_lab = np.append(y_train_lab,y_devel_lab[:140])
#y_devel_lab = y_devel_lab[140:]
def compute_score(predictions, labels):
print("compute_score")
y_pred = []
for d in predictions:
y_pred.append(int(d))
y_true = []
for n in labels:
y_true.append(int(n))
A = accuracy_score(y_true, y_pred)
UAR = recall_score(y_true, y_pred, average='macro')
CM = confusion_matrix(y_true, y_pred)
print("Accuracy: " + str(A))
print("UAR: " + str(UAR))
cm = CM.astype(int)
print("FINAL REPORT")
print("\t V\t O\t T\t E")
print("V \t" + str(cm[0, 0]) + "\t" + str(cm[0, 1]) + "\t" + str(cm[0, 2]) + "\t" + str(cm[0, 3]))
print("O \t" + str(cm[1, 0]) + "\t" + str(cm[1, 1]) + "\t" + str(cm[1, 2]) + "\t" + str(cm[1, 3]))
print("T \t" + str(cm[2, 0]) + "\t" + str(cm[2, 1]) + "\t" + str(cm[2, 2]) + "\t" + str(cm[2, 3]))
print("E \t" + str(cm[3, 0]) + "\t" + str(cm[3, 1]) + "\t" + str(cm[3, 2]) + "\t" + str(cm[3, 3]))
print(classification_report(y_true, y_pred, target_names=['V', 'O', 'T', 'E']))
return A, UAR, CM, y_pred
mIdx = 0
for m in mixtures:
print("Mixture: " + str(m))
sys.stdout.flush()
curIvecPath = os.path.join(ivecPath, str(m))
ividx = 0
for ivl in vec_lengths:
cGammaScores = np.zeros((C_range.shape[0], gamma_range.shape[0])) # initialize the score matrix
print("I-Vector Length: " + str(ivl))
sys.stdout.flush()
curIvecSubPath = os.path.join(curIvecPath, str(ivl))
trainFeatures = utl.readIvecFeatures(curIvecSubPath, y) #TODO READ SUPERVEC
trainClassLabels = y_train_lab
devFeatures = utl.readIvecFeatures(curIvecSubPath, yd)
devClassLabels = y_devel_lab
##EXTEND TRAINSET
#trainFeatures = np.vstack((trainFeatures,devFeatures[:140,:]))
#devFeatures = devFeatures[140:]
cIdx = 0
for C in C_range:
gIdx = 0
for gamma in gamma_range:
scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
scaler.fit(trainFeatures);
svm = SVC(C=C, kernel='rbf', gamma=gamma, class_weight='auto')
svm.fit(scaler.transform(trainFeatures), trainClassLabels) # normalization and fitting
predLabels = svm.predict(scaler.transform(devFeatures))
A, UAR, ConfMatrix, class_pred = compute_score(predLabels, y_devel_lab)
cGammaScores[cIdx,gIdx] += UAR
gIdx += 1;
cIdx += 1;
idxs = np.unravel_index(cGammaScores.argmax(), cGammaScores.shape) # find the index within cGammaScores that holds the maximum value
cBestValues[mIdx, ividx] = C_range[idxs[0]] # for each fold (trainset+devset_(1)), save the C value that gives the best score (this is also done for every mixture value)
gBestValues[mIdx, ividx] = gamma_range[idxs[1]] # for each fold (trainset+devset_(1)), save the GAMMA value that gives the best score (this is also done for every mixture value)
scores[mIdx, ividx] = cGammaScores.max()
ividx += 1
mIdx += 1
sys.stdout = open(snoreClassPath, 'w')
print "Featureset = " + featureset
print("**** Results ****")
print "NOTES; N-GAUSS;i-Vector len; UAR"
mIdx = 0
for m in mixtures:
scoresMix = scores[mIdx, :]
ividx = 0
for ivl in vec_lengths:
print(";" + str(m) + ";" + str(ivl) + ";" + str(scores[mIdx, ividx]))
idx_max_score = scoresMix.argmax()
print "best vale of c for " + str(vec_lengths[idx_max_score]) +" I-Vector Length and "+ str(mixtures[mIdx]) +" gaussian : "+ str(cBestValues[mIdx,idx_max_score])
print "best vale of g for " + str(vec_lengths[idx_max_score]) +" I-Vector Length and "+ str(mixtures[mIdx]) +" gaussian : "+ str(gBestValues[mIdx, idx_max_score])
ividx += 1
mIdx += 1
|
vespero89/Snoring_Challenge
|
Supervectors/gridsearch_ivec.py
|
Python
|
gpl-3.0
| 6,323
|
[
"Gaussian"
] |
b847ee35b7624913ac008e1d9f4a6e914adb5b234f5424fb564759894bda495d
|
#!/usr/bin/env python
"""Post weather update to WeatherUnderground
::
%s
.. Warning::
This module has been superseded by the :doc:`pywws.toservice`
module. It will be deleted from pywws in the next release.
Introduction
------------
`Weather Underground <http://www.wunderground.com/>`_ is a USA based
web site that gathers weather data from stations around the world.
This module enables pywws to upload readings to Weather Underground.
Configuration
-------------
If you haven't already done so, visit the Weather Underground web site
and create a member account for yourself. Then go to the `'Personal
Weather Stations' page
<http://www.wunderground.com/wxstation/signup.html>`_ and follow the
'new weather station' link. Fill in all the required details, then
click on 'submit'.
Copy your 'station ID' and password to a new ``[underground]`` section
in your ``weather.ini`` configuration file::
[underground]
password = secret
station = ABCDEFG1A
Remember to stop all pywws software before editing ``weather.ini``.
Test your configuration by running ``ToUnderground.py`` (replace
``data_dir`` with your weather data directory)::
python pywws/ToUnderground.py -vvv data_dir
This should show you the data string that is uploaded and then a
'success' message.
Upload old data
---------------
Now you can upload your last 7 days' data. Edit your ``weather.ini``
file and remove the ``last update`` line from the ``[underground]``
section, then run ``ToUnderground.py`` with the catchup option::
python pywws/ToUnderground.py -c -v data_dir
This may take 20 minutes or more, depending on how much data you have.
Add Weather Underground upload to regular tasks
-----------------------------------------------
Edit your ``weather.ini`` again, and add ``underground = True`` to the
``[live]``, ``[logged]``, ``[hourly]``, ``[12 hourly]`` or ``[daily]``
section, depending on how often you want to send data. For example::
[live]
underground = True
twitter = []
plot = []
text = []
If you set ``underground = True`` in the ``live`` section, pywws will
use Weather Underground's 'Rapid Fire' mode to send a reading every 48
seconds.
Restart your regular pywws program (``Hourly.py`` or ``LiveLog.py``)
and visit the Weather Underground web site to see regular updates from
your weather station.
"""
__docformat__ = "restructuredtext en"
__usage__ = """
usage: python ToUnderground.py [options] data_dir
options are:
-h or --help display this help
-c or --catchup upload all data since last upload (up to 4 weeks)
-v or --verbose increase amount of reassuring messages
data_dir is the root directory of the weather data
"""
__doc__ %= __usage__
__usage__ = __doc__.split('\n')[0] + __usage__
import getopt
import sys
from datetime import datetime, timedelta
import DataStore
from Logger import ApplicationLogger
import toservice
class ToUnderground(toservice.ToService):
"""Upload weather data to Weather Underground.
"""
def __init__(self, params, calib_data):
"""
:param params: pywws configuration.
:type params: :class:`pywws.DataStore.params`
:param calib_data: 'calibrated' data.
:type calib_data: :class:`pywws.DataStore.calib_store`
"""
toservice.ToService.__init__(
self, params, calib_data, service_name='underground')
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "hcv", ['help', 'catchup', 'verbose'])
except getopt.error, msg:
print >>sys.stderr, 'Error: %s\n' % msg
print >>sys.stderr, __doc__.strip()
return 1
# process options
catchup = False
verbose = 0
for o, a in opts:
if o == '-h' or o == '--help':
print __doc__.strip()
return 0
elif o == '-c' or o == '--catchup':
catchup = True
elif o == '-v' or o == '--verbose':
verbose += 1
# check arguments
if len(args) != 1:
print >>sys.stderr, "Error: 1 argument required"
print >>sys.stderr, __doc__.strip()
return 2
logger = ApplicationLogger(verbose)
return ToUnderground(
DataStore.params(args[0]), DataStore.calib_store(args[0])
).Upload(catchup)
if __name__ == "__main__":
sys.exit(main())
|
edk0/pywws
|
pywws/ToUnderground.py
|
Python
|
gpl-2.0
| 4,393
|
[
"VisIt"
] |
b0e294222dc4d9cefabc003e2024c9b0c1613071ba6816510faa92c34fc5ced1
|
List_of_plants = []
class ambrosia():
name = 'ambrosia'
plantname = 'Ambrosia Vulgaris'
Description = "These seeds grow into common ambrosia, a plant grown by and from medicine."
icon_state = 'seed-ambrosiavulgaris'
lifespan = 60
endurance = 25
production = 6
plant_yield = 6
potency = 5
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'ambrosia-dead'
genes = ["Perennial_Growth"]
mutates_into = ["ambrosia_deus"]
reagents_add = {'space_drugs':0.15, 'bicaridine':0.1, 'kelotane':0.1, 'vitamin':0.04, 'nutriment':0.05, 'toxin':0.1}
species = 'ambrosiavulgaris'
List_of_plants.append(ambrosia)
class ambrosia_deus():
name = 'ambrosia_deus'
plantname = 'Ambrosia Deus'
Description = "These seeds grow into ambrosia deus. Could it be the food of the gods..?"
icon_state = 'seed-ambrosiadeus'
lifespan = 60
endurance = 25
production = 6
plant_yield = 6
potency = 5
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'ambrosia-dead'
genes = ["Perennial_Growth"]
mutates_into = ["ambrosia_gaia"]
reagents_add = {'omnizine':0.15, 'synaptizine':0.15, 'space_drugs':0.1, 'vitamin':0.04, 'nutriment':0.05}
species = 'ambrosiadeus'
List_of_plants.append(ambrosia_deus)
class ambrosia_gaia():
name = 'ambrosia_gaia'
plantname = 'Ambrosia Gaia'
Description = "These seeds grow into ambrosia gaia, filled with infinite potential."
icon_state = 'seed-ambrosia_gaia'
lifespan = 60
endurance = 25
production = 6
plant_yield = 6
potency = 5
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'ambrosia-dead'
genes = []
mutates_into = ["ambrosia_deus"]
reagents_add = {'earthsblood':0.05, 'nutriment':0.06, 'vitamin':0.05}
species = 'ambrosia_gaia'
List_of_plants.append(ambrosia_gaia)
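# Usage sketch (illustrative, not part of the original data file): resolve a
# plant's mutation chain by name via the registry built up in this module, e.g.
#   plants_by_name = {p.name: p for p in List_of_plants}
#   plants_by_name['ambrosia'].mutates_into  # -> ['ambrosia_deus']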
class apple():
name = 'apple'
plantname = 'Apple Tree'
Description = "These seeds grow into apple trees."
icon_state = 'seed-apple'
lifespan = 55
endurance = 35
production = 6
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'apple-grow'
dead_Sprite = 'apple-dead'
genes = ["Perennial_Growth"]
mutates_into = ["apple_gold"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'apple'
List_of_plants.append(apple)
class apple_poisoned():
name = 'apple_poisoned'
plantname = 'Apple Tree'
Description = "These seeds grow into apple trees."
icon_state = 'seed-apple'
lifespan = 55
endurance = 35
production = 6
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'apple-grow'
dead_Sprite = 'apple-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'zombiepowder':0.5, 'vitamin':0.04, 'nutriment':0.1}
List_of_plants.append(apple_poisoned)
class apple_gold():
name = 'apple_gold'
plantname = 'Golden Apple Tree'
Description = "These seeds grow into golden apple trees. Good thing there are no firebirds in space."
icon_state = 'seed-goldapple'
lifespan = 55
endurance = 35
production = 10
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'apple-grow'
dead_Sprite = 'apple-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'gold':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'goldapple'
List_of_plants.append(apple_gold)
class banana():
name = 'banana'
plantname = 'Banana Tree'
Description = "They're seeds that grow into banana trees. When grown, keep away from clown."
icon_state = 'seed-banana'
lifespan = 50
endurance = 30
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
dead_Sprite = 'banana-dead'
genes = ["Slippery Skin", "Perennial_Growth"]
mutates_into = ["banana_mime","banana_bluespace"]
reagents_add = {'banana':0.1, 'potassium':0.1, 'vitamin':0.04, 'nutriment':0.02}
species = 'banana'
List_of_plants.append(banana)
class banana_mime():
name = 'banana_mime'
plantname = 'Mimana Tree'
Description = "They're seeds that grow into mimana trees. When grown, keep away from mime."
icon_state = 'seed-mimana'
lifespan = 50
endurance = 30
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
dead_Sprite = 'banana-dead'
genes = ["Slippery Skin", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'nothing':0.1, 'mutetoxin':0.1, 'nutriment':0.02}
species = 'mimana'
List_of_plants.append(banana_mime)
class banana_bluespace():
name = 'banana_bluespace'
plantname = 'Bluespace Banana Tree'
Description = "They're seeds that grow into bluespace banana trees. When grown, keep away from bluespace clown."
icon_state = 'seed-banana-blue'
lifespan = 50
endurance = 30
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'banana-grow'
dead_Sprite = 'banana-dead'
genes = ["Slippery Skin", "Bluespace Activity", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'bluespace':0.2, 'banana':0.1, 'vitamin':0.04, 'nutriment':0.02}
species = 'bluespacebanana'
List_of_plants.append(banana_bluespace)
class soya():
name = 'soya'
plantname = 'Soybean Plants'
Description = "These seeds grow into soybean plants."
icon_state = 'seed-soybean'
lifespan = 25
endurance = 15
production = 4
plant_yield = 3
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'soybean-grow'
dead_Sprite = 'soybean-dead'
genes = ["Perennial_Growth"]
mutates_into = ["soya_koi"]
reagents_add = {'vitamin':0.04, 'nutriment':0.05}
species = 'soybean'
List_of_plants.append(soya)
class soya_koi():
name = 'soya_koi'
plantname = 'Koibean Plants'
Description = "These seeds grow into koibean plants."
icon_state = 'seed-koibean'
lifespan = 25
endurance = 15
production = 4
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'soybean-grow'
dead_Sprite = 'soybean-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'carpotoxin':0.1, 'vitamin':0.04, 'nutriment':0.05}
species = 'koibean'
List_of_plants.append(soya_koi)
class berry():
name = 'berry'
plantname = 'Berry Bush'
Description = "These seeds grow into berry bushes."
icon_state = 'seed-berry'
lifespan = 20
endurance = 15
production = 5
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'berry-grow'
dead_Sprite = 'berry-dead'
genes = ["Perennial_Growth"]
mutates_into = ["berry_glow","berry_poison"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'berry'
List_of_plants.append(berry)
class berry_poison():
name = 'berry_poison'
plantname = 'Poison-Berry Bush'
Description = "These seeds grow into poison-berry bushes."
icon_state = 'seed-poisonberry'
lifespan = 20
endurance = 15
production = 5
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'berry-grow'
dead_Sprite = 'berry-dead'
genes = ["Perennial_Growth"]
mutates_into = ["berry_death"]
reagents_add = {'cyanide':0.15, 'tirizene':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'poisonberry'
List_of_plants.append(berry_poison)
class berry_death():
name = 'berry_death'
plantname = 'Death Berry Bush'
Description = "These seeds grow into death berries."
icon_state = 'seed-deathberry'
lifespan = 30
endurance = 15
production = 5
plant_yield = 2
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'berry-grow'
dead_Sprite = 'berry-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'coniine':0.08, 'tirizene':0.1, 'vitamin':0.04, 'nutriment':0.1}
species = 'deathberry'
List_of_plants.append(berry_death)
class berry_glow():
name = 'berry_glow'
plantname = 'Glow-Berry Bush'
Description = "These seeds grow into glow-berry bushes."
icon_state = 'seed-glowberry'
lifespan = 30
endurance = 25
production = 5
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'berry-grow'
dead_Sprite = 'berry-dead'
genes = ["Strong Bioluminescence" , "Separated Chemicals", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'uranium':0.25, 'iodine':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'glowberry'
List_of_plants.append(berry_glow)
class cherry():
name = 'cherry'
plantname = 'Cherry Tree'
Description = "Careful not to crack a tooth on one... That'd be the pits."
icon_state = 'seed-cherry'
lifespan = 35
endurance = 35
production = 5
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'cherry-grow'
dead_Sprite = 'cherry-dead'
genes = ["Perennial_Growth"]
mutates_into = ["cherry_blue"]
reagents_add = {'nutriment':0.07, 'sugar':0.07}
species = 'cherry'
List_of_plants.append(cherry)
class cherry_blue():
name = 'cherry_blue'
plantname = 'Blue Cherry Tree'
Description = "The blue kind of cherries."
icon_state = 'seed-bluecherry'
lifespan = 35
endurance = 35
production = 5
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'cherry-grow'
dead_Sprite = 'cherry-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'nutriment':0.07, 'sugar':0.07}
species = 'bluecherry'
List_of_plants.append(cherry_blue)
class grape():
name = 'grape'
plantname = 'Grape Vine'
Description = "These seeds grow into grape vines."
icon_state = 'seed-grapes'
lifespan = 50
endurance = 25
production = 5
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'grape-grow'
dead_Sprite = 'grape-dead'
genes = ["Perennial_Growth"]
mutates_into = ["grape_green"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1, 'sugar':0.1}
species = 'grape'
List_of_plants.append(grape)
class grape_green():
name = 'grape_green'
plantname = 'Green-Grape Vine'
Description = "These seeds grow into green-grape vines."
icon_state = 'seed-greengrapes'
lifespan = 50
endurance = 25
production = 5
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'grape-grow'
dead_Sprite = 'grape-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'kelotane':0.2, 'vitamin':0.04, 'nutriment':0.1, 'sugar':0.1}
species = 'greengrape'
List_of_plants.append(grape_green)
class cannabis():
name = 'cannabis'
plantname = 'Cannabis Plant'
Description = "Taxable."
icon_state = 'seed-cannabis'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'goon/icons/obj/hydroponics'
Grown_Sprite = 'cannabis-grow'
dead_Sprite = 'cannabis-dead'
genes = ["Perennial_Growth"]
mutates_into = ["cannabis_rainbow","cannabis_death","cannabis_white","cannabis_ultimate"]
reagents_add = {'space_drugs':0.15, 'lipolicide':0.35}
species = 'cannabis'
List_of_plants.append(cannabis)
class cannabis_rainbow():
name = 'cannabis_rainbow'
plantname = 'Rainbow Weed'
Description = "These seeds grow into rainbow weed. Groovy."
icon_state = 'seed-megacannabis'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'goon/icons/obj/hydroponics'
Grown_Sprite = 'cannabis-grow'
dead_Sprite = 'cannabis-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'mindbreaker':0.15, 'lipolicide':0.35}
species = 'megacannabis'
List_of_plants.append(cannabis_rainbow)
class cannabis_death():
name = 'cannabis_death'
plantname = 'Deathweed'
Description = "These seeds grow into deathweed. Not groovy."
icon_state = 'seed-blackcannabis'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'goon/icons/obj/hydroponics'
Grown_Sprite = 'cannabis-grow'
dead_Sprite = 'cannabis-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'cyanide':0.35, 'space_drugs':0.15, 'lipolicide':0.15}
species = 'blackcannabis'
List_of_plants.append(cannabis_death)
class cannabis_white():
name = 'cannabis_white'
plantname = 'Lifeweed'
Description = "I will give unto him that is munchies of the fountain of the cravings of life, freely."
icon_state = 'seed-whitecannabis'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'goon/icons/obj/hydroponics'
Grown_Sprite = 'cannabis-grow'
dead_Sprite = 'cannabis-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'omnizine':0.35, 'space_drugs':0.15, 'lipolicide':0.15}
species = 'whitecannabis'
List_of_plants.append(cannabis_white)
class cannabis_ultimate():
name = 'cannabis_ultimate'
plantname = 'Omega Weed'
Description = "These seeds grow into omega weed."
icon_state = 'seed-ocannabis'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'goon/icons/obj/hydroponics'
Grown_Sprite = 'cannabis-grow'
dead_Sprite = 'cannabis-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'space_drugs':0.3, 'mindbreaker':0.3, 'mercury':0.15,
                'lithium':0.15, 'atropine':0.15, 'haloperidol':0.15,
                'methamphetamine':0.15, 'capsaicin':0.15, 'barbers_aid':0.15,
                'bath_salts':0.15, 'itching_powder':0.15, 'crank':0.15,
                'krokodil':0.15, 'histamine':0.15, 'lipolicide':0.15}
species = 'ocannabis'
List_of_plants.append(cannabis_ultimate)
class wheat():
name = 'wheat'
plantname = 'Wheat Stalks'
Description = "These may, or may not, grow into wheat."
icon_state = 'seed-wheat'
lifespan = 25
endurance = 15
production = 1
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'wheat-dead'
mutates_into = ["wheat_oat","wheat_meat"]
reagents_add = {'nutriment':0.04}
species = 'wheat'
List_of_plants.append(wheat)
class wheat_oat():
name = 'wheat_oat'
plantname = 'Oat Stalks'
Description = "These may, or may not, grow into oat."
icon_state = 'seed-oat'
lifespan = 25
endurance = 15
production = 1
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'wheat-dead'
mutates_into = [""]
reagents_add = {'nutriment':0.04}
species = 'oat'
List_of_plants.append(wheat_oat)
class wheat_rice():
name = 'wheat_rice'
plantname = 'Rice Stalks'
Description = "These may, or may not, grow into rice."
icon_state = 'seed-rice'
lifespan = 25
endurance = 15
production = 1
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'wheat-dead'
mutates_into = [""]
reagents_add = {'nutriment':0.04}
species = 'rice'
List_of_plants.append(wheat_rice)
class wheat_meat():
name = 'wheat_meat'
plantname = 'Meatwheat'
Description = "If you ever wanted to drive a vegetarian to insanity, here's how."
icon_state = 'seed-meatwheat'
lifespan = 25
endurance = 15
production = 1
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'wheat-dead'
mutates_into = [""]
reagents_add = {'nutriment':0.04}
species = 'meatwheat'
List_of_plants.append(wheat_meat)
class chili():
name = 'chili'
plantname = 'Chili Plants'
Description = "These seeds grow into chili plants. HOT! HOT! HOT!"
icon_state = 'seed-chili'
lifespan = 20
endurance = 15
production = 5
plant_yield = 4
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'chili-grow'
dead_Sprite = 'chili-dead'
genes = ["Perennial_Growth"]
mutates_into = ["chili_ice","chili_ghost"]
reagents_add = {'capsaicin':0.25, 'vitamin':0.04, 'nutriment':0.04}
species = 'chili'
List_of_plants.append(chili)
class chili_ice():
name = 'chili_ice'
plantname = 'Ice Pepper Plants'
Description = "These seeds grow into ice pepper plants."
icon_state = 'seed-icepepper'
lifespan = 25
endurance = 15
production = 4
plant_yield = 4
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'chili-grow'
dead_Sprite = 'chili-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'frostoil':0.25, 'vitamin':0.02, 'nutriment':0.02}
species = 'chiliice'
List_of_plants.append(chili_ice)
class chili_ghost():
name = 'chili_ghost'
plantname = 'Ghost Chili Plants'
Description = "These seeds grow into a chili said to be the hottest in the galaxy."
icon_state = 'seed-chilighost'
lifespan = 20
endurance = 10
production = 10
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'chili-grow'
dead_Sprite = 'chili-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'condensedcapsaicin':0.3, 'capsaicin':0.55, 'nutriment':0.04}
species = 'chilighost'
List_of_plants.append(chili_ghost)
class lime():
name = 'lime'
plantname = 'Lime Tree'
Description = "These are very sour seeds."
icon_state = 'seed-lime'
lifespan = 55
endurance = 50
production = 6
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
genes = ["Perennial_Growth"]
mutates_into = ["orange"]
reagents_add = {'vitamin':0.04, 'nutriment':0.05}
species = 'lime'
List_of_plants.append(lime)
class orange():
name = 'orange'
plantname = 'Orange Tree'
Description = "Sour seeds."
icon_state = 'seed-orange'
lifespan = 60
endurance = 50
production = 6
plant_yield = 5
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'lime-grow'
dead_Sprite = 'lime-dead'
genes = ["Perennial_Growth"]
mutates_into = ["lime"]
reagents_add = {'vitamin':0.04, 'nutriment':0.05}
species = 'orange'
List_of_plants.append(orange)
class lemon():
name = 'lemon'
plantname = 'Lemon Tree'
Description = "These are sour seeds."
icon_state = 'seed-lemon'
lifespan = 55
endurance = 45
production = 6
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'lime-grow'
dead_Sprite = 'lime-dead'
genes = ["Perennial_Growth"]
mutates_into = ["firelemon"]
reagents_add = {'vitamin':0.04, 'nutriment':0.05}
species = 'lemon'
List_of_plants.append(lemon)
class firelemon():
name = 'firelemon'
plantname = 'Combustible Lemon Tree'
Description = "When life gives you lemons, don't make lemonade. Make life take the lemons back! Get mad! I don't want your damn lemons!"
icon_state = 'seed-firelemon'
lifespan = 55
endurance = 45
production = 6
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'lime-grow'
dead_Sprite = 'lime-dead'
genes = ["Perennial_Growth"]
reagents_add = {'nutriment':0.05}
species = 'firelemon'
List_of_plants.append(firelemon)
class cocoapod():
name = 'cocoapod'
plantname = 'Cacao Tree'
Description = "These seeds grow into cacao trees. They look fattening."
icon_state = 'seed-cocoapod'
lifespan = 20
endurance = 15
production = 5
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'cocoapod-grow'
dead_Sprite = 'cocoapod-dead'
genes = ["Perennial_Growth"]
mutates_into = ["cocoapod_vanillapod"]
reagents_add = {'cocoa':0.25, 'nutriment':0.1}
species = 'cocoapod'
List_of_plants.append(cocoapod)
class cocoapod_vanillapod():
name = 'cocoapod_vanillapod'
plantname = 'Vanilla Tree'
Description = "These seeds grow into vanilla trees. They look fattening."
icon_state = 'seed-vanillapod'
lifespan = 20
endurance = 15
production = 5
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'cocoapod-grow'
dead_Sprite = 'cocoapod-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'vanilla':0.25, 'nutriment':0.1}
species = 'vanillapod'
List_of_plants.append(cocoapod_vanillapod)
class corn():
name = 'corn'
plantname = 'Corn Stalks'
Description = "I don't mean to sound corny..."
icon_state = 'seed-corn'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'corn-grow'
dead_Sprite = 'corn-dead'
mutates_into = ["corn_snapcorn"]
reagents_add = {'cornoil':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'corn'
List_of_plants.append(corn)
class corn_snapcorn():
name = 'corn_snapcorn'
plantname = 'Snapcorn Stalks'
Description = "Oh snap!"
icon_state = 'seed-snapcorn'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'corn-grow'
dead_Sprite = 'corn-dead'
mutates_into = [""]
reagents_add = {'cornoil':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'snapcorn'
List_of_plants.append(corn_snapcorn)
class eggplant():
name = 'eggplant'
plantname = 'Eggplants'
Description = "These seeds grow to produce berries that look nothing like eggs."
icon_state = 'seed-eggplant'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'eggplant-grow'
dead_Sprite = 'eggplant-dead'
genes = ["Perennial_Growth"]
mutates_into = ["eggplant_eggy"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'eggplant'
List_of_plants.append(eggplant)
class eggplant_eggy():
name = 'eggplant_eggy'
plantname = 'Eggplants'
Description = "These seeds grow to produce berries that look a lot like eggs."
icon_state = 'seed-eggy'
lifespan = 75
endurance = 15
production = 12
plant_yield = 2
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'eggplant-grow'
dead_Sprite = 'eggplant-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'nutriment':0.1}
species = 'eggy'
List_of_plants.append(eggplant_eggy)
class poppy():
name = 'poppy'
plantname = 'Poppy Plants'
Description = "These seeds grow into poppies."
icon_state = 'seed-poppy'
lifespan = 25
endurance = 10
production = 6
plant_yield = 6
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'poppy-grow'
dead_Sprite = 'poppy-dead'
mutates_into = ["poppy_geranium","poppy_lily"]
reagents_add = {'bicaridine':0.2, 'nutriment':0.05}
species = 'poppy'
List_of_plants.append(poppy)
class poppy_lily():
name = 'poppy_lily'
plantname = 'Lily Plants'
Description = "These seeds grow into lilies."
icon_state = 'seed-lily'
lifespan = 25
endurance = 10
production = 6
plant_yield = 6
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'poppy-grow'
dead_Sprite = 'poppy-dead'
mutates_into = [""]
reagents_add = {'bicaridine':0.2, 'nutriment':0.05}
species = 'lily'
List_of_plants.append(poppy_lily)
class poppy_geranium():
name = 'poppy_geranium'
plantname = 'Geranium Plants'
Description = "These seeds grow into geranium."
icon_state = 'seed-geranium'
lifespan = 25
endurance = 10
production = 6
plant_yield = 6
potency = 20
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'poppy-grow'
dead_Sprite = 'poppy-dead'
mutates_into = [""]
reagents_add = {'bicaridine':0.2, 'nutriment':0.05}
species = 'geranium'
List_of_plants.append(poppy_geranium)
class harebell():
name = 'harebell'
plantname = 'Harebells'
Description = "These seeds grow into pretty little flowers."
icon_state = 'seed-harebell'
lifespan = 100
endurance = 20
production = 1
plant_yield = 2
potency = 30
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
genes = ["Weed Adaptation"]
reagents_add = {'nutriment':0.04}
species = 'harebell'
List_of_plants.append(harebell)
class sunflower():
name = 'sunflower'
plantname = 'Sunflowers'
Description = "These seeds grow into sunflowers."
icon_state = 'seed-sunflower'
lifespan = 25
endurance = 20
production = 2
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'sunflower-grow'
dead_Sprite = 'sunflower-dead'
mutates_into = ["sunflower_moonflower","sunflower_novaflower"]
reagents_add = {'cornoil':0.08, 'nutriment':0.04}
species = 'sunflower'
List_of_plants.append(sunflower)
class sunflower_moonflower():
name = 'sunflower_moonflower'
plantname = 'Moonflowers'
Description = "These seeds grow into moonflowers."
icon_state = 'seed-moonflower'
lifespan = 25
endurance = 20
production = 2
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'moonflower-grow'
dead_Sprite = 'sunflower-dead'
mutates_into = [""]
reagents_add = {'moonshine':0.2, 'vitamin':0.02, 'nutriment':0.02}
species = 'moonflower'
List_of_plants.append(sunflower_moonflower)
class sunflower_novaflower():
name = 'sunflower_novaflower'
plantname = 'Novaflowers'
Description = "These seeds grow into novaflowers."
icon_state = 'seed-novaflower'
lifespan = 25
endurance = 20
production = 2
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
Grown_Sprite = 'novaflower-grow'
dead_Sprite = 'sunflower-dead'
mutates_into = [""]
reagents_add = {'condensedcapsaicin':0.25, 'capsaicin':0.3, 'nutriment':0}
species = 'novaflower'
List_of_plants.append(sunflower_novaflower)
class grass():
name = 'grass'
plantname = 'Grass'
Description = "These seeds grow into grass. Yummy!"
icon_state = 'seed-grass'
lifespan = 40
endurance = 40
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
Grown_Sprite = 'grass-grow'
dead_Sprite = 'grass-dead'
genes = ["Perennial_Growth"]
mutates_into = ["grass_carpet"]
reagents_add = {'nutriment':0.02, 'hydrogen':0.05}
species = 'grass'
List_of_plants.append(grass)
class grass_carpet():
name = 'grass_carpet'
plantname = 'Carpet'
Description = "These seeds grow into stylish carpet samples."
icon_state = 'seed-carpet'
lifespan = 40
endurance = 40
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
Grown_Sprite = 'grass-grow'
dead_Sprite = 'grass-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'nutriment':0.02, 'hydrogen':0.05}
species = 'carpet'
List_of_plants.append(grass_carpet)
class kudzu():
name = 'kudzu'
plantname = 'Kudzu'
Description = "These seeds grow into a weed that grows incredibly fast."
icon_state = 'seed-kudzu'
lifespan = 20
endurance = 10
production = 6
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
genes = ["Perennial_Growth", "Weed Adaptation"]
reagents_add = {'charcoal':0.04, 'nutriment':0.02}
species = 'kudzu'
List_of_plants.append(kudzu)
class watermelon():
name = 'watermelon'
plantname = 'Watermelon Vines'
Description = "These seeds grow into watermelon plants."
icon_state = 'seed-watermelon'
lifespan = 50
endurance = 40
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
dead_Sprite = 'watermelon-dead'
genes = ["Perennial_Growth"]
mutates_into = ["watermelon_holy"]
reagents_add = {'water':0.2, 'vitamin':0.04, 'nutriment':0.2}
species = 'watermelon'
List_of_plants.append(watermelon)
class watermelon_holy():
name = 'watermelon_holy'
plantname = 'Holy Melon Vines'
Description = "These seeds grow into holymelon plants."
icon_state = 'seed-holymelon'
lifespan = 50
endurance = 40
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
dead_Sprite = 'watermelon-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'holywater':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'holymelon'
List_of_plants.append(watermelon_holy)
class starthistle():
name = 'starthistle'
plantname = 'Starthistle'
Description = "A robust species of weed that often springs up in-between the cracks of spaceship parking lots."
icon_state = 'seed-starthistle'
lifespan = 70
endurance = 50
production = 1
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_flowers'
genes = ["Weed Adaptation"]
mutates_into = ["harebell"]
species = 'starthistle'
List_of_plants.append(starthistle)
class cabbage():
name = 'cabbage'
plantname = 'Cabbages'
Description = "These seeds grow into cabbages."
icon_state = 'seed-cabbage'
lifespan = 50
endurance = 25
production = 5
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
genes = ["Perennial_Growth"]
mutates_into = ["replicapod"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'cabbage'
List_of_plants.append(cabbage)
class sugarcane():
name = 'sugarcane'
plantname = 'Sugarcane'
Description = "These seeds grow into sugarcane."
icon_state = 'seed-sugarcane'
lifespan = 60
endurance = 50
production = 6
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
genes = ["Perennial_Growth"]
reagents_add = {'sugar':0.25}
species = 'sugarcane'
List_of_plants.append(sugarcane)
class gatfruit():
name = 'gatfruit'
plantname = 'Gatfruit Tree'
Description = "These seeds grow into .357 revolvers."
icon_state = 'seed-gatfruit'
lifespan = 20
endurance = 20
production = 10
plant_yield = 2
potency = 60
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
genes = ["Perennial_Growth"]
reagents_add = {'sulfur':0.1, 'carbon':0.1, 'nitrogen':0.07, 'potassium':0.05}
species = 'gatfruit'
List_of_plants.append(gatfruit)
class cherry_bomb():
name = 'cherry_bomb'
plantname = 'Cherry Bomb Tree'
Description = "They give you vibes of dread and frustration."
icon_state = 'seed-cherry_bomb'
lifespan = 35
endurance = 35
production = 5
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'cherry-grow'
dead_Sprite = 'cherry-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'nutriment':0.1, 'sugar':0.1, 'blackpowder':0.7}
species = 'cherry_bomb'
List_of_plants.append(cherry_bomb)
class reishi():
name = 'reishi'
plantname = 'Reishi'
Description = "This mycelium grows into something medicinal and relaxing."
icon_state = 'mycelium-reishi'
lifespan = 35
endurance = 35
production = 5
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'morphine':0.35, 'charcoal':0.35, 'nutriment':0}
species = 'reishi'
List_of_plants.append(reishi)
class amanita():
name = 'amanita'
plantname = 'Fly Amanitas'
Description = "This mycelium grows into something horrible."
icon_state = 'mycelium-amanita'
lifespan = 50
endurance = 35
production = 5
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
mutates_into = ["angel"]
reagents_add = {'mushroomhallucinogen':0.04, 'amatoxin':0.35, 'nutriment':0, 'growthserum':0.1}
species = 'amanita'
List_of_plants.append(amanita)
class angel():
name = 'angel'
plantname = 'Destroying Angels'
Description = "This mycelium grows into something devastating."
icon_state = 'mycelium-angel'
lifespan = 50
endurance = 35
production = 5
plant_yield = 2
potency = 35
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'mushroomhallucinogen':0.04, 'amatoxin':0.1, 'nutriment':0, 'amanitin':0.2}
species = 'angel'
List_of_plants.append(angel)
class liberty():
name = 'liberty'
plantname = 'Liberty-Caps'
Description = "This mycelium grows into liberty-cap mushrooms."
icon_state = 'mycelium-liberty'
lifespan = 25
endurance = 15
production = 1
plant_yield = 5
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'mushroomhallucinogen':0.25, 'nutriment':0.02}
species = 'liberty'
List_of_plants.append(liberty)
class plump():
name = 'plump'
plantname = 'Plump-Helmet Mushrooms'
Description = "This mycelium grows into helmets... maybe."
icon_state = 'mycelium-plump'
lifespan = 25
endurance = 15
production = 1
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
mutates_into = ["plump_walkingmushroom"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'plump'
List_of_plants.append(plump)
class plump_walkingmushroom():
name = 'plump_walkingmushroom'
plantname = 'Walking Mushrooms'
Description = "This mycelium will grow into huge stuff!"
icon_state = 'mycelium-walkingmushroom'
lifespan = 30
endurance = 30
production = 1
plant_yield = 1
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
mutates_into = [""]
reagents_add = {'vitamin':0.05, 'nutriment':0.15}
species = 'walkingmushroom'
List_of_plants.append(plump_walkingmushroom)
class chanter():
name = 'chanter'
plantname = 'Chanterelle Mushrooms'
Description = "This mycelium grows into chanterelle mushrooms."
icon_state = 'mycelium-chanter'
lifespan = 35
endurance = 20
production = 1
plant_yield = 5
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'nutriment':0.1}
species = 'chanter'
List_of_plants.append(chanter)
class glowshroom():
name = 'glowshroom'
plantname = 'Glowshrooms'
Description = "This mycelium -glows- into mushrooms!"
icon_state = 'mycelium-glowshroom'
lifespan = 100
endurance = 30
production = 1
plant_yield = 3
potency = 30
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Bioluminescence", "Fungal Vitality"]
mutates_into = ["glowshroom_glowcap","glowshroom_shadowshroom"]
reagents_add = {'radium':0.1, 'phosphorus':0.1, 'nutriment':0.04}
species = 'glowshroom'
List_of_plants.append(glowshroom)
class glowshroom_glowcap():
name = 'glowshroom_glowcap'
plantname = 'Glowcaps'
Description = "This mycelium -powers- into mushrooms!"
icon_state = 'mycelium-glowcap'
lifespan = 100
endurance = 30
production = 1
plant_yield = 3
potency = 30
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
Grown_Sprite = 'glowshroom-grow'
dead_Sprite = 'glowshroom-dead'
genes = ["Red Electrical Glow", "Electrical Activity", "Fungal Vitality"]
mutates_into = [""]
reagents_add = {'teslium':0.1, 'nutriment':0.04}
species = 'glowcap'
List_of_plants.append(glowshroom_glowcap)
class glowshroom_shadowshroom():
name = 'glowshroom_shadowshroom'
plantname = 'Shadowshrooms'
Description = "This mycelium will grow into something shadowy."
icon_state = 'mycelium-shadowshroom'
lifespan = 100
endurance = 30
production = 1
plant_yield = 3
potency = 30
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
Grown_Sprite = 'shadowshroom-grow'
dead_Sprite = 'shadowshroom-dead'
genes = ["Shadow Emission", "Fungal Vitality"]
mutates_into = [""]
reagents_add = {'radium':0.2, 'nutriment':0.04}
species = 'shadowshroom'
List_of_plants.append(glowshroom_shadowshroom)
class lavaland():
name = 'lavaland'
Description = "You should never see this."
lifespan = 50
endurance = 25
production = 4
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
reagents_add = {'nutriment':0.1}
List_of_plants.append(lavaland)
class lavaland_polypore():
name = 'lavaland_polypore'
plantname = 'Polypore Mushrooms'
Description = "This mycelium grows into bracket mushrooms, also known as polypores. Woody and firm, shaft miners often use them for makeshift crafts."
icon_state = 'mycelium-polypore'
lifespan = 50
endurance = 25
production = 4
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'nutriment':0.1}
species = 'polypore'
List_of_plants.append(lavaland_polypore)
class lavaland_porcini():
name = 'lavaland_porcini'
plantname = 'Porcini Mushrooms'
Description = "This mycelium grows into Boletus edulus, also known as porcini. Native to the late Earth, but discovered on Lavaland. Has culinary, medicinal and relaxant effects."
icon_state = 'mycelium-porcini'
lifespan = 50
endurance = 25
production = 4
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'nutriment':0.1}
species = 'porcini'
List_of_plants.append(lavaland_porcini)
class lavaland_inocybe():
name = 'lavaland_inocybe'
plantname = 'Inocybe Mushrooms'
Description = "This mycelium grows into an inocybe mushroom, a species of Lavaland origin with hallucinatory and toxic effects."
icon_state = 'mycelium-inocybe'
lifespan = 50
endurance = 25
production = 4
plant_yield = 4
potency = 15
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
genes = ["Fungal Vitality"]
reagents_add = {'nutriment':0.1}
species = 'inocybe'
List_of_plants.append(lavaland_inocybe)
class nettle():
name = 'nettle'
plantname = 'Nettles'
Description = "These seeds grow into nettles."
icon_state = 'seed-nettle'
lifespan = 30
endurance = 40
production = 6
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
genes = ["Perennial_Growth", "Weed Adaptation"]
mutates_into = ["nettle_death"]
reagents_add = {'sacid':0.5}
species = 'nettle'
List_of_plants.append(nettle)
class nettle_death():
name = 'nettle_death'
plantname = 'Death Nettles'
Description = "These seeds grow into death-nettles."
icon_state = 'seed-deathnettle'
lifespan = 30
endurance = 25
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
genes = ["Perennial_Growth", "Weed Adaptation", "Hypodermic Prickles"]
mutates_into = [""]
reagents_add = {'facid':0.5, 'sacid':0.5}
species = 'deathnettle'
List_of_plants.append(nettle_death)
class onion():
name = 'onion'
plantname = 'Onion Sprouts'
Description = "These seeds grow into onions."
icon_state = 'seed-onion'
lifespan = 20
endurance = 25
production = 4
plant_yield = 6
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
mutates_into = ["onion_red"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'onion'
List_of_plants.append(onion)
class onion_red():
name = 'onion_red'
plantname = 'Red Onion Sprouts'
Description = "For growing exceptionally potent onions."
icon_state = 'seed-onionred'
lifespan = 20
endurance = 25
production = 4
plant_yield = 6
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
reagents_add = {'vitamin':0.04, 'nutriment':0.1, 'tearjuice':0.05}
species = 'onion_red'
List_of_plants.append(onion_red)
class potato():
name = 'potato'
plantname = 'Potato Plants'
Description = "Boil 'em! Mash 'em! Stick 'em in a stew!"
icon_state = 'seed-potato'
lifespan = 30
endurance = 15
production = 1
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'potato-grow'
dead_Sprite = 'potato-dead'
genes = ["Capacitive Cell Production"]
mutates_into = ["potato_sweet"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'potato'
List_of_plants.append(potato)
class potato_sweet():
name = 'potato_sweet'
plantname = 'Sweet Potato Plants'
Description = "These seeds grow into sweet potato plants."
icon_state = 'seed-sweetpotato'
lifespan = 30
endurance = 15
production = 1
plant_yield = 4
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
Grown_Sprite = 'potato-grow'
dead_Sprite = 'potato-dead'
genes = ["Capacitive Cell Production"]
mutates_into = [""]
reagents_add = {'vitamin':0.1, 'sugar':0.1, 'nutriment':0.1}
species = 'sweetpotato'
List_of_plants.append(potato_sweet)
class pumpkin():
name = 'pumpkin'
plantname = 'Pumpkin Vines'
Description = "These seeds grow into pumpkin vines."
icon_state = 'seed-pumpkin'
lifespan = 50
endurance = 40
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'pumpkin-grow'
dead_Sprite = 'pumpkin-dead'
genes = ["Perennial_Growth"]
mutates_into = ["pumpkin_blumpkin"]
reagents_add = {'vitamin':0.04, 'nutriment':0.2}
species = 'pumpkin'
List_of_plants.append(pumpkin)
class pumpkin_blumpkin():
name = 'pumpkin_blumpkin'
plantname = 'Blumpkin Vines'
Description = "These seeds grow into blumpkin vines."
icon_state = 'seed-blumpkin'
lifespan = 50
endurance = 40
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'pumpkin-grow'
dead_Sprite = 'pumpkin-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'ammonia':0.2, 'chlorine':0.1, 'nutriment':0.2}
species = 'blumpkin'
List_of_plants.append(pumpkin_blumpkin)
class random():
name = 'random'
plantname = 'strange plant'
Description = "Mysterious seeds as strange as their name implies. Spooky."
icon_state = 'seed-x'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
Grown_Sprite = 'xpod-grow'
dead_Sprite = 'xpod-dead'
species = '?????'
List_of_plants.append(random)
class replicapod():
name = 'replicapod'
plantname = 'Replica Pod'
Description = "These seeds grow into replica pods. They say these are used to harvest humans."
icon_state = 'seed-replicapod'
lifespan = 50
endurance = 8
production = 1
plant_yield = 1
potency = 30
weed_growth_rate = 1
weed_resistance = 5
species = 'replicapod'
List_of_plants.append(replicapod)
class carrot():
name = 'carrot'
plantname = 'Carrots'
Description = "These seeds grow into carrots."
icon_state = 'seed-carrot'
lifespan = 25
endurance = 15
production = 1
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
mutates_into = ["carrot_parsnip"]
reagents_add = {'oculine':0.25, 'vitamin':0.04, 'nutriment':0.05}
species = 'carrot'
List_of_plants.append(carrot)
class carrot_parsnip():
name = 'carrot_parsnip'
plantname = 'Parsnip'
Description = "These seeds grow into parsnips."
icon_state = 'seed-parsnip'
lifespan = 25
endurance = 15
production = 1
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
dead_Sprite = 'carrot-dead'
mutates_into = [""]
reagents_add = {'vitamin':0.05, 'nutriment':0.05}
species = 'parsnip'
List_of_plants.append(carrot_parsnip)
class whitebeet():
name = 'whitebeet'
plantname = 'White-Beet Plants'
Description = "These seeds grow into sugary beet producing plants."
icon_state = 'seed-whitebeet'
lifespan = 60
endurance = 50
production = 6
plant_yield = 6
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
dead_Sprite = 'whitebeet-dead'
mutates_into = ["redbeet"]
reagents_add = {'vitamin':0.04, 'sugar':0.2, 'nutriment':0.05}
species = 'whitebeet'
List_of_plants.append(whitebeet)
class redbeet():
name = 'redbeet'
plantname = 'Red-Beet Plants'
Description = "These seeds grow into red beet producing plants."
icon_state = 'seed-redbeet'
lifespan = 60
endurance = 50
production = 6
plant_yield = 6
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_vegetables'
dead_Sprite = 'whitebeet-dead'
genes = ["Densified Chemicals"]
reagents_add = {'vitamin':0.05, 'nutriment':0.05}
species = 'redbeet'
List_of_plants.append(redbeet)
class tea():
name = 'tea'
plantname = 'Tea Aspera Plant'
Description = "These seeds grow into tea plants."
icon_state = 'seed-teaaspera'
lifespan = 20
endurance = 15
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tea-dead'
genes = ["Perennial_Growth"]
mutates_into = ["tea_astra"]
reagents_add = {'vitamin':0.04, 'teapowder':0.1}
species = 'teaaspera'
List_of_plants.append(tea)
class tea_astra():
name = 'tea_astra'
plantname = 'Tea Astra Plant'
Description = "These seeds grow into tea plants."
icon_state = 'seed-teaastra'
lifespan = 20
endurance = 15
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tea-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'synaptizine':0.1, 'vitamin':0.04, 'teapowder':0.1}
species = 'teaastra'
List_of_plants.append(tea_astra)
class coffee():
name = 'coffee'
plantname = 'Coffee Arabica Bush'
Description = "These seeds grow into coffee arabica bushes."
icon_state = 'seed-coffeea'
lifespan = 30
endurance = 20
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'coffee-dead'
genes = ["Perennial_Growth"]
mutates_into = ["coffee_robusta"]
reagents_add = {'vitamin':0.04, 'coffeepowder':0.1}
species = 'coffeea'
List_of_plants.append(coffee)
class coffee_robusta():
name = 'coffee_robusta'
plantname = 'Coffee Robusta Bush'
Description = "These seeds grow into coffee robusta bushes."
icon_state = 'seed-coffeer'
lifespan = 30
endurance = 20
production = 5
plant_yield = 5
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'coffee-dead'
genes = ["Perennial_Growth"]
mutates_into = [""]
reagents_add = {'ephedrine':0.1, 'vitamin':0.04, 'coffeepowder':0.1}
species = 'coffeer'
List_of_plants.append(coffee_robusta)
class tobacco():
name = 'tobacco'
plantname = 'Tobacco Plant'
Description = "These seeds grow into tobacco plants."
icon_state = 'seed-tobacco'
lifespan = 20
endurance = 15
production = 5
plant_yield = 10
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tobacco-dead'
mutates_into = ["tobacco_space"]
reagents_add = {'nicotine':0.03, 'nutriment':0.03}
species = 'tobacco'
List_of_plants.append(tobacco)
class tobacco_space():
name = 'tobacco_space'
plantname = 'Space Tobacco Plant'
Description = "These seeds grow into space tobacco plants."
icon_state = 'seed-stobacco'
lifespan = 20
endurance = 15
production = 5
plant_yield = 10
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tobacco-dead'
mutates_into = [""]
reagents_add = {'salbutamol':0.05, 'nicotine':0.08, 'nutriment':0.03}
species = 'stobacco'
List_of_plants.append(tobacco_space)
class tomato():
name = 'tomato'
plantname = 'Tomato Plants'
Description = "These seeds grow into tomato plants."
icon_state = 'seed-tomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Perennial_Growth"]
mutates_into = ["tomato_blue","tomato_blood","tomato_killer"]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'tomato'
List_of_plants.append(tomato)
class tomato_blood():
name = 'tomato_blood'
plantname = 'Blood-Tomato Plants'
Description = "These seeds grow into blood-tomato plants."
icon_state = 'seed-bloodtomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'blood':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'bloodtomato'
List_of_plants.append(tomato_blood)
class tomato_blue():
name = 'tomato_blue'
plantname = 'Blue-Tomato Plants'
Description = "These seeds grow into blue-tomato plants."
icon_state = 'seed-bluetomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'bluetomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Slippery Skin", "Perennial_Growth"]
mutates_into = ["tomato_blue_bluespace"]
reagents_add = {'lube':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'bluetomato'
List_of_plants.append(tomato_blue)
class tomato_blue_bluespace():
name = 'tomato_blue_bluespace'
plantname = 'Bluespace Tomato Plants'
Description = "These seeds grow into bluespace tomato plants."
icon_state = 'seed-bluespacetomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Slippery Skin", "Bluespace Activity", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'lube':0.2, 'bluespace':0.2, 'vitamin':0.04, 'nutriment':0.1}
species = 'bluespacetomato'
List_of_plants.append(tomato_blue_bluespace)
class tomato_killer():
name = 'tomato_killer'
plantname = 'Killer-Tomato Plants'
Description = "These seeds grow into killer-tomato plants."
icon_state = 'seed-killertomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'killertomato-grow'
dead_Sprite = 'killertomato-dead'
genes = ["Liquid Contents"]
mutates_into = [""]
reagents_add = {'vitamin':0.04, 'nutriment':0.1}
species = 'killertomato'
List_of_plants.append(tomato_killer)
class tower():
name = 'tower'
plantname = 'Tower Caps'
Description = "This mycelium grows into tower-cap mushrooms."
icon_state = 'mycelium-tower'
lifespan = 80
endurance = 50
production = 1
plant_yield = 5
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
dead_Sprite = 'towercap-dead'
genes = ["Fungal Vitality"]
mutates_into = ["tower_steel"]
species = 'towercap'
List_of_plants.append(tower)
class tower_steel():
name = 'tower_steel'
plantname = 'Steel Caps'
Description = "This mycelium grows into steel logs."
icon_state = 'mycelium-steelcap'
lifespan = 80
endurance = 50
production = 1
plant_yield = 5
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
dead_Sprite = 'towercap-dead'
genes = ["Fungal Vitality"]
mutates_into = [""]
species = 'steelcap'
List_of_plants.append(tower_steel)
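# --- Illustrative addition, not part of the original file ---
# The file lives under "Tools/python DM botany to json", so List_of_plants is
# presumably serialized to JSON at some point. A minimal sketch of that step,
# assuming each entry is one of the attribute-only classes defined above:
import json

def plant_to_dict(plant_cls):
    # Keep the plain data attributes (name, lifespan, reagents_add, ...),
    # dropping dunders and anything callable such as methods.
    return {key: value for key, value in vars(plant_cls).items()
            if not key.startswith('_') and not callable(value)}

if __name__ == '__main__':
    with open('plants.json', 'w') as out:
        json.dump([plant_to_dict(p) for p in List_of_plants], out, indent=2)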
|
Necromunger/unitystation
|
Tools/python DM botany to json/plans.py
|
Python
|
agpl-3.0
| 51,496
|
[
"Galaxy"
] |
06eebc02489865d591f0cebedce901d42e56cf2e640e08d571179c3627b846a9
|
#!/usr/bin/env python
from setuptools import setup
from glob import glob
import sys
setup(name='Glutton',
version='0.1',
description='Transcriptome scaffolding and postprocessing for comparative analysis using evolutionary alignment',
author='Alan Medlar',
author_email='alan.j.medlar@helsinki.fi',
url='http://wasabiapp.org/software/glutton',
license='GNU Public License ver3 ( https://www.gnu.org/licenses/gpl-3.0.html )',
long_description='Transcriptome scaffolding and postprocessing for comparative analysis using evolutionary alignment',
platforms=['*nix'],
packages=['glutton'],
install_requires=['biopython>=1.6', 'sqlalchemy', 'mysql-python', 'pysam'],
scripts=['scripts/glutton',
         'binaries/prank',
         'binaries/pagan',
         'binaries/exonerate',
         'binaries/mafft',
         'binaries/makeblastdb',
         'binaries/blastx',
         'binaries/bppphysamp',
         'binaries/bppancestor',
         'binaries/bppdist',
         'binaries/raxml'],
data_files = [(sys.prefix + '/bin/lib', glob('./binaries/lib/*'))]
)
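# Hypothetical usage note (not part of the original file): this is a standard
# setuptools script, so installation follows the usual form, e.g.
#   python setup_with_extras.py install
# which also copies the wrapped binaries listed in `scripts` onto the PATH
# alongside the `glutton` driver script.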
|
ajm/glutton
|
setup_with_extras.py
|
Python
|
gpl-3.0
| 1,214
|
[
"Biopython",
"pysam"
] |
b9cc66f276f26264317396a5d2c04483d3bff4842d5eb8d94146069de33c9f83
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Reports/Text Reports/Tag Report"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
from gramps.gen.ggettext import gettext as _
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.menu import EnumeratedListOption
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle, FONT_SANS_SERIF,
INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.lib import NoteType
from gramps.gen.filters import GenericFilterFactory, rules
from gramps.gen.display.name import displayer as name_displayer
from gramps.gen.errors import ReportError
from gramps.gen.datehandler import get_date
#------------------------------------------------------------------------
#
# TagReport
#
#------------------------------------------------------------------------
class TagReport(Report):
def __init__(self, database, options, user):
"""
Create the TagReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
tag - The tag each object must match to be included.
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.tag = menu.get_option_by_name('tag').get_value()
if not self.tag:
raise ReportError(_('Tag Report'),
_('You must first create a tag before running this report.'))
def write_report(self):
self.doc.start_paragraph("TR-Title")
# feature request 2356: avoid genitive form
title = _("Tag Report for %s Items") % self.tag
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
self.write_people()
self.write_families()
#self.write_events()
self.write_notes()
self.write_media()
def write_people(self):
plist = self.database.iter_person_handles()
FilterClass = GenericFilterFactory('Person')
filter = FilterClass()
filter.add_rule(rules.person.HasTag([self.tag]))
ind_list = filter.apply(self.database, plist)
if not ind_list:
return
self.doc.start_paragraph("TR-Heading")
header = _("People")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('PeopleTable','TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Name"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Birth"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Death"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for person_handle in ind_list:
person = self.database.get_person_from_handle(person_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(person.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
name = name_displayer.display(person)
mark = ReportUtils.get_person_mark(self.database, person)
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(name, mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
birth_ref = person.get_birth_ref()
if birth_ref:
event = self.database.get_event_from_handle(birth_ref.ref)
self.doc.write_text(get_date(event))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
death_ref = person.get_death_ref()
if death_ref:
event = self.database.get_event_from_handle(death_ref.ref)
self.doc.write_text(get_date(event))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_families(self):
flist = self.database.iter_family_handles()
FilterClass = GenericFilterFactory('Family')
filter = FilterClass()
filter.add_rule(rules.family.HasTag([self.tag]))
fam_list = filter.apply(self.database, flist)
if not fam_list:
return
self.doc.start_paragraph("TR-Heading")
header = _("Families")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('FamilyTable','TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Father"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Mother"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Relationship"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for family_handle in fam_list:
family = self.database.get_family_from_handle(family_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(family.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
father_handle = family.get_father_handle()
if father_handle:
father = self.database.get_person_from_handle(father_handle)
mark = ReportUtils.get_person_mark(self.database, father)
self.doc.write_text(name_displayer.display(father), mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.database.get_person_from_handle(mother_handle)
mark = ReportUtils.get_person_mark(self.database, mother)
self.doc.write_text(name_displayer.display(mother), mark)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
relation = family.get_relationship()
self.doc.write_text(str(relation))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_events(self):
# At the time of this writing, the GRAMPS UI does not allow the setting
# of tags for events.
elist = self.database.get_event_handles()
FilterClass = GenericFilterFactory('Event')
filter = FilterClass()
filter.add_rule(rules.event.HasTag([self.tag]))
event_list = filter.apply(self.database, elist)
if not event_list:
return
self.doc.start_paragraph("TR-Heading")
header = _("Events")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('EventTable','TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Date"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Place"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Description"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for event_handle in event_list:
event = self.database.get_event_from_handle(event_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(event.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
date = get_date(event)
if date:
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
place_handle = event.get_place_handle()
place = ReportUtils.place_name(self.database, place_handle)
if place:
self.doc.write_text(place)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
descr = event.get_description()
if descr:
self.doc.write_text(descr)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_notes(self):
nlist = self.database.get_note_handles()
FilterClass = GenericFilterFactory('Note')
filter = FilterClass()
filter.add_rule(rules.note.HasTag([self.tag]))
note_list = filter.apply(self.database, nlist)
if not note_list:
return
self.doc.start_paragraph("TR-Heading")
header = _("Notes")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('NoteTable','TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell', 2)
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Text"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for note_handle in note_list:
note = self.database.get_note_from_handle(note_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(note.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
type = note.get_type()
self.doc.write_text(str(type))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell', 2)
self.doc.write_styled_note(note.get_styledtext(),
note.get_format(), 'TR-Note',
contains_html = (note.get_type()
== NoteType.HTML_CODE)
)
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
def write_media(self):
mlist = self.database.get_media_object_handles(sort_handles=True)
FilterClass = GenericFilterFactory('Media')
filter = FilterClass()
filter.add_rule(rules.media.HasTag([self.tag]))
media_list = filter.apply(self.database, mlist)
if not media_list:
return
self.doc.start_paragraph("TR-Heading")
header = _("Media")
mark = IndexMark(header, INDEX_TYPE_TOC, 2)
self.doc.write_text(header, mark)
self.doc.end_paragraph()
self.doc.start_table('MediaTable','TR-Table')
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Id"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Title"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Type"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal-Bold')
self.doc.write_text(_("Date"))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
for media_handle in media_list:
media = self.database.get_object_from_handle(media_handle)
self.doc.start_row()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
self.doc.write_text(media.get_gramps_id())
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
title = media.get_description()
self.doc.write_text(str(title))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
mime_type = media.get_mime_type()
self.doc.write_text(str(mime_type))
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.start_cell('TR-TableCell')
self.doc.start_paragraph('TR-Normal')
date = get_date(media)
if date:
self.doc.write_text(date)
self.doc.end_paragraph()
self.doc.end_cell()
self.doc.end_row()
self.doc.end_table()
#------------------------------------------------------------------------
#
# TagOptions
#
#------------------------------------------------------------------------
class TagOptions(MenuReportOptions):
def __init__(self, name, dbase):
self.__db = dbase
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the tag report.
"""
category_name = _("Report Options")
all_tags = []
for handle in self.__db.get_tag_handles(sort_handles=True):
tag = self.__db.get_tag_from_handle(handle)
all_tags.append(tag.get_name())
if len(all_tags) > 0:
tag_option = EnumeratedListOption(_('Tag'), all_tags[0])
for tag_name in all_tags:
tag_option.add_item(tag_name, tag_name)
else:
tag_option = EnumeratedListOption(_('Tag'), '')
tag_option.add_item('', '')
tag_option.set_help(_("The tag to use for the report"))
menu.add_option(category_name, "tag", tag_option)
def make_default_style(self,default_style):
"""Make the default output style for the Tag Report."""
# Paragraph Styles
f = FontStyle()
f.set_size(16)
f.set_type_face(FONT_SANS_SERIF)
f.set_bold(1)
p = ParagraphStyle()
p.set_header_level(1)
p.set_bottom_border(1)
p.set_top_margin(ReportUtils.pt2cm(3))
p.set_bottom_margin(ReportUtils.pt2cm(3))
p.set_font(f)
p.set_alignment(PARA_ALIGN_CENTER)
p.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("TR-Title", p)
font = FontStyle()
font.set(face=FONT_SANS_SERIF, size=14, italic=1)
para = ParagraphStyle()
para.set_font(font)
para.set_header_level(2)
para.set_top_margin(0.25)
para.set_bottom_margin(0.25)
para.set_description(_('The style used for the section headers.'))
default_style.add_paragraph_style("TR-Heading", para)
font = FontStyle()
font.set_size(12)
p = ParagraphStyle()
p.set(first_indent=-0.75, lmargin=.75)
p.set_font(font)
p.set_top_margin(ReportUtils.pt2cm(3))
p.set_bottom_margin(ReportUtils.pt2cm(3))
p.set_description(_('The basic style used for the text display.'))
default_style.add_paragraph_style("TR-Normal", p)
font = FontStyle()
font.set_size(12)
font.set_bold(True)
p = ParagraphStyle()
p.set(first_indent=-0.75, lmargin=.75)
p.set_font(font)
p.set_top_margin(ReportUtils.pt2cm(3))
p.set_bottom_margin(ReportUtils.pt2cm(3))
p.set_description(_('The basic style used for table headings.'))
default_style.add_paragraph_style("TR-Normal-Bold", p)
para = ParagraphStyle()
para.set(first_indent=-0.75, lmargin=.75)
para.set_top_margin(ReportUtils.pt2cm(3))
para.set_bottom_margin(ReportUtils.pt2cm(3))
para.set_description(_('The basic style used for the note display.'))
default_style.add_paragraph_style("TR-Note",para)
#Table Styles
cell = TableCellStyle()
default_style.add_cell_style('TR-TableCell', cell)
table = TableStyle()
table.set_width(100)
table.set_columns(4)
table.set_column_width(0, 10)
table.set_column_width(1, 30)
table.set_column_width(2, 30)
table.set_column_width(3, 30)
default_style.add_table_style('TR-Table',table)
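# A minimal standalone sketch of the conversion ReportUtils.pt2cm performs
# above, assuming the usual 1 pt = 1/72 inch = 2.54/72 cm definition
# (illustrative only, not part of the report code):
def _pt2cm_sketch(pt):
    """Convert a size in points to centimeters (assumed conversion factor)."""
    return pt * 2.54 / 72.0  # _pt2cm_sketch(3) -> ~0.1058, i.e. the 3 pt margins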
|
arunkgupta/gramps
|
gramps/plugins/textreport/tagreport.py
|
Python
|
gpl-2.0
| 21,726
|
[
"Brian"
] |
ed60b0af1110db51f53f45a4d8aa168918f1e41b249b11c5e0fcba824dfb784e
|
# coding: utf-8
import copy
import datetime as dt
import furl
import mock
import httpretty
import github3.repos
import github3.git
from flask import url_for
import kozmic.builds.tasks
from kozmic.models import (User, DeployKey, Project, Membership, Hook,
HookCall, Build, Job, TrackedFile)
from . import TestCase, func_fixtures as fixtures
from . import factories, unit_tests
class TestUsers(TestCase):
@httpretty.httprettified
def test_github_sign_up(self):
"""User can sign up with GitHub."""
assert User.query.count() == 0
# Visit home page, follow the redirect, click sign up link
r = self.w.get('/').maybe_follow().click('Sign up with GitHub')
# Make sure that user is being redirected to GitHub and
# location has right arguments
assert r.status_code == 302
location = furl.furl(r.headers['Location'])
redirect_uri = location.args['redirect_uri']
assert redirect_uri == url_for('auth.auth_callback')
assert location.host == 'github.com'
assert location.args['scope'] == 'repo'
        # ...now the user is supposed to go to GitHub and allow access to their repos.
# Then GitHub will redirect him back to `redirect_uri` with
# temporary `code`. `redirect_uri` will exchange that `code` for
# `access_token` using https://github.com/login/oauth/access_token
# endpoint.
# Temporary code:
code = '50ebfe0d4e52301fc157'
# Access token:
access_token = '526069daaa72e78b11c2c17bfe085783e765d77b'
# Mock exchange GitHub endpoint to make it always return
# our `access_token`
httpretty.register_uri(
httpretty.GET, 'https://github.com/login/oauth/access_token',
'access_token={}&scope=repo&token_type=bearer'.format(access_token))
# Mock user API call to return some valid JSON
httpretty.register_uri(
httpretty.GET, 'https://api.github.com/user', fixtures.USER_JSON)
# Visit our `redirect_uri` (pretending being GitHub)
with mock.patch.object(User, 'sync_memberships_with_github') as sync_mock:
r = self.w.get('{}?code={}'.format(redirect_uri, code))
sync_mock.assert_called_once_with()
latest_requests = httpretty.httpretty.latest_requests
assert len(latest_requests) == 2
access_token_request, user_api_request = latest_requests
        # Make sure that `redirect_uri` has tried to exchange its temporary
        # code for an access token by hitting the /login/oauth/access_token
        # endpoint
access_token_request_args = dict(furl.furl(access_token_request.path).args)
assert access_token_request_args['code'] == code
assert access_token_request_args['redirect_uri'] == redirect_uri
# And has succeeded
assert (user_api_request.headers['Authorization'] ==
'token {}'.format(access_token))
assert User.query.count() == 1
user = User.query.first()
assert user.gh_access_token == access_token
assert user.email == fixtures.USER_DATA['email']
assert user.gh_name == fixtures.USER_DATA['name']
assert user.gh_id == fixtures.USER_DATA['id']
assert user.gh_login == fixtures.USER_DATA['login']
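    @staticmethod
    def _exchange_code_for_token_sketch(client_id, client_secret, code):
        # A minimal sketch (hypothetical helper, illustrative only) of the
        # code-for-token exchange described in the comments above, assuming
        # the `requests` library is available. GitHub documents the endpoint
        # as a POST, but the test mocks it as a GET, which is mirrored here.
        import requests
        from urlparse import parse_qs  # Python 2, matching this code base
        r = requests.get('https://github.com/login/oauth/access_token',
                         params={'client_id': client_id,
                                 'client_secret': client_secret,
                                 'code': code})
        # Body looks like 'access_token=...&scope=repo&token_type=bearer'
        return parse_qs(r.text)['access_token'][0]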
class TestProjects(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.user_1, self.user_2 = factories.UserFactory.create_batch(2)
self.user_1_project = factories.ProjectFactory.create(owner=self.user_1)
self.user_2_repo = factories.UserRepositoryFactory.create(
parent=self.user_2, is_public=True)
self.user_2_org = factories.OrganizationFactory.create(user=self.user_2)
self.user_2_org_repo = factories.OrganizationRepositoryFactory.create(
parent=self.user_2_org)
def test_projects_sync(self):
"""User can sync projects with GitHub."""
self.login(user_id=self.user_1.id)
with mock.patch.object(User, 'sync_memberships_with_github') as sync_mock:
self.w.get('/').maybe_follow().forms['sync-user-memberships'].submit()
sync_mock.assert_called_once_with()
def test_repos_sync_and_view(self):
"""User can sync and view repositories from GitHub."""
user = self.user_1
self.login(user_id=user.id)
# Replace User `get_gh_org_repos` and `get_gh_repos` methods with
# stubs that don't hit GitHub API
get_gh_org_repos_stub = unit_tests.TestUser.get_stub_for__get_gh_org_repos()
get_gh_repos_stub = unit_tests.TestUser.get_stub_for__get_gh_repos()
with mock.patch.object(User, 'get_gh_org_repos', get_gh_org_repos_stub):
with mock.patch.object(User, 'get_gh_repos', get_gh_repos_stub):
r = self.w.get(url_for('repos.sync'))
# Make sure that database reflects data provided by the stubs
assert user.repositories.count() == 3
assert user.organizations.count() == 2
assert (set(org.gh_login for org in user.organizations) ==
{'pyconru', 'unistorage'})
pyconru_repos = user.organizations.filter_by(
gh_login='pyconru').first().repositories
unistorage_repos = user.organizations.filter_by(
gh_login='unistorage').first().repositories
assert pyconru_repos.count() == 1
assert unistorage_repos.count() == 6
assert unistorage_repos.filter_by(is_public=True).count() == 4
assert user.repos_last_synchronized_at
# Make sure that all the repositories are listed
data = self.w.get('/repositories/').context['repositories_by_owner']
assert set(data[user.gh_login]) == {
(repo.gh_id, repo.gh_full_name) for repo in user.repositories
}
assert set(data['pyconru']) == {
(repo.gh_id, repo.gh_full_name) for repo in pyconru_repos
}
assert set(data['unistorage']) == {
(repo.gh_id, repo.gh_full_name) for repo in unistorage_repos
}
# Create projects for some of the repositories
project_1 = factories.ProjectFactory.create(
owner=self.user_1,
gh_id=user.repositories[1].gh_id)
project_2 = factories.ProjectFactory.create(
owner=self.user_2,
gh_id=pyconru_repos[0].gh_id)
project_3 = factories.ProjectFactory.create(
owner=self.user_1,
gh_id=unistorage_repos[3].gh_id)
# And make sure that repositories for which projects were
# created are not listed
data = self.w.get('/repositories/').context['repositories_by_owner']
assert 'pyconru' not in data
assert set(data[user.gh_login]) == {
(repo.gh_id, repo.gh_full_name) for repo in user.repositories
if repo.gh_id != project_1.gh_id
}
assert set(data['unistorage']) == {
(repo.gh_id, repo.gh_full_name) for repo in unistorage_repos
if repo.gh_id != project_3.gh_id
}
def test_project_creation(self):
"""User can create a project from repository."""
self.login(user_id=self.user_2.id)
r = self.w.get('/').maybe_follow().click('New Project')
def ensure_stub(self):
self.gh_id = 123
return True
with mock.patch.object(Project, 'sync_memberships_with_github') as sync_mock, \
mock.patch.object(DeployKey, 'ensure', side_effect=ensure_stub,
autospec=True) as ensure_deploy_key_mock:
form_id = 'create-project-{}'.format(self.user_2_org_repo.gh_id)
r.forms[form_id].submit().follow()
# The repository is private, make sure a deploy key was created
ensure_deploy_key_mock.assert_called_once_with(mock.ANY)
sync_mock.assert_called_once_with()
assert self.user_2.owned_projects.count() == 1
project = self.user_2.owned_projects.filter_by(
gh_id=self.user_2_org_repo.gh_id).first()
assert project.deploy_key
assert not project.is_public
assert project.gh_id == self.user_2_org_repo.gh_id
assert project.gh_name == self.user_2_org_repo.gh_name
assert project.gh_full_name == self.user_2_org_repo.gh_full_name
assert project.gh_login == self.user_2_org_repo.parent.gh_login
assert project.deploy_key.rsa_public_key.startswith('ssh-rsa ')
assert project.deploy_key.rsa_private_key.startswith(
'-----BEGIN RSA PRIVATE KEY-----')
with mock.patch.object(Project, 'sync_memberships_with_github') as sync_mock, \
mock.patch.object(DeployKey, 'ensure') as ensure_deploy_key_mock:
form_id = 'create-project-{}'.format(self.user_2_repo.gh_id)
r.forms[form_id].submit().follow()
# The repository is public, make sure a deploy key wasn't created
assert not ensure_deploy_key_mock.called
sync_mock.assert_called_once_with()
assert self.user_2.owned_projects.count() == 2
project = self.user_2.owned_projects.filter_by(
gh_id=self.user_2_repo.gh_id).first()
assert not project.deploy_key
assert project.is_public
def test_project_deletion(self):
"""User can delete an owned project."""
project = factories.ProjectFactory.create(owner=self.user_1)
project_id = project.id
factories.MembershipFactory.create(user=self.user_2, project=project)
def get_settings_page(project):
return self.w.get('/').maybe_follow().click(
project.gh_full_name).maybe_follow().click('Settings')
self.login(user_id=self.user_2.id)
r = get_settings_page(project)
assert 'delete-project' not in r.forms
with mock.patch('flask.ext.wtf.csrf.validate_csrf', return_value=True):
r = self.w.post(
url_for('projects.delete_project', id=project.id), status='*')
assert r.status_code == 403
self.login(user_id=self.user_1.id)
r = get_settings_page(project)
form = r.forms['delete-project']
# Imitate JS logic:
form.action = url_for(
'projects.delete_project', id=project.id, _external=False)
assert form.action in r
with mock.patch.object(Project, 'gh'):
form.submit().follow()
assert not Project.query.get(project_id)
class TestHooksManagement(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.user = factories.UserFactory.create()
self.project = factories.ProjectFactory.create(owner=self.user)
@staticmethod
def _github_error_side_effect(*args, **kwargs):
raise github3.GitHubError(mock.Mock())
def get_project_settings_page(self, project):
return (self.w.get('/').maybe_follow()
.click(project.gh_full_name).maybe_follow()
.click('Settings'))
def test_manager_can_see_hooks(self):
"""Manager can see project hooks."""
self.login(user_id=self.user.id)
# See no hooks configured
settings_page = self.get_project_settings_page(self.project)
assert 'Hooks haven\'t been configured yet.' in settings_page
# Add some hooks
hooks = factories.HookFactory.create_batch(3, project=self.project)
# See all the hooks configured
settings_page = self.get_project_settings_page(self.project)
assert 'Hooks haven\'t been configured yet.' not in settings_page
for hook in hooks:
assert hook.title in settings_page
def test_manager_can_add_hook(self):
"""Manager can add a project hook."""
self.login(user_id=self.user.id)
settings_page = self.get_project_settings_page(self.project)
hook_form = settings_page.click('Add a new hook').forms['hook-form']
# Fill the hook creation form
hook_data = {
'title': 'Tests on debian-7',
'install_script': '#!/bin/bash\r\necho 123 > /test.txt',
'build_script': '#!/bin/bash\r\n./kozmic.sh',
'docker_image': 'debian-7',
}
for field, value in hook_data.items():
hook_form[field] = value
with mock.patch.object(Hook, 'ensure', return_value=True) as ensure_mock:
r = hook_form.submit().follow()
ensure_mock.assert_called_once_with()
# Ensure that hook has been created with the right data
assert self.project.hooks.count() == 1
hook = self.project.hooks.first()
assert hook.title == hook_data['title']
assert hook.install_script == '#!/bin/bash\necho 123 > /test.txt'
assert hook.build_script == '#!/bin/bash\n./kozmic.sh'
assert hook.docker_image == hook_data['docker_image']
# Pretend that there was a GitHub error
with mock.patch.object(Hook, 'ensure', return_value=False) as ensure_mock:
r = hook_form.submit()
ensure_mock.assert_called_once_with()
# Make sure that user gets the warning
assert r.flashes == [
('warning', 'Sorry, failed to create a hook. Please try again later.')
]
def test_manager_can_edit_hook(self):
"""Manager can edit a project hook."""
hooks = factories.HookFactory.create_batch(3, project=self.project)
hook_1 = hooks[1]
self.login(user_id=self.user.id)
settings_page = self.get_project_settings_page(self.project)
# Fill the hook form
link_id = 'edit-hook-{}'.format(hook_1.id)
hook_form = settings_page.click(linkid=link_id).forms['hook-form']
        assert hook_form['title'].value == hook_1.title  # form is pre-filled
hook_form['title'] = 'PEP 8 check'
hook_form['build_script'] = '#!/bin/sh\r\npep8 app.py'
hook_form.submit()
# Ensure the changes are saved
assert hook_1.title == 'PEP 8 check'
assert hook_1.build_script == '#!/bin/sh\npep8 app.py'
# Trying to submit form without required field
hook_form['title'] = ''
assert 'This field is required' in hook_form.submit()
def test_manager_can_delete_hook(self):
"""Manager can delete a project hook."""
hooks = factories.HookFactory.create_batch(3, project=self.project)
hook_1, hook_2, _ = hooks
self.login(user_id=self.user.id)
settings_page = self.get_project_settings_page(self.project)
hook_1_deletion_form = settings_page.forms[
'delete-hook-{}'.format(hook_1.id)]
hook_2_deletion_form = settings_page.forms[
'delete-hook-{}'.format(hook_2.id)]
# Case 1:
# Mock hook.delete() to raise GitHubError
gh_repo_mock = mock.Mock()
gh_hook_mock = gh_repo_mock.hook.return_value
gh_hook_mock.delete.side_effect = self._github_error_side_effect
with mock.patch.object(Project, 'gh', gh_repo_mock):
hook_1_deletion_form.submit().follow()
# Ensure that `hook_1` is still there
assert hook_1 in self.project.hooks
# Case 2:
# Mock gh.hook() to return None (suppose user deleted hook manually
# using GitHub settings)
gh_repo_mock = mock.Mock()
gh_repo_mock.hook.return_value = None
with mock.patch.object(Project, 'gh', gh_repo_mock):
hook_1_deletion_form.submit().follow()
# In that case hook must be deleted from db
assert hook_1 not in self.project.hooks
# Case 3:
# Regular situation: `gh_repo.hook()` returns gh_hook,
# `gh_hook.delete()` call is successful
gh_repo_mock = mock.Mock()
gh_hook_mock = gh_repo_mock.hook.return_value
with mock.patch.object(Project, 'gh', gh_repo_mock):
hook_2_deletion_form.submit().follow()
# Ensure that hook is deleted...
assert hook_2 not in self.project.hooks
# ...the view called `.hook(id)` to get the GitHub hook
gh_repo_mock.hook.assert_called_once_with(hook_2.gh_id)
# ...and then called `delete` on it
gh_hook_mock.delete.assert_called_once_with()
def test_manager_can_change_tracked_files_in_hook_settings(self):
"""Manager can change the traked files."""
hook = factories.HookFactory.create(project=self.project)
self.login(user_id=self.user.id)
settings_page = self.get_project_settings_page(self.project)
link_id = 'edit-hook-{}'.format(hook.id)
hook_form = settings_page.click(linkid=link_id).forms['hook-form']
hook_form['tracked_files'] = ''
hook_form.submit()
assert not hook.tracked_files.all()
hook_form = settings_page.click(linkid=link_id).forms['hook-form']
hook_form['tracked_files'] = ('./requirements/basic.txt\n'
'requirements/basic.txt\n'
'././a/../requirements/dev.txt')
hook_form.submit()
assert set(tracked_file.path for tracked_file in hook.tracked_files) == {
'requirements/basic.txt',
'requirements/dev.txt',
}
hook_form = settings_page.click(linkid=link_id).forms['hook-form']
assert set(hook_form['tracked_files'].value.splitlines()) == {
'requirements/basic.txt',
'requirements/dev.txt',
}
hook_form['tracked_files'] = (hook_form['tracked_files'].value +
'\nPumpurum.txt\npumpurum.txt')
hook_form.submit()
assert set(tracked_file.path for tracked_file in hook.tracked_files) == {
'requirements/basic.txt',
'requirements/dev.txt',
'pumpurum.txt',
'Pumpurum.txt',
}
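    @staticmethod
    def _normalize_tracked_files_sketch(raw):
        # A minimal sketch (hypothetical helper, not the actual Hook
        # implementation) of the normalization the test above expects:
        # posixpath.normpath collapses './' and '../' segments, and the set
        # drops exact duplicates while keeping case-sensitive variants.
        import posixpath
        return {posixpath.normpath(line.strip())
                for line in raw.splitlines() if line.strip()}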
class TestMembersManagement(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.aromanovich = factories.UserFactory.create(
gh_login='aromanovich', gh_name='Anton')
self.ramm = factories.UserFactory.create(
gh_login='ramm', gh_name='Danila')
self.vsokolov = factories.UserFactory.create(
gh_login='vsokolov', gh_name='Victor')
self.project = factories.ProjectFactory.create(owner=self.aromanovich)
def get_project_settings_page(self, project):
return (self.w.get('/').maybe_follow()
.click(project.gh_full_name).maybe_follow()
.click('Settings'))
def test_manager_can_see_members(self):
"""Manager can see project members."""
self.login(user_id=self.aromanovich.id)
settings_page = self.get_project_settings_page(self.project)
member_divs = settings_page.lxml.cssselect('.members .member')
assert len(member_divs) == 1
assert 'aromanovich' in member_divs[0].text_content()
for user in [self.ramm, self.vsokolov]:
factories.MembershipFactory.create(user=user, project=self.project)
settings_page = self.get_project_settings_page(self.project)
member_divs = settings_page.lxml.cssselect('.members .member')
assert len(member_divs) == 3
assert 'aromanovich' in member_divs[0].text_content()
assert 'ramm' in member_divs[1].text_content()
assert 'vsokolov' in member_divs[2].text_content()
def test_manager_can_sync_members_with_github(self):
"""Manager can sync members with GitHub."""
factories.MembershipFactory.create(
user=self.vsokolov, project=self.project)
factories.MembershipFactory.create(
user=self.ramm, project=self.project, allows_management=True)
self.login(user_id=self.vsokolov.id)
settings_page = self.get_project_settings_page(self.project)
assert 'sync-project-memberships' not in settings_page.forms
self.login(user_id=self.ramm.id)
settings_page = self.get_project_settings_page(self.project)
with mock.patch.object(Project, 'sync_memberships_with_github') as sync_mock:
settings_page.forms['sync-project-memberships'].submit()
sync_mock.assert_called_once_with()
class TestGitHubHooks(TestCase):
    skip_patterns = ('[skip ci]', '[ci skip]', 'skip_ci', 'ci_skip', '[CI_SKIP]', '[SKIP_CI]')
def setup_method(self, method):
TestCase.setup_method(self, method)
self.user = factories.UserFactory.create()
self.project = factories.ProjectFactory.create(owner=self.user)
self.hook_1 = factories.HookFactory.create(project=self.project)
self.hook_2 = factories.HookFactory.create(project=self.project)
def _create_gh_repo_mock(self, commit_data):
gh_repo_mock = mock.Mock()
gh_repo_mock.git_commit.return_value = github3.git.Commit(commit_data)
assert (fixtures.PULL_REQUEST_HOOK_CALL_DATA['pull_request']['head']['sha'] ==
commit_data['sha'])
return gh_repo_mock
def test_github_pull_request_hook(self):
commit_data = fixtures.COMMIT_47fe2_DATA
gh_repo_mock = self._create_gh_repo_mock(commit_data)
head_sha = commit_data['sha']
with mock.patch.object(Project, 'gh', gh_repo_mock), \
mock.patch('kozmic.builds.tasks.do_job') as do_job_mock:
r = self.w.post_json(
url_for('builds.hook', id=self.hook_1.id, _external=True),
fixtures.PULL_REQUEST_HOOK_CALL_DATA)
assert r.status_code == 200
assert r.body == 'OK'
gh_repo_mock.git_commit.assert_called_once_with(head_sha)
assert self.hook_1.calls.count() == 1
assert self.project.builds.count() == 1
hook_call = self.hook_1.calls.first()
build = self.project.builds.first()
assert self.project.builds.count() == 1
assert build.status == 'enqueued'
assert build.number == 1
assert build.gh_commit_ref == 'test'
assert build.gh_commit_sha == head_sha
assert build.gh_commit_message == commit_data['message']
assert build.gh_commit_author == commit_data['author']['name']
do_job_mock.delay.assert_called_once_with(hook_call_id=hook_call.id)
def test_github_ping_event(self):
with mock.patch.object(Project, 'gh'), \
mock.patch('kozmic.builds.tasks.do_job') as do_job_mock:
url = url_for('builds.hook', id=self.hook_1.id, _external=True)
r = self.w.post_json(url, {
'zen': 'Hello!',
'hook_id': self.hook_1.gh_id
})
assert r.body == 'OK'
r = self.w.post_json(url, {
'zen': 'Hello!',
'hook_id': self.hook_1.gh_id + 123
}, expect_errors=True)
assert r.body == 'Wrong hook URL'
def test_consecutive_hook_calls(self):
commit_data = fixtures.COMMIT_47fe2_DATA
gh_repo_mock = self._create_gh_repo_mock(commit_data)
head_sha = commit_data['sha']
with mock.patch.object(Project, 'gh', gh_repo_mock), \
mock.patch('kozmic.builds.tasks.do_job') as do_job_mock:
push_hook_call_data = copy.deepcopy(fixtures.PUSH_HOOK_CALL_DATA)
push_hook_call_data['ref'] = 'refs/heads/{}'.format(
fixtures.PULL_REQUEST_HOOK_CALL_DATA['pull_request']['head']['ref'])
for hook in (self.hook_1, self.hook_2):
self.w.post_json(
url_for('builds.hook', id=hook.id, _external=True),
push_hook_call_data)
self.w.post_json(
url_for('builds.hook', id=self.hook_1.id, _external=True),
fixtures.PULL_REQUEST_HOOK_CALL_DATA)
build = self.project.builds.first()
hook_call_1 = self.hook_1.calls.first()
hook_call_2 = self.hook_2.calls.first()
assert self.project.builds.count() == 1
assert self.hook_1.calls.count() == 1
assert self.hook_2.calls.count() == 1
assert build.number == 1 # Make sure that second hook call hasn't
# increased build number
assert do_job_mock.delay.call_count == 2
assert mock.call(hook_call_id=hook_call_1.id) in do_job_mock.delay.call_args_list
assert mock.call(hook_call_id=hook_call_2.id) in do_job_mock.delay.call_args_list
def test_skip_build_if_commit_contains_ci_skip(self):
        for skip_pattern in self.skip_patterns:
commit_data = fixtures.COMMIT_47fe2_DATA.copy()
commit_data['message'] = u'Русский текст' + skip_pattern
gh_repo_mock = self._create_gh_repo_mock(commit_data)
with mock.patch.object(Project, 'gh', gh_repo_mock), \
mock.patch('kozmic.builds.tasks.do_job') as do_job_mock:
response = self.w.post_json(
url_for('builds.hook', id=self.hook_1.id, _external=True),
fixtures.PULL_REQUEST_HOOK_CALL_DATA)
assert response.status_code == 200
assert response.body == 'OK'
assert self.project.builds.count() == 0
def test_skip_build_if_pull_request_contains_ci_skip(self):
commit_data = fixtures.COMMIT_47fe2_DATA
gh_repo_mock = self._create_gh_repo_mock(commit_data)
        for skip_pattern in self.skip_patterns:
payload_data = fixtures.PULL_REQUEST_HOOK_CALL_DATA.copy()
payload_data['pull_request']['title'] = u'Русский текст' + skip_pattern
payload_data['pull_request']['body'] = None
with mock.patch.object(Project, 'gh', gh_repo_mock), \
mock.patch('kozmic.builds.tasks.do_job') as do_job_mock:
response = self.w.post_json(
url_for('builds.hook', id=self.hook_1.id, _external=True), payload_data)
assert response.status_code == 200
assert response.body == 'OK'
assert self.project.builds.count() == 0
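    @classmethod
    def _should_skip_sketch(cls, text):
        # A minimal sketch (hypothetical helper, not the actual kozmic view
        # code) of the skip check both tests above exercise: a build is
        # skipped when the commit message or pull request title/body contains
        # any of the skip patterns, compared case-insensitively.
        text = (text or '').lower()
        return any(pattern.lower() in text for pattern in cls.skip_patterns)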
class TestBadges(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.user = factories.UserFactory.create()
self.project = factories.ProjectFactory.create(
owner=self.user,
gh_login='aromanovich',
gh_name='flask-webtest')
def test_basics(self):
r = self.w.get('/badges/aromanovich/flask-webtest/master')
assert r.status_code == 307
assert r.location == 'https://kozmic.test/static/img/badges/success.png'
self.build = factories.BuildFactory.create(
project=self.project,
status='failure',
gh_commit_ref='feature-branch')
# master branch is still "success"
r = self.w.get('/badges/aromanovich/flask-webtest/master')
assert r.status_code == 307
assert r.location == 'https://kozmic.test/static/img/badges/success.png'
# feature-branch is "failure"
r = self.w.get('/badges/aromanovich/flask-webtest/feature-branch')
assert r.status_code == 307
assert r.location == 'https://kozmic.test/static/img/badges/failure.png'
class TestBuilds(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.user = factories.UserFactory.create()
self.project = factories.ProjectFactory.create(owner=self.user)
self.build = factories.BuildFactory.create(project=self.project)
self.hook = factories.HookFactory.create(project=self.project)
self.hook_call = factories.HookCallFactory.create(
hook=self.hook, build=self.build)
def test_basics(self):
self.job = factories.JobFactory.create(
build=self.build,
hook_call=self.hook_call,
started_at=dt.datetime.utcnow() - dt.timedelta(minutes=2),
finished_at=dt.datetime.utcnow(),
            stdout='\x1b[4mHello!\x1b[24m')
self.login(user_id=self.user.id)
r = self.w.get(url_for('projects.build', project_id=self.project.id,
id=self.build.id))
assert '<span class="ansi4">Hello!</span>' in r
def test_restart(self):
job = factories.JobFactory.create(
build=self.build,
hook_call=self.hook_call,
started_at=dt.datetime.utcnow() - dt.timedelta(minutes=2),
finished_at=dt.datetime.utcnow(),
            stdout='\x1b[4mHello!\x1b[24m')
job.build.status = 'success'
self.db.session.commit()
assert job.is_finished()
self.login(user_id=self.user.id)
r = self.w.get(url_for('projects.build', project_id=self.project.id,
id=self.build.id))
with mock.patch('kozmic.projects.views.restart_job') as restart_job_mock:
r.click('Restart').follow()
restart_job_mock.delay.assert_called_once_with(job.id)
assert job.build.status == 'enqueued'
|
abak-press/kozmic-ci
|
tests/func_tests.py
|
Python
|
bsd-3-clause
| 29,170
|
[
"VisIt"
] |
6560919459db73bb6eaaf1d9b875a24295a9e1149d1211a719d05de710be2f6b
|
"""Axis binary sensor platform tests."""
from homeassistant.components import axis
import homeassistant.components.binary_sensor as binary_sensor
from homeassistant.setup import async_setup_component
from .test_device import NAME, setup_axis_integration
EVENTS = [
{
"operation": "Initialized",
"topic": "tns1:Device/tnsaxis:Sensor/PIR",
"source": "sensor",
"source_idx": "0",
"type": "state",
"value": "0",
},
{
"operation": "Initialized",
"topic": "tnsaxis:CameraApplicationPlatform/VMD/Camera1Profile1",
"type": "active",
"value": "1",
},
]
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert (
await async_setup_component(
hass, binary_sensor.DOMAIN, {"binary_sensor": {"platform": axis.DOMAIN}}
)
is True
)
assert axis.DOMAIN not in hass.data
async def test_no_binary_sensors(hass):
"""Test that no sensors in Axis results in no sensor entities."""
await setup_axis_integration(hass)
assert not hass.states.async_entity_ids("binary_sensor")
async def test_binary_sensors(hass):
"""Test that sensors are loaded properly."""
device = await setup_axis_integration(hass)
for event in EVENTS:
device.api.stream.event.manage_event(event)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("binary_sensor")) == 2
pir = hass.states.get(f"binary_sensor.{NAME}_pir_0")
assert pir.state == "off"
assert pir.name == f"{NAME} PIR 0"
vmd4 = hass.states.get(f"binary_sensor.{NAME}_vmd4_camera1profile1")
assert vmd4.state == "on"
assert vmd4.name == f"{NAME} VMD4 Camera1Profile1"
|
postlund/home-assistant
|
tests/components/axis/test_binary_sensor.py
|
Python
|
apache-2.0
| 1,798
|
[
"VMD"
] |
65d03640697b9446adbf08d0e506f345449957e33cb008f4908c39e36d9087fa
|
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
    ----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state: numpy.RandomState
The generator used to initialize the centers.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. See: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
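def _k_init_example():
    # Illustrative usage sketch of the seeding routine above (a hypothetical
    # helper, not part of the library): pick 3 seeds from random 2-D data
    # with a fixed random state.
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 2)
    centers = _k_init(X_demo, 3, row_norms(X_demo, squared=True), rng)
    return centers  # array of shape (3, 2)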
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
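# Worked example (illustrative): for a dense X whose per-feature variances
# are [1.0, 3.0] and tol=1e-4, _tolerance returns
# np.mean([1.0, 3.0]) * 1e-4 == 2e-4.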
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
return_n_iter=False):
"""K-means clustering algorithm.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter: int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
    # subtract mean of x for more accurate distance computations
if not sp.issparse(X) or hasattr(init, '__array__'):
X_mean = X.mean(axis=0)
if not sp.issparse(X):
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init = np.asarray(init).copy()
init -= X_mean
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
init=init, verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
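# A minimal usage sketch of the function above (illustrative only):
#
#     X = np.array([[0., 0.], [0., 1.], [5., 5.], [5., 6.]])
#     centers, labels, inertia = k_means(X, n_clusters=2, random_state=0)
#
# The two returned centers land near [0, 0.5] and [5, 5.5], and `labels`
# assigns each row of X to its nearest center.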
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
init='k-means++', verbose=False, random_state=None,
tol=1e-4, precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init: {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
        If a callable is passed, it should take arguments X, k and
        a random state and return an initialization.
tol: float, optional
The relative increment in the results before declaring convergence.
verbose: boolean, optional
Verbosity mode
x_squared_norms: array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
if squared_norm(centers_old - centers) <= tol:
if verbose:
print("Converged at iteration %d" % i)
break
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
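def _labels_inertia_example():
    # Illustrative sketch of the E-step above (hypothetical helper, not part
    # of the library): two 1-D points assigned to the nearer of two centers.
    # The labels come out as [0, 1] and the inertia is 0.1**2 + 0.1**2 = 0.02.
    X = np.array([[0.1], [4.9]])
    centers = np.array([[0.0], [5.0]])
    return _labels_inertia(X, row_norms(X, squared=True), centers)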
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X: array, shape (n_samples, n_features)
k: int
number of centroids
init: {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state: integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms: array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers: array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
centers = init
elif callable(init):
centers = init(X, k, random_state=random_state)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
if len(centers) != k:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, k))
return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int, default: 1
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
    -----
The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), where n is the number of
    samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
See also
--------
MiniBatchKMeans:
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
    probably much faster than the default batch implementation.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True, n_jobs=1):
if hasattr(init, '__array__'):
n_clusters = init.shape[0]
init = np.asarray(init, dtype=np.float64)
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr')
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
if X.dtype.kind != 'f':
warnings.warn("Got data type %s, converted to float "
"to avoid overflows" % X.dtype,
RuntimeWarning, stacklevel=2)
X = X.astype(np.float)
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter,
verbose=self.verbose, return_n_iter=True,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs)
return self
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
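# A minimal usage sketch of the estimator above (illustrative only):
#
#     km = KMeans(n_clusters=2, random_state=0).fit(X)
#     km.cluster_centers_    # array of shape (2, n_features)
#     km.predict(X_new)      # index of the nearest center per sample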
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float64, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional
Controls the verbosity.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
centers[center_idx] /= counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
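# The dense update above is an incremental (streaming) mean: each center is
# rescaled by its previous count, summed with its new batch members, and
# divided by the updated count. A minimal standalone sketch of that update
# (illustrative only, hypothetical numbers):
#
# >>> import numpy as np
# >>> center, count = np.array([1.0, 1.0]), 2
# >>> batch = np.array([[3.0, 3.0], [5.0, 5.0]])
# >>> center = (center * count + batch.sum(axis=0)) / (count + len(batch))
# >>> count += len(batch)
# >>> center.tolist()
# [2.5, 2.5]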
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
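# Standalone sketch of the exponentially weighted average used above to
# smooth per-batch statistics (illustrative only; alpha plays the role of
# the batch_size-dependent weight computed in _mini_batch_convergence):
#
# >>> def ewa(values, alpha):
# ...     out = values[0]
# ...     for v in values[1:]:
# ...         out = out * (1 - alpha) + v * alpha
# ...     return out
# >>> ewa([10.0, 8.0, 9.0, 7.0], 0.5)
# 8.0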
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that does not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
but induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-means
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge to a
better clustering.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
        defined as the sum of squared distances of samples to their
        nearest cluster center.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, np.double)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, np.double)
distances = np.zeros(self.batch_size, dtype=np.float64)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.random_integers(
0, n_samples - 1, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.random_integers(
0, n_samples - 1, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
        labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=np.float64)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
            # this is the first call of partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
            # The lower the minimum count is, the more often we do random
            # reassignment. However, we don't want to do it too often, to
            # allow counts to build up
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=np.float64)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, np.double), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
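# A minimal usage sketch for both the batch and streaming entry points
# (hypothetical data; assumes scikit-learn is installed and importable):
#
# >>> import numpy as np
# >>> from sklearn.cluster import MiniBatchKMeans
# >>> X = np.random.RandomState(0).rand(600, 2)
# >>> mbk = MiniBatchKMeans(n_clusters=3, batch_size=100, random_state=0)
# >>> labels = mbk.fit(X).predict(X)
# >>> for chunk in np.array_split(X, 6):   # streaming alternative
# ...     mbk = mbk.partial_fit(chunk)
# >>> mbk.cluster_centers_.shape
# (3, 2)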
|
maheshakya/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 54,070
|
[
"Gaussian"
] |
93c43ba90f3a77381ce4dc179019f9cdb9e559ecf7f10721baf62124f69fd7d5
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
*************************************
**espresso.interaction.LennardJones**
*************************************
"""
from espresso import pmi, infinity
from espresso.esutil import *
from espresso.interaction.Potential import *
from espresso.interaction.Interaction import *
from _espresso import interaction_LennardJones, \
interaction_VerletListLennardJones, \
interaction_VerletListAdressLennardJones, \
interaction_VerletListAdressLennardJones2, \
interaction_VerletListHadressLennardJones, \
interaction_VerletListHadressLennardJones2, \
interaction_CellListLennardJones, \
interaction_FixedPairListLennardJones
class LennardJonesLocal(PotentialLocal, interaction_LennardJones):
'The (local) Lennard-Jones potential.'
def __init__(self, epsilon=1.0, sigma=1.0,
cutoff=infinity, shift="auto"):
"""Initialize the local Lennard Jones object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift =="auto":
cxxinit(self, interaction_LennardJones,
epsilon, sigma, cutoff)
else:
cxxinit(self, interaction_LennardJones,
epsilon, sigma, cutoff, shift)
class VerletListLennardJonesLocal(InteractionLocal, interaction_VerletListLennardJones):
'The (local) Lennard Jones interaction using Verlet lists.'
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListLennardJones, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletListLocal(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
class VerletListAdressLennardJonesLocal(InteractionLocal, interaction_VerletListAdressLennardJones):
    'The (local) Lennard-Jones interaction using Verlet lists (AdResS).'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressLennardJones, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListAdressLennardJones2Local(InteractionLocal, interaction_VerletListAdressLennardJones2):
    'The (local) Lennard-Jones interaction using Verlet lists (AdResS).'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListAdressLennardJones2, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressLennardJonesLocal(InteractionLocal, interaction_VerletListHadressLennardJones):
    'The (local) Lennard-Jones interaction using Verlet lists (H-AdResS).'
def __init__(self, vl, fixedtupleList):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressLennardJones, vl, fixedtupleList)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressLennardJones2Local(InteractionLocal, interaction_VerletListHadressLennardJones2):
    'The (local) Lennard-Jones interaction using Verlet lists (H-AdResS).'
def __init__(self, vl, fixedtupleList, KTI = False):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListHadressLennardJones2, vl, fixedtupleList, KTI)
def setPotentialAT(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialAT(self, type1, type2, potential)
def setPotentialCG(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotentialCG(self, type1, type2, potential)
class CellListLennardJonesLocal(InteractionLocal, interaction_CellListLennardJones):
'The (local) Lennard Jones interaction using cell lists.'
def __init__(self, stor):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_CellListLennardJones, stor)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesLocal(InteractionLocal, interaction_FixedPairListLennardJones):
'The (local) Lennard-Jones interaction using FixedPair lists.'
def __init__(self, system, vl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListLennardJones, system, vl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def setFixedPairList(self, fixedpairlist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fixedpairlist)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
if pmi.isController:
class LennardJones(Potential):
'The Lennard-Jones potential.'
pmiproxydefs = dict(
cls = 'espresso.interaction.LennardJonesLocal',
pmiproperty = ['epsilon', 'sigma']
)
class VerletListLennardJones(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListLennardJonesLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
class VerletListAdressLennardJones(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListAdressLennardJonesLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListAdressLennardJones2(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListAdressLennardJones2Local',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressLennardJones(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListHadressLennardJonesLocal',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class VerletListHadressLennardJones2(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.VerletListHadressLennardJones2Local',
pmicall = ['setPotentialAT', 'setPotentialCG']
)
class CellListLennardJones(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.CellListLennardJonesLocal',
pmicall = ['setPotential']
)
class FixedPairListLennardJones(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espresso.interaction.FixedPairListLennardJonesLocal',
pmicall = ['setPotential', 'setFixedPairList','getFixedPairList' ]
)
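# A minimal usage sketch (the surrounding system, storage and Verlet-list
# setup are assumed; `system` and `vl` are hypothetical placeholders):
#
# >>> import espresso
# >>> pot = espresso.interaction.LennardJones(epsilon=1.0, sigma=1.0, cutoff=2.5)
# >>> inter = espresso.interaction.VerletListLennardJones(vl)
# >>> inter.setPotential(type1=0, type2=0, potential=pot)
# >>> system.addInteraction(inter)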
|
BackupTheBerlios/espressopp
|
src/interaction/LennardJones.py
|
Python
|
gpl-3.0
| 10,773
|
[
"ESPResSo"
] |
3a38daf65a0b51b154978889521a4f108ec33b176d0ca5751c3c4ff1da2fd122
|
#!/usr/bin/env python
#coding: utf8
import os
from splinter import Browser
from sys import exit
import time
username = raw_input("Nombre de usuario: ")
pswd = raw_input("Contraseña: ")
alias = raw_input("Alias: ")
with Browser('chrome') as browser:
browser.visit("https://www.corporativo.telcel.com/pcorporativo/login.iface")
button = browser.find_by_name("j_id22:j_id24").click()
browser.fill('j_spring_security_check:j_username', username)
browser.find_by_id('j_password').fill(pswd)
browser.fill('j_spring_security_check:j_alias', alias)
btn2 = browser.find_by_name("j_spring_security_check:j_id38").click()
time.sleep(5)
btn3 = browser.find_by_name("corporativo:menu:0.1").click()
btn4 = browser.find_by_id("corporativo:menu:0:menuBar:adendum:link").click()
num = 0
while True:
if browser.is_element_present_by_id("corporativo:menu:0:tree:n-" + str(num) + ":j_id65"):
btn5 = browser.find_by_id("corporativo:menu:0:tree:n-" + str(num) + ":j_id65").click()
            btn6 = browser.find_by_name("corporativo:menu:0:j_id1254").click()
time.sleep(10)
if browser.is_element_not_present_by_id("corporativo:menu:0:j_id1257"):
print "Se hizo el proceso1"
print num
num += 1
else:
btn7 = browser.find_by_id("corporativo:menu:0:j_id1257").click()
print "Se hizo el proceso2"
print num
time.sleep(10)
num += 1
else:
exit(0)
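# Note: instead of fixed time.sleep() pauses, splinter can poll for an
# element using its built-in wait_time timeout, e.g. (sketch):
#
# >>> browser.is_element_present_by_id("corporativo:menu:0:j_id1257", wait_time=10)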
|
ianchesu/Adendum-Download
|
adendum.py
|
Python
|
mit
| 1,566
|
[
"VisIt"
] |
5bebd479abed0cc57b1794a1b1de7b70d0639f609977cfde10a5cafb10c9b408
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from mock import patch
from fakeldap import MockLDAP
from zilencer.models import Deployment
from zerver.views import do_change_password, create_homepage_form
from zerver.views.invite import get_invitee_emails_set
from zerver.models import (
get_realm_by_string_id, get_prereg_user_by_email, get_user_profile_by_email,
PreregistrationUser, Realm, RealmAlias, Recipient, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription,
)
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import enqueue_welcome_emails, one_click_unsubscribe_link
from zerver.lib.test_helpers import find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.session_user import get_session_dict_user
import re
import ujson
from six.moves import urllib
from six.moves import range
import six
from six import text_type
from typing import Any, List
class PublicURLTest(ZulipTestCase):
"""
Account creation URLs are accessible even when not logged in. Authenticated
URLs redirect to a page.
"""
def fetch(self, method, urls, expected_status):
# type: (str, List[str], int) -> None
for url in urls:
response = getattr(self.client, method)(url) # e.g. self.client_post(url) if method is "post"
self.assertEqual(response.status_code, expected_status,
msg="Expected %d, received %d for %s to %s" % (
expected_status, response.status_code, method, url))
def test_public_urls(self):
# type: () -> None
"""
Test which views are accessible when not logged in.
"""
# FIXME: We should also test the Tornado URLs -- this codepath
# can't do so because this Django test mechanism doesn't go
# through Tornado.
get_urls = {200: ["/accounts/home/", "/accounts/login/"
"/en/accounts/home/", "/ru/accounts/home/",
"/en/accounts/login/", "/ru/accounts/login/",
"/help/"],
302: ["/", "/en/", "/ru/"],
401: ["/api/v1/streams/Denmark/members",
"/api/v1/users/me/subscriptions",
"/api/v1/messages",
"/json/messages",
"/api/v1/streams",
],
404: ["/help/nonexistent",],
}
post_urls = {200: ["/accounts/login/"],
302: ["/accounts/logout/"],
401: ["/json/messages",
"/json/invite_users",
"/json/settings/change",
"/json/subscriptions/remove",
"/json/subscriptions/exists",
"/json/subscriptions/property",
"/json/get_subscribers",
"/json/fetch_api_key",
"/json/users/me/subscriptions",
"/api/v1/users/me/subscriptions",
],
400: ["/api/v1/external/github",
"/api/v1/fetch_api_key",
],
}
put_urls = {401: ["/json/users/me/pointer"],
}
for status_code, url_set in six.iteritems(get_urls):
self.fetch("get", url_set, status_code)
for status_code, url_set in six.iteritems(post_urls):
self.fetch("post", url_set, status_code)
for status_code, url_set in six.iteritems(put_urls):
self.fetch("put", url_set, status_code)
def test_get_gcid_when_not_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID=None):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEquals(400, resp.status_code,
msg="Expected 400, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('error', data['result'])
def test_get_gcid_when_configured(self):
# type: () -> None
with self.settings(GOOGLE_CLIENT_ID="ABCD"):
resp = self.client_get("/api/v1/fetch_google_client_id")
self.assertEquals(200, resp.status_code,
msg="Expected 200, received %d for GET /api/v1/fetch_google_client_id" % resp.status_code,
)
data = ujson.loads(resp.content)
self.assertEqual('success', data['result'])
self.assertEqual('ABCD', data['google_client_id'])
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
set_default_streams(get_realm_by_string_id("zulip"), ["Denmark", "Verona"])
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test", "test")
user_profile = get_user_profile_by_email("test@zulip.com")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = 'hamlet@zulip.com'
old_password = initial_password(email)
self.login(email)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEquals(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assertEqual(result.status_code, 200)
self.assertIn('Check your email', result.content.decode("utf-8"))
result = self.client_get('/accounts/password/done/')
self.assertEqual(result.status_code, 200)
self.assertIn("We've reset your password!", result.content.decode("utf-8"))
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assertEqual(result.status_code, 200)
self.assertIn("Still no email?", result.content.decode("utf-8"))
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm_by_string_id("zulip")
stream_names = ["stream_%s" % i for i in range(40)]
for stream_name in stream_names:
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_names)
with queries_captured() as queries:
self.register("test", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm_by_string_id("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("hamlet@zulip.com")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams):
# type: (str, List[text_type]) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams})
def check_sent_emails(self, correct_recipients):
# type: (List[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees)
}
result = self.client_post('/json/bulk_invite_users', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com"}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"]}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"]})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm_by_string_id("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
    def test_if_emails_separated_by_commas_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
    def test_if_emails_separated_by_newlines_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
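# A standalone sketch of the mixed-delimiter parsing these tests exercise
# (hypothetical helper mirroring get_invitee_emails_set, whose real
# implementation lives in zerver.views.invite):
#
# >>> import re
# >>> def parse(raw):
# ...     emails = set()
# ...     for chunk in re.split(r'[,\n]', raw):
# ...         match = re.search(r'<(.+)>', chunk)
# ...         emails.add((match.group(1) if match else chunk).strip())
# ...     return emails
# >>> parse("Email One <a@x.com>,B<b@x.com>\nc@x.com") == {'a@x.com', 'b@x.com', 'c@x.com'}
# True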
class EmailUnsubscribeTests(ZulipTestCase):
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = 'test.com'
email = "user1@test.com"
realm = get_realm_by_string_id('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id)
self.assertEquals(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEquals(realm.org_type, Realm.COMMUNITY)
self.assertEquals(realm.restricted_to_domain, False)
self.assertEquals(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_with_subdomain(self):
# type: () -> None
username = "user1"
password = "test"
string_id = "zuliptest"
domain = "test.com"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm_by_string_id('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username, password, domain=domain,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEquals(result.status_code, 302)
# Make sure the realm is created
realm = get_realm_by_string_id(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
username = "user1"
password = "test"
domain = "test.com"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'mit': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(username, password, domain = domain,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEquals(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
username = "newguy"
email = "newguy@zulip.com"
password = "newpassword"
realm = get_realm_by_string_id('zulip')
domain = realm.domain
do_set_realm_default_language(realm, "de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s@%s" % (username, domain)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(username, password, domain)
self.assertEquals(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_unique_completely_open_domain(self):
# type: () -> None
username = "user1"
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
realm = get_realm_by_string_id('mit')
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 200)
self.assertIn("You're almost there.", result.content.decode('utf8'))
def test_completely_open_domain_success(self):
# type: () -> None
username = "user1"
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm_by_string_id('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip.com/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain='acme.com',
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 200)
self.assertIn("You're almost there.", result.content.decode('utf8'))
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = create_homepage_form(request, {'email': 'user@acme.com'})
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm_by_string_id('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = create_homepage_form(request, {'email': 'user@zulip.com'})
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = create_homepage_form(request, {'email': 'user@acme.com'})
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
username = "newuser"
password = "testing"
domain = "zulip.com"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 200)
self.assertIn("You're almost there.", result.content.decode('utf8'))
self.assertIn("New User Name", result.content.decode('utf8'))
self.assertIn("newuser@zulip.com", result.content.decode('utf8'))
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 200)
self.assertIn("You're almost there.", result.content.decode('utf8'))
self.assertIn("newuser@zulip.com", result.content.decode('utf8'))
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
username = "sipbtest"
password = "test"
domain = "mit.edu"
email = "sipbtest@mit.edu"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEquals(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise ValueError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 200)
result = self.submit_reg_form_for_user(username,
password,
domain=domain,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEquals(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.client_delete('/json/users/me')
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = 'hamlet@zulip.com'
user_2 = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
|
vikas-parashar/zulip
|
zerver/tests/test_signup.py
|
Python
|
apache-2.0
| 47,788
|
[
"VisIt"
] |
5556c0e98c7fe848e898c4b5dfa9d945baf5403d54a46edc2814cd070ed55926
|
"""
Naive Bayes with MapReduce
The algorithm fits a multinomial distribution for discrete features and a Gaussian distribution for numerical features.
Its output is consistent with the Naive Bayes implementations in Orange and scikit-learn.
Reference:
The MapReduce formulation of the algorithm follows Cheng-Tao Chu, Sang Kyun Kim, Yi-An Lin, YuanYuan Yu, Gary Bradski,
Andrew Ng, and Kunle Olukotun. "Map-Reduce for Machine Learning on Multicore". NIPS 2006.
"""
def simple_init(interface, params):
return params
def map_fit(interface, state, label, inp):
"""
Counts occurrences of feature values for every row in the given data chunk. For continuous features it collects
the values and outputs their count, mean and variance per (label, feature index) pair.
For discrete features it counts occurrences of labels and values for every feature and outputs occurrences of
(label, feature index, feature value) triples.
"""
import numpy as np
combiner = {} # combiner used for joining of intermediate pairs
out = interface.output(0) # all outputted pairs have the same output label
for row in inp: # for every row in data chunk
row = row.strip().split(state["delimiter"]) # split row
        if len(row) > 1:  # skip empty rows
for i, j in enumerate(state["X_indices"]): # for defined features
if row[j] not in state["missing_vals"]: # check missing values
# creates a pair - label, feature index
pair = row[state["y_index"]] + state["delimiter"] + str(j)
if state["X_meta"][i] == "c": # continuous features
if pair in combiner:
# convert to float and store value
combiner[pair].append(np.float32(row[j]))
else:
combiner[pair] = [np.float32(row[j])]
else: # discrete features
# add feature value to pair
pair += state["delimiter"] + row[j]
# increase counts of current pair
combiner[pair] = combiner.get(pair, 0) + 1
# increase label counts
combiner[row[state["y_index"]]] = combiner.get(row[state["y_index"]], 0) + 1
for k, v in combiner.iteritems(): # all pairs in combiner are output
        if len(k.split(state["delimiter"])) == 2:  # continuous features
# number of elements, partial mean and variance
out.add(k, (np.size(v), np.mean(v, dtype=np.float32), np.var(v, dtype=np.float32)))
else: # discrete features and labels
out.add(k, v)
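# A minimal standalone sketch (not used by the job) of the combiner key scheme
# that map_fit emits, with made-up rows and a "," delimiter: continuous
# features key on "label,index" and collect raw values; discrete features key
# on "label,index,value" and count occurrences; bare labels count rows.
def _sketch_combiner_keys():
    delimiter = ","
    rows = [["5.1", "red", "yes"], ["4.9", "blue", "yes"]]
    y_index, X_indices, X_meta = 2, [0, 1], ["c", "d"]
    combiner = {}
    for row in rows:
        for i, j in enumerate(X_indices):
            pair = row[y_index] + delimiter + str(j)
            if X_meta[i] == "c":
                combiner.setdefault(pair, []).append(float(row[j]))
            else:
                pair += delimiter + row[j]
                combiner[pair] = combiner.get(pair, 0) + 1
        combiner[row[y_index]] = combiner.get(row[y_index], 0) + 1
    # -> {'yes,0': [5.1, 4.9], 'yes,1,red': 1, 'yes,1,blue': 1, 'yes': 2}
    return combiner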
def reduce_fit(interface, state, label, inp):
"""
Function separates aggregation of continuous and discrete features.
For continuous features it aggregates partially calculated means and variances and returns them. For discrete
features it aggregates pairs and returns them. Pairs with label occurrences are used to calculate prior probabilities
"""
from disco.util import kvgroup # function for grouping values by key
import numpy as np
out = interface.output(0) # all outputted pairs have the same output label
# model of naive Bayes stores label names, sum of all label occurrences and pairs
# (feature index, feature values) for discrete features which are needed to optimize predict phase.
fit_model = {"y_labels": [], "y_sum": 0, "iv": set()}
combiner = {} # combiner maintains correct order of means and variances.
means, variances = [], []
k_prev = ""
for key, value in kvgroup(inp): # input pairs are sorted and grouped by key
k_split = key.split(state["delimiter"]) # pair is split
if len(k_split) == 3: # discrete features
# store pair (feature index, feature value)
fit_model["iv"].add(tuple(k_split[1:]))
# aggregate and output occurrences of a pair
out.add(tuple(k_split), sum(value))
elif len(k_split) == 2: # continuous features
            # when the label changes, flush the combiner: this yields the complete
            # means and variances of every feature for the previous label.
if k_split[0] != k_prev and k_prev != "":
mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())])
means.append(mean)
variances.append(var)
# number of elements, partial mean, partial variance.
n_a = mean_a = var_a = 0
# code aggregates partially calculated means and variances
for n_b, mean_b, var_b in value:
n_ab = n_a + n_b
var_a = ((n_a * var_a + n_b * var_b) / float(n_ab)) + (
n_a * n_b * ((mean_b - mean_a) / float(n_ab)) ** 2)
mean_a = (n_a * mean_a + n_b * mean_b) / float(n_ab)
n_a = n_ab
# maintains correct order of statistics for every feature
combiner[int(k_split[1])] = (mean_a, var_a + 1e-9)
k_prev = k_split[0]
else: # aggregates label occurrences
fit_model[key] = np.sum(value)
fit_model["y_sum"] += fit_model[key] # sum of all label occurrences
fit_model["y_labels"].append(key)
    # flush statistics of the last label; the loop above only flushes them on a label change
if len(means) > 0:
mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())])
out.add("mean", np.array(means + [mean], dtype=np.float32))
variances = np.array(variances + [var], dtype=np.float32)
out.add("var", variances)
out.add("var_log", np.log(np.pi * variances))
# calculation of prior probabilities
prior = [fit_model[y_label] / float(fit_model["y_sum"]) for y_label in fit_model["y_labels"]]
out.add("prior", np.array(prior, dtype=np.float32))
out.add("prior_log", np.log(prior))
out.add("iv", list(fit_model["iv"]))
out.add("y_labels", fit_model["y_labels"])
def map_predict(interface, state, label, inp):
"""
    Makes predictions for the samples with the given model, computing probabilities with multinomial and Gaussian distributions.
"""
import numpy as np
out = interface.output(0)
continuous = [j for i, j in enumerate(state["X_indices"]) if
state["X_meta"][i] == "c"] # indices of continuous features
discrete = [j for i, j in enumerate(state["X_indices"]) if
state["X_meta"][i] == "d"] # indices of discrete features
    cont = len(continuous) > 0  # enables calculation of Gaussian probabilities
    disc = len(discrete) > 0  # enables calculation of multinomial probabilities
for row in inp:
row = row.strip().split(state["delimiter"])
        if len(row) > 1:  # skip empty rows
# set id of a sample
x_id = "" if state["id_index"] == -1 else row[state["id_index"]]
# initialize prior probability for all labels
probs = state["fit_model"]["prior_log"]
if cont: # continuous features
x = np.array([(0 if row[j] in state["missing_vals"] else float(row[j])) for j in
continuous]) # sets selected features of the sample
# Gaussian distribution
probs = probs - 0.5 * np.sum(
np.true_divide((x - state["fit_model"]["mean"]) ** 2, state["fit_model"]["var"]) +
state["fit_model"]["var_log"], axis=1)
if disc: # discrete features
# multinomial distribution
probs = probs + np.sum(
[(0 if row[i] in state["missing_vals"] else state["fit_model"].get((str(i), row[i]), np.zeros(1)))
for i in discrete], axis=0)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = np.log(np.sum(np.exp(probs)))
probs = np.exp(np.array(probs) - log_prob_x)
# Predicted label is the one with highest probability
y_predicted = max(zip(probs, state["fit_model"]["y_labels"]))[1]
out.add(x_id, (y_predicted, probs.tolist()))
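# A toy re-run (not used by the job) of the continuous-feature scoring in
# map_predict: log prior plus the Gaussian log-density, up to a constant
# that is identical for every label. All numbers below are made up.
def _sketch_gaussian_scores():
    import numpy as np
    mean = np.array([[0.0, 1.0, 2.0], [1.0, 0.0, 3.0]])   # one row per label
    var = np.array([[1.0, 2.0, 1.0], [0.5, 1.0, 2.0]])
    prior_log = np.log(np.array([0.6, 0.4]))
    x = np.array([0.5, 0.5, 2.5])                          # one sample
    probs = prior_log - 0.5 * np.sum(
        (x - mean) ** 2 / var + np.log(np.pi * var), axis=1)
    return int(np.argmax(probs))  # index of the most probable label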
def fit(dataset, save_results=True, show=False):
"""
    Builds a Naive Bayes model: multiple map functions run in parallel and one reduce function aggregates their intermediate results into the model.
Parameters
----------
    dataset - dataset object with input URLs and other parameters
save_results - save results to ddfs
show - show info about job execution
Returns
-------
Urls of fit model results on ddfs
"""
from disco.worker.pipeline.worker import Worker, Stage
from disco.core import Job
# define a job and set save of results to ddfs
job = Job(worker=Worker(save_results=save_results))
# job parallelizes mappers, sorts intermediate pairs and joins them with one reducer
job.pipeline = [
("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_fit)),
('group_all', Stage("reduce", init=simple_init, process=reduce_fit, sort=True, combine=True))]
job.params = dataset.params # job parameters (dataset object)
# define name of a job and input data urls
job.run(name="naivebayes_fit", input=dataset.params["data_tag"])
fitmodel_url = job.wait(show=show)
return {"naivebayes_fitmodel": fitmodel_url} # return results url
def predict(dataset, fitmodel_url, m=1, save_results=True, show=False):
"""
    Starts a job that makes predictions on the input data with the given model.
Parameters
----------
    dataset - dataset object with input URLs and other parameters
fitmodel_url - model created in fit phase
m - m estimate is used with discrete features
save_results - save results to ddfs
show - show info about job execution
Returns
-------
Urls of predictions on ddfs
"""
from disco.worker.pipeline.worker import Worker, Stage
from disco.core import Job, result_iterator
import numpy as np
try:
m = float(m)
except ValueError:
raise Exception("Parameter m should be numerical.")
if "naivebayes_fitmodel" in fitmodel_url:
# fit model is loaded from ddfs
fit_model = dict((k, v) for k, v in result_iterator(fitmodel_url["naivebayes_fitmodel"]))
if len(fit_model["y_labels"]) < 2:
print "There is only one class in training data."
return []
else:
raise Exception("Incorrect fit model.")
if dataset.params["X_meta"].count("d") > 0: # if there are discrete features in the model
        # logarithms are precomputed here to optimize the predict phase, instead of being recalculated by every mapper.
np.seterr(divide='ignore')
for iv in fit_model["iv"]:
dist = [fit_model.pop((y,) + iv, 0) for y in fit_model["y_labels"]]
fit_model[iv] = np.nan_to_num(
np.log(np.true_divide(np.array(dist) + m * fit_model["prior"], np.sum(dist) + m))) - fit_model[
"prior_log"]
del (fit_model["iv"])
# define a job and set save of results to ddfs
job = Job(worker=Worker(save_results=save_results))
# job parallelizes execution of mappers
job.pipeline = [
("split", Stage("map", input_chain=dataset.params["input_chain"], init=simple_init, process=map_predict))]
job.params = dataset.params # job parameters (dataset object)
job.params["fit_model"] = fit_model
# define name of a job and input data urls
job.run(name="naivebayes_predict", input=dataset.params["data_tag"])
results = job.wait(show=show)
return results
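# Hypothetical end-to-end usage (not part of the library): assumes `dataset`
# is a discomll dataset object carrying the parameters referenced above
# ("input_chain", "data_tag", "X_indices", "X_meta", "y_index", "delimiter").
def _example_run(dataset):
    from disco.core import result_iterator
    fit_urls = fit(dataset)  # {"naivebayes_fitmodel": urls}
    results = predict(dataset, fit_urls, m=1)
    for sample_id, (label, probs) in result_iterator(results):
        print("%s -> %s %s" % (sample_id, label, probs))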
|
romanorac/discomll
|
discomll/classification/naivebayes.py
|
Python
|
apache-2.0
| 11,861
|
[
"Gaussian"
] |
b85faefe1aeed2a905ffff1a129c412e835d25bfc5cdfb45474a8dec6f30afc3
|
"""
Django settings for neuron project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_PATH = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3c_z-wu^xvi($4o!l!i-o)m!hiy)f6)mh-2a(6kika)^nn)s3o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'neuron',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'neuron.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR + "/templates"],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'neuron.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/usr/share/nginx/html/neuron/static'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
'dfile':{
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(PROJECT_PATH, 'log/debug.log')
},
'ifile':{
'level':'INFO',
'class': 'logging.FileHandler',
'filename': os.path.join(PROJECT_PATH, 'log/info.log')
},
'efile':{
'level':'ERROR',
'class': 'logging.FileHandler',
'filename':os.path.join(PROJECT_PATH, 'log/error.log')
},
},
'loggers': {
'django': {
'handlers': ['ifile',],
'propagate': True,
'level': 'INFO',
},
        # A dict literal keeps only the last duplicate key, so three separate
        # 'django.request' entries would collapse to the last one; attach all
        # three handlers instead (each handler already filters by its own level).
        'django.request': {
            'handlers': ['dfile', 'ifile', 'efile'],
            'level': 'DEBUG',
            'propagate': True,
        },
}
}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.AllowAny',),
}
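# Illustrative only: application code picks up the loggers configured above
# via the standard logging module, e.g.:
#     import logging
#     logger = logging.getLogger('django.request')
#     logger.info('handled request')   # -> log/debug.log and log/info.log
#     logger.error('request failed')   # -> debug, info and error logs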
|
chugunovyar/factoryForBuild
|
neuron/settings.py
|
Python
|
gpl-3.0
| 4,902
|
[
"NEURON"
] |
47aeca4f330eed13259599728ad45ea185b32922ca0d36b479384aef0ab81594
|
"""Support for control of ElkM1 tasks ("macros")."""
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities)
from homeassistant.components.scene import Scene
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Create the Elk-M1 scene platform."""
if discovery_info is None:
return
elk = hass.data[ELK_DOMAIN]['elk']
entities = create_elk_entities(hass, elk.tasks, 'task', ElkTask, [])
async_add_entities(entities, True)
class ElkTask(ElkEntity, Scene):
"""Elk-M1 task as scene."""
async def async_activate(self):
"""Activate the task."""
self._element.activate()
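# Illustrative only (not part of this platform): once the entities above are
# created, an ElkTask is activated like any other Home Assistant scene, e.g.
# through the generic scene.turn_on service; the entity_id is hypothetical.
async def _example_activate(hass):
    await hass.services.async_call(
        'scene', 'turn_on', {'entity_id': 'scene.my_elk_task'}, blocking=True)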
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/elkm1/scene.py
|
Python
|
apache-2.0
| 743
|
[
"Elk"
] |
e6a6baff50aef811755a502677dbd0e13ea6dba3de4e49184392f994b3536e70
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2003-2006 Donald N. Allingham
# Copyright (C) 2004-2005 Eero Tamminen
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2008 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012-2016 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
"""Reports/Graphical Reports/Statistics Report"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import time
from functools import partial
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.sgettext
# Person and relation types
from gprime.lib import Person, FamilyRelType, EventType, EventRoleType
from gprime.lib.date import Date, gregorian
# gender and report type names
from gprime.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, FONT_SERIF,
PARA_ALIGN_CENTER, PARA_ALIGN_LEFT,
IndexMark, INDEX_TYPE_TOC)
from gprime.plug.menu import (BooleanOption,
EnumeratedListOption, NumberOption,
FilterOption, PersonOption)
from gprime.plug.report import Report
from gprime.plug.report import utils
from gprime.plug.report import MenuReportOptions
from gprime.plug.report import stdoptions
from gprime.datehandler import parser
from gprime.display.place import displayer as _pd
from gprime.proxy import CacheProxyDb
#------------------------------------------------------------------------
#
# Private Functions
#
#------------------------------------------------------------------------
def draw_wedge(doc, style, centerx, centery, radius, start_angle,
end_angle, short_radius=0):
""" draw a wedge """
from math import pi, cos, sin
while end_angle < start_angle:
end_angle += 360
path = []
degreestoradians = pi / 180.0
radiansdelta = degreestoradians / 2
sangle = start_angle * degreestoradians
eangle = end_angle * degreestoradians
while eangle < sangle:
eangle = eangle + 2 * pi
angle = sangle
if short_radius == 0:
if (end_angle - start_angle) != 360:
path.append((centerx, centery))
else:
origx = (centerx + cos(angle) * short_radius)
origy = (centery + sin(angle) * short_radius)
path.append((origx, origy))
while angle < eangle:
_x_ = centerx + cos(angle) * radius
_y_ = centery + sin(angle) * radius
path.append((_x_, _y_))
angle = angle + radiansdelta
_x_ = centerx + cos(eangle) * radius
_y_ = centery + sin(eangle) * radius
path.append((_x_, _y_))
if short_radius:
_x_ = centerx + cos(eangle) * short_radius
_y_ = centery + sin(eangle) * short_radius
path.append((_x_, _y_))
angle = eangle
while angle >= sangle:
_x_ = centerx + cos(angle) * short_radius
_y_ = centery + sin(angle) * short_radius
path.append((_x_, _y_))
angle -= radiansdelta
doc.draw_path(style, path)
delta = (eangle - sangle) / 2.0
rad = short_radius + (radius - short_radius) / 2.0
return ((centerx + cos(sangle + delta) * rad),
(centery + sin(sangle + delta) * rad))
def draw_pie_chart(doc, center_x, center_y, radius, data, start=0):
"""
    Draws a pie chart in the specified document. The data passed in should
    contain the raw values; the percentage of the circle covered by each
    slice is computed by the routine.
@param doc: Document to which the pie chart should be added
@type doc: BaseDoc derived class
@param center_x: x coordinate in centimeters where the center of the pie
chart should be. 0 is the left hand edge of the document.
@type center_x: float
@param center_y: y coordinate in centimeters where the center of the pie
chart should be. 0 is the top edge of the document
@type center_y: float
@param radius: radius of the pie chart. The pie charts width and height
will be twice this value.
@type radius: float
@param data: List of tuples containing the data to be plotted. The values
are (graphics_format, value), where graphics_format is a BaseDoc
GraphicsStyle, and value is a floating point number. Any other items in
the tuple are ignored. This allows you to share the same data list with
the L{draw_legend} function.
@type data: list
@param start: starting point in degrees, where the default of 0 indicates
a start point extending from the center to right in a horizontal line.
@type start: float
"""
total = 0.0
for item in data:
total += item[1]
for item in data:
incr = 360.0*(item[1]/total)
draw_wedge(doc, item[0], center_x, center_y, radius,
start, start + incr)
start += incr
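# A quick standalone sketch (not used by the report) of how draw_pie_chart
# turns raw values into wedge angles; drawing is omitted and the data below
# is made up.
def _sketch_wedge_angles():
    data = [("SC-color-0", 4), ("SC-color-1", 6), ("SC-color-2", 10)]
    total = float(sum(value for _style, value in data))
    start, angles = 0.0, []
    for style, value in data:
        incr = 360.0 * (value / total)
        angles.append((style, start, start + incr))
        start += incr
    # -> [('SC-color-0', 0.0, 72.0), ('SC-color-1', 72.0, 180.0),
    #     ('SC-color-2', 180.0, 360.0)]
    return angles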
def draw_legend(doc, start_x, start_y, data, title, label_style):
"""
Draws a legend for a graph in the specified document. The data passed is
used to define the legend. First item style is used for the optional
Legend title.
@param doc: Document to which the legend chart should be added
@type doc: BaseDoc derived class
@param start_x: x coordinate in centimeters where the left hand corner
of the legend is placed. 0 is the left hand edge of the document.
@type start_x: float
@param start_y: y coordinate in centimeters where the top of the legend
should be. 0 is the top edge of the document
@type start_y: float
@param data: List of tuples containing the data to be used to create the
legend. In order to be compatible with the graph plots, the first and
third values of the tuple used. The format is (graphics_format, value,
legend_description).
@type data: list
"""
style_sheet = doc.get_style_sheet()
if title:
gstyle = style_sheet.get_draw_style(label_style)
pstyle_name = gstyle.get_paragraph_style()
pstyle = style_sheet.get_paragraph_style(pstyle_name)
size = utils.pt2cm(pstyle.get_font().get_size())
doc.draw_text(label_style, title,
start_x + (3*size), start_y - (size*0.25))
start_y += size * 1.3
for (sformat, size, legend) in data:
gstyle = style_sheet.get_draw_style(sformat)
pstyle_name = gstyle.get_paragraph_style()
pstyle = style_sheet.get_paragraph_style(pstyle_name)
size = utils.pt2cm(pstyle.get_font().get_size())
doc.draw_box(sformat, "", start_x, start_y, (2*size), size)
doc.draw_text(label_style, legend,
start_x + (3*size), start_y - (size*0.25))
start_y += size * 1.3
_TTT = time.localtime(time.time())
_TODAY = parser.parse("%04d-%02d-%02d" % _TTT[:3])
def estimate_age(dbase, person,
end_handle=None, start_handle=None, today=_TODAY):
"""
Estimates the age of a person based off the birth and death
dates of the person. A tuple containing the estimated upper
and lower bounds of the person's age is returned. If either
the birth or death date is missing, a (-1, -1) is returned.
@param dbase: GRAMPS database to which the Person object belongs
@type dbase: DbBase
@param person: Person object to calculate the age of
@type person: Person
@param end_handle: Determines the event handle that determines
the upper limit of the age. If None, the death event is used
@type end_handle: str
@param start_handle: Determines the event handle that determines
the lower limit of the event. If None, the birth event is
used
@type start_handle: str
@returns: tuple containing the lower and upper bounds of the
person's age, or (-1, -1) if it could not be determined.
@rtype: tuple
"""
bhandle = None
if start_handle:
bhandle = start_handle
else:
bref = person.get_birth_ref()
if bref:
bhandle = bref.get_reference_handle()
dhandle = None
if end_handle:
dhandle = end_handle
else:
dref = person.get_death_ref()
if dref:
dhandle = dref.get_reference_handle()
# if either of the events is not defined, return an error message
if not bhandle:
return (-1, -1)
bdata = dbase.get_event_from_handle(bhandle).get_date_object()
if dhandle:
ddata = dbase.get_event_from_handle(dhandle).get_date_object()
else:
if today is not None:
ddata = today
else:
return (-1, -1)
# if the date is not valid, return an error message
if not bdata.get_valid() or not ddata.get_valid():
return (-1, -1)
# if a year is not valid, return an error message
if not bdata.get_year_valid() or not ddata.get_year_valid():
return (-1, -1)
bstart = bdata.get_start_date()
bstop = bdata.get_stop_date()
dstart = ddata.get_start_date()
dstop = ddata.get_stop_date()
def _calc_diff(low, high):
if (low[1], low[0]) > (high[1], high[0]):
return high[2] - low[2] - 1
else:
return high[2] - low[2]
if bstop == dstop == Date.EMPTY:
lower = _calc_diff(bstart, dstart)
age = (lower, lower)
elif bstop == Date.EMPTY:
lower = _calc_diff(bstart, dstart)
upper = _calc_diff(bstart, dstop)
age = (lower, upper)
elif dstop == Date.EMPTY:
lower = _calc_diff(bstop, dstart)
upper = _calc_diff(bstart, dstart)
age = (lower, upper)
else:
lower = _calc_diff(bstop, dstart)
upper = _calc_diff(bstart, dstop)
age = (lower, upper)
return age
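# A standalone illustration (not used by the report) of the comparison inside
# _calc_diff above: with (day, month, year) triples, a birthday that falls
# later in the year than the reference date subtracts one full year.
def _sketch_calc_diff():
    def calc_diff(low, high):
        if (low[1], low[0]) > (high[1], high[0]):
            return high[2] - low[2] - 1
        return high[2] - low[2]
    assert calc_diff((15, 6, 1980), (1, 3, 2020)) == 39  # birthday not reached
    assert calc_diff((15, 6, 1980), (1, 9, 2020)) == 40  # birthday passed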
#------------------------------------------------------------------------
#
# Global options and their names
#
#------------------------------------------------------------------------
class _options:
# sort type identifiers
SORT_VALUE = 0
SORT_KEY = 1
opt_sorts = [
(SORT_VALUE, "Item count", _("Item count")),
(SORT_KEY, "Item name", _("Item name"))
]
opt_genders = [
(Person.UNKNOWN, "Both", _("Both")),
(Person.MALE, "Men", _("Men")),
(Person.FEMALE, "Women", _("Women"))
]
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
return value
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
#------------------------------------------------------------------------
#
# Data extraction methods from the database
#
#------------------------------------------------------------------------
class Extract:
""" Class for extracting statistical data from the database """
def __init__(self):
"""Methods for extracting statistical data from the database"""
# key, non-localized name, localized name, type method, data method
self.extractors = {
'data_title': ("Title", _T_("person|Title"),
self.get_person, self.get_title),
'data_sname': ("Surname", _T_("Surname"),
self.get_person, self.get_surname),
'data_fname': ("Forename", _T_("Forename"),
self.get_person, self.get_forename),
'data_gender': ("Gender", _T_("Gender"),
self.get_person, self.get_gender),
'data_byear': ("Birth year", _T_("Birth year"),
self.get_birth, self.get_year),
'data_dyear': ("Death year", _T_("Death year"),
self.get_death, self.get_year),
'data_bmonth': ("Birth month", _T_("Birth month"),
self.get_birth, self.get_month),
'data_dmonth': ("Death month", _T_("Death month"),
self.get_death, self.get_month),
'data_bplace': ("Birth place", _T_("Birth place"),
self.get_birth, self.get_place),
'data_dplace': ("Death place", _T_("Death place"),
self.get_death, self.get_place),
'data_mplace': ("Marriage place", _T_("Marriage place"),
self.get_marriage_handles, self.get_places),
'data_mcount': ("Number of relationships",
_T_("Number of relationships"),
self.get_any_family_handles,
self.get_handle_count),
'data_fchild': ("Age when first child born",
_T_("Age when first child born"),
self.get_child_handles,
self.get_first_child_age),
'data_lchild': ("Age when last child born",
_T_("Age when last child born"),
self.get_child_handles, self.get_last_child_age),
'data_ccount': ("Number of children", _T_("Number of children"),
self.get_child_handles, self.get_handle_count),
'data_mage': ("Age at marriage", _T_("Age at marriage"),
self.get_marriage_handles, self.get_event_ages),
'data_dage': ("Age at death", _T_("Age at death"),
self.get_person, self.get_death_age),
'data_age': ("Age", _T_("Age"),
self.get_person, self.get_person_age),
'data_etypes': ("Event type", _T_("Event type"),
self.get_event_handles, self.get_event_type)
}
# ----------------- data extraction methods --------------------
# take an object and return a list of strings
def get_title(self, person):
"return title for given person"
# TODO: return all titles, not just primary ones...
title = person.get_primary_name().get_title()
if title:
return [title]
else:
return [_T_("(Preferred) title missing")]
def get_forename(self, person):
"return forenames for given person"
# TODO: return all forenames, not just primary ones...
firstnames = person.get_primary_name().get_first_name().strip()
if firstnames:
return firstnames.split()
else:
return [_T_("(Preferred) forename missing")]
def get_surname(self, person):
"return surnames for given person"
# TODO: return all surnames, not just primary ones...
# TODO: have the surname formatted according to the name_format too
surnames = person.get_primary_name().get_surname().strip()
if surnames:
return surnames.split()
else:
return [_T_("(Preferred) surname missing")]
def get_gender(self, person):
"return gender for given person"
# TODO: why there's no Person.getGenderName?
# It could be used by getDisplayInfo & this...
if person.gender == Person.MALE:
return [_T_("Men")]
if person.gender == Person.FEMALE:
return [_T_("Women")]
return [_T_("Gender unknown")]
def get_year(self, event):
"return year for given event"
date = event.get_date_object()
if date:
year = date.get_year()
if year:
return [str(year)]
return [_T_("Date(s) missing")]
def get_month(self, event):
"return month for given event"
date = event.get_date_object()
if date:
month = date.get_month()
if month:
return [self._locale.date_displayer.long_months[month]]
return [_T_("Date(s) missing")]
def get_place(self, event):
"return place for given event"
place_handle = event.get_place_handle()
if place_handle:
place = _pd.display_event(self.db, event)
if place:
return [place]
return [_T_("Place missing")]
def get_places(self, data):
"return places for given (person,event_handles)"
places = []
person, event_handles = data
for event_handle in event_handles:
event = self.db.get_event_from_handle(event_handle)
place_handle = event.get_place_handle()
if place_handle:
place = _pd.display_event(self.db, event)
if place:
places.append(place)
else:
places.append(_T_("Place missing"))
return places
def get_person_age(self, person):
"return age for given person, if alive"
death_ref = person.get_death_ref()
if not death_ref:
return [self.estimate_age(person)]
return [_T_("Already dead")]
def get_death_age(self, person):
"return age at death for given person, if dead"
death_ref = person.get_death_ref()
if death_ref:
return [self.estimate_age(person, death_ref.ref)]
return [_T_("Still alive")]
def get_event_ages(self, data):
"return ages at given (person,event_handles)"
person, event_handles = data
ages = [self.estimate_age(person, h) for h in event_handles]
if ages:
return ages
return [_T_("Events missing")]
def get_event_type(self, data):
"return event types at given (person,event_handles)"
types = []
person, event_handles = data
for event_handle in event_handles:
event = self.db.get_event_from_handle(event_handle)
event_type = self._(self._get_type(event.get_type()))
types.append(event_type)
if types:
return types
return [_T_("Events missing")]
def get_first_child_age(self, data):
"return age when first child in given (person,child_handles) was born"
ages, errors = self.get_sorted_child_ages(data)
if ages:
errors.append(ages[0])
return errors
return [_T_("Children missing")]
def get_last_child_age(self, data):
"return age when last child in given (person,child_handles) was born"
ages, errors = self.get_sorted_child_ages(data)
if ages:
errors.append(ages[-1])
return errors
return [_T_("Children missing")]
def get_handle_count(self, data):
"""
return number of handles in given (person, handle_list)
used for child count, family count
"""
return ["%3d" % len(data[1])]
# ------------------- utility methods -------------------------
def get_sorted_child_ages(self, data):
"return (sorted_ages,errors) for given (person,child_handles)"
ages = []
errors = []
person, child_handles = data
for child_handle in child_handles:
child = self.db.get_person_from_handle(child_handle)
birth_ref = child.get_birth_ref()
if birth_ref:
ages.append(self.estimate_age(person, birth_ref.ref))
else:
errors.append(_T_("Birth missing"))
continue
ages.sort()
return (ages, errors)
def estimate_age(self, person, end=None, begin=None):
"""return estimated age (range) for given person or error message.
age string is padded with spaces so that it can be sorted"""
age = estimate_age(self.db, person, end, begin)
if age[0] < 0 or age[1] < 0:
# inadequate information
return _T_("Date(s) missing")
if age[0] == age[1]:
# exact year
return "%3d" % age[0]
else:
# minimum and maximum
return "%3d-%d" % (age[0], age[1])
# ------------------- type methods -------------------------
# take db and person and return suitable gramps object(s)
def get_person(self, person):
"return person"
return person
def get_birth(self, person):
"return birth event for given person or None"
birth_ref = person.get_birth_ref()
if birth_ref:
return self.db.get_event_from_handle(birth_ref.ref)
return None
def get_death(self, person):
"return death event for given person or None"
death_ref = person.get_death_ref()
if death_ref:
return self.db.get_event_from_handle(death_ref.ref)
return None
def get_child_handles(self, person):
"return list of child handles for given person or None"
children = []
for fam_handle in person.get_family_handle_list():
fam = self.db.get_family_from_handle(fam_handle)
for child_ref in fam.get_child_ref_list():
children.append(child_ref.ref)
# TODO: it would be good to return only biological children,
# but GRAMPS doesn't offer any efficient way to check that
        # (I don't want to check each child's parent family mother
# and father relations as that would make this *much* slower)
if children:
return (person, children)
return None
def get_marriage_handles(self, person):
"return list of marriage event handles for given person or None"
marriages = []
for family_handle in person.get_family_handle_list():
family = self.db.get_family_from_handle(family_handle)
if int(family.get_relationship()) == FamilyRelType.MARRIED:
for event_ref in family.get_event_ref_list():
event = self.db.get_event_from_handle(event_ref.ref)
if (event.get_type() == EventType.MARRIAGE and
(event_ref.get_role() == EventRoleType.FAMILY or
event_ref.get_role() == EventRoleType.PRIMARY)):
marriages.append(event_ref.ref)
if marriages:
return (person, marriages)
return None
def get_any_family_handles(self, person):
"return list of family handles for given person or None"
families = person.get_family_handle_list()
if families:
return (person, families)
return None
def get_event_handles(self, person):
"return list of event handles for given person or None"
events = [ref.ref for ref in person.get_event_ref_list()]
if events:
return (person, events)
return None
# ----------------- data collection methods --------------------
def get_person_data(self, person, collect):
"""Add data from the database to 'collect' for the given person,
using methods from the 'collect' data dict tuple
"""
for chart in collect:
# get the information
type_func = chart[2]
data_func = chart[3]
obj = type_func(person) # e.g. get_date()
if obj:
value = data_func(obj) # e.g. get_year()
else:
value = [_T_("Personal information missing")]
# list of information found
for key in value:
if key in chart[1]:
chart[1][key] += 1
else:
chart[1][key] = 1
def collect_data(self, dbase, filter_func, menu, genders,
year_from, year_to, no_years, cb_progress, rlocale):
"""goes through the database and collects the selected personal
data persons fitting the filter and birth year criteria. The
arguments are:
dbase - the GRAMPS database
filter_func - filtering function selected by the StatisticsDialog
        menu - report menu whose options select which extraction methods are used
        genders - which gender(s) to include in the statistics
        year_from - use only persons born in this year or after
        year_to - use only persons born in this year or before
no_years - use also people without known birth year
cb_progress - callback to indicate progress
rlocale - a Locale instance
Returns an array of tuple of:
- Extraction method title
- Dict of values with their counts
(- Method)
"""
self.db = dbase # store for use by methods
self._locale = rlocale
self._ = rlocale.translation.sgettext
self._get_type = rlocale.get_type
data = []
ext = self.extractors
# which methods to use
for name in self.extractors:
option = menu.get_option_by_name(name)
if option.get_value() == True:
# localized data title, value dict, type and data method
data.append((ext[name][1], {}, ext[name][2], ext[name][3]))
# go through the people and collect data
for person_handle in filter_func.apply(dbase,
dbase.iter_person_handles(),
cb_progress):
cb_progress()
person = dbase.get_person_from_handle(person_handle)
# check whether person has suitable gender
if person.gender != genders and genders != Person.UNKNOWN:
continue
# check whether birth year is within required range
birth = self.get_birth(person)
if birth:
birthdate = birth.get_date_object()
if birthdate.get_year_valid():
birthdate = gregorian(birthdate)
year = birthdate.get_year()
if not (year >= year_from and year <= year_to):
continue
else:
# if death before range, person's out of range too...
death = self.get_death(person)
if death:
deathdate = death.get_date_object()
if deathdate.get_year_valid():
deathdate = gregorian(deathdate)
if deathdate.get_year() < year_from:
continue
if not no_years:
# don't accept people not known to be in range
continue
else:
continue
else:
continue
self.get_person_data(person, data)
return data
# GLOBAL: required so that we get access to _Extract.extractors[]
# Unfortunately class variables cannot reference instance methods :-/
_Extract = Extract()
#------------------------------------------------------------------------
#
# Statistics report
#
#------------------------------------------------------------------------
class StatisticsChart(Report):
""" StatisticsChart report """
def __init__(self, database, options, user):
"""
Create the Statistics object that produces the report.
Uses the Extractor class to extract the data from the database.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
"""
Report.__init__(self, database, options, user)
menu = options.menu
self._user = user
lang = menu.get_option_by_name('trans').get_value()
rlocale = self.set_locale(lang)
# override default gettext, or English output will have "person|Title"
self._ = rlocale.translation.sgettext
stdoptions.run_private_data_option(self, menu)
living_opt = stdoptions.run_living_people_option(self, menu, rlocale)
self.database = CacheProxyDb(self.database)
get_option_by_name = menu.get_option_by_name
get_value = lambda name: get_option_by_name(name).get_value()
filter_opt = get_option_by_name('filter')
self.filter = filter_opt.get_filter()
self.fil_name = "(%s)" % self.filter.get_name(rlocale)
self.bar_items = get_value('bar_items')
year_from = get_value('year_from')
year_to = get_value('year_to')
gender = get_value('gender')
living_value = get_value('living_people')
for (value, description) in living_opt.get_items(xml_items=True):
if value == living_value:
living_desc = self._(description)
break
self.living_desc = self._("(Living people: %(option_name)s)"
) % {'option_name' : living_desc}
# title needs both data extraction method name + gender name
if gender == Person.MALE:
genders = self._("Men")
elif gender == Person.FEMALE:
genders = self._("Women")
else:
genders = None
# needed for keyword based localization
mapping = {
'genders': genders,
'year_from': year_from,
'year_to': year_to
}
if genders:
span_string = self._("%(genders)s born "
"%(year_from)04d-%(year_to)04d"
) % mapping
else:
span_string = self._("Persons born "
"%(year_from)04d-%(year_to)04d"
) % mapping
# extract requested items from the database and count them
self._user.begin_progress(_('Statistics Charts'),
_('Collecting data...'),
self.database.get_number_of_people())
tables = _Extract.collect_data(self.database, self.filter, menu,
gender, year_from, year_to,
get_value('no_years'),
self._user.step_progress,
rlocale)
self._user.end_progress()
self._user.begin_progress(_('Statistics Charts'),
_('Sorting data...'), len(tables))
self.data = []
sortby = get_value('sortby')
reverse = get_value('reverse')
for table in tables:
            # generate a sorted item lookup index
lookup = self.index_items(table[1], sortby, reverse)
# document heading
heading = "%(str1)s -- %(str2)s" % {'str1' : self._(table[0]),
'str2' : span_string}
self.data.append((heading, table[0], table[1], lookup))
self._user.step_progress()
self._user.end_progress()
def index_items(self, data, sort, reverse):
"""creates & stores a sorted index for the items"""
# sort by item keys
        index = sorted(data, reverse=bool(reverse))
if sort == _options.SORT_VALUE:
# set for the sorting function
self.lookup_items = data
# then sort by value
            index.sort(key=lambda x: self.lookup_items[x],
                       reverse=bool(reverse))
return index
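    # Illustration of the two passes above: Python's sort is stable, so items
    # with equal counts keep the key order from the first pass, e.g. with
    #   data = {"Women": 5, "Men": 5, "Gender unknown": 2}
    # the first pass gives ['Gender unknown', 'Men', 'Women'] (by key) and the
    # second pass, sorting by count, leaves the tied 'Men'/'Women' in that order.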
def write_report(self):
"output the selected statistics..."
mark = IndexMark(self._('Statistics Charts'), INDEX_TYPE_TOC, 1)
self._user.begin_progress(_('Statistics Charts'),
_('Saving charts...'), len(self.data))
for data in sorted(self.data):
self.doc.start_page()
if mark:
self.doc.draw_text('SC-title', '', 0, 0, mark) # put it in TOC
mark = None # crock, but we only want one of them
if len(data[3]) < self.bar_items:
self.output_piechart(*data[:4])
else:
self.output_barchart(*data[:4])
self.doc.end_page()
self._user.step_progress()
self._user.end_progress()
def output_piechart(self, title1, typename, data, lookup):
# set layout variables
middle_w = self.doc.get_usable_width() / 2
middle_h = self.doc.get_usable_height() / 2
middle = min(middle_w, middle_h)
# start output
style_sheet = self.doc.get_style_sheet()
pstyle = style_sheet.get_paragraph_style('SC-Title')
mark = IndexMark(title1, INDEX_TYPE_TOC, 2)
self.doc.center_text('SC-title', title1, middle_w, 0, mark)
yoffset = utils.pt2cm(pstyle.get_font().get_size())
self.doc.center_text('SC-title', self.fil_name, middle_w, yoffset)
yoffset = 2 * utils.pt2cm(pstyle.get_font().get_size())
self.doc.center_text('SC-title', self.living_desc, middle_w, yoffset)
# collect data for output
color = 0
chart_data = []
for key in lookup:
style = "SC-color-%d" % color
text = "%s (%d)" % (self._(key), data[key])
# graphics style, value, and it's label
chart_data.append((style, data[key], text))
color = (color+1) % 7 # There are only 7 color styles defined
margin = 1.0
legendx = 2.0
# output data...
radius = middle - 2*margin
yoffset += margin + radius
draw_pie_chart(self.doc, middle_w, yoffset, radius, chart_data, -90)
yoffset += radius + 2*margin
if middle == middle_h: # Landscape
legendx = 1.0
yoffset = margin
text = self._("%s (persons):") % self._(typename)
draw_legend(self.doc, legendx, yoffset, chart_data, text, 'SC-legend')
def output_barchart(self, title1, typename, data, lookup):
pt2cm = utils.pt2cm
style_sheet = self.doc.get_style_sheet()
pstyle = style_sheet.get_paragraph_style('SC-Text')
font = pstyle.get_font()
# set layout variables
width = self.doc.get_usable_width()
row_h = pt2cm(font.get_size())
max_y = self.doc.get_usable_height() - row_h
pad = row_h * 0.5
# check maximum value
max_value = max(data[k] for k in lookup) if lookup else 0
# horizontal area for the gfx bars
margin = 1.0
middle = width/2.0
textx = middle + margin/2.0
stopx = middle - margin/2.0
maxsize = stopx - margin
# start output
pstyle = style_sheet.get_paragraph_style('SC-Title')
mark = IndexMark(title1, INDEX_TYPE_TOC, 2)
self.doc.center_text('SC-title', title1, middle, 0, mark)
yoffset = pt2cm(pstyle.get_font().get_size())
self.doc.center_text('SC-title', self.fil_name, middle, yoffset)
yoffset = 2 * pt2cm(pstyle.get_font().get_size())
self.doc.center_text('SC-title', self.living_desc, middle, yoffset)
yoffset = 3 * pt2cm(pstyle.get_font().get_size())
# header
yoffset += (row_h + pad)
text = self._("%s (persons):") % self._(typename)
self.doc.draw_text('SC-text', text, textx, yoffset)
for key in lookup:
yoffset += (row_h + pad)
if yoffset > max_y:
# for graphical report, page_break() doesn't seem to work
self.doc.end_page()
self.doc.start_page()
yoffset = 0
# right align bar to the text
value = data[key]
startx = stopx - (maxsize * value / max_value)
self.doc.draw_box('SC-bar', "",
startx, yoffset, stopx-startx, row_h)
# text after bar
text = "%s (%d)" % (self._(key), data[key])
self.doc.draw_text('SC-text', text, textx, yoffset)
return
#------------------------------------------------------------------------
#
# StatisticsChartOptions
#
#------------------------------------------------------------------------
class StatisticsChartOptions(MenuReportOptions):
""" Options for StatisticsChart report """
def __init__(self, name, dbase):
self.__pid = None
self.__filter = None
self.__db = dbase
self._nf = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
return self.__filter.get_filter().get_name()
def add_menu_options(self, menu):
"""
Add options to the menu for the statistics report.
"""
################################
category_name = _("Report Options")
add_option = partial(menu.add_option, category_name)
################################
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(_("Determines what people are included "
"in the report."))
add_option("filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter."))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self._nf = stdoptions.add_name_format_option(menu, category_name)
self._nf.connect('value-changed', self.__update_filters)
self.__update_filters()
stdoptions.add_private_data_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_localization_option(menu, category_name)
################################
category_name = _("Report Details")
add_option = partial(menu.add_option, category_name)
################################
sortby = EnumeratedListOption(_('Sort chart items by'),
_options.SORT_VALUE)
for item_idx in range(len(_options.opt_sorts)):
item = _options.opt_sorts[item_idx]
sortby.add_item(item_idx, item[2])
sortby.set_help(_("Select how the statistical data is sorted."))
add_option("sortby", sortby)
reverse = BooleanOption(_("Sort in reverse order"), False)
reverse.set_help(_("Check to reverse the sorting order."))
add_option("reverse", reverse)
this_year = time.localtime()[0]
year_from = NumberOption(_("People Born After"),
1700, 1, this_year)
year_from.set_help(_("Birth year from which to include people."))
add_option("year_from", year_from)
year_to = NumberOption(_("People Born Before"),
this_year, 1, this_year)
year_to.set_help(_("Birth year until which to include people"))
add_option("year_to", year_to)
no_years = BooleanOption(_("Include people without known birth years"),
False)
no_years.set_help(_("Whether to include people without "
"known birth years."))
add_option("no_years", no_years)
gender = EnumeratedListOption(_('Genders included'),
Person.UNKNOWN)
for item_idx in range(len(_options.opt_genders)):
item = _options.opt_genders[item_idx]
gender.add_item(item[0], item[2])
gender.set_help(_("Select which genders are included into "
"statistics."))
add_option("gender", gender)
bar_items = NumberOption(_("Max. items for a pie"), 8, 0, 20)
bar_items.set_help(_("With fewer items pie chart and legend will be "
"used instead of a bar chart."))
add_option("bar_items", bar_items)
# -------------------------------------------------
# List of available charts on separate option tabs
idx = 0
half = len(_Extract.extractors) // 2
chart_types = []
for (chart_opt, ctuple) in _Extract.extractors.items():
chart_types.append((_(ctuple[1]), chart_opt, ctuple))
sorted_chart_types = sorted(chart_types,
key=lambda x: glocale.sort_key(x[0]))
for (translated_option_name, opt_name, ctuple) in sorted_chart_types:
if idx <= half:
category_name = _("Charts 1")
else:
category_name = _("Charts 2")
opt = BooleanOption(translated_option_name, False)
opt.set_help(_("Include charts with indicated data."))
menu.add_option(category_name, opt_name, opt)
idx += 1
# Enable a couple of charts by default
menu.get_option_by_name("data_gender").set_value(True)
menu.get_option_by_name("data_ccount").set_value(True)
menu.get_option_by_name("data_bmonth").set_value(True)
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gid(gid)
nfv = self._nf.get_value()
filter_list = utils.get_person_filters(person,
include_single=False,
name_format=nfv)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if filter_value in [1, 2, 3, 4]:
# Filters 1, 2, 3 and 4 rely on the center person
self.__pid.set_available(True)
else:
# The rest don't
self.__pid.set_available(False)
def make_default_style(self, default_style):
"""Make the default output style for the Statistics report."""
# Paragraph Styles
fstyle = FontStyle()
fstyle.set_size(10)
fstyle.set_type_face(FONT_SERIF)
pstyle = ParagraphStyle()
pstyle.set_font(fstyle)
pstyle.set_alignment(PARA_ALIGN_LEFT)
pstyle.set_description(_("The style used for the items and values."))
default_style.add_paragraph_style("SC-Text", pstyle)
fstyle = FontStyle()
fstyle.set_size(14)
fstyle.set_type_face(FONT_SANS_SERIF)
pstyle = ParagraphStyle()
pstyle.set_font(fstyle)
pstyle.set_alignment(PARA_ALIGN_CENTER)
pstyle.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("SC-Title", pstyle)
"""
Graphic Styles:
SC-title - Contains the SC-Title paragraph style used for
the title of the document
SC-text - Contains the SC-Name paragraph style used for
the individual's name
SC-color-N - The colors for drawing pies.
SC-bar - A red bar with 0.5pt black line.
"""
gstyle = GraphicsStyle()
gstyle.set_paragraph_style("SC-Title")
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 255, 255))
gstyle.set_line_width(0)
default_style.add_draw_style("SC-title", gstyle)
gstyle = GraphicsStyle()
gstyle.set_paragraph_style("SC-Text")
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 255, 255))
gstyle.set_line_width(0)
default_style.add_draw_style("SC-text", gstyle)
width = 0.8
# red
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 0, 0))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-0", gstyle)
# orange
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 158, 33))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-1", gstyle)
# green
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((0, 178, 0))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-2", gstyle)
# violet
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((123, 0, 123))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-3", gstyle)
# yellow
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 255, 0))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-4", gstyle)
# blue
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((0, 105, 214))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-5", gstyle)
# gray
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((210, 204, 210))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-color-6", gstyle)
gstyle = GraphicsStyle()
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 0, 0))
gstyle.set_line_width(width)
default_style.add_draw_style("SC-bar", gstyle)
# legend
gstyle = GraphicsStyle()
gstyle.set_paragraph_style('SC-Text')
gstyle.set_color((0, 0, 0))
gstyle.set_fill_color((255, 255, 255))
gstyle.set_line_width(0)
default_style.add_draw_style("SC-legend", gstyle)
|
sam-m888/gprime
|
gprime/plugins/drawreport/statisticschart.py
|
Python
|
gpl-2.0
| 47,300
|
[
"Brian"
] |
bfb3c0340e1988225354605b19280e68e44b36c47c6af70828927826c9bfeeea
|
#!/usr/bin/env python
# Copyright (C) 2011 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author Tianyang Li
# E-Mail tmy1018 gmail com
"""
Filter reads according to alignment in a SAM file.
Usage:
./sam_filter.py [0/1] [SAM] [fasta/fastq] [read]
0 - keep those not aligned in SAM
1 - keep those aligned in SAM
"""
import sys
import HTSeq
from Bio import SeqIO
def main(argv):
keep = True
if argv[1] == '0':
keep = False
if argv[1] == '1':
keep = True
aligned = set([])
for align in HTSeq.SAM_Reader(argv[2]):
if align.aligned:
aligned.add(align.read.name)
    # Write reads back out in the format they were parsed in (argv[3]),
    # so FASTA input is not forced through the FASTQ formatter.
    if keep:
        for read in SeqIO.parse(argv[4], argv[3]):
            if read.name in aligned:
                print read.format(argv[3])
    else:
        for read in SeqIO.parse(argv[4], argv[3]):
            if read.name not in aligned:
                print read.format(argv[3])
if __name__ == '__main__':
main(sys.argv)
sys.exit(0)
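# Example invocation (hypothetical file names): keep reads aligned in
# aln.sam and write them to stdout in their original format:
#   ./sam_filter.py 1 aln.sam fastq reads.fq > aligned.fq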
|
tianyang-li/meta-transcriptome
|
sam_filter.py
|
Python
|
gpl-3.0
| 1,610
|
[
"HTSeq"
] |
2ba71f0ad45db95ba89ad7d040aeeff51cc538049df34e3cc01422d89d63eb64
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from unittest import skip
from nose.plugins.attrib import attr
from bok_choy.web_app_test import WebAppTest
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.dashboard import DashboardPage
from ..helpers import EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, WebAppTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
def visit_account_settings_page(self):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
# pylint: disable=attribute-defined-outside-init
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
def log_in_as_unique_user(self, email=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username, email=email).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def settings_changed_event_filter(self, event):
"""Filter out any events that are not "settings changed" events."""
return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME
def expected_settings_changed_event(self, setting, old, new, table=None):
"""A dictionary representing the expected fields in a "settings changed" event."""
return {
'username': self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': self.user_id,
'setting': setting,
'old': old,
'new': new,
'truncated': [],
'table': table or 'auth_userprofile'
}
}
def settings_change_initiated_event_filter(self, event):
"""Filter out any events that are not "settings change initiated" events."""
return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME
def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None):
"""A dictionary representing the expected fields in a "settings change initiated" event."""
return {
'username': username or self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': user_id or self.user_id,
'setting': setting,
'old': old,
'new': new,
}
}
def get_settings_page_url(self):
"""The absolute URL of the account settings page given the test context."""
return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER)
def assert_no_setting_changed_event(self):
"""Assert no setting changed event has been emitted thus far."""
self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME})
@attr('shard_5')
class DashboardMenuTest(AccountSettingsTestMixin, WebAppTest):
"""
Tests that the dashboard menu works correctly with the account settings page.
"""
def test_link_on_dashboard_works(self):
"""
Scenario: Verify that the "Account" link works from the dashboard.
Given that I am a registered user
And I visit my dashboard
And I click on "Account" in the top drop down
Then I should see my account settings page
"""
self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Account', dashboard_page.username_dropdown_link_text)
dashboard_page.click_account_settings_link()
@attr('shard_5')
class AccountSettingsPageTest(AccountSettingsTestMixin, WebAppTest):
"""
Tests that verify behaviour of the Account Settings page.
"""
SUCCESS_MESSAGE = 'Your changes have been saved.'
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsPageTest, self).setUp()
self.username, self.user_id = self.log_in_as_unique_user()
self.visit_account_settings_page()
def test_page_view_event(self):
"""
Scenario: An event should be recorded when the "Account Settings"
page is viewed.
Given that I am a registered user
And I visit my account settings page
Then a page view analytics event should be recorded
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'event': {
'user_id': self.user_id,
'page': 'account',
'visibility': None
}
}
],
actual_events
)
def test_all_sections_and_fields_are_present(self):
"""
Scenario: Verify that all sections and fields are present on the page.
"""
expected_sections_structure = [
{
'title': 'Basic Account Information (required)',
'fields': [
'Username',
'Full Name',
'Email Address',
'Password',
'Language',
'Country or Region'
]
},
{
'title': 'Additional Information (optional)',
'fields': [
'Education Completed',
'Gender',
'Year of Birth',
'Preferred Language',
]
},
{
'title': 'Connected Accounts',
'fields': [
'Dummy',
'Facebook',
'Google',
]
}
]
self.assertEqual(self.account_settings_page.sections_structure(), expected_sections_structure)
def _test_readonly_field(self, field_id, title, value):
"""
Test behavior of a readonly field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value)
def _test_text_field(
self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE,
assert_after_reload=True
):
"""
Test behaviour of a text field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value)
self.assertEqual(
self.account_settings_page.value_for_text_field(field_id, new_invalid_value), new_invalid_value
)
self.account_settings_page.wait_for_indicator(field_id, 'validation-error')
self.browser.refresh()
self.assertNotEqual(self.account_settings_page.value_for_text_field(field_id), new_invalid_value)
for new_value in new_valid_values:
self.assertEqual(self.account_settings_page.value_for_text_field(field_id, new_value), new_value)
self.account_settings_page.wait_for_message(field_id, success_message)
if assert_after_reload:
self.browser.refresh()
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), new_value)
def _test_dropdown_field(
self, field_id, title, initial_value, new_values, success_message=SUCCESS_MESSAGE, reloads_on_save=False
):
"""
Test behaviour of a dropdown field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), initial_value)
for new_value in new_values:
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, new_value), new_value)
self.account_settings_page.wait_for_message(field_id, success_message)
if reloads_on_save:
self.account_settings_page.wait_for_loading_indicator()
else:
self.browser.refresh()
self.account_settings_page.wait_for_page()
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id), new_value)
def _test_link_field(self, field_id, title, link_title, success_message):
"""
Test behaviour of a link field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
self.account_settings_page.click_on_link_in_link_field(field_id)
self.account_settings_page.wait_for_message(field_id, success_message)
def test_username_field(self):
"""
Test behaviour of "Username" field.
"""
self._test_readonly_field('username', 'Username', self.username)
def test_full_name_field(self):
"""
Test behaviour of "Full Name" field.
"""
self._test_text_field(
u'name',
u'Full Name',
self.username,
u'@',
[u'another name', self.username],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('name', self.username, 'another name'),
self.expected_settings_changed_event('name', 'another name', self.username),
],
actual_events
)
def test_email_field(self):
"""
Test behaviour of "Email" field.
"""
email = u"test@example.com"
username, user_id = self.log_in_as_unique_user(email=email)
self.visit_account_settings_page()
self._test_text_field(
u'email',
u'Email Address',
email,
u'@',
[u'me@here.com', u'you@there.com'],
success_message='Click the link in the message to update your email address.',
assert_after_reload=False
)
actual_events = self.wait_for_events(
event_filter=self.settings_change_initiated_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_change_initiated_event(
'email', email, 'me@here.com', username=username, user_id=user_id),
# NOTE the first email change was never confirmed, so old has not changed.
self.expected_settings_change_initiated_event(
'email', email, 'you@there.com', username=username, user_id=user_id),
],
actual_events
)
# Email is not saved until user confirms, so no events should have been
# emitted.
self.assert_no_setting_changed_event()
def test_password_field(self):
"""
Test behaviour of "Password" field.
"""
self._test_link_field(
u'password',
u'Password',
u'Reset Password',
success_message='Click the link in the message to reset your password.',
)
event_filter = self.expected_settings_change_initiated_event('password', None, None)
self.wait_for_events(event_filter=event_filter, number_of_matches=1)
# Like email, since the user has not confirmed their password change,
# the field has not yet changed, so no events will have been emitted.
self.assert_no_setting_changed_event()
@skip(
'On bokchoy test servers, language changes take a few reloads to fully take effect, '
'which means we can no longer reliably match the strings in the html in other tests.'
)
def test_language_field(self):
"""
Test behaviour of "Language" field.
"""
self._test_dropdown_field(
u'pref-lang',
u'Language',
u'English',
[u'Dummy Language (Esperanto)', u'English'],
reloads_on_save=True,
)
def test_education_completed_field(self):
"""
Test behaviour of "Education Completed" field.
"""
self._test_dropdown_field(
u'level_of_education',
u'Education Completed',
u'',
[u'Bachelor\'s degree', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('level_of_education', None, 'b'),
self.expected_settings_changed_event('level_of_education', 'b', None),
],
actual_events
)
def test_gender_field(self):
"""
Test behaviour of "Gender" field.
"""
self._test_dropdown_field(
u'gender',
u'Gender',
u'',
[u'Female', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event('gender', None, 'f'),
self.expected_settings_changed_event('gender', 'f', None),
],
actual_events
)
def test_year_of_birth_field(self):
"""
Test behaviour of "Year of Birth" field.
"""
# Note that when we clear the year_of_birth here we're firing an event.
self.assertEqual(self.account_settings_page.value_for_dropdown_field('year_of_birth', ''), '')
expected_events = [
self.expected_settings_changed_event('year_of_birth', None, 1980),
self.expected_settings_changed_event('year_of_birth', 1980, None),
]
with self.assert_events_match_during(self.settings_changed_event_filter, expected_events):
self._test_dropdown_field(
u'year_of_birth',
u'Year of Birth',
u'',
[u'1980', u''],
)
def test_country_field(self):
"""
Test behaviour of "Country or Region" field.
"""
self._test_dropdown_field(
u'country',
u'Country or Region',
u'',
[u'Pakistan', u'Palau'],
)
def test_preferred_language_field(self):
"""
Test behaviour of "Preferred Language" field.
"""
self._test_dropdown_field(
u'language_proficiencies',
u'Preferred Language',
u'',
[u'Pushto', u''],
)
actual_events = self.wait_for_events(event_filter=self.settings_changed_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_changed_event(
'language_proficiencies', [], [{'code': 'ps'}], table='student_languageproficiency'),
self.expected_settings_changed_event(
'language_proficiencies', [{'code': 'ps'}], [], table='student_languageproficiency'),
],
actual_events
)
def test_connected_accounts(self):
"""
Test that fields for third party auth providers exist.
Currently there is no way to test the whole authentication process
because that would require accounts with the providers.
"""
providers = (
['auth-oa2-facebook', 'Facebook', 'Link'],
['auth-oa2-google-oauth2', 'Google', 'Link'],
)
for field_id, title, link_title in providers:
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
@attr('a11y')
class AccountSettingsA11yTest(AccountSettingsTestMixin, WebAppTest):
"""
Class to test account settings accessibility.
"""
def test_account_settings_a11y(self):
"""
Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
'ignore': [
'link-href', # TODO: AC-233, AC-238
'skip-link', # TODO: AC-179
],
})
self.account_settings_page.a11y_audit.check_for_accessibility_errors()
|
simbs/edx-platform
|
common/test/acceptance/tests/lms/test_account_settings.py
|
Python
|
agpl-3.0
| 17,483
|
[
"VisIt"
] |
c1602704debed07dc05ca17ccaf73ddbda81601bed75ad8999d6ac2a30fa7d4b
|
from brian import Clock, Hz, second, PoissonGroup, network_operation, pA, Network
import numpy as np
from perceptchoice.model.monitor import WTAMonitor
from perceptchoice.model.network import default_params, pyr_params, inh_params, simulation_params, WTANetworkGroup
class VirtualSubject:
def __init__(self, subj_id, wta_params=default_params(), pyr_params=pyr_params(), inh_params=inh_params(),
sim_params=simulation_params(), network_class=WTANetworkGroup):
self.subj_id = subj_id
self.wta_params = wta_params
self.pyr_params = pyr_params
self.inh_params = inh_params
self.sim_params = sim_params
self.simulation_clock = Clock(dt=self.sim_params.dt)
self.input_update_clock = Clock(dt=1 / (self.wta_params.refresh_rate / Hz) * second)
self.background_input = PoissonGroup(self.wta_params.background_input_size,
rates=self.wta_params.background_freq, clock=self.simulation_clock)
self.task_inputs = []
for i in range(self.wta_params.num_groups):
self.task_inputs.append(PoissonGroup(self.wta_params.task_input_size,
rates=self.wta_params.task_input_resting_rate, clock=self.simulation_clock))
# Create WTA network
self.wta_network = network_class(params=self.wta_params, background_input=self.background_input,
task_inputs=self.task_inputs, pyr_params=self.pyr_params, inh_params=self.inh_params,
clock=self.simulation_clock)
# Create network monitor
self.wta_monitor = WTAMonitor(self.wta_network, self.sim_params, record_neuron_state=False, record_spikes=False,
record_firing_rate=True, record_inputs=True, save_summary_only=False,
clock=self.simulation_clock)
# Create Brian network and reset clock
self.net = Network(self.background_input, self.task_inputs, self.wta_network,
self.wta_network.connections.values(), self.wta_monitor.monitors.values())
def run_trial(self, sim_params, input_freq):
self.wta_monitor.sim_params=sim_params
self.net.reinit(states=False)
@network_operation(when='start', clock=self.input_update_clock)
def set_task_inputs():
for idx in range(len(self.task_inputs)):
rate = self.wta_params.task_input_resting_rate
if sim_params.stim_start_time <= self.simulation_clock.t < sim_params.stim_end_time:
rate = input_freq[idx] * Hz + np.random.randn() * self.wta_params.input_var
if rate < self.wta_params.task_input_resting_rate:
rate = self.wta_params.task_input_resting_rate
self.task_inputs[idx]._S[0, :] = rate
@network_operation(clock=self.simulation_clock)
def inject_current():
if sim_params.dcs_start_time < self.simulation_clock.t <= sim_params.dcs_end_time:
self.wta_network.group_e.I_dcs = sim_params.p_dcs
self.wta_network.group_i.I_dcs = sim_params.i_dcs
else:
self.wta_network.group_e.I_dcs = 0 * pA
self.wta_network.group_i.I_dcs = 0 * pA
self.net.remove(set_task_inputs, inject_current)
self.net.add(set_task_inputs, inject_current)
self.net.run(sim_params.trial_duration, report='text')
|
jbonaiuto/perceptual-choice-hysteresis
|
src/python/perceptchoice/model/virtual_subject.py
|
Python
|
gpl-3.0
| 3,437
|
[
"Brian"
] |
7e2e300c6652e260dcf9fec657f9c0794d6cd2ef572dc46b09f83415c287c60d
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2003-2005 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2005-2012 Julio Sanchez
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
#-------------------------------------------------------------------------
"""
Spanish-specific classes for relationships.
"""
from __future__ import unicode_literals
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import Person
MALE = Person.MALE
FEMALE = Person.FEMALE
UNKNOWN = Person.UNKNOWN
import gramps.gen.relationship
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
_level_name_male = [ "", "primero", "segundo", "tercero", "cuarto", "quinto",
"sexto", "séptimo", "octavo", "noveno", "décimo", "undécimo",
"duodécimo", "decimotercero", "decimocuarto", "decimoquinto",
"decimosexto", "decimoséptimo", "decimoctavo", "decimonono",
"vigésimo" ]
# Short forms (in apocope) used before names
_level_name_male_a = [ "", "primer", "segundo", "tercer", "cuarto", "quinto",
"sexto", "séptimo", "octavo", "noveno", "décimo", "undécimo",
"duodécimo", "decimotercer", "decimocuarto", "decimoquinto",
"decimosexto", "decimoséptimo", "decimoctavo", "decimonono",
"vigésimo" ]
_level_name_female = [ "", "primera", "segunda", "tercera", "cuarta", "quinta",
"sexta", "séptima", "octava", "novena", "décima", "undécima",
"duodécima", "decimotercera", "decimocuarta", "decimoquinta",
"decimosexta", "decimoséptima", "decimoctava", "decimonona",
"vigésima" ]
_level_name_plural = [ "", "primeros", "segundos", "terceros", "cuartos",
"quintos", "sextos", "séptimos", "octavos", "novenos",
"décimos", "undécimos", "duodécimos", "decimoterceros",
"decimocuartos", "decimoquintos", "decimosextos",
"decimoséptimos", "decimoctavos", "decimononos",
"vigésimos" ]
# This plugin tries to be flexible and expect little from the following
# tables. Ancestors are named from the list for the first generations.
# When this list is not enough, ordinals are used based on the same idea,
# i.e. bisabuelo is 'segundo abuelo' and so on, which has been the
# traditional way in Spanish. When we run out of ordinals we resort to
# N-ésimo notation, that is sort of understandable if in context.
# 'trastatarabuelo' is not in DRAE, but is well known
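# Illustrative example of the scheme: level 7 falls outside _father_level
# below, so _get_father(7) yields "sexto abuelo" via _level_name_male_a[6].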
_parents_level = [ "", "padres",
"abuelos",
"bisabuelos",
"tatarabuelos",
"trastatarabuelos" ]
_father_level = [ "", "padre%(inlaw)s",
"abuelo%(inlaw)s",
"bisabuelo%(inlaw)s",
"tatarabuelo%(inlaw)s",
"trastatarabuelo%(inlaw)s"]
_mother_level = [ "", "madre%(inlaw)s",
"abuela%(inlaw)s",
"bisabuela%(inlaw)s",
"tatarabuela%(inlaw)s",
"trastatarabuela%(inlaw)s"]
# step-relationships can't be handled as in English
# Notice that the traditional lack of divorce in Catholic, Spanish-speaking, countries has resulted
# in a scarcity of terms to describe these relationships since only death of a spouse would let the
# other marry again. Divorce is common now, so these relationships abound, but history has left us
# without support in the language. So, in this case, we will be more liberal than in other cases
# and coin a few new words, or accept others that seem to have some use, but always patterned
# after the style of the well-documented cases, so that users can intuitively guess their meaning.
# Notice that "that relationship does not exist in Spanish" is not a valid objection. Once the Gramps
# core has computed a relationship, it *has* to be named *somehow*. The only alternative is to change
# the Gramps core so that it does not find relationships that cannot be named in Spanish.
_step_father_level = [ "", "padrastro%(inlaw)s",
"abuelastro%(inlaw)s" ]
_step_mother_level = [ "", "madrastra%(inlaw)s",
"abuelastra%(inlaw)s" ]
# Higher-order terms (after trastatarabuelo) on this list are not standard,
# but then there is no standard naming scheme at all for this in Spanish.
# Check http://www.genealogia-es.com/guia3.html that echoes a proposed
# scheme that has got some reception in the Spanish-language genealogy
# community. Uncomment these names if you want to use them.
#_parents_level = [ "", "padres", "abuelos", "bisabuelos", "tatarabuelos",
# "trastatarabuelos", "pentabuelos", "hexabuelos",
# "heptabuelos", "octabuelos", "eneabuelos", "decabuelos"]
#_father_level = [ "", "padre", "abuelo", "bisabuelo", "tatarabuelo",
# "trastatarabuelo", "pentabuelo", "hexabuelo",
# "heptabuelo", "octabuelo", "eneabuelo", "decabuelo"]
#_mother_level = [ "", "madre", "abuela", "bisabuela", "tatarabuela",
# "trastatarabuela", "pentabuela", "hexabuela",
# "heptabuela", "octabuela", "eneabuela", "decabuela"]
# DRAE defines cuadrinieto as well, with the same meaning as chozno
# trastataranieto is in use too, but is not in DRAE
# DRAE also registers bizchozno and bischozno, but prefers bichozno
_son_level = [ "", "hijo%(inlaw)s",
"nieto%(inlaw)s",
"bisnieto%(inlaw)s",
"tataranieto%(inlaw)s",
"chozno%(inlaw)s",
"bichozno%(inlaw)s" ]
# Though "abuelastro" is in DRAE, "nietastro" isn't
_step_son_level = [ "", "hijastro%(inlaw)s",
"nietastro%(inlaw)s" ]
_daughter_level = [ "", "hija%(inlaw)s",
"nieta%(inlaw)s",
"bisnieta%(inlaw)s",
"tataranieta%(inlaw)s",
"chozna%(inlaw)s",
"bichozna%(inlaw)s" ]
_step_daughter_level = [ "", "hijastra%(inlaw)s",
"nietastra%(inlaw)s" ]
_sister_level = [ "", "hermana%(inlaw)s",
"tía%(inlaw)s",
"tía abuela%(inlaw)s",
"tía bisabuela%(inlaw)s",
"tía tatarabuela%(inlaw)s" ]
# Tiastro/tiastra aren't in DRAE
_step_sister_level = [ "", "hermanastra%(inlaw)s",
"tiastra%(inlaw)s",
"tía abuelastra%(inlaw)s" ]
_brother_level = [ "", "hermano%(inlaw)s",
"tío%(inlaw)s",
"tío abuelo%(inlaw)s",
"tío bisabuelo%(inlaw)s",
"tío tatarabuelo%(inlaw)s" ]
_step_brother_level = [ "", "hermanastro%(inlaw)s",
"tiastro%(inlaw)s",
"tío abuelastro%(inlaw)s" ]
_nephew_level = [ "", "sobrino%(inlaw)s",
"sobrino nieto%(inlaw)s",
"sobrino bisnieto%(inlaw)s",
"sobrino tataranieto%(inlaw)s",
"sobrino chozno%(inlaw)s",
"sobrino bichozno%(inlaw)s" ]
# Neither are sobrinastro/sobrinastra
_step_nephew_level = [ "", "sobrinastro%(inlaw)s",
"sobrino nietastro%(inlaw)s" ]
_niece_level = [ "", "sobrina%(inlaw)s",
"sobrina nieta%(inlaw)s",
"sobrina bisnieta%(inlaw)s",
"sobrina tataranieta%(inlaw)s",
"sobrina chozna%(inlaw)s",
"sobrina bichozna%(inlaw)s" ]
_step_niece_level = [ "", "sobrinastra%(inlaw)s",
"sobrina nietastra%(inlaw)s" ]
_children_level = [ "", "hijos",
"nietos",
"bisnietos",
"tataranietos",
"choznos",
"bichoznos" ]
_siblings_level = [ "",
"hermanos/as", "tíos/tías",
"tíos abuelos/tías abuelas", "tíos bisabuelos/tías bisabuelas",
"tíos tatarabuelos/tías tatarabuelas", "tíos trastatarabuelos/tías trastatarabuelas" ]
_nephews_nieces_level = [ "",
"hermanos/as",
"sobrinos/as",
"sobrinos nietos/sobrinas nietas",
"sobrinos bisnietos/sobrinas bisnietas",
"sobrinos tataranietos/sobrinas tataranietas",
"sobrinos choznos/sobrinas choznas",
"sobrinos bichoznos/sobrinas bichoznas" ]
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator):
"""
RelationshipCalculator Class
"""
def __init__(self):
gramps.gen.relationship.RelationshipCalculator.__init__(self)
def _get_step_father(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_father_level):
return _step_father_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s abuelastro%s" % (_level_name_male_a[level-1],inlaw)
else:
return "%d-ésimo abuelastro%s" % (level-1,inlaw)
def _get_father(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_father(level, inlaw)
if inlaw and level == 1:
return "suegro"
if level < len(_father_level):
return _father_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s abuelo%s" % (_level_name_male_a[level-1],inlaw)
else:
return "%d-ésimo abuelo%s" % (level-1,inlaw)
def _get_step_son(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_son_level):
return _step_son_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s nietastro%s" % (_level_name_male_a[level-1],inlaw)
else:
return "%d-ésimo nietastro%s" % (level-1,inlaw)
def _get_son(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_son(level, inlaw)
if inlaw and level == 1:
return "yerno"
if level < len(_son_level):
return _son_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s nieto%s" % (_level_name_male_a[level-1], inlaw)
else:
return "%d-ésimo nieto%s" % (level-1, inlaw)
def _get_step_mother(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_mother_level):
return _step_mother_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s abuelastra%s" % (_level_name_female[level-1],inlaw)
else:
return "%d-ésima abuelastra%s" % (level-1,inlaw)
def _get_mother(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_mother(level, inlaw)
if inlaw and level == 1:
return "suegra"
if level < len(_mother_level):
return _mother_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s abuela%s" % (_level_name_female[level-1],inlaw)
else:
return "%d-ésima abuela%s" % (level-1,inlaw)
def _get_step_daughter(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_daughter_level):
return _step_daughter_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s nietastra%s" % (_level_name_female[level-1],inlaw)
else:
return "%d-ésima nietastra%s" % (level-1,inlaw)
def _get_daughter(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_daughter(level, inlaw)
if inlaw and level == 1:
return "nuera"
if level < len(_daughter_level):
return _daughter_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s nieta%s" % (_level_name_female[level-1], inlaw)
else:
return "%d-ésima nieta%s" % (level-1, inlaw)
def _get_parent_unknown(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
return "%s o %s" % (self._get_father(level,step,inlaw), self._get_mother(level,step,inlaw))
def _get_child_unknown(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
return "%s o %s" % (self._get_son(level,step,inlaw), self._get_daughter(level,step,inlaw))
def _get_step_aunt(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_sister_level):
return _step_sister_level[level] % {'inlaw': inlaw}
elif (level-2) < len(_level_name_female):
return "%s tía abuelastra%s" % (_level_name_female[level-2],inlaw)
else:
return "%d-ésima tia abuelastra%s" % (level-2,inlaw)
def _get_aunt(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_aunt(level, inlaw)
if inlaw and level == 1:
return "cuñada"
if level < len(_sister_level):
return _sister_level[level] % {'inlaw': inlaw}
elif (level-2) < len(_level_name_female):
return "%s tía abuela%s" % (_level_name_female[level-2], inlaw)
else:
return "%d-ésima tía abuela%s" % (level-2, inlaw)
    def _get_distant_aunt(self, level, step='', inlaw=''):
if step:
base = 'tiastra'
else:
base = 'tía'
if level < len(_level_name_female):
return "%s %s" % (base,_level_name_female[level])
else:
return "%s %d-ésima" % (base,level)
def _get_step_uncle(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_brother_level):
return _step_brother_level[level] % {'inlaw': inlaw}
elif (level-2) < len(_level_name_male_a):
return "%s tío abuelastro%s" % (_level_name_male_a[level-2],inlaw)
else:
return "%d-ésimo tío abuelastro%s" % (level-2,inlaw)
def _get_uncle(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_uncle(level, inlaw)
if inlaw and level == 1:
return "cuñado"
if level < len(_brother_level):
return _brother_level[level] % {'inlaw': inlaw}
elif (level-2) < len(_level_name_male_a):
return "%s tío abuelo%s" % (_level_name_male_a[level-2], inlaw)
else:
return "%d-ésimo tío abuelo%s" % (level-2, inlaw)
def _get_distant_uncle(self, level, step='', inlaw=''):
if step:
base = 'tiastro'
else:
base = 'tío'
if level < len(_level_name_male):
return "%s %s" % (base,_level_name_male[level])
else:
return "%s %d-ésimo" % (base,level)
def _get_step_nephew(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_nephew_level):
return _step_nephew_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s tío sobrinastro%s" % (_level_name_male_a[level-1],inlaw)
else:
return "%d-ésimo tío sobrinastro%s" % (level-1,inlaw)
def _get_nephew(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_nephew(level, inlaw)
if level < len(_nephew_level):
return _nephew_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_male_a):
return "%s sobrino nieto%s" % (_level_name_male_a[level-1], inlaw)
else:
return "%d-ésimo sobrino nieto%s" % (level-1, inlaw)
def _get_distant_nephew(self, level, step, inlaw):
if step:
base = 'sobrinastro'
else:
base = 'sobrino'
if level < len(_level_name_male):
return "%s %s" % (base,_level_name_male[level])
else:
return "%s %d-ésimo" % (base,level)
def _get_step_niece(self, level, inlaw=''):
"""Internal spanish method to create relation string
"""
if level < len(_step_niece_level):
return _step_niece_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s tía sobrinastra%s" % (_level_name_female[level-1],inlaw)
else:
return "%d-ésima tía sobrinastra%s" % (level-1,inlaw)
def _get_niece(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
if step:
return self._get_step_niece(level, inlaw)
if level < len(_niece_level):
return _niece_level[level] % {'inlaw': inlaw}
elif (level-1) < len(_level_name_female):
return "%s sobrina nieta%s" % (_level_name_female[level-1], inlaw)
else:
return "%d-ésima sobrina nieta%s" % (level-1, inlaw)
def _get_distant_niece(self, level, step, inlaw):
if step:
base = 'sobrinastra'
else:
base = 'sobrina'
if level < len(_level_name_female):
return "%s %s" % (base,_level_name_female[level])
else:
return "%s %d-ésima" % (base,level)
def _get_male_cousin(self, level, removed, lower=False, step='', inlaw='', gender_c=UNKNOWN):
"""Internal spanish method to create relation string
"""
# primastro is an invention and is not backed by DRAE
if step:
prim="primastro"
else:
prim="primo"
if removed == 0:
if level == 1:
return "%s hermano%s" % (prim, inlaw)
elif level < len(_level_name_male):
return "%s %s%s" % (prim,_level_name_male[level], inlaw)
else:
return "%s %d-ésimo%s" % (prim, level, inlaw)
elif removed > 0 and lower:
if gender_c == MALE:
return "%s de un %s" % (self._get_son(removed,step,inlaw),
self._get_male_cousin(level, 0, lower, step, inlaw, gender_c))
elif gender_c == FEMALE:
return "%s de una %s" % (self._get_son(removed,step,inlaw),
self._get_female_cousin(level, 0, lower, step, inlaw, gender_c))
else:
return "%s de un %s" % (self._get_son(removed,step,inlaw),
self._get_male_cousin(level, 0, lower, step, inlaw, gender_c))
elif removed > 0 and not lower:
if gender_c == MALE:
return "%s de un %s" % (self._get_male_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_father(removed,step,inlaw))
elif gender_c == FEMALE:
return "%s de una %s" % (self._get_male_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_mother(removed,step,inlaw))
else:
return "%s de un %s" % (self._get_male_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_father(removed,step,inlaw))
else:
return "%s %scousin%s (%d-%d)" % (_level_name[level],
step, inlaw,
removed, lower)
def _get_female_cousin(self, level, removed, lower=False, step='', inlaw='', gender_c=UNKNOWN):
"""Internal spanish method to create relation string
"""
# primastra is an invention and is not real Spanish
if step:
prim="primastra"
else:
prim="prima"
if removed == 0:
if level == 1:
return "%s hermana%s" % (prim, inlaw)
            elif level < len(_level_name_female):
return "%s %s%s" % (prim,_level_name_female[level], inlaw)
else:
return "%s %d-ésima%s" % (prim, level, inlaw)
elif removed > 0 and lower:
if gender_c == MALE:
return "%s de un %s" % (self._get_daughter(removed,step,inlaw),
self._get_male_cousin(level, 0, lower, step, inlaw, gender_c))
elif gender_c == FEMALE:
return "%s de una %s" % (self._get_daughter(removed,step,inlaw),
self._get_female_cousin(level, 0, lower, step, inlaw, gender_c))
else:
return "%s de un %s" % (self._get_daughter(removed,step,inlaw),
self._get_male_cousin(level, 0, lower, step, inlaw, gender_c))
elif removed > 0 and not lower:
if gender_c == MALE:
return "%s de un %s" % (self._get_female_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_father(removed,step,inlaw))
elif gender_c == FEMALE:
return "%s de una %s" % (self._get_female_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_mother(removed,step,inlaw))
else:
return "%s de un %s" % (self._get_female_cousin(level, 0, lower, step, inlaw, gender_c),
self._get_father(removed,step,inlaw))
else:
return "%s %sprima%s (%d-%d)" % (_level_name[level],
step, inlaw,
removed, lower)
def _get_sibling(self, level, step='', inlaw=''):
"""Internal spanish method to create relation string
"""
        # TBC: inlaw is inflected; it is probably better to do away with this method
        # and do both calls from the caller (would need inlaw_MALE and inlaw_FEMALE,
        # but that is feasible)
return "%s o %s" % (self._get_uncle(level,step,inlaw),self._get_aunt(level,step,inlaw))
def get_plural_relationship_string(self, Ga, Gb,
reltocommon_a='', reltocommon_b='',
only_birth=True,
in_law_a=False, in_law_b=False):
"""Spanish version of method to create relation string - check relationship.py
"""
rel_str = "parientes lejanos"
if Ga == 0:
# These are descendants
if Gb < len(_children_level):
rel_str = _children_level[Gb]
elif (Gb-1) < len(_level_name_plural):
rel_str = "%s nietos" % (_level_name_plural[Gb-1])
else:
rel_str = "%d-ésimos nietos" % (Gb-1)
elif Gb == 0:
# These are parents/grand parents
if Ga < len(_parents_level):
rel_str = _parents_level[Ga]
elif (Ga-1) < len(_level_name_plural):
rel_str = "%s abuelos" % (_level_name_plural[Ga-1])
else:
rel_str = "%d-ésimos abuelos" % (Ga-1)
elif Gb == 1:
# These are siblings/aunts/uncles
if Ga < len(_siblings_level):
rel_str = _siblings_level[Ga]
elif (Ga-1) < len(_level_name_plural):
rel_str = "%s tíos abuelos" % (_level_name_plural[Ga-1])
else:
rel_str = "%s-ésimos tíos abuelos" % (Ga-1)
elif Ga == 1:
# These are nieces/nephews
if Gb < len(_nephews_nieces_level):
rel_str = _nephews_nieces_level[Gb]
elif (Gb-1) < len(_level_name_plural):
rel_str = "%s sobrinos nietos" % (_level_name_plural[Gb-1])
else:
rel_str = "%s-ésimos sobrinos nietos" % (Gb-1)
elif Ga > 1 and Ga == Gb:
# These are cousins in the same generation
if Ga == 2:
rel_str = "primos hermanos"
            elif (Ga-1) < len(_level_name_plural):
rel_str = "primos %s" % (_level_name_plural[Ga-1])
else:
rel_str = "primos %d-ésimos" % (Ga-1)
elif Ga == Gb+1:
# These are distant uncles/aunts
if Gb < len(_level_name_plural):
rel_str = "tíos %s" % (_level_name_plural[Gb])
else:
rel_str = "tíos %d-ésimos" % (Gb)
elif Ga+1 == Gb:
# These are distant nephews/nieces
if Gb-1 < len(_level_name_plural):
rel_str = "sobrinos %s" % (_level_name_plural[Gb-1])
else:
rel_str = "sobrinos %d-ésimos" % (Gb-1)
elif Ga > 1 and Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
rel_str = "%s de los %s" % (
self.get_plural_relationship_string(0, Gb),
self.get_plural_relationship_string(Ga, 0) )
elif Gb > 1 and Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
rel_str = "%s de los %s" % (
self.get_plural_relationship_string(0, Gb),
self.get_plural_relationship_string(Ga, 0) )
if in_law_b == True:
rel_str = "cónyuges de los %s" % rel_str
return rel_str
def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b,
reltocommon_a, reltocommon_b,
only_birth=True,
in_law_a=False, in_law_b=False):
"""Spanish version of method to create relation string - check relationship.py
"""
if only_birth:
step = ''
else:
step = self.STEP
if in_law_a or in_law_b :
if gender_b == FEMALE:
inlaw = ' política'
else:
inlaw = ' político'
else:
inlaw = ''
rel_str = "%spariente%s lejano" % (step, inlaw)
if Ga == 0:
# b is descendant of a
if Gb == 0 :
rel_str = 'la misma persona'
elif gender_b == MALE:
rel_str = self._get_son(Gb, step, inlaw)
elif gender_b == FEMALE:
rel_str = self._get_daughter(Gb, step, inlaw)
else:
rel_str = self._get_child_unknown(Gb, step, inlaw)
elif Gb == 0:
# b is parents/grand parent of a
if gender_b == MALE:
rel_str = self._get_father(Ga, step, inlaw)
elif gender_b == FEMALE:
rel_str = self._get_mother(Ga, step, inlaw)
else:
rel_str = self._get_parent_unknown(Ga, step, inlaw)
elif Gb == 1:
# b is sibling/aunt/uncle of a
if gender_b == MALE:
rel_str = self._get_uncle(Ga, step, inlaw)
elif gender_b == FEMALE:
rel_str = self._get_aunt(Ga, step, inlaw)
else:
rel_str = self._get_sibling(Ga, step, inlaw)
elif Ga == 1:
# b is niece/nephew of a
if gender_b == MALE:
rel_str = self._get_nephew(Gb-1, step, inlaw)
elif gender_b == FEMALE:
rel_str = self._get_niece(Gb-1, step, inlaw)
else:
rel_str = "%s o %s" % (self._get_nephew(Gb-1, step, inlaw),
self._get_niece(Gb-1, step, inlaw))
elif Ga == Gb:
# a and b cousins in the same generation
if gender_b == MALE:
rel_str = self._get_male_cousin(Ga-1, 0, lower=False, step=step,
inlaw=inlaw)
elif gender_b == FEMALE:
rel_str = self._get_female_cousin(Ga-1, 0, lower=False, step=step,
inlaw=inlaw)
else:
rel_str = "%s o %s" % (self._get_male_cousin(Ga-1, 0, step=step, inlaw=inlaw),
self._get_female_cousin(Ga-1, 0, step=step, inlaw=inlaw))
elif Ga == Gb+1:
if gender_b == Person.MALE:
rel_str = self._get_distant_uncle(Gb, step, inlaw)
elif gender_b == Person.FEMALE:
rel_str = self._get_distant_aunt(Gb, step, inlaw)
else:
rel_str = "%s o %s" % (self._get_distant_uncle(Gb, 0, step=step, inlaw=inlaw),
self._get_distant_aunt(Gb, 0, step=step, inlaw=inlaw))
elif Ga+1 == Gb:
if gender_b == Person.MALE:
rel_str = self._get_distant_nephew(Gb-1, step, inlaw)
elif gender_b == Person.FEMALE:
rel_str = self._get_distant_niece(Gb-1, step, inlaw)
else:
rel_str = "%s o %s" % (self._get_distant_nephew(Gb-1, 0, step=step, inlaw=inlaw),
self._get_distant_niece(Gb-1, 0, step=step, inlaw=inlaw))
elif Ga > Gb:
# These are cousins in different generations with the second person
# being in a higher generation from the common ancestor than the
# first person.
# We need to know the gender of the ancestor of the first person who is on
# the same generation as the other person
if reltocommon_a[Ga-Gb-1] == 'f':
gender_c = MALE
elif reltocommon_a[Ga-Gb-1] == 'm':
gender_c = FEMALE
else:
gender_c = UNKNOWN
if gender_b == MALE:
rel_str = self._get_male_cousin(Gb-1, Ga-Gb, lower=False,
step=step, inlaw=inlaw, gender_c=gender_c)
elif gender_b == FEMALE:
rel_str = self._get_female_cousin(Gb-1, Ga-Gb, lower=False,
step=step, inlaw=inlaw, gender_c=gender_c)
else:
rel_str = "%s o %s" % (self._get_male_cousin(Gb-1, Ga-Gb, lower=False,
step=step, inlaw=inlaw),
self._get_female_cousin(Gb-1, Ga-Gb, lower=False,
step=step, inlaw=inlaw))
elif Gb > Ga:
# These are cousins in different generations with the second person
# being in a lower generation from the common ancestor than the
# first person.
# We need to know the gender of the person who is an ancestor of the second person and
# is on the same generation that the first person
if reltocommon_b[Gb-Ga-1] == 'f':
gender_c = MALE
elif reltocommon_b[Gb-Ga-1] == 'm':
gender_c = FEMALE
else:
gender_c = UNKNOWN
if gender_b == MALE:
rel_str = self._get_male_cousin(Ga-1, Gb-Ga, lower=True,
step=step, inlaw=inlaw, gender_c=gender_c)
elif gender_b == FEMALE:
rel_str = self._get_female_cousin(Ga-1, Gb-Ga, lower=True,
step=step, inlaw=inlaw, gender_c=gender_c)
else:
rel_str = "%s o %s" % (self._get_male_cousin(Ga-1, Gb-Ga, lower=True,
step=step, inlaw=inlaw),
self._get_female_cousin(Ga-1, Gb-Ga, lower=True,
step=step, inlaw=inlaw))
return rel_str
def get_sibling_relationship_string(self, sib_type, gender_a, gender_b,
in_law_a=False, in_law_b=False):
"""
"""
rel_str = ''
if gender_b != FEMALE:
if sib_type == self.NORM_SIB or sib_type == self.UNKNOWN_SIB:
rel_str = 'hermano'
elif sib_type == self.HALF_SIB_MOTHER \
or sib_type == self.HALF_SIB_FATHER:
rel_str = 'medio hermano'
elif sib_type == self.STEP_SIB:
rel_str = 'hermanastro'
if in_law_a or in_law_b :
rel_str += ' político'
if gender_b == UNKNOWN:
rel_str += ' o '
if gender_b != MALE:
if sib_type == self.NORM_SIB or sib_type == self.UNKNOWN_SIB:
rel_str += 'hermana'
elif sib_type == self.HALF_SIB_MOTHER \
or sib_type == self.HALF_SIB_FATHER:
                rel_str += 'media hermana'
elif sib_type == self.STEP_SIB:
rel_str += 'hermanastra'
if in_law_a or in_law_b :
rel_str += ' política'
return rel_str
if __name__ == "__main__":
# Test function. Call it as follows from the command line (so as to find
# imported modules):
# export PYTHONPATH=/path/to/gramps/src
# python src/plugins/rel/rel_es.py
# (Above not needed here)
"""TRANSLATORS, copy this if statement at the bottom of your
rel_xx.py module, and test your work with:
python src/plugins/rel/rel_xx.py
"""
from gramps.gen.relationship import test
RC = RelationshipCalculator()
test(RC, True)
|
Forage/Gramps
|
gramps/plugins/rel/rel_es.py
|
Python
|
gpl-2.0
| 36,185
|
[
"Brian"
] |
44d7d9f50c59aee44759b963088db4352bd52cc378c038a6a79bdb25e00767dd
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_gslbapplicationpersistenceprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of GslbApplicationPersistenceProfile Avi RESTful Object
description:
- This module is used to configure GslbApplicationPersistenceProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- Field introduced in 17.1.1.
name:
description:
- A user-friendly name for the persistence profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the persistence profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create GslbApplicationPersistenceProfile object
avi_gslbapplicationpersistenceprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_gslbapplicationpersistenceprofile
"""
RETURN = '''
obj:
description: GslbApplicationPersistenceProfile (api/gslbapplicationpersistenceprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'gslbapplicationpersistenceprofile',
set([]))
if __name__ == '__main__':
main()
|
alexlo03/ansible
|
lib/ansible/modules/network/avi/avi_gslbapplicationpersistenceprofile.py
|
Python
|
gpl-3.0
| 3,672
|
[
"VisIt"
] |
ac9827db1853832042ebbd96967276ba5b825611d6d8266c17b093740de266b0
|
import os
import tarfile
import cStringIO
from DIRAC import S_OK, gLogger
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.Core.DISET.TransferClient import TransferClient
from DIRAC.Core.Security import Locations, CS
class BundleDeliveryClient:
def __init__( self, rpcClient = False, transferClient = False ):
self.rpcClient = rpcClient
self.transferClient = transferClient
self.log = gLogger.getSubLogger( "BundleDelivery" )
def __getRPCClient( self ):
if self.rpcClient:
return self.rpcClient
return RPCClient( "Framework/BundleDelivery",
skipCACheck = CS.skipCACheck() )
def __getTransferClient( self ):
if self.transferClient:
return self.transferClient
return TransferClient( "Framework/BundleDelivery",
skipCACheck = CS.skipCACheck() )
def __getHash( self, bundleID, dirToSyncTo ):
try:
fd = open( os.path.join( dirToSyncTo, ".dab.%s" % bundleID ), "rb" )
hash = fd.read().strip()
fd.close()
return hash
except:
return ""
def __setHash( self, bundleID, dirToSyncTo, hash ):
try:
fileName = os.path.join( dirToSyncTo, ".dab.%s" % bundleID )
fd = open( fileName, "wb" )
fd.write( hash )
fd.close()
except Exception, e:
self.log.error( "Could not save hash after synchronization", "%s: %s" % ( fileName, str( e ) ) )
def syncDir( self, bundleID, dirToSyncTo ):
dirCreated = False
if not os.path.isdir( dirToSyncTo ):
self.log.info( "Creating dir %s" % dirToSyncTo )
os.makedirs( dirToSyncTo )
dirCreated = True
currentHash = self.__getHash( bundleID, dirToSyncTo )
self.log.info( "Current hash for bundle %s in dir %s is '%s'" % ( bundleID, dirToSyncTo, currentHash ) )
buff = cStringIO.StringIO()
transferClient = self.__getTransferClient()
result = transferClient.receiveFile( buff, ( bundleID, currentHash ) )
if not result[ 'OK' ]:
self.log.error( "Could not sync dir", result[ 'Message' ] )
if dirCreated:
self.log.info( "Removing dir %s" % dirToSyncTo )
        os.rmdir( dirToSyncTo )
buff.close()
return result
newHash = result[ 'Value' ]
if newHash == currentHash:
self.log.info( "Dir %s was already in sync" % dirToSyncTo )
return S_OK( False )
buff.seek( 0 )
self.log.info( "Synchronizing dir with remote bundle" )
tF = tarfile.open( name = 'dummy', mode = "r:gz", fileobj = buff )
for tarinfo in tF:
tF.extract( tarinfo, dirToSyncTo )
tF.close()
buff.close()
self.__setHash( bundleID, dirToSyncTo, newHash )
self.log.info( "Dir has been synchronized" )
return S_OK( True )
def syncCAs( self ):
X509_CERT_DIR = False
if 'X509_CERT_DIR' in os.environ:
X509_CERT_DIR = os.environ['X509_CERT_DIR']
del os.environ['X509_CERT_DIR']
casLocation = Locations.getCAsLocation()
if not casLocation:
casLocation = Locations.getCAsDefaultLocation()
result = self.syncDir( "CAs", casLocation )
if X509_CERT_DIR:
os.environ['X509_CERT_DIR'] = X509_CERT_DIR
return result
def syncCRLs( self ):
X509_CERT_DIR = False
if 'X509_CERT_DIR' in os.environ:
X509_CERT_DIR = os.environ['X509_CERT_DIR']
del os.environ['X509_CERT_DIR']
result = self.syncDir( "CRLs", Locations.getCAsLocation() )
if X509_CERT_DIR:
os.environ['X509_CERT_DIR'] = X509_CERT_DIR
return result
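# Minimal usage sketch (assumes a configured DIRAC client environment):
#   client = BundleDeliveryClient()
#   result = client.syncCAs()
#   if not result[ 'OK' ]:
#     gLogger.error( result[ 'Message' ] )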
|
Sbalbp/DIRAC
|
FrameworkSystem/Client/BundleDeliveryClient.py
|
Python
|
gpl-3.0
| 3,497
|
[
"DIRAC"
] |
8f5e9ec4545ad0655e5ba3c7afdb89fdfbb0dac0da9d6f38d302bbfa6b4755e0
|
"""This module defines input and output functions for NMD format.
.. _nmd-format:
NMD Format
-------------------------------------------------------------------------------
Description
^^^^^^^^^^^
NMD files (extension :file:`.nmd`) are plain text files that contain at
least normal mode and system coordinate data.
NMD files can be visualized using :ref:`nmwiz`. ProDy functions
:func:`.writeNMD` and :func:`.parseNMD` can be used to read and write NMD
files.
Data fields
^^^^^^^^^^^
Data fields in bold face are required. All data arrays and lists must be in a
single line and items must be separated by one or more space characters.
**coordinates**: system coordinates as a list of decimal numbers
Coordinate array is the most important line in an NMD file. All mode array
lengths must match the length of the coordinate array. Also, number of atoms
in the system is deduced from the length of the coordinate array.
::
coordinates 27.552 4.354 23.629 24.179 4.807 21.907 ...
**mode**: normal mode array as a list of decimal numbers
Optionally, mode index and a scaling factor may be provided
in the same line as a mode array. Both of these must precede the mode array.
Providing a scaling factor enables relative scaling of the mode arrows and
the amplitude of the fluctuations in animations. For NMA, scaling factors
may be chosen to be the square-root of the inverse-eigenvalue associated
with the mode. Analogously, for PCA data, scaling factor would be the
square-root of the eigenvalue.
If a mode line contains numbers preceding the mode array, they are evaluated
based on their type. If an integer is encountered, it is considered the mode
index. If a decimal number is encountered, it is considered the scaling
factor. Scaling factor may be the square-root of the inverse eigenvalue
if data is from an elastic network model, or the square-root of the
eigenvalue if data is from an essential dynamics (or principal component)
analysis.
For example, all of the following lines are valid. The first line contains
mode index and scaling factor. Second and third lines contain mode index or
scaling factor. Last line contains only the mode array.
::
mode 1 2.37 0.039 0.009 0.058 0.038 -0.011 0.052 ...
mode 1 0.039 0.009 0.058 0.038 -0.011 0.052 ...
mode 2.37 0.039 0.009 0.058 0.038 -0.011 0.052 ...
mode 0.039 0.009 0.058 0.038 -0.011 0.052 0.043 ...
*name*: name of the model
The length of all following data fields must be equal to the number of atoms in
the system. NMWiz uses such data when writing a temporary PDB files for
loading coordinate data into VMD.
*atomnames*: list of atom names
If not provided, all atom names are set to "CA".
*resnames*: list of residue names
If not provided, all residue names are set to "GLY".
*chainids*: list of chain identifiers
If not provided, all chain identifiers are set to "A".
*resids*: list of residue numbers
If not provided, residue numbers are started from 1 and incremented by one
for each atom.
*bfactors*: list of experimental beta-factors
If not provided, all beta-factors are set to zero.
Beta-factors can be used to color the protein representation.
NMD files may contain additional lines. Only lines that start with one of the
above field names are evaluated by NMWiz.
Autoload Trick
^^^^^^^^^^^^^^
By adding a special line in an NMD file, file content can be automatically
loaded into VMD at startup. The first line calls a NMWiz function to load the
file itself (:file:`xyzeros.nmd`).
::
nmwiz_load xyzeros.nmd
coordinates 0 0 0 0 0 0 ...
mode 0.039 0.009 0.058 0.038 -0.011 0.052 ...
mode -0.045 -0.096 -0.009 -0.040 -0.076 -0.010 ...
mode 0.007 -0.044 0.080 0.015 -0.037 0.062 ...
In this case, VMD must be started from the command line by typing
:program:`vmd -e xyzeros.nmd`."""
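# A short sketch (assuming a NumPy array ``eigvals`` of mode eigenvalues) of
# the per-mode scaling factors described in the docstring above:
#   import numpy as np
#   scale_enm = 1.0 / np.sqrt(eigvals)  # elastic network models
#   scale_pca = np.sqrt(eigvals)        # PCA / essential dynamics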
__all__ = ['parseNMD', 'writeNMD', 'pathVMD', 'getVMDpath', 'setVMDpath',
'viewNMDinVMD']
import os
from os.path import abspath, join, split, splitext
import numpy as np
from prody import LOGGER, SETTINGS, PY3K
from prody.atomic import AtomGroup
from prody.utilities import openFile, isExecutable, which, PLATFORM, addext
from .nma import NMA
from .anm import ANM
from .gnm import GNM, ZERO
from .pca import PCA
from .mode import Vector, Mode
from .modeset import ModeSet
def pathVMD(*path):
"""Return VMD path, or set it to be a user specified *path*."""
if not path:
path = SETTINGS.get('vmd', None)
if isExecutable(path):
return path
else:
LOGGER.warning('VMD path is not set by user, looking for it.')
vmdbin = None
vmddir = None
if PLATFORM == 'Windows':
if PY3K:
import winreg
else:
import _winreg as winreg # PY3K: OK
for vmdversion in ('1.8.7', '1.9', '1.9.1'):
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'Software\\University of Illinois\\VMD\\' +
vmdversion)
vmddir = winreg.QueryValueEx(key, 'VMDDIR')[0]
vmdbin = join(vmddir, 'vmd.exe')
except OSError:
pass
try:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'Software\\WOW6432node\\University of Illinois\\VMD\\' +
vmdversion)
vmddir = winreg.QueryValueEx(key, 'VMDDIR')[0]
vmdbin = join(vmddir, 'vmd.exe')
except OSError:
pass
else:
vmdbin = which('vmd')
if isExecutable(vmdbin):
setVMDpath(vmdbin)
return vmdbin
elif len(path) == 1:
path = path[0]
if isExecutable(path):
SETTINGS['vmd'] = path
SETTINGS.save()
LOGGER.info("VMD path is set to '{0}'.".format(path))
else:
raise OSError('{0} is not executable.'.format(str(path)))
else:
raise ValueError('specify a single path string')
def getVMDpath():
"""Deprecated for removal in v1.5, use :func:`pathVMD` instead."""
return pathVMD()
def setVMDpath(path):
"""Deprecated for removal in v1.5, use :func:`pathVMD` instead."""
return pathVMD(path)
NMD_LABEL_MAP = {
'atomnames': 'name',
'resnames': 'resname',
'resnums': 'resnum',
'resids': 'resnum',
'chainids': 'chain',
'bfactors': 'beta',
'segnames': 'segment',
'segments': 'segment',
}
def parseNMD(filename, type=None):
"""Return :class:`.NMA` and :class:`.AtomGroup` instances storing data
parsed from *filename* in :file:`.nmd` format. Type of :class:`.NMA`
instance, e.g. :class:`.PCA`, :class:`.ANM`, or :class:`.GNM` will
be determined based on mode data."""
assert not isinstance(type, NMA), 'type must be NMA, ANM, GNM, or PCA'
atomic = {}
atomic.update([(label, None) for label in NMD_LABEL_MAP])
atomic['coordinates'] = None
atomic['name'] = None
modes = []
with open(filename) as nmd:
for i, line in enumerate(nmd):
try:
label, data = line.split(None, 1)
except ValueError:
continue
if label == 'mode':
modes.append((i + 1, data))
elif label in atomic:
if atomic[label] is None:
atomic[label] = (i + 1, data)
else:
LOGGER.warn('Data label {0} is found more than once in '
'{1}.'.format(repr(label), repr(filename)))
name = atomic.pop('name', None)
name = (name[1].strip() if name else '') or splitext(split(filename)[1])[0]
ag = AtomGroup(name)
dof = None
n_atoms = None
coords = atomic.pop('coordinates', None)
line, coords = coords if coords is not None else (None, None)
if coords is not None:
coords = np.fromstring(coords, dtype=float, sep=' ')
dof = coords.shape[0]
if dof % 3 != 0:
LOGGER.warn('Coordinate data in {0} at line {1} is corrupt '
'and will be omitted.'.format(repr(filename), line))
else:
n_atoms = dof // 3
coords = coords.reshape((n_atoms, 3))
ag.setCoords(coords)
from prody.atomic import ATOMIC_FIELDS
for label, data in atomic.items(): # PY3K: OK
if data is None:
continue
line, data = data
data = data.split()
if n_atoms is None:
n_atoms = len(data)
dof = n_atoms * 3
elif len(data) != n_atoms:
LOGGER.warn('Data with label {0} in {1} at line {2} is '
'corrupt, expected {3} values, parsed {4}.'.format(
repr(label), repr(filename), line, n_atoms, len(data)))
continue
label = NMD_LABEL_MAP[label]
data = np.array(data, dtype=ATOMIC_FIELDS[label].dtype)
ag.setData(label, data)
if not modes:
return None, ag
length = len(modes[0][1].split())
is3d = length > n_atoms + 2
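# a mode line may carry at most two leading numbers (index and scaling
# factor), so a 3D mode with 3*n_atoms components always exceeds n_atoms + 2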
if dof is None:
dof = length - (length % 3)
elif not is3d: # GNM
dof = n_atoms
array = np.zeros((dof, len(modes)))
less = 0
eigvals = []
count = 0
for i, (line, mode) in enumerate(modes):
mode = np.fromstring(mode, dtype=float, sep=' ')
diff = len(mode) - dof
if diff < 0 or diff > 2:
LOGGER.warn('Mode data in {0} at line {1} is corrupt.'
.format(repr(filename), line))
less += 1
continue
array[:, i - less] = mode[diff:]
count += 1
eigvals.append(mode[:diff])
if count == 0:
return None, ag
try:
eigvals = np.array(eigvals, dtype=float)
except (TypeError, ValueError):
LOGGER.warn('Failed to parse eigenvalues from {0}.'
.format(repr(filename)))
eigvals = None
if eigvals is None or eigvals.ndim != 2 or eigvals.shape[1] > 2:
if eigvals is not None:
LOGGER.warn('Failed to parse eigenvalues from {0}.'
.format(repr(filename)))
eigvals = None
elif eigvals.shape[1] == 0:
eigvals = None
elif eigvals.shape[1] == 1:
if np.all(eigvals % 1 == 0):
LOGGER.warn('Failed to parse eigenvalues from {0}.'
.format(repr(filename)))
eigvals = None
else:
eigvals = eigvals.flatten() ** 2
else:
eigvals = eigvals[:, 1] ** 2
if is3d:
if eigvals is not None and np.all(eigvals[:-1] >= eigvals[1:]):
nma = PCA(name)
else:
nma = ANM(name)
else:
nma = GNM(name)
if count != array.shape[1]:
array = array[:, :count].copy()
nma.setEigens(array, eigvals)
return nma, ag
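# Usage sketch (hypothetical file name):
#     nma, ag = parseNMD('p38_anm.nmd')
#     # nma is a PCA/ANM/GNM instance (or None if no modes were parsed);
#     # ag is an AtomGroup built from the atomic data lines of the file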
def writeNMD(filename, modes, atoms, zeros=False):
"""Return *filename* that contains *modes* and *atoms* data in NMD format
described in :ref:`nmd-format`. :file:`.nmd` extension is appended to
filename, if it does not have an extension.
.. note::
#. If zeros is **False** (by default), this function skips modes
with zero eigenvalues. If zeros is **True**, modes with zero
eigenvalues are written out, their scaling factor being the
square-root of the inverse of the mode number times 0.0001.
This provides descending factors consistent with the NMA modes.
#. If a :class:`.Vector` instance is given, it will be normalized
before it is written. Its length before normalization will be
written as the scaling factor of the vector."""
if not isinstance(modes, (NMA, ModeSet, Mode, Vector)):
raise TypeError('modes must be NMA, ModeSet, Mode, or Vector, '
'not {0}'.format(type(modes)))
if modes.numAtoms() != atoms.numAtoms():
raise ValueError('number of atoms does not match')
out = openFile(addext(filename, '.nmd'), 'w')
#out.write('#!{0} -e\n'.format(VMDPATH))
out.write('nmwiz_load {0}\n'.format(abspath(filename)))
name = modes.getTitle()
name = name.replace(' ', '_').replace('.', '_')
if not name.replace('_', '').isalnum() or len(name) > 30:
name = str(atoms)
name = name.replace(' ', '_').replace('.', '_')
if not name.replace('_', '').isalnum() or len(name) > 30:
name = splitext(split(filename)[1])[0]
out.write('name {0}\n'.format(name))
try:
coords = atoms.getCoords()
except:
raise ValueError('coordinates could not be retrieved from atoms')
if coords is None:
raise ValueError('atom coordinates are not set')
try:
data = atoms.getNames()
if data is not None:
out.write('atomnames {0}\n'.format(' '.join(data)))
except:
pass
try:
data = atoms.getResnames()
if data is not None:
out.write('resnames {0}\n'.format(' '.join(data)))
except:
pass
try:
data = atoms.getResnums()
if data is not None:
out.write('resids ')
data.tofile(out, ' ')
out.write('\n')
except:
pass
try:
data = atoms.getChids()
if data is not None:
out.write('chainids {0}\n'.format(' '.join(data)))
except:
pass
try:
data = atoms.getSegnames()
if data is not None:
out.write('segnames {0}\n'.format(' '.join(data)))
except:
pass
try:
data = atoms.getBetas()
if data is not None:
out.write('bfactors ')
data.tofile(out, ' ', '%.2f')
out.write('\n')
except:
pass
out.write('coordinates ')
coords.tofile(out, ' ', '%.3f')
out.write('\n')
count = 0
if isinstance(modes, Vector):
out.write('mode 1 {0:.2f} '.format(abs(modes)))
modes.getNormed()._getArray().tofile(out, ' ', '%.3f')
out.write('\n')
count += 1
else:
if isinstance(modes, Mode):
modes = [modes]
for mode in modes:
if (mode.getEigval() < ZERO) and not zeros:
continue
elif (mode.getEigval() < ZERO) and zeros:
out.write('mode {0} {1:.2f} '.format(
mode.getIndex()+1, np.sqrt(1/(0.0001*(mode.getIndex()+1)))))
else:
out.write('mode {0} {1:.2f} '.format(
mode.getIndex()+1, mode.getVariance()**0.5))
mode._getArray().tofile(out, ' ', '%.3f')
out.write('\n')
count += 1
if count == 0:
LOGGER.warning('No normal mode data was written. '
'Given modes might have 0 eigenvalues.')
out.close()
return filename
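# Usage sketch (hypothetical names; assumes 'anm' is an ANM or PCA result and
# 'ca' a matching AtomGroup obtained elsewhere with ProDy):
#     writeNMD('p38_anm.nmd', anm[:3], ca)   # writes the first three modes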
def viewNMDinVMD(filename):
"""Start VMD in the current Python session and load NMD data."""
vmd = pathVMD()
if vmd:
os.system('{0} -e {1}'.format(vmd, abspath(filename)))
|
Shen-Lab/cNMA
|
Manual/Modified/nmdfile.py
|
Python
|
mit
| 15,465
|
[
"VMD"
] |
05b33c75ee31452acfeabced2292a5f30c487ac6b7a302da0afa40795422579f
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
# for python 3.x use 'tkinter' rather than 'Tkinter'
from Tkinter import *
import time
import random
CANVAS_WIDTH = 400
CANVAS_HEIGHT = 400
CELL_WIDTH = 3
CELL_HEIGHT = 3
class App():
"""
Elementary Cellular Automation (ECA)
"""
RULE = 30
def __init__(self):
self.root = Tk()
l1=Label(self.root,text="Rule:" + str(App.RULE))
l1.pack()
self.c = Canvas(self.root,width=400, height=400)
self.c.pack()
rows = CANVAS_HEIGHT // CELL_HEIGHT
cols = CANVAS_WIDTH // CELL_WIDTH
grid = [[0 for _ in range(cols)] for _ in range(rows)]  # indexed grid[row][col]
# Seed the grid
grid[0][cols // 2] = True
# Decode the rule
output = [0] * 8
cx = 1
idx = 7
while idx >= 0:  # fill all 8 entries, including output[0] for the 111 neighborhood
output[idx] = (App.RULE & cx) != 0
idx = idx - 1
cx *= 2
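# For RULE = 30 (binary 00011110) the table built above reads, with
# output[0] <-> neighborhood 111 and output[7] <-> neighborhood 000:
#   111 110 101 100 011 010 001 000
#    0   0   0   1   1   1   1   0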
# Build the CA.
for row in range(0,rows):
prev_row = row - 1
for i in range(0, cols - 2):
x = i * CELL_WIDTH
y = row * CELL_HEIGHT
if prev_row>=0:
result = False
a = grid[prev_row][i]
b = grid[prev_row][i + 1]
c = grid[prev_row][i + 2]
if a and b and c:
result = output[0]
elif a and b and not c:
result = output[1]
elif a and not b and c:
result = output[2]
elif a and not b and not c:
result = output[3]
elif not a and b and c:
result = output[4]
elif not a and b and not c:
result = output[5]
elif not a and not b and c:
result = output[6]
elif not a and not b and not c:
result = output[7]
grid[row][i + 1] = result
if grid[row][i + 1] :
r = self.c.create_rectangle(x, y, x+CELL_WIDTH,y+CELL_HEIGHT, outline="black", fill="black")
else:
r = self.c.create_rectangle(x, y, x+CELL_WIDTH,y+CELL_HEIGHT, outline="black", fill="white")
self.root.mainloop()
app=App()
|
PeterLauris/aifh
|
vol2/vol2-python-examples/examples/example_eca.py
|
Python
|
apache-2.0
| 3,365
|
[
"VisIt"
] |
f86f56278d7fd83208fcca3b18e517145be8fd8ecb25fb2ccf400d1296aaa63e
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 04 13:48:11 2015
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: ycasg
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.interpolate import interp1d
from pynlo.util import IFFT_t
from pynlo.light.PulseBase import Pulse
class SechPulse(Pulse):
def __init__(self, power, T0_ps, center_wavelength_nm,
time_window_ps = 10., frep_MHz = 100., NPTS = 2**10,
GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,
power_is_avg = False):
"""Generate a squared-hyperbolic secant "sech" pulse
A(t) = sqrt(P0 [W]) * sech(t/T0 [ps])
centered at wavelength center_wavelength_nm (nm).
time_window (ps) sets temporal grid size.
Optional GDD and TOD are in ps^2 and ps^3.
Note: The full-width-at-half-maximum (FWHM) is given by
T0_ps * 1.76
"""
Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)
# make sure we weren't passed mks units
assert (center_wavelength_nm > 1.0)
assert (time_window_ps > 1.0 )
self.set_center_wavelength_nm(center_wavelength_nm)
self.set_time_window_ps(time_window_ps)
### Generate pulse
if not power_is_avg:
# from https://www.rp-photonics.com/sech2_shaped_pulses.html
self.set_AT( np.sqrt(power)/np.cosh(self.T_ps/T0_ps) )
else:
self.set_AT( 1 / np.cosh(self.T_ps/T0_ps) )
self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))
self.chirp_pulse_W(GDD, TOD)
self.chirp_pulse_T(chirp2, chirp3, T0_ps)
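# Aside on the 1.76 factor quoted above: the intensity sech(t/T0)**2 falls to
# half maximum at t = T0*arccosh(sqrt(2)), so
# FWHM = 2*arccosh(sqrt(2))*T0 ~= 1.7627*T0; e.g. 2*np.arccosh(np.sqrt(2))
# evaluates to 1.76274717...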
class GaussianPulse(Pulse):
def __init__(self, power, T0_ps, center_wavelength_nm,
time_window_ps = 10., frep_MHz = 100., NPTS = 2**10,
GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,
power_is_avg = False):
"""Generate Gaussian pulse A(t) = sqrt(peak_power[W]) *
exp( -(t/T0 [ps])^2 / 2 ) centered at wavelength
center_wavelength_nm (nm). time_window (ps) sets temporal grid
size. Optional GDD and TOD are in ps^2 and ps^3.
Note: For this definition of a Gaussian pulse, T0_ps is the
full-width-at-half-maximum (FWHM) of the pulse.
"""
Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)
# make sure we weren't passed mks units
assert (center_wavelength_nm > 1.0)
assert (time_window_ps > 1.0 )
self.set_center_wavelength_nm(center_wavelength_nm)
self.set_time_window_ps(time_window_ps)
GDD = GDD
TOD = TOD
# from https://www.rp-photonics.com/gaussian_pulses.html
self.set_AT( np.sqrt(power) * np.exp(-2.77*0.5*self.T_ps**2/(T0_ps**2)) ) # input field (W^0.5)
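# 2.77 ~= 4*ln(2); with the extra 0.5 in the exponent the intensity |A|^2
# falls to half its peak at t = +/- T0_ps/2, making T0_ps the intensity FWHM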
if power_is_avg:
self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))
self.chirp_pulse_W(GDD, TOD)
self.chirp_pulse_T(chirp2, chirp3, T0_ps)
class SincPulse(Pulse):
def __init__(self, power, FWHM_ps, center_wavelength_nm,
time_window_ps = 10., frep_MHz = 100., NPTS = 2**10,
GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,
power_is_avg = False):
"""Generate sinc pulse A(t) = sqrt(peak_power[W]) * sin(t/T0)/(t/T0)
centered at wavelength center_wavelength_nm (nm).
The width is given by FWHM_ps, which is the full-width-at-half-maximum
in picoseconds. T0 is equal to FWHM/3.7909885.
time_window_ps sets temporal grid size. Optional GDD and TOD are
in ps^2 and ps^3."""
Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)
# make sure we weren't passed mks units
assert (center_wavelength_nm > 1.0)
assert (time_window_ps > 1.0 )
self.set_center_wavelength_nm(center_wavelength_nm)
self.set_time_window_ps(time_window_ps)
T0_ps = FWHM_ps/3.7909885
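# 3.7909885 = 2*1.8954943, where sin(x)/x = 1/2 at x ~= 1.8954943, so
# FWHM_ps is the full width at half maximum of the field amplitude sin(x)/x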
### Generate pulse
if not power_is_avg:
# numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi
self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) )
else:
self.set_AT( np.sinc(self.T_ps/(T0_ps*np.pi)) )  # same sinc shape; overall amplitude is fixed by the rescale below
self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))
self.chirp_pulse_W(GDD, TOD)
self.chirp_pulse_T(chirp2, chirp3, T0_ps)
class FROGPulse(Pulse):
def __init__(self, time_window_ps, center_wavelength_nm, power,frep_MHz = 100., NPTS = 2**10,
power_is_avg = False,
fileloc = '',
flip_phase = True):
"""Generate pulse from FROG data. Grid is centered at wavelength
center_wavelength_nm (nm), but pulse properties are loaded from data
file. If flip_phase is true, all phase is multiplied by -1 [useful
for correcting direction of time ambiguity]. time_window (ps) sets
temporal grid size.
power sets the pulse energy:
if power_is_epp is True then the number is pulse energy [J]
if power_is_epp is False then the power is average power [W], and
is multiplied by frep to calculate pulse energy"""
Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)
try:
self.fileloc = fileloc
# make sure we weren't passed mks units
assert (center_wavelength_nm > 1.0)
assert (time_window_ps > 1.0 )
self.set_time_window_ps(time_window_ps)
self.set_center_wavelength_nm(center_wavelength_nm) # reference wavelength (nm)
# power -> EPP
if power_is_avg:
power = power / self.frep_mks
# Read in retrieved FROG trace
frog_data = np.genfromtxt(self.fileloc)
wavelengths = frog_data[:,0]# (nm)
intensity = frog_data[:,1]# (arb. units)
phase = frog_data[:,2]# (radians)
if flip_phase:
phase = -1 * phase
pulse_envelope = interp1d(wavelengths, intensity, kind='linear',
bounds_error=False,fill_value=0)
phase_envelope = interp1d(wavelengths, phase, kind='linear',
bounds_error=False,fill_value=0)
gridded_intensity = pulse_envelope(self.wl_nm)
gridded_phase = phase_envelope(self.wl_nm)
# Calculate time domain complex electric field A
self.set_AW(gridded_intensity*np.exp(1j*gridded_phase))
# Calculate normalization factor to achieve requested
# pulse energy
e_scale = np.sqrt(power / self.calc_epp() )
self.set_AT(self.AT * e_scale )
except IOError:
print ('File not found.' )
class NoisePulse(Pulse):
def __init__(self, center_wavelength_nm, time_window_ps = 10., NPTS = 2**8,
frep_MHz = None):
Pulse.__init__(self, n = NPTS, frep_MHz = frep_MHz)
self.set_center_wavelength_nm(center_wavelength_nm)
self.set_time_window_ps(time_window_ps)
self.set_AW( 1e-30 * np.ones(self.NPTS) * np.exp(1j * 2 * np.pi *
np.random.rand(self.NPTS)))
class CWPulse(Pulse):
def __init__(self, avg_power, center_wavelength_nm, time_window_ps = 10.0,
NPTS = 2**8,offset_from_center_THz = None):
Pulse.__init__(self, n = NPTS)
# make sure we weren't passed mks units
assert (center_wavelength_nm > 1.0)
assert (time_window_ps > 1.0 )
if offset_from_center_THz is None:
self.set_center_wavelength_nm(center_wavelength_nm)
self.set_time_window_ps(time_window_ps)
# Set the time domain to be CW, which should give us a delta function in
# frequency. Then normalize that delta function (in frequency space) to
# the average power. Note that frep does not factor in here.
self.set_AT(np.ones(self.NPTS,))
self.set_AW(self.AW * np.sqrt(avg_power) / sum(abs(self.AW)) )
else:
dF = 1.0/time_window_ps
n_offset = np.round( offset_from_center_THz/dF)
center_THz = self._c_nmps/center_wavelength_nm -\
n_offset * dF
center_nm = self._c_nmps / center_THz
self.set_time_window_ps(time_window_ps)
self.set_center_wavelength_nm(center_nm)
aws = np.zeros((self.NPTS, ))
aws[int(self.NPTS/2.0) + int(n_offset) ] = 1.0 *np.sqrt(avg_power)
self.set_AW(aws)
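# Off-center branch above: dF = 1/time_window_ps is the frequency-grid spacing
# in THz; the CW line is placed n_offset bins from the grid center and the
# grid's center wavelength is shifted so the requested absolute frequency
# lands exactly on a grid point.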
def gen_OSA(self, time_window_ps, center_wavelength_nm, power,
power_is_epp = False,
fileloc = 'O:\\OFM\\Maser\\Dual-Comb 100 MHz System\\Pump spectrum-Yb-101614.csv',
log = True, rows = 30): # Yb spectrum
"""Generate pulse from OSA data. Grid is centered at wavelength
center_wavelength_nm (nm), but pulse properties are loaded from data
file. time_window (ps) sets temporal grid size. Switch in place for
importing log vs. linear data.
power sets the pulse energy:
if power_is_epp is True then the number is pulse energy [J]
if power_is_epp is False then the power is average power [W], and
is multiplied by frep to calculate pulse energy"""
try:
self.fileloc = fileloc
self.set_time_window_ps(time_window_ps)
self.center_wl = center_wavelength_nm # reference wavelength (nm)
self.w0 = (2. * np.pi * self.c) / self.center_wl # reference angular frequency
self.setup_grids()
if not power_is_epp:
power = power / self.frep
# Read in OSA data
osa_data = np.genfromtxt(self.fileloc, delimiter = ',', skip_header = rows)  # skip_header replaces the removed 'skiprows' keyword
wavelengths = osa_data[:,0]# (nm)
wavelengths = self.internal_wl_from_nm(wavelengths)
intensity = osa_data[:,1]# (arb. units)
if log:
intensity = 10.**(intensity / 10.)
freq_abs = self.c/wavelengths
freq_abs = np.sort(freq_abs)
self.freq_rel = freq_abs - self.c / self.center_wl
pulse_envelope = interp1d(self.freq_rel, intensity, kind='linear',
bounds_error = False, fill_value=0)
self.gridded_intensity = pulse_envelope(self.V / (2*np.pi))
# Calculate time domain complex electric field A
self.A = IFFT_t(self.gridded_intensity)
# Calculate normalization factor to achieve requested
# pulse energy
e_scale = np.sqrt(power / self.calc_epp() )
self.A = self.A * e_scale
except IOError:
print ('File not found.')
|
ycasg/PyNLO
|
src/pynlo/light/DerivedPulses.py
|
Python
|
gpl-3.0
| 12,433
|
[
"Gaussian"
] |
86fe6bdbe9af9c47482ea43c354cdcf7d3326350b65a4bfcaa5f5d2483ede8a0
|
"""
Manage pools of connections so that we can limit the number of requests per site and reuse
connections.
@since: 1.6
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import urlparse
from collections import defaultdict
import threading, gobject
from zeroinstall.support import tasks
from zeroinstall.injector import download
default_port = {
'http': 80,
'https': 443,
}
class DownloadStep:
url = None
status = None
redirect = None
class DownloadScheduler:
"""Assigns (and re-assigns on redirect) Downloads to Sites, allowing per-site limits and connection pooling.
@since: 1.6"""
def __init__(self):
self._sites = defaultdict(lambda: Site()) # (scheme://host:port) -> Site
@tasks.async
def download(self, dl):
# (changed if we get redirected)
current_url = dl.url
redirections_remaining = 10
# Assign the Download to a Site based on its scheme, host and port. If the result is a redirect,
# reassign it to the appropriate new site. Note that proxy handling happens later; we want to group
# and limit by the target site, not treat everything as going to a single site (the proxy).
while True:
location_parts = urlparse.urlparse(current_url)
site_key = (location_parts.scheme,
location_parts.hostname,
location_parts.port or default_port.get(location_parts.scheme, None))
step = DownloadStep()
step.dl = dl
step.url = current_url
blocker = self._sites[site_key].download(step)
yield blocker
tasks.check(blocker)
if not step.redirect:
break
current_url = step.redirect
if redirections_remaining == 0:
raise download.DownloadError("Too many redirections {url} -> {current}".format(
url = dl.url,
current = current_url))
redirections_remaining -= 1
# (else go around the loop again)
MAX_DOWNLOADS_PER_SITE = 5
def _spawn_thread(step):
from ._download_child import download_in_thread
thread_blocker = tasks.Blocker("wait for thread " + step.url)
def notify_done(status, ex = None, redirect = None):
step.status = status
step.redirect = redirect
def wake_up_main():
child.join()
thread_blocker.trigger(ex)
return False
gobject.idle_add(wake_up_main)
child = threading.Thread(target = lambda: download_in_thread(step.url, step.dl.tempfile, step.dl.modification_time, notify_done))
child.daemon = True
child.start()
return thread_blocker
class Site:
"""Represents a service accepting download requests. All requests with the same scheme, host and port are
handled by the same Site object, allowing it to do connection pooling and queuing, although the current
implementation doesn't do either."""
def __init__(self):
self.queue = []
self.active = 0
@tasks.async
def download(self, step):
if self.active == MAX_DOWNLOADS_PER_SITE:
# Too busy to start a new download now. Queue this one and wait.
ticket = tasks.Blocker('queued download for ' + step.url)
self.queue.append(ticket)
yield ticket, step.dl._aborted
if step.dl._aborted.happened:
raise download.DownloadAborted()
# Start a new thread for the download
thread_blocker = _spawn_thread(step)
self.active += 1
# Wait for thread to complete download.
yield thread_blocker, step.dl._aborted
self.active -= 1
if self.active < MAX_DOWNLOADS_PER_SITE:
self.process_next() # Start next queued download, if any
if step.dl._aborted.happened:
# Don't wait for child to finish (might be stuck doing IO)
raise download.DownloadAborted()
tasks.check(thread_blocker)
if step.status == download.RESULT_REDIRECT:
assert step.redirect
return # DownloadScheduler will handle it
assert not step.redirect, step.redirect
step.dl._finish(step.status)
def process_next(self):
assert self.active < MAX_DOWNLOADS_PER_SITE
if self.queue:
nxt = self.queue.pop()
nxt.trigger()
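# Flow sketch: DownloadScheduler.download(dl) resolves a (scheme, host, port)
# key and hands a DownloadStep to that Site; Site.download queues the step when
# MAX_DOWNLOADS_PER_SITE threads are busy, otherwise _spawn_thread runs
# download_in_thread in a daemon thread; a redirect result bounces back to the
# scheduler, which re-keys the download to the new URL's site.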
|
dabrahams/zeroinstall
|
zeroinstall/injector/scheduler.py
|
Python
|
lgpl-2.1
| 3,901
|
[
"VisIt"
] |
aca73010f097311fdae8bcfee171654287550f19ece497645462aa1eb81a4839
|
'''
Author: Fernando Luiz Neme Chibli
Name: pyTreeBee
Version: 0.6
License: GNU LGPL v2.1
'''
'''
Asgard Defense is the first game made with pyTreeBee.
Asgard Defense by Asgardian AGES is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-nd/4.0/.
Asgard Defense v0.30 is available on http://goo.gl/JkKElS.
'''
import pygame, os,time
from pygame.locals import *
pygame.init()
pygame.mixer.init()
def treeBeeStart(tela_principal):
while tela_principal.running:
tela_principal.run()
if tela_principal.storedlink:
proxima_tela=tela_principal.storedlink.linkReply(tela_principal)
tela_principal.storedlink=None
tela_principal=proxima_tela
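# Flow sketch: dynamicScreen.run() blocks until some item's keyControls()
# returns a link object (stored in storedlink); treeBeeStart then calls
# link.linkReply(current_screen) to obtain the next screen, so screens and
# links form a simple state machine driven by the loop above.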
class entryBox(object):
def __init__(self, font, string_entry, box_width, x , y, indentation=10):
self.text=string_entry
self.x=x
self.y=y
self.width=box_width
self.font=font
self.indentation=indentation
self.digit_position=0
self.bar=(0,0,0) # Black
self.background=(255,255,255) # White
self.color=(0,0,0) # Black
self.bar_width=1 # How much smaller is the background rect?
self.selected=False # You haven't selected it yet, have you?
self.limit=1000001 # "It's over one million" - Freeza
#-Getters-#
def getString(self):
return self.text
#-Setters-#
def setColors(self, bar_color, background_color, text_color):
self.bar=bar_color
self.background=background_color
self.color=text_color
def setBarColor(self, bar_color):
self.bar=bar_color
def setBackground(self, background_color):
self.background=background_color
def setBarWidth(self, bar_width):
self.bar_width=bar_width
def setTextColor(self, text_color):
self.color=text_color
def setLimit(self, char_limit):
self.limit=char_limit
#-Box "Blitter"-#
def blitOn(self, screen):
#width, font_height+indentation
screen.fill( self.bar,pygame.Rect(self.x,self.y,self.width, (2*self.indentation)+self.font.get_height() ))
#width-bar_width, font_height+indentation
screen.fill( self.background,pygame.Rect(self.x+self.bar_width,self.y+self.bar_width,self.width-2*self.bar_width, (2*self.indentation)+self.font.get_height()-2*self.bar_width ))
#text before the position plus a bar
string_exibition=self.text[:self.digit_position]+'|'
text_display=self.font.render(string_exibition,True,self.color)
if text_display.get_width()>self.width-self.indentation: # if it goes out of bounds, display focused on the cursor position...
#I think that I was drunk when I did all this... thing... I'm tired, lets sleep...
screen.blit(text_display,(self.x+ self.width -self.indentation -text_display.get_width(),self.y+self.indentation))
string_exibition2=self.text[self.digit_position:]
text_display2=self.font.render(string_exibition2,True,self.color)
screen.blit(text_display2,(self.x+self.width-self.indentation,self.y+self.indentation))
else:
text_display=self.font.render(string_exibition+self.text[self.digit_position:],True,self.color)
screen.blit(text_display,(self.x+self.indentation,self.y+self.indentation))
#-Selection Verifier-#
def mouseSelect(self, event):
if event.type == MOUSEBUTTONUP and event.button==1:
x,y=event.pos
if x>self.x and y>self.y and x< self.x+self.width and y < self.y+(2*self.indentation)+self.font.get_height()-2*self.bar_width:
self.selected=True
else:
self.selected=False
#-Text and Key Manipulator-#
def keyPressed(self, event):
if event.type == KEYDOWN:
if event.key==K_DELETE and self.digit_position<len(self.text):
self.text=self.text[:self.digit_position]+self.text[self.digit_position+1:]
else:
if event.key==K_BACKSPACE and self.digit_position>0:
self.text=self.text[:self.digit_position-1]+self.text[self.digit_position:]
self.digit_position-=1
else:
if event.key==K_RIGHT and self.digit_position<len(self.text):
self.digit_position+=1
else:
if event.key==K_LEFT and self.digit_position>0:
self.digit_position-=1
else:
if event.key >= 32 and event.key <= 126 and len(self.text)<self.limit:
if pygame.key.get_mods() & KMOD_SHIFT or pygame.key.get_mods() & KMOD_CAPS:
self.text=self.text[:self.digit_position]+(chr(event.key).upper())+self.text[self.digit_position:]
else:
self.text=self.text[:self.digit_position]+chr(event.key)+self.text[self.digit_position:]
self.digit_position+=1
def keyControls(self, event):
self.mouseSelect(event)
if self.selected:
self.keyPressed(event)
return False
class linkedButton(object):
def __init__(self, screen, font, text_vector, x,y ,rect, x_indentation, y_indentation, image=None):
self.text=[text_vector]*3
self.x=x
self.y=y
self.rect=[rect]*3
if image!=None:
self.image=[image]*3
self.scale=[self.image[0].get_size()]*3
else:
self.image=[None]*3
self.scale=[(1,1)]*3
self.bar_image=[None]*3
self.bar_scale=[(1,1)]*3
#scale = image resize
self.bar=[(0,0,0)]*3
self.bar_size=[(1,1)]*3
#size = rect size
self.background=[(255,255,255)]*3
self.color=[(0,0,0)]*3
self.font=[font]*3
self.x_indentation=[x_indentation]*3
self.y_indentation=[y_indentation]*3
self.state=0
self.link=screen
self.actived=False
self.hovered_sound=None
self.selected_sound=None
self.wait_sound=0
def buttonTurnOff(self):
self.actived=False
def moveTo(self, location):
for r in self.rect:
r.x=location[0]
r.y=location[1]
# state-0 ("normal") setters; named setNormal_* so they don't shadow the setSelect_* methods defined further below
def setNormal_Text(self, text, font=None):
self.text[0]=text
if font!=None:
self.font[0]=font
def setNormal_Font(self, font):
self.font[0]=font
def setNormal_Indentation(self, indent):
self.x_indentation[0]=indent[0]
self.y_indentation[0]=indent[1]
def setButton(self, font, text_vector, rect, x_indentation, y_indentation):
self.text[0]=text_vector
self.rect[0]=rect
self.font[0]=font
self.x_indentation[0]=x_indentation
self.y_indentation[0]=y_indentation
def setColors(self, bar_color, background_color, text_color):
self.bar[0]=bar_color
self.background[0]=background_color
self.color[0]=text_color
def setBarColor(self, bar_color):
self.bar[0]=bar_color
def setBackground(self, background_color):
self.background[0]=background_color
def setTextColor(self, text_color):
self.color[0]=text_color
def setBarSize(self, bar_size):
self.bar_size[0]=bar_size
def setImage(self, image):
self.image[0]=image
def setScale(self, size):
self.scale[0]=size
def setBarImage(self, image):
self.bar_image[0]=image
def setBarScale(self, size):
self.bar_scale[0]=size
def setHover_Sound(self,sound):
self.hovered_sound=sound
def setHover_Text(self, text, font=None):
self.text[1]=text
if font!=None:
self.font[1]=font
def setHover_Font(self, font):
self.font[1]=font
def setHover_Indentation(self, indent):
self.x_indentation[1]=indent[0]
self.y_indentation[1]=indent[1]
def setHover(self, font, text_vector, rect, x_indentation, y_indentation):
self.text[1]=text_vector
self.rect[1]=rect
self.font[1]=font
self.x_indentation[1]=x_indentation
self.y_indentation[1]=y_indentation
def setHover_Colors(self, bar_color, background_color, text_color):
self.bar[1]=bar_color
self.background[1]=background_color
self.color[1]=text_color
def setHover_BarSize(self, bar_size):
self.bar_size[1]=bar_size
def setHover_Image(self, image):
self.image[1]=image
def setHover_Scale(self, size):
self.scale[1]=size
def setHover_BarImage(self, image):
self.bar_image[1]=image
def setHover_BarScale(self, size):
self.bar_scale[1]=size
def setSelect_Sound(self,sound):
self.selected_sound=sound
def setSelect_Text(self, text, font=None):
self.text[2]=text
if font!=None:
self.font[2]=font
def setSelect_Font(self, font):
self.font[2]=font
def setSelect_Indentation(self, indent):
self.x_indentation[2]=indent[0]
self.y_indentation[2]=indent[1]
def setSelect(self, font, text_vector, rect, x_indentation, y_indentation):
self.text[2]=text_vector
self.rect[2]=rect
self.font[2]=font
self.x_indentation[2]=x_indentation
self.y_indentation[2]=y_indentation
def setSelect_Colors(self, bar_color, background_color, text_color):
self.bar[2]=bar_color
self.background[2]=background_color
self.color[2]=text_color
def setAll_BarColor(self, bar_color):
self.bar=[bar_color]*3
def setAll_Background(self, background_color):
self.background=[background_color]*3
def setAll_TextColor(self, text_color):
self.color=[text_color]*3
def setAll_Text(self, text, font=None):
self.text=[text]*3
if font!=None:
self.font=[font]*3
def setAll_Font(self, font):
self.font=[font]*3
def setAll_Indentation(self, indent):
self.x_indentation=[indent[0]]*3
self.y_indentation=[indent[1]]*3
def setAll_BarSize(self, bar_size):
self.bar_size=[bar_size]*3
def setAll_Image(self, image):
self.image=[image]*3
def setAll_Scale(self, size):
self.scale=[size]*3
def setAll_BarImage(self, image):
self.bar_image=[image]*3
def setAll_BarScale(self, size):
self.bar_scale=[size]*3
def setAll_Sound(self,sound):
self.hovered_sound=sound
self.selected_sound=sound
def setEach_Sound(self,sound1,sound2):
self.hovered_sound=sound1
self.selected_sound=sound2
def setEach_Text(self, text1,text2,text3):
self.text=[text1,text2,text3]
def setEach_Image(self, img1,img2,img3):
self.image=[img1,img2,img3]
self.scale=[img1.get_size(),img2.get_size(),img3.get_size()]
def setEach_Scale(self, size,size_hover,size_select):
self.scale=[size,size_hover,size_select]
def setEach_BarSize(self, size,size_hover,size_select):
self.bar_size=[size,size_hover,size_select]
def setSound_Time(self,time=-1):
self.wait_sound=time
def blitOn(self, screen):
try:
if self.bar_image[self.state]==None:
screen.fill( self.bar[self.state], pygame.Rect(self.x+self.rect[self.state].x,self.y+self.rect[self.state].y,self.rect[self.state].w,self.rect[self.state].h))
else:
scaled_bar = pygame.transform.scale(self.bar_image[self.state],self.bar_scale[self.state])
screen.blit( scaled_bar, (self.x+self.rect[self.state].x,self.y+self.rect[self.state].y))
if self.image[self.state]==None:
screen.fill( self.background[self.state],pygame.Rect(self.x+self.rect[self.state].x+self.bar_size[self.state][0],self.y+self.rect[self.state].y+self.bar_size[self.state][1],self.rect[self.state].width-2*self.bar_size[self.state][0], self.rect[self.state].height-2*self.bar_size[self.state][1]))
else:
scaled_image = pygame.transform.scale( self.image[self.state],self.scale[self.state])
screen.blit( scaled_image,(self.x+self.rect[self.state].x+self.bar_size[self.state][0],self.y+self.rect[self.state].y+self.bar_size[self.state][1]))
except:
pass
for n in range(len(self.text[self.state])):
text_display=self.font[self.state].render(self.text[self.state][n],True,self.color[self.state])
screen.blit(text_display,(self.x+self.rect[self.state].x+self.x_indentation[self.state],self.y+self.rect[self.state].y+self.y_indentation[self.state]+n*self.font[self.state].get_height()))
def mouseColide(self,event):
x,y=event.pos
if x>self.x+self.rect[self.state].x and y>self.y+self.rect[self.state].y and x< self.x+self.rect[self.state].x+self.rect[self.state].width and y < self.y+self.rect[self.state].y+self.rect[self.state].height:
return True
else:
return False
def keyControls(self, event, refresh_itens=None):
reply=False
if event.type == MOUSEBUTTONDOWN and event.button==1:
if self.mouseColide(event):
if self.state!=2 and self.selected_sound!=None:
self.selected_sound.play()
if self.wait_sound>0:
time.sleep(self.wait_sound)
if self.wait_sound<0:
time.sleep(self.selected_sound.get_length())
self.state=2
self.actived=True
if event.type == MOUSEMOTION and self.actived==False:
if self.mouseColide(event):
if self.state!=1 and self.hovered_sound!=None:
self.hovered_sound.play()
self.state=1
else:
self.state=0
if event.type == MOUSEBUTTONUP and event.button==1:
self.actived=False
if self.mouseColide(event):
reply = self.link
if refresh_itens is not None:
try:
self.link.refresh(refresh_itens)
except AttributeError:
pass
return reply
class accelGame(object):
def __init__(self, fps):
self.fps=fps
def linkReply(self,prevscr):
prevscr.fps=self.fps
return prevscr
class volumeControl(object):
def __init__(self, how_much):
self.amount=how_much
def linkReply(self, prevscr):
pygame.mixer.music.set_volume(pygame.mixer.music.get_volume() + self.amount)
return prevscr
class renderVolume(object):
def __init__(self, fonte,cor,x,y,diferenciador=1):
self.font=fonte
self.color=cor
self.x=x
self.y=y
self.unit=diferenciador
def blitOn(self,scr):
scr.blit(self.font.render(str(int(pygame.mixer.music.get_volume()*self.unit)),True,self.color),(self.x,self.y))
def keyControls(self,event):pass
class directWriter(object):
def __init__(self, render, x,y):
self.render=render
self.x=x
self.y=y
def blitOn(self,scr):
scr.blit(self.render,(self.x,self.y))
def keyControls(self,event):pass
class accelKey(object):
def __init__(self,fps,key):
self.link=accelGame(fps)
self.key=key
def keyControls(self,event):
retorno=False
if event.type==KEYUP:
if event.key==self.key:
retorno=self.link
return retorno
def blitOn(self,scr):pass
class linkedMusic(object):
def __init__(self, link, music, repeat, start_time):
self.link=link
self.music=music
self.repeat=repeat
self.start_time=start_time
def linkReply(self,prevscr):
pygame.mixer.music.stop()
try:
pygame.mixer.music.load(self.music)
except:
print "can't load music"
try:
pygame.mixer.music.play(self.repeat,self.start_time)
except:
print "can't play music"
self.link.stored_music=self.music
return self.link
class sendItens(object):
def __init__(self):
self.actived=True
self.itens=[]
self.destiny=None
def refresh(self, refresh):
self.itens=[]
for item in refresh[0]:
self.itens.append(item.text)
self.destiny=refresh[1]
def linkReply(self,tela_principal):
tela_principal.addItem(self.destiny)
tela_principal.itens[len(tela_principal.itens)-1].receive(self.itens)
return tela_principal
class submitGroup(object):
#destiny=None
#sendItens used to be an inner class here, but that didn't work out
def __init__(self, destiny,(font, text_vector, rect, x_indentation, y_indentation), *itens):
self.destiny=destiny
self.itens=list(itens)
self.sendbutton=linkedButton(sendItens(), font, text_vector, rect, x_indentation, y_indentation)
def addItem(self,item):
self.itens.append(item)
def keyControls(self, event):
for item in self.itens:
item.keyControls(event)
return self.sendbutton.keyControls(event,(self.itens,self.destiny))
def blitOn(self,scr):
for item in self.itens:
item.blitOn(scr)
self.sendbutton.blitOn(scr)
class basicReceive(object):
def __init__(self, x,y, font, color):
self.itens=[]
self.x=x
self.y=y
self.font=font
self.color=color
def receive(self,itens):
self.itens=itens
def blitOn(self, scr):
text_display=self.font.render("Sent Data:",True,self.color)
scr.blit(text_display,(self.x,self.y-self.font.get_height()))
for n in range(len(self.itens)):
text_display=self.font.render(str(self.itens[n]),True,self.color)
scr.blit(text_display,(self.x,self.y+n*self.font.get_height()))
def keyControls(self, event):
pass
class osWeblink(object):
def __init__(self, link):
self.link=link
def linkReply(self, previousscreen):
os.system("START "+str(self.link))
return previousscreen
class osCommand(object):
def __init__(self, *cmd):
self.cmd_vector=list(cmd)
def linkReply(self, previousscreen):
for cmd in self.cmd_vector: os.system(str(cmd))
return previousscreen
class itemCounter(object):
def __init__(self,scr,link,count):
self.scr=scr
self.link=link
self.count=count
def keyControls(self,event):pass
def blitOn(self,scr):
if self.count==0:
self.scr.linkcalled=self.link
class moveObject(object):
def __init__(self, origem,item, pos_ini, destino, velocidade):
self.item=item
self.origem=origem
self.pos_ini=pos_ini
self.destino=destino
self.step=(((destino[0]-pos_ini[0])/velocidade),((destino[1]-pos_ini[1])/velocidade))
def keyControls(self,event):
return False
def blitOn(self,scr):
try:
if self.destino[0]!=self.item.x:
self.item.x+=self.step[0]
if self.destino[1]!=self.item.y:
self.item.y+=self.step[1]
if self.destino[0]==self.item.x and self.destino[1]==self.item.y:
self.origem.count-=1
self=None
except:
try:
if self.destino[0]!=self.item.pos[0]:
self.item.pos[0]+=self.step[0]
if self.destino[1]!=self.item.pos[1]:
self.item.pos[1]+=self.step[1]
if self.destino[0]==self.item.pos[0] and self.destino[1]==self.item.pos[1]:
self.origem.count-=1
self=None
except:
pass
def fadeIn(scr,link, *itens):
origem=itemCounter(scr,link,len(itens))
scr.addItem(origem)
for item in itens:
try:
scr.addItem(moveObject(origem,item[0], (item[0].x,item[0].y), item[1], item[2]))
except:
try:
scr.addItem(moveObject(origem,item[0], (item[0].pos[0],item[0].pos[1]), item[1], item[2]))# item, initial position, destination, speed
except:
pass
class dynamicScreen(object):
def __init__(self, size, fps, color, *itens):
self.size=size
self.fps=fps
self.color=color
self.image=None
self.imagepos=(0,0)
self.itens=list(itens)
self.screenpos=(0,0)
self.storedlink=None
self.linkcalled=False
self.running=True
self.full=False
self.music=False
self.stored_music=False
self.startpos=None
self.repeat=None
self.stopmusic=False
self.title=None
self.title_fps=False
def setMusic(self, music,repeat,startpos, stop=True):
self.music=music
self.repeat=repeat
self.startpos=startpos
self.stopmusic=stop
def setFULLSCREEN(self):
self.full=True
def linkReply(self, previousscreen):
return self
def setImage(self, image, imagepos):
self.image=image
self.imagepos=imagepos
def setColor(self, color):
self.color=color
def addItem(self,*itens):
for item in itens:
self.itens.append(item)
#print len(self.itens)
#print type(item)
def setTitle(self,title,fps=False):
self.title=title
self.title_fps=fps
def run(self):
if self.full:
infoObject = pygame.display.Info()
self.size=(infoObject.current_w, infoObject.current_h)
display = pygame.display.set_mode(self.size,FULLSCREEN)
else:
display = pygame.display.set_mode(self.size)
fpsClock=pygame.time.Clock()
self.linkcalled=False
if self.music:# and pygame.mixer.music.get_busy()==False
pygame.mixer.music.stop()
pygame.mixer.music.load(self.music)
pygame.mixer.music.play(self.repeat,self.startpos)
if self.title: pygame.display.set_caption(self.title)
while self.running and self.linkcalled==False:
if self.title==None: pygame.display.set_caption(str(int(fpsClock.get_fps()))+":"+str(self.fps))
elif self.title_fps==True: pygame.display.set_caption(str(self.title)+" FPS "+str(int(fpsClock.get_fps()))+":"+str(self.fps))
for event in pygame.event.get():
if event.type==QUIT:
self.storedlink=None
self.running=False
for i in range(len(self.itens)):
try:
#time.sleep( (len(self.itens)-i)/100)
self.linkcalled=self.itens[i].keyControls(event)
if self.linkcalled!=None and self.linkcalled!=False:
break
except:
#print 'keyControls() error on screen "'+str(self.title)+'", self.itens[ '+str(i)+' ]'
pass
display.fill(self.color)
if self.image!=None:
try:
display.blit(self.image,self.imagepos)
except:
print 'background error'
for i in range(len(self.itens)):
try:
self.itens[i].blitOn(display)
except:
#print 'blitOn() error on screen "'+str(self.title)+'", self.itens[ '+str(i)+' ]'
pass
pygame.display.update()
fpsClock.tick(self.fps)
if self.linkcalled!=None and self.linkcalled!=False:
self.storedlink=self.linkcalled
if self.stopmusic:
pygame.mixer.music.stop()
'''
frame_atual=0
class extraControls(object):
def __init__(self,tela):
self.tela=tela
self.frame_skip=1
def blitOn(self,screen):
global frame_atual
frame_atual+=self.frame_skip
def keyControls(self,event):
return False
tela3=dynamicScreen((800,600),60,(90,90,90))
tela2=dynamicScreen((800,600),60,(90,90,90),linkedButton(osWeblink("www.google.com"),pygame.font.SysFont("arial",12),["google"],100,200,pygame.Rect(0,0,100,100),0,0))
tela1=dynamicScreen((800,600),60,(90,90,90),linkedButton(tela2,pygame.font.SysFont("arial",12),["tela2"],100,100,pygame.Rect(0,0,100,100),0,0),linkedButton(tela3,pygame.font.SysFont("arial",12),["tela3"],200,100,pygame.Rect(0,0,100,100),0,0))
tela2.addItem(linkedButton(tela1,pygame.font.SysFont("arial",12),["tela1"],100,100,pygame.Rect(0,0,100,100),0,0))
tela3.addItem(linkedButton(tela1,pygame.font.SysFont("arial",12),["tela1"],100,100,pygame.Rect(0,0,100,100),0,0))
#tela1.setMusic("Five Armies.mp3",-1,0.0,False)
tela1.addItem(extraControls(tela1))
#tela2.setMusic("Teller of the Tales.mp3",-1,0.0,True)
treeBeeStart(tela1)
pygame.quit()
'''
|
FernandoLuizNemeChibli/pyTreeBee
|
pyTreeBee.py
|
Python
|
lgpl-2.1
| 25,228
|
[
"VisIt"
] |
ef551a39629fe6fac1044b8def805deddfcdbf33b582786560fb2eec056a648d
|
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import gammaln, digamma
from ...util.linalg import pdinv
from paramz.domains import _REAL, _POSITIVE, _NEGATIVE
import warnings
import weakref
class Prior(object):
domain = None
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance or cls._instance.__class__ is not cls:
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
cls._instance = newfunc(cls)
else:
cls._instance = newfunc(cls, *args, **kwargs)
return cls._instance
def pdf(self, x):
return np.exp(self.lnpdf(x))
def plot(self):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import priors_plots
priors_plots.univariate_plot(self)
def __repr__(self, *args, **kwargs):
return self.__str__()
class Gaussian(Prior):
"""
Implementation of the univariate Gaussian probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, sigma=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
def __str__(self):
return "N({:.2g}, {:.2g})".format(self.mu, self.sigma)
def lnpdf(self, x):
return self.constant - 0.5 * np.square(x - self.mu) / self.sigma2
def lnpdf_grad(self, x):
return -(x - self.mu) / self.sigma2
def rvs(self, n):
return np.random.randn(n) * self.sigma + self.mu
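# usage sketch (illustrative values): Gaussian(mu=0, sigma=1).lnpdf(0.)
# returns -0.5*log(2*pi) ~= -0.9189, and rvs(5) draws 5 samples from N(0, 1)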
# def __getstate__(self):
# return self.mu, self.sigma
#
# def __setstate__(self, state):
# self.mu = state[0]
# self.sigma = state[1]
# self.sigma2 = np.square(self.sigma)
# self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
class Uniform(Prior):
_instances = []
def __new__(cls, lower=0, upper=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().lower == lower and instance().upper == upper:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, lower, upper)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, lower, upper):
self.lower = float(lower)
self.upper = float(upper)
assert self.lower < self.upper, "Lower needs to be strictly smaller than upper."
if self.lower >= 0:
self.domain = _POSITIVE
elif self.upper <= 0:
self.domain = _NEGATIVE
else:
self.domain = _REAL
def __str__(self):
return "[{:.2g}, {:.2g}]".format(self.lower, self.upper)
def lnpdf(self, x):
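# note: this returns a 0/1 indicator of the support rather than a
# normalized log-density; the constant offset is irrelevant for MAP use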
region = (x >= self.lower) * (x <= self.upper)
return region
def lnpdf_grad(self, x):
return np.zeros(x.shape)
def rvs(self, n):
return np.random.uniform(self.lower, self.upper, size=n)
# def __getstate__(self):
# return self.lower, self.upper
#
# def __setstate__(self, state):
# self.lower = state[0]
# self.upper = state[1]
class LogGaussian(Gaussian):
"""
Implementation of the univariate *log*-Gaussian probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __new__(cls, mu=0, sigma=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.constant = -0.5 * np.log(2 * np.pi * self.sigma2)
def __str__(self):
return "lnN({:.2g}, {:.2g})".format(self.mu, self.sigma)
def lnpdf(self, x):
return self.constant - 0.5 * np.square(np.log(x) - self.mu) / self.sigma2 - np.log(x)
def lnpdf_grad(self, x):
return -((np.log(x) - self.mu) / self.sigma2 + 1.) / x
def rvs(self, n):
return np.exp(np.random.randn(int(n)) * self.sigma + self.mu)
class MultivariateGaussian(Prior):
"""
Implementation of the multivariate Gaussian probability function, coupled with random variables.
:param mu: mean (N-dimensional array)
:param var: covariance matrix (NxN)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, var=1): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if
instance()]
for instance in cls._instances:
if np.all(instance().mu == mu) and np.all(
instance().var == var):
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, var)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, var):
self.mu = np.array(mu).flatten()
self.var = np.array(var)
assert len(self.var.shape) == 2, 'Covariance must be a matrix'
assert self.var.shape[0] == self.var.shape[1], \
'Covariance must be a square matrix'
assert self.var.shape[0] == self.mu.size
self.input_dim = self.mu.size
self.inv, _, self.hld, _ = pdinv(self.var)
self.constant = -0.5 * (self.input_dim * np.log(2 * np.pi) + self.hld)
def __str__(self):
return 'MultiN(' + str(self.mu) + ', ' + str(np.diag(self.var)) + ')'
def summary(self):
raise NotImplementedError
def pdf(self, x):
x = np.array(x).flatten()
return np.exp(self.lnpdf(x))
def lnpdf(self, x):
x = np.array(x).flatten()
d = x - self.mu
return self.constant - 0.5 * np.dot(d.T, np.dot(self.inv, d))
def lnpdf_grad(self, x):
x = np.array(x).flatten()
d = x - self.mu
return - np.dot(self.inv, d)
def rvs(self, n):
return np.random.multivariate_normal(self.mu, self.var, n)
def plot(self):
import sys
assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
from ...plotting.matplot_dep import priors_plots
priors_plots.multivariate_plot(self)
def __getstate__(self):
return self.mu, self.var
def __setstate__(self, state):
self.mu = np.array(state[0]).flatten()
self.var = state[1]
assert len(self.var.shape) == 2, 'Covariance must be a matrix'
assert self.var.shape[0] == self.var.shape[1], \
'Covariance must be a square matrix'
assert self.var.shape[0] == self.mu.size
self.input_dim = self.mu.size
self.inv, _, self.hld, _ = pdinv(self.var)
self.constant = -0.5 * (self.input_dim * np.log(2 * np.pi) + self.hld)
def gamma_from_EV(E, V):
warnings.warn("use Gamma.from_EV to create Gamma Prior", FutureWarning)
return Gamma.from_EV(E, V)
class Gamma(Prior):
"""
Implementation of the Gamma probability function, coupled with random variables.
:param a: shape parameter
:param b: rate parameter (warning: it's the *inverse* of the scale)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __new__(cls, a=1, b=.5): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().a == a and instance().b == b:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, a, b)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
@property
def a(self):
return self._a
@property
def b(self):
return self._b
def __init__(self, a, b):
self._a = float(a)
self._b = float(b)
self.constant = -gammaln(self.a) + a * np.log(b)
def __str__(self):
return "Ga({:.2g}, {:.2g})".format(self.a, self.b)
def summary(self):
ret = {"E[x]": self.a / self.b, \
"E[ln x]": digamma(self.a) - np.log(self.b), \
"var[x]": self.a / self.b / self.b, \
"Entropy": gammaln(self.a) - (self.a - 1.) * digamma(self.a) - np.log(self.b) + self.a}
if self.a > 1:
ret['mode'] = (self.a - 1.) / self.b
else:
ret['mode'] = np.nan
return ret
def lnpdf(self, x):
return self.constant + (self.a - 1) * np.log(x) - self.b * x
def lnpdf_grad(self, x):
return (self.a - 1.) / x - self.b
def rvs(self, n):
return np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
@staticmethod
def from_EV(E, V):
"""
Creates an instance of a Gamma Prior by specifying the Expected value(s)
and Variance(s) of the distribution.
:param E: expected value
:param V: variance
"""
a = np.square(E) / V
b = E / V
return Gamma(a, b)
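# sanity sketch: a/b = E and a/b**2 = V by construction, e.g.
# Gamma.from_EV(1., .5) -> Ga(2, 2) with mean 2/2 = 1 and variance 2/4 = 0.5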
def __getstate__(self):
return self.a, self.b
def __setstate__(self, state):
self._a = state[0]
self._b = state[1]
self.constant = -gammaln(self.a) + self.a * np.log(self.b)
class InverseGamma(Gamma):
"""
Implementation of the inverse-Gamma probability function, coupled with random variables.
:param a: shape parameter
:param b: rate parameter (warning: it's the *inverse* of the scale)
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _POSITIVE
_instances = []
def __str__(self):
return "iGa({:.2g}, {:.2g})".format(self.a, self.b)
def summary(self):
return {}
@staticmethod
def from_EV(E, V):
raise NotImplementedError
def lnpdf(self, x):
return self.constant - (self.a + 1) * np.log(x) - self.b / x
def lnpdf_grad(self, x):
return -(self.a + 1.) / x + self.b / x ** 2
def rvs(self, n):
return 1. / np.random.gamma(scale=1. / self.b, shape=self.a, size=n)
class DGPLVM_KFDA(Prior):
"""
Implementation of the Discriminative Gaussian Process Latent Variable prior
using Kernel Fisher Discriminant Analysis (after Seung-Jean Kim), as applied
in the face verification paper by Chaochao Lu.
:param lambdaa: constant
:param sigma2: constant
.. Note:: DGPLVM implementation from the "Surpassing Human-Level Face" verification paper
"""
domain = _REAL
# _instances = []
# def __new__(cls, lambdaa, sigma2): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, lambdaa, sigma2, lbl, kern, x_shape):
"""A description for init"""
self.datanum = lbl.shape[0]
self.classnum = lbl.shape[1]
self.lambdaa = lambdaa
self.sigma2 = sigma2
self.lbl = lbl
self.kern = kern
lst_ni = self.compute_lst_ni()
self.a = self.compute_a(lst_ni)
self.A = self.compute_A(lst_ni)
self.x_shape = x_shape
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
# This function assigns each data point to its own class
# and returns the dictionary which contains the class name and parameters.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
if len(cls) > 2:
for i in range(2, self.classnum):
del cls[i]
return cls
def x_reduced(self, cls):
x1 = cls[0]
x2 = cls[1]
x = np.concatenate((x1, x2), axis=0)
return x
def compute_lst_ni(self):
lst_ni = []
lst_ni1 = []
lst_ni2 = []
f1 = (np.where(self.lbl[:, 0] == 1)[0])
f2 = (np.where(self.lbl[:, 1] == 1)[0])
for idx in f1:
lst_ni1.append(idx)
for idx in f2:
lst_ni2.append(idx)
lst_ni.append(len(lst_ni1))
lst_ni.append(len(lst_ni2))
return lst_ni
def compute_a(self, lst_ni):
a = np.ones((self.datanum, 1))
count = 0
for N_i in lst_ni:
if N_i == lst_ni[0]:
a[count:count + N_i] = (float(1) / N_i) * a[count]
count += N_i
else:
if N_i == lst_ni[1]:
a[count: count + N_i] = -(float(1) / N_i) * a[count]
count += N_i
return a
def compute_A(self, lst_ni):
A = np.zeros((self.datanum, self.datanum))
idx = 0
for N_i in lst_ni:
B = float(1) / np.sqrt(N_i) * (np.eye(N_i) - ((float(1) / N_i) * np.ones((N_i, N_i))))
A[idx:idx + N_i, idx:idx + N_i] = B
idx += N_i
return A
# log-density of the prior (negative regularized KFDA objective)
def lnpdf(self, x):
x = x.reshape(self.x_shape)
K = self.kern.K(x)
a_trans = np.transpose(self.a)
paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
inv_part = pdinv(paran)[0]
J = a_trans.dot(K).dot(self.a) - a_trans.dot(K).dot(self.A).dot(inv_part).dot(self.A).dot(K).dot(self.a)
J_star = (1. / self.lambdaa) * J
return (-1. / self.sigma2) * J_star
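# The quantity above is the regularized kernel Fisher discriminant
# criterion J = a'Ka - a'KA(lambda*I + AKA)^{-1}AKa with J* = J/lambda;
# the log prior is -J*/sigma2, matching lnpdf's return value.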
# gradient of the log-density with respect to the latent points x
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
K = self.kern.K(x)
paran = self.lambdaa * np.eye(x.shape[0]) + self.A.dot(K).dot(self.A)
inv_part = pdinv(paran)[0]
b = self.A.dot(inv_part).dot(self.A).dot(K).dot(self.a)
a_Minus_b = self.a - b
a_b_trans = np.transpose(a_Minus_b)
DJ_star_DK = (1. / self.lambdaa) * (a_Minus_b.dot(a_b_trans))
DJ_star_DX = self.kern.gradients_X(DJ_star_DK, x)
return (-1. / self.sigma2) * DJ_star_DX
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior'
    def __getstate__(self):
        return self.lbl, self.lambdaa, self.sigma2, self.kern, self.x_shape
    def __setstate__(self, state):
        lbl, lambdaa, sigma2, kern, x_shape = state
self.datanum = lbl.shape[0]
self.classnum = lbl.shape[1]
self.lambdaa = lambdaa
self.sigma2 = sigma2
self.lbl = lbl
self.kern = kern
lst_ni = self.compute_lst_ni()
self.a = self.compute_a(lst_ni)
self.A = self.compute_A(lst_ni)
self.x_shape = x_shape
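# Illustrative sketch (not from the original file): one plausible way to build
# this prior. `one_hot_labels` and `X` are hypothetical arrays; lbl must be an
# (N, 2) one-hot matrix, since the prior keeps only the first two classes.
# >>> kern = GPy.kern.RBF(input_dim=2)          # kernel over the latent space
# >>> prior = DGPLVM_KFDA(lambdaa=1e-2, sigma2=1., lbl=one_hot_labels,
# ...                     kern=kern, x_shape=(N, 2))
# >>> lp = prior.lnpdf(X.flatten())             # log-density of latent points X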
class DGPLVM(Prior):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
    def __new__(cls, sigma2, lbl, x_shape):
        # object.__new__ takes no extra arguments, so construct with the class only
        return super(Prior, cls).__new__(cls)
def __init__(self, sigma2, lbl, x_shape):
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
    # Assigns each data point to its class and returns a dictionary
    # mapping each class label to the list of its data points.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
    # Computes the mean of each class along every dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
    # Adds data points as (index, value) tuples to the dictionary so the indices stay accessible
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
    # Adds the per-class indices to a list so we can access all the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
                # Nothing to do on the first iteration: the list is already empty
                pass
else:
lst_idx = []
            # Put the indices of each class into the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
    # Calculates the between-class variance Sb
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
    # Calculates the within-class variance Sw
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
    # Calculates the W_j terms separately so they can be reused at any time
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# sb_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
    # Calculates the derivative of the log prior
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.1)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
return DPx_Dx.T
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq'
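# Minimal sketch (assumption, not part of the original file): evaluating the
# DGPLVM prior and its gradient; `one_hot_labels` and `X` are hypothetical arrays.
# >>> prior = DGPLVM(sigma2=1., lbl=one_hot_labels, x_shape=X.shape)
# >>> f0 = prior.lnpdf(X.flatten())
# >>> g = prior.lnpdf_grad(X.flatten())          # same shape as X (transposed layout)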
# ******************************************
from . import Parameterized
from . import Param
class DGPLVM_Lamda(Prior, Parameterized):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
# _instances = []
# def __new__(cls, mu, sigma): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, sigma2, lbl, x_shape, lamda, name='DP_prior'):
super(DGPLVM_Lamda, self).__init__(name=name)
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.lamda = lamda
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
self.lamda = Param('lamda', np.diag(lamda))
self.link_parameter(self.lamda)
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
    # Assigns each data point to its class and returns a dictionary
    # mapping each class label to the list of its data points.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
    # Computes the mean of each class along every dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
    # Adds data points as (index, value) tuples to the dictionary so the indices stay accessible
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
    # Adds the per-class indices to a list so we can access all the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
                # Nothing to do on the first iteration: the list is already empty
                pass
else:
lst_idx = []
            # Put the indices of each class into the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
    # Calculates the between-class variance Sb
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
    # Calculates the within-class variance Sw
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
    # Calculates the W_j terms separately so they can be reused at any time
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
        # NOTE: optional renormalisation of lamda, currently disabled:
        #self.lamda.values[:] = self.lamda.values/self.lamda.values.sum()
xprime = x.dot(np.diagflat(self.lamda))
x = xprime
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
    # Calculates the derivative of the log prior
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
xprime = x.dot(np.diagflat(self.lamda))
x = xprime
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.5))[0]
Sb_inv_N = pdinv(Sb + np.eye(Sb.shape[0])*0.9)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
DPxprim_Dx = np.diagflat(self.lamda).dot(DPx_Dx)
        # GPy uses denominator layout, so transpose to match the shape of our matrix
DPxprim_Dx = DPxprim_Dx.T
DPxprim_Dlamda = DPx_Dx.dot(x)
        # GPy uses denominator layout, so transpose to match the shape of our matrix
DPxprim_Dlamda = DPxprim_Dlamda.T
self.lamda.gradient = np.diag(DPxprim_Dlamda)
# print DPxprim_Dx
return DPxprim_Dx
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq_Lamda'
# ******************************************
class DGPLVM_T(Prior):
"""
Implementation of the Discriminative Gaussian Process Latent Variable model paper, by Raquel.
:param sigma2: constant
.. Note:: DGPLVM for Classification paper implementation
"""
domain = _REAL
# _instances = []
# def __new__(cls, mu, sigma): # Singleton:
# if cls._instances:
# cls._instances[:] = [instance for instance in cls._instances if instance()]
# for instance in cls._instances:
# if instance().mu == mu and instance().sigma == sigma:
# return instance()
# o = super(Prior, cls).__new__(cls, mu, sigma)
# cls._instances.append(weakref.ref(o))
# return cls._instances[-1]()
def __init__(self, sigma2, lbl, x_shape, vec):
self.sigma2 = sigma2
# self.x = x
self.lbl = lbl
self.classnum = lbl.shape[1]
self.datanum = lbl.shape[0]
self.x_shape = x_shape
self.dim = x_shape[1]
self.vec = vec
def get_class_label(self, y):
for idx, v in enumerate(y):
if v == 1:
return idx
return -1
    # Assigns each data point to its class and returns a dictionary
    # mapping each class label to the list of its data points.
def compute_cls(self, x):
cls = {}
# Appending each data point to its proper class
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in cls:
cls[class_label] = []
cls[class_label].append(x[j])
return cls
    # Computes the mean of each class along every dimension
def compute_Mi(self, cls):
M_i = np.zeros((self.classnum, self.dim))
for i in cls:
# Mean of each class
# class_i = np.multiply(cls[i],vec)
class_i = cls[i]
M_i[i] = np.mean(class_i, axis=0)
return M_i
    # Adds data points as (index, value) tuples to the dictionary so the indices stay accessible
def compute_indices(self, x):
data_idx = {}
for j in range(self.datanum):
class_label = self.get_class_label(self.lbl[j])
if class_label not in data_idx:
data_idx[class_label] = []
t = (j, x[j])
data_idx[class_label].append(t)
return data_idx
    # Adds the per-class indices to a list so we can access all the indices
def compute_listIndices(self, data_idx):
lst_idx = []
lst_idx_all = []
for i in data_idx:
if len(lst_idx) == 0:
                # Nothing to do on the first iteration: the list is already empty
                pass
else:
lst_idx = []
            # Put the indices of each class into the list called lst_idx_all
for m in range(len(data_idx[i])):
lst_idx.append(data_idx[i][m][0])
lst_idx_all.append(lst_idx)
return lst_idx_all
    # Calculates the between-class variance Sb
def compute_Sb(self, cls, M_i, M_0):
Sb = np.zeros((self.dim, self.dim))
for i in cls:
B = (M_i[i] - M_0).reshape(self.dim, 1)
B_trans = B.transpose()
Sb += (float(len(cls[i])) / self.datanum) * B.dot(B_trans)
return Sb
    # Calculates the within-class variance Sw
def compute_Sw(self, cls, M_i):
Sw = np.zeros((self.dim, self.dim))
for i in cls:
N_i = float(len(cls[i]))
W_WT = np.zeros((self.dim, self.dim))
for xk in cls[i]:
W = (xk - M_i[i])
W_WT += np.outer(W, W)
Sw += (N_i / self.datanum) * ((1. / N_i) * W_WT)
return Sw
# Calculating beta and Bi for Sb
def compute_sig_beta_Bi(self, data_idx, M_i, M_0, lst_idx_all):
# import pdb
# pdb.set_trace()
B_i = np.zeros((self.classnum, self.dim))
Sig_beta_B_i_all = np.zeros((self.datanum, self.dim))
for i in data_idx:
# pdb.set_trace()
# Calculating Bi
B_i[i] = (M_i[i] - M_0).reshape(1, self.dim)
for k in range(self.datanum):
for i in data_idx:
N_i = float(len(data_idx[i]))
if k in lst_idx_all[i]:
beta = (float(1) / N_i) - (float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
else:
beta = -(float(1) / self.datanum)
Sig_beta_B_i_all[k] += float(N_i) / self.datanum * (beta * B_i[i])
Sig_beta_B_i_all = Sig_beta_B_i_all.transpose()
return Sig_beta_B_i_all
    # Calculates the W_j terms separately so they can be reused at any time
def compute_wj(self, data_idx, M_i):
W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
xj = tpl[1]
j = tpl[0]
W_i[j] = (xj - M_i[i])
return W_i
# Calculating alpha and Wj for Sw
def compute_sig_alpha_W(self, data_idx, lst_idx_all, W_i):
Sig_alpha_W_i = np.zeros((self.datanum, self.dim))
for i in data_idx:
N_i = float(len(data_idx[i]))
for tpl in data_idx[i]:
k = tpl[0]
for j in lst_idx_all[i]:
if k == j:
alpha = 1 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
else:
alpha = 0 - (float(1) / N_i)
Sig_alpha_W_i[k] += (alpha * W_i[j])
Sig_alpha_W_i = (1. / self.datanum) * np.transpose(Sig_alpha_W_i)
return Sig_alpha_W_i
# This function calculates log of our prior
def lnpdf(self, x):
x = x.reshape(self.x_shape)
xprim = x.dot(self.vec)
x = xprim
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ', Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
return (-1 / self.sigma2) * np.trace(Sb_inv_N.dot(Sw))
    # Calculates the derivative of the log prior
def lnpdf_grad(self, x):
x = x.reshape(self.x_shape)
xprim = x.dot(self.vec)
x = xprim
# print x
cls = self.compute_cls(x)
M_0 = np.mean(x, axis=0)
M_i = self.compute_Mi(cls)
Sb = self.compute_Sb(cls, M_i, M_0)
Sw = self.compute_Sw(cls, M_i)
data_idx = self.compute_indices(x)
lst_idx_all = self.compute_listIndices(data_idx)
Sig_beta_B_i_all = self.compute_sig_beta_Bi(data_idx, M_i, M_0, lst_idx_all)
W_i = self.compute_wj(data_idx, M_i)
Sig_alpha_W_i = self.compute_sig_alpha_W(data_idx, lst_idx_all, W_i)
# Calculating inverse of Sb and its transpose and minus
# Sb_inv_N = np.linalg.inv(Sb + np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))
#Sb_inv_N = np.linalg.inv(Sb+np.eye(Sb.shape[0])*0.1)
#print 'SB_inv: ',Sb_inv_N
#Sb_inv_N = pdinv(Sb+ np.eye(Sb.shape[0]) * (np.diag(Sb).min() * 0.1))[0]
Sb_inv_N = pdinv(Sb+np.eye(Sb.shape[0])*0.1)[0]
Sb_inv_N_trans = np.transpose(Sb_inv_N)
Sb_inv_N_trans_minus = -1 * Sb_inv_N_trans
Sw_trans = np.transpose(Sw)
# Calculating DJ/DXk
DJ_Dxk = 2 * (
Sb_inv_N_trans_minus.dot(Sw_trans).dot(Sb_inv_N_trans).dot(Sig_beta_B_i_all) + Sb_inv_N_trans.dot(
Sig_alpha_W_i))
# Calculating derivative of the log of the prior
DPx_Dx = ((-1 / self.sigma2) * DJ_Dxk)
return DPx_Dx.T
# def frb(self, x):
# from functools import partial
# from GPy.models import GradientChecker
# f = partial(self.lnpdf)
# df = partial(self.lnpdf_grad)
# grad = GradientChecker(f, df, x, 'X')
# grad.checkgrad(verbose=1)
def rvs(self, n):
return np.random.rand(n) # A WRONG implementation
def __str__(self):
return 'DGPLVM_prior_Raq_TTT'
class HalfT(Prior):
"""
Implementation of the half student t probability function, coupled with random variables.
:param A: scale parameter
:param nu: degrees of freedom
"""
domain = _POSITIVE
_instances = []
def __new__(cls, A, nu): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().A == A and instance().nu == nu:
return instance()
        o = super(Prior, cls).__new__(cls)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, A, nu):
self.A = float(A)
self.nu = float(nu)
self.constant = gammaln(.5*(self.nu+1.)) - gammaln(.5*self.nu) - .5*np.log(np.pi*self.A*self.nu)
def __str__(self):
return "hT({:.2g}, {:.2g})".format(self.A, self.nu)
def lnpdf(self, theta):
return (theta > 0) * (self.constant - .5*(self.nu + 1) * np.log(1. + (1./self.nu) * (theta/self.A)**2))
# theta = theta if isinstance(theta,np.ndarray) else np.array([theta])
# lnpdfs = np.zeros_like(theta)
# theta = np.array([theta])
# above_zero = theta.flatten()>1e-6
# v = self.nu
# sigma2=self.A
# stop
# lnpdfs[above_zero] = (+ gammaln((v + 1) * 0.5)
# - gammaln(v * 0.5)
# - 0.5*np.log(sigma2 * v * np.pi)
# - 0.5*(v + 1)*np.log(1 + (1/np.float(v))*((theta[above_zero][0]**2)/sigma2))
# )
# return lnpdfs
def lnpdf_grad(self, theta):
theta = theta if isinstance(theta, np.ndarray) else np.array([theta])
grad = np.zeros_like(theta)
above_zero = theta > 1e-6
v = self.nu
sigma2 = self.A
grad[above_zero] = -0.5*(v+1)*(2*theta[above_zero])/(v*sigma2 + theta[above_zero][0]**2)
return grad
def rvs(self, n):
# return np.random.randn(n) * self.sigma + self.mu
from scipy.stats import t
# [np.abs(x) for x in t.rvs(df=4,loc=0,scale=50, size=10000)])
ret = t.rvs(self.nu, loc=0, scale=self.A, size=n)
ret[ret < 0] = 0
return ret
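# Quick sketch (not in the original file): the half-t density is masked by
# (theta > 0), so non-positive arguments evaluate to 0 rather than -inf.
# >>> ht = HalfT(A=1., nu=4.)
# >>> ht.lnpdf(np.array([0.5, 1.0]))   # finite log-densities
# >>> ht.lnpdf(-1.0)                   # 0.0, due to the (theta > 0) mask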
class Exponential(Prior):
"""
Implementation of the Exponential probability function,
coupled with random variables.
    :param l: rate parameter
"""
domain = _POSITIVE
_instances = []
def __new__(cls, l): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().l == l:
return instance()
        o = super(Exponential, cls).__new__(cls)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, l):
self.l = l
def __str__(self):
return "Exp({:.2g})".format(self.l)
def summary(self):
ret = {"E[x]": 1. / self.l,
"E[ln x]": np.nan,
"var[x]": 1. / self.l**2,
"Entropy": 1. - np.log(self.l),
"Mode": 0.}
return ret
def lnpdf(self, x):
return np.log(self.l) - self.l * x
def lnpdf_grad(self, x):
return - self.l
    def rvs(self, n):
        # np.random.exponential expects the *scale* (1/rate), so invert l here
        # to stay consistent with lnpdf and summary, which treat l as a rate
        return np.random.exponential(scale=1. / self.l, size=n)
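# Consistency check (sketch, not in the original file): with the scale=1/l
# correction in rvs above, the sample mean approaches summary()["E[x]"] == 1/l.
# >>> e = Exponential(l=2.)
# >>> abs(e.rvs(100000).mean() - e.summary()["E[x]"]) < 0.02   # mean ~ 0.5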
class StudentT(Prior):
"""
Implementation of the student t probability function, coupled with random variables.
:param mu: mean
:param sigma: standard deviation
:param nu: degrees of freedom
.. Note:: Bishop 2006 notation is used throughout the code
"""
domain = _REAL
_instances = []
def __new__(cls, mu=0, sigma=1, nu=4): # Singleton:
if cls._instances:
cls._instances[:] = [instance for instance in cls._instances if instance()]
for instance in cls._instances:
if instance().mu == mu and instance().sigma == sigma and instance().nu == nu:
return instance()
newfunc = super(Prior, cls).__new__
if newfunc is object.__new__:
o = newfunc(cls)
else:
o = newfunc(cls, mu, sigma, nu)
cls._instances.append(weakref.ref(o))
return cls._instances[-1]()
def __init__(self, mu, sigma, nu):
self.mu = float(mu)
self.sigma = float(sigma)
self.sigma2 = np.square(self.sigma)
self.nu = float(nu)
def __str__(self):
return "St({:.2g}, {:.2g}, {:.2g})".format(self.mu, self.sigma, self.nu)
def lnpdf(self, x):
from scipy.stats import t
return t.logpdf(x,self.nu,self.mu,self.sigma)
def lnpdf_grad(self, x):
return -(self.nu + 1.)*(x - self.mu)/( self.nu*self.sigma2 + np.square(x - self.mu) )
def rvs(self, n):
from scipy.stats import t
ret = t.rvs(self.nu, loc=self.mu, scale=self.sigma, size=n)
return ret
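# Sketch (not in the original file): lnpdf defers to scipy.stats.t, so the two
# agree by construction; large nu makes the prior approximately Gaussian.
# >>> st = StudentT(mu=0., sigma=1., nu=4.)
# >>> st.lnpdf(0.0)                    # == scipy.stats.t.logpdf(0., 4., 0., 1.)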
| SheffieldML/GPy | GPy/core/parameterization/priors.py | Python | bsd-3-clause | 48,125 | ["Gaussian"] | cbefd017ebdd5b67b9464cd2f63c280c96d0f7f6fe71a86ae1bf507ee3a5f5f6 |
from hamcrest import *
from test.features import BrowserTest
from test.features.support import table_from
class Javascript(BrowserTest):
def test_all_services_table(self):
self.browser.visit("http://0.0.0.0:8000/SpecRunner")
tests_passing = self.browser.find_by_css('.symbolSummary .passed')
tests_failing = self.browser.find_by_css('.symbolSummary .failed')
print 'Tests passing %s' % len(tests_passing)
print 'Tests failing %s' % len(tests_failing)
assert_that(len(tests_failing), is_(0))
| alphagov/transactions-explorer | test/features/test_javascript.py | Python | mit | 549 | ["VisIt"] | bba3d97adced9d33a83aa9ce5aa1e59f478743c4d4e0e71a794ed5697cf86110 |
"""
Copyright 2013 Appurify, Inc
All rights reserved
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
"""Constants used throughout the wrapper for the Appurify Mobile Platform REST
API."""
# Current development version
# Increment this during development as and when desired
# setup.py will use this version to generate new releases
VERSION = (1, 0, 5)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
# Last tagged stable version
# Bump this to match VERSION when dev version is stable for new release
# and also have passed Sencha architect tool integration tests
# This variable is only used by REST API (/client/version/)
STABLE_VERSION = (0, 4, 9)
__stable_version__ = '.'.join(map(str, STABLE_VERSION[0:3])) + ''.join(STABLE_VERSION[3:])
__homepage__ = 'http://appurify.com'
__license__ = 'Commercial'
__description__ = 'Appurify Developer Python Client'
__contact__ = "support@appurify.com"
__repourl__ = 'http://github.com/appurify/appurify-python'
API_PROTO = "https" # override using APPURIFY_API_PROTO environment variable
API_HOST = "live.appurify.com" # APPURIFY_API_HOST
API_PORT = 443 # APPURIFY_API_PORT
API_POLL_SEC = 15 # test results are polled every API_POLL_SEC seconds (APPURIFY_API_POLL_DELAY)
API_RETRY_ON_FAILURE = 1 # should client retry API calls in case of non-200 response (APPURIFY_API_RETRY_ON_FAILURE)
API_RETRY_DELAY = 1 # (in seconds) if retry on failure is enabled, interval between each retry (APPURIFY_API_RETRY_DELAY)
API_MAX_RETRY = 3 # if retry on failure is enabled, how many times should client retry (APPURIFY_API_MAX_RETRY)
API_STATUS_UP = 1 # aws status page code for service up and running
API_STATUS_DOWN = 2 # service is down
API_WAIT_FOR_SERVICE = 1 # should client wait for service to come back live by polling aws status page?
API_STATUS_BASE_URL = 'https://s3-us-west-1.amazonaws.com/appurify-api-status'
MAX_DOWNLOAD_RETRIES = 10 # Number of times client should try to download the test results before giving up
DEFAULT_TIMEOUT = 3600 # default timeout if one cant be obtained from platform
#Exit code references
EXIT_CODE_ALL_PASS = 0 # Test completed with no exceptions or errors
EXIT_CODE_TEST_FAILURE = 1 # Test completed normally but reported test failures
EXIT_CODE_TEST_ABORT = 2 # Test was aborted by the user or system
EXIT_CODE_TEST_TIMEOUT = 3 # Test was aborted by the system because of timeout
EXIT_CODE_DEVICE_FAILURE = 4 # Test could not be completed because the device could not be activated or reserved
EXIT_CODE_BAD_TEST = 5 # Test could not execute because there was an error in the configuration or uploaded files
EXIT_CODE_AUTH_FAILURE = 6 # Test could not execute because the server rejected the provided credentials (key/secret)
EXIT_CODE_OTHER_EXCEPTION = 7 # Test could not execute because of other server/remote exception
EXIT_CODE_CLIENT_EXCEPTION = 8 # Test could not execute because of an unexpected error in the client
EXIT_CODE_CONNECTION_ERROR = 9 # Test got a connection error attempting to reach the server
EXIT_CODE_APP_INSTALL_FAILED = 10 # The app could not be installed on the device (possibly due to incorrect build)
EXIT_CODE_INVALID_PROVISION = 11 # Test could not execute because device type is not found in user pool
EXIT_CODE_INVALID_DEVICE = 12 # Test could not execute because app is not built for device type
EXIT_CODE_DEVICE_NOT_FOUND = 13 # Device doesn't exist in the user's device pool
EXIT_CODE_APP_INCOMPATIBLE = 14 # App is incompatible with the selected device
EXIT_CODE_GRID_TIMEOUT = 15 # Test reached timeout for grid session
#Exit code exception references
# TODO: Probably should be fetching these from the server at some point
EXIT_CODE_EXCEPTION_MAP = {EXIT_CODE_TEST_ABORT : [4000, 5000],
EXIT_CODE_APP_INSTALL_FAILED : [4007],
EXIT_CODE_GRID_TIMEOUT : [7003, 7005, 7007],
EXIT_CODE_INVALID_PROVISION : [4008, 4005],
EXIT_CODE_INVALID_DEVICE : [4006, 4009],
EXIT_CODE_TEST_TIMEOUT : [4001, 4002, 4003, 1010],
EXIT_CODE_DEVICE_FAILURE: [1000, 1001, 1002, 1005, 1008, 1009, 1011, 1012, 1013, 1014],
EXIT_CODE_BAD_TEST: [1003, 1006, 1007, 1008, 3000, 3001, 3003, 3004]}
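# Hedged helper sketch (not part of the original module): one plausible way to
# translate a server exception code into an exit code using the table above.
# The fallback to EXIT_CODE_OTHER_EXCEPTION is an assumption, not documented API.
def exit_code_for_exception(exc_code):
    for exit_code, exc_codes in EXIT_CODE_EXCEPTION_MAP.items():
        if exc_code in exc_codes:
            return exit_code
    return EXIT_CODE_OTHER_EXCEPTION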
#Supported test frameworks
SUPPORTED_TEST_TYPES = [
'calabash',
'ocunit',
'uiautomation',
'robotium',
'ios_robot',
'android_uiautomator',
'kiwi',
'cedar',
'kif',
'android_calabash',
'ios_selenium',
'android_selenium',
'ios_webrobot',
'appium',
'browser_test',
'appurify_recording',
'network_headers',
'ios_sencharobot',
'android_monkey',
'calabash_refresh_app',
'ios_webviewrobot',
'ios_wpt',
'touch_test',
'ios_monkeytalk',
'android_robot',
'android_monkeytalk',
'espresso',
'android_spoon',
'xctest',
'instrumentation',
]
#Frameworks that do not need a test source uploaded
NO_TEST_SOURCE = [
'ios_robot',
'ios_webrobot',
'browser_test',
'kif',
'kif:google',
'network_headers',
'ios_sencharobot',
'ios_webviewrobot',
'ios_wpt',
'touch_test',
'android_robot',
'android_monkey'
]
#Frameworks that do not need a app source uploaded
NO_APP_SOURCE = [
'ios_selenium',
'android_selenium',
'ios_webrobot',
'browser_test',
'network_headers',
'ios_webviewrobot',
'ios_wpt',
'appium',
'android_spoon',
]
#Actions that are enabled through the test
ENABLED_ACTIONS = [
'access_token_generate',
'access_token_list',
'access_token_usage',
'access_token_validate',
'devices_list',
'devices_config',
'devices_config_list',
'devices_config_networks_list',
'tests_list',
'tests_check_result',
]
| appurify/appurify-python | appurify/constants.py | Python | apache-2.0 | 6,341 | ["ESPResSo"] | 800c1413b68a013594b2ae5a0e46b7652dcb60ef5dfeb96966bfec4da3df296a |
import os, random, string
from fabric.api import run, env, cd, settings, put, local, shell_env
# load ~/.ssh/id_rsa
env.key_filename = os.getenv('HOME', '/root') + '/.ssh/id_rsa'
# determine hosts
branch = os.getenv('DRONE_BRANCH', '')
if branch == "master":
env.hosts = ["marge.lavaboom.io:36104"]
api_uri = "https://api.lavaboom.com"
root_domain = "lavaboom.com"
elif branch == "staging":
env.hosts = ["lisa.lavaboom.io:36412"]
api_uri = "https://api.lavaboom.io"
root_domain = "lavaboom.io"
elif branch == "develop":
env.hosts = ["bart.lavaboom.io:36467"]
api_uri = "https://api.lavaboom.co"
root_domain = "lavaboom.co"
# build
def build():
    # Install the required npm packages with extra fetch retries; fall back to the mirror registry if that fails
local("npm install --fetch-retries 3 -g gulp || npm install --fetch-retries 3 --registry http://registry.npmjs.eu -g gulp")
local("npm install --fetch-retries 3 || npm install --fetch-retries 3 --registry http://registry.npmjs.eu")
if branch == "master" or branch == "staging":
with shell_env(API_URI=api_uri, ROOT_DOMAIN=root_domain):
local("gulp production")
elif branch == "develop":
with shell_env(API_URI=api_uri, ROOT_DOMAIN=root_domain):
local("gulp develop")
else:
local("gulp develop")
def deploy():
branch = os.getenv('DRONE_BRANCH', 'master')
commit = os.getenv('DRONE_COMMIT', 'master')
tmp_dir = '/tmp/' + ''.join(random.choice(string.lowercase) for i in xrange(10))
local('tar cvfz dist.tgz dist/')
run('mkdir ' + tmp_dir)
with cd(tmp_dir):
run('mkdir -p ' + tmp_dir + '/web/dist')
put('dist.tgz', tmp_dir + '/web/dist.tgz')
put('Dockerfile', tmp_dir + '/web/Dockerfile')
put('website.conf', tmp_dir + '/web/website.conf')
with cd('web'):
run('tar -xzvf dist.tgz')
run('docker build -t registry.lavaboom.io/lavaboom/web-' + branch + ' .')
run('git clone git@github.com:lavab/docker.git')
with settings(warn_only=True):
run('docker rm -f web-' + branch)
with cd('docker/runners'):
run('./web-' + branch + '.sh')
run('rm -r ' + tmp_dir)
def integrate():
build()
if branch == "master" or branch == "staging" or branch == "develop":
deploy()
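# Usage sketch (assumption, not in the original file): this fabfile is driven by
# Fabric, e.g. `fab build` locally or `fab integrate` in CI, with DRONE_BRANCH
# and DRONE_COMMIT exported by the build runner.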
| MartinCote1978/web | fabfile.py | Python | gpl-3.0 | 2,240 | ["GULP"] | 0a83e3fb54744c839fa808c5478d0ed2b22200760b28dd156784e86953130535 |
from lxml import html
import requests
import click
import os
import sys
import logging
import re
from pprint import pprint
from collections import OrderedDict as odict
import json
import database
import utils
from models import *
from sqlalchemy import func
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s [%(name)s]: %(message)s",
stream=sys.stderr
)
log = logging.getLogger("inara")
bn = os.path.basename
dn = os.path.dirname
pj = os.path.join
fn_lower = func.lower
def save_url(url, outdir):
log.info("fetching %s to %s", url, outdir)
name = bn(url)
path = pj(outdir, name)
resp = requests.get(url)
assert resp.status_code == 200
with open(path, "w") as fp:
fp.write(resp.content)
@click.group()
def inara():
pass
@inara.command()
@click.argument("url")
@click.argument("outdir")
def get_index(url, outdir):
index_name = bn(url)
index_path = pj(outdir, index_name)
if not os.path.isfile(index_path):
log.info("not a file: %s. fetch %s", index_path, url)
resp = requests.get(url)
assert resp.status_code == 200
with open(index_path, "w") as fp:
fp.write(resp.content)
else:
log.info("using existing %s", index_path)
tree = html.parse(index_path)
root = tree.getroot()
root.make_links_absolute(url, resolve_base_href=True)
links = root.xpath('.//table/tbody/tr/td[2]/a')
log.info("found %s links using path", len(links))
for link in links:
save_url(link.get("href"), outdir)
# ARMOUR - BLAST RESISTANT ARMOUR (GRADE 1)
re_headline = re.compile(ur'(.+) - (.+) \(grade (\d+)\)', re.IGNORECASE)
re_number = re.compile(ur"(\d+[\.\d]*)")
re_ingredient = re.compile(ur'(\d+)x\s+(.*)')
def parse_effect_number(value):
m = re_number.search(value)
if m:
return float(m.group(1))
return 0
def parse_ingredient(value):
m = re_ingredient.search(value)
if m:
return m.group(1), m.group(2)
def parse_effect_scale(tvalmin, tvalmax):
if "%" in tvalmin or "%" in tvalmax:
return "percent"
else:
return "absolute"
def parse_headline(headline_text):
match = re_headline.search(headline_text)
if match:
return odict(
type=match.group(1),
title=match.group(2),
level=int(match.group(3))
)
return None
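# Worked example (sketch, not in the original file), using the sample headline
# quoted in the comment above the regexes:
# >>> bp = parse_headline("ARMOUR - BLAST RESISTANT ARMOUR (GRADE 1)")
# >>> bp['type'], bp['title'], bp['level']
# ('ARMOUR', 'BLAST RESISTANT ARMOUR', 1)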
def parse_blueprint_effects(el):
container = el.find('.//div[@class="blueprintparams"]')
div_names = container.findall('.//div[@class="name smaller"]')
for div_name in div_names:
effect_name = div_name.text_content()
tvalmin = div_name.getnext().text_content()
tvalmax = div_name.getnext().getnext().text_content()
valmin = parse_effect_number(tvalmin)
valmax = parse_effect_number(tvalmax)
yield odict(
title=div_name.text_content(),
min=valmin,
max=valmax,
scale=parse_effect_scale(tvalmin, tvalmax)
)
def parse_blueprint_ingredients(el):
items = el.findall('.//span[@class="tooltip"]')
for item in items:
quantity, material_title = parse_ingredient(item.text_content())
yield odict(
quantity=int(quantity),
material=material_title.strip()
)
def parse_blueprint_engineers(el):
el.find('span[@class="major smaller uppercase"]')
def parse_blueprint(fpath):
tree = html.parse(fpath)
root = tree.getroot()
headlines = tree.xpath("/html/body/div[2]/div/div[1]/div[2]/div[2]/div[1]/h3")
log.info("%s found in %s", len(headlines), fpath)
for headline in headlines:
blueprint = parse_headline(headline.text)
blueprint["effects"] = list(
parse_blueprint_effects(headline.getnext())
)
blueprint["ingredients"] = list(
parse_blueprint_ingredients(headline.getnext()[1])
)
yield blueprint
@inara.command()
@click.argument("base_path")
def parse_blueprints(base_path):
blueprints = []
for fname in os.listdir(base_path):
if fname == "galaxy-blueprints" or fname == "dump.json":
continue
blueprints.extend(
parse_blueprint(pj(base_path, fname))
)
print json.dumps(blueprints, indent=2)
def make_ingredients(session, ingredients_data):
for i_data in ingredients_data:
material = session.query(Material).filter(
fn_lower(Material.title) == fn_lower(i_data["material"])
).first()
if material is None:
log.warn("material '%s' not in db", i_data["material"])
sys.exit(1)
yield Ingredient(material=material, quantity=i_data["quantity"])
@inara.command()
@click.argument("json_file")
def import_json(json_file):
config = utils.load_config()
session = database.session(config)
data = json.load(file(json_file, "r"))
for bp_data in data:
blueprint = session.query(Blueprint).filter(
fn_lower(Blueprint.title) == fn_lower(bp_data["title"]),
fn_lower(Blueprint.type) == fn_lower(bp_data["type"]),
Blueprint.level == bp_data["level"]
).first()
if blueprint is None:
log.warn("blueprint '%s - %s: %s' not in db",
bp_data["type"], bp_data["title"], bp_data["level"])
else:
blueprint.ingredients[:] = make_ingredients(session, bp_data["ingredients"])
blueprint.effects[:] = [PrimaryEffect(**data) for data in bp_data["effects"]]
session.add(blueprint)
session.commit()
if __name__ == "__main__":
inara()
| fre-sch/collector-drone | collectordrone/inara.py | Python | gpl-3.0 | 5,636 | ["BLAST", "Galaxy"] | b44be1c2b6b7cc0d86ede1a6f4a7029996b20eb11ed1d94378ee53adb82c5554 |
import psi4
from psi4 import *
from psi4.core import *
def run_myplugin1(name, **kwargs):
r"""Function encoding sequence of PSI module and plugin calls so that
myplugin1 can be called via :py:func:`~driver.energy`.
>>> energy('myplugin1')
"""
lowername = name.lower()
kwargs = p4util.kwargs_lower(kwargs)
# Your plugin's psi4 run sequence goes here
psi4.set_global_option('BASIS', 'sto-3g')
psi4.set_local_option('MYPLUGIN1', 'PRINT', 1)
energy('scf', **kwargs)
returnvalue = psi4.plugin('myplugin1.so')
return returnvalue
def exampleFN(name, **kwargs):
psi4.set_variable('CURRENT ENERGY', -74.94550962)
# Your Python code goes here
pass
# Integration with driver routines
procedures['energy']['myplugin1'] = exampleFN
| andysim/psi4 | tests/psithon2/psiaux1/myplugin1/pymodule.py | Python | gpl-2.0 | 790 | ["Psi4"] | 7bb781de8d960df52d111a82b6c0ead294203993314b51d24cdd5ff7f71ebfb3 |
#!/usr/bin/env python
#
# Python Y86 Assembler
# Copyright (c) 2012 Linus Yang <laokongzi@gmail.com>
#
# ** Compatible with Shedskin **
# Shedskin is an RPython to C++ compiler
# Visit https://code.google.com/p/shedskin/wiki/docs for more info
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import getopt
import re
import binascii
import os
import sys
__ver__ = '0.1.2'
class YAssembler:
def __init__(self, lar=True, big=False, sec=False, asc=False):
self.largemem = lar
self.bigendian = big
self.second = sec
self.asciibin = asc
self.regs = {
"%eax": "0",
"%ecx": "1",
"%edx": "2",
"%ebx": "3",
"%esp": "4",
"%ebp": "5",
"%esi": "6",
"%edi": "7",
"rnone": "8"
}
self.instr = {
"nop": "00",
"halt": "10",
"rrmovl": "20",
"cmovle": "21",
"cmovl": "22",
"cmove": "23",
"cmovne": "24",
"cmovge": "25",
"cmovg": "26",
"irmovl": "30",
"rmmovl": "40",
"mrmovl": "50",
"addl": "60",
"subl": "61",
"andl": "62",
"xorl": "63",
"jmp": "70",
"jle": "71",
"jl": "72",
"je": "73",
"jne": "74",
"jge": "75",
"jg": "76",
"call": "80",
"ret": "90",
"pushl": "a0",
"popl": "b0",
"iaddl": "c0",
"leave": "d0"
}
self.instbyte = {
"nop": 1,
"halt": 1,
"rrmovl": 2,
"cmovle": 2,
"cmovl": 2,
"cmove": 2,
"cmovne": 2,
"cmovge": 2,
"cmovg": 2,
"irmovl": 6,
"rmmovl": 6,
"mrmovl": 6,
"addl": 2,
"subl": 2,
"andl": 2,
"xorl": 2,
"jmp": 5,
"jle": 5,
"jl": 5,
"je": 5,
"jne": 5,
"jge": 5,
"jg": 5,
"call": 5,
"ret": 1,
"pushl": 2,
"popl": 2,
"iaddl": 6,
"leave": 1
}
self.bytelen = {
'.long': 4,
'.word': 2,
'.byte': 1
}
if self.second:
self.regs['rnone'] = 'f'
self.instr['nop'] = '10'
self.instr['halt'] = '00'
def endianStr(self, x, length, bigendian=False):
s = ''
nowlen = 0
while x != 0 and nowlen < length:
if bigendian:
s = "%.2x" % (x & 0xff) + s
else:
s += "%.2x" % (x & 0xff)
x = x >> 8
nowlen += 1
while nowlen < length:
if bigendian:
s = '00' + s
else:
s += '00'
nowlen += 1
return s
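    # Worked example (comment only, not part of the original source):
    #   self.endianStr(0x1234, 4)                 -> '34120000'  (little-endian)
    #   self.endianStr(0x1234, 4, bigendian=True) -> '00001234'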
def printError(self, error):
print('Error: assembly failed:\n%s' % error)
sys.exit(1)
def runAssembler(self, inputName):
try:
fin = open(inputName)
except IOError:
print('Error: cannot open input file: %s' % inputName)
sys.exit(1)
binpos = 0
linepos = 0
alignment = 0
labels = {}
error = ''
strippedline = {}
origline = []
yaslineno = {}
# First pass to get labels and detect errors
for nowline in fin:
linepos += 1
origline.append(nowline)
nowline = re.sub(r'#.*$', '', nowline)
nowline = re.sub(r'/\*.*\*/', '', nowline)
nowline = re.sub(r'\s*,\s*', ',', nowline)
if nowline.find(':') != -1:
lab = re.compile('([^\s]+):')
labmatch = lab.search(nowline)
nowline = lab.sub('', nowline)
if labmatch != None:
labelname = labmatch.group(1)
else:
error += 'Line %d: %s\n' % (linepos, 'Label error.')
continue
if labelname in labels:
error += 'Line %d: %s\n' % (linepos, 'Label repeated error.')
continue
else:
labels[labelname] = binpos
yaslineno[linepos] = binpos
linelist = []
for element in nowline.split(' '):
ele = element.replace('\t', '').replace('\n', '').replace('\r', '')
if ele != '':
linelist.append(ele)
if linelist == []:
continue
posindex = str(linepos)
strippedline[posindex] = linelist
try:
if linelist[0] in self.instbyte:
alignment = 0
yaslineno[linepos] = binpos
binpos += self.instbyte[linelist[0]]
elif linelist[0] == '.pos':
binpos = int(linelist[1], 0)
yaslineno[linepos] = binpos
elif linelist[0] == '.align':
alignment = int(linelist[1], 0)
if binpos % alignment != 0:
binpos += alignment - binpos % alignment
yaslineno[linepos] = binpos
elif linelist[0] in ('.long', '.word', '.byte'):
yaslineno[linepos] = binpos
if alignment != 0:
binpos += alignment
else:
binpos += self.bytelen[linelist[0]]
else:
error += 'Line %d: Instruction "%s" not defined.\n' % (linepos, linelist[0])
continue
except:
error += 'Line %d: Instruction error.\n' % linepos
continue
try:
fin.close()
except IOError:
pass
if error != '':
self.printError(error)
# Second pass to convert binary
yasbin = {}
for line in strippedline:
try:
linepos = int(line)
except ValueError:
print('Error: unexpected internal error')
sys.exit(1)
linelist = strippedline[line]
if linelist == []:
continue
resbin = ''
if linelist[0] in self.instr:
alignment = 0
try:
if linelist[0] in ('nop', 'halt', 'ret', 'leave'):
resbin = self.instr[linelist[0]]
elif linelist[0] in ('pushl', 'popl'):
resbin = self.instr[linelist[0]] + self.regs[linelist[1]] + self.regs["rnone"]
elif linelist[0] in ('addl', 'subl', 'andl', 'xorl', 'rrmovl') \
or linelist[0].startswith('cmov'):
reglist = linelist[1].split(',')
resbin = self.instr[linelist[0]] + self.regs[reglist[0]] + self.regs[reglist[1]]
elif linelist[0].startswith('j') or linelist[0] == 'call':
resbin = self.instr[linelist[0]]
if linelist[1] in labels:
resbin += self.endianStr(labels[linelist[1]], 4, self.bigendian)
else:
resbin += self.endianStr(int(linelist[1], 0), 4, self.bigendian)
elif linelist[0] in ('irmovl', 'iaddl'):
reglist = linelist[1].split(',')
if reglist[0] in labels:
instnum = self.endianStr(labels[reglist[0]], 4, self.bigendian)
else:
instnum = self.endianStr(int(reglist[0].replace('$', ''), 0), 4, self.bigendian)
resbin = self.instr[linelist[0]] + self.regs["rnone"] + \
self.regs[reglist[1]] + instnum
elif linelist[0].endswith('movl'):
reglist = linelist[1].split(',')
if linelist[0] == 'rmmovl':
memstr = reglist[1]
self.regstr = reglist[0]
elif linelist[0] == 'mrmovl':
memstr = reglist[0]
self.regstr = reglist[1]
regre = re.compile('\((.+)\)')
regmatch = regre.search(memstr)
memint = regre.sub('', memstr)
if memint == '' or memint == None:
memint = '0'
resbin = self.instr[linelist[0]] + self.regs[self.regstr] + \
self.regs[regmatch.group(1)] + \
self.endianStr(int(memint, 0), 4, self.bigendian)
else:
error += 'Line %d: Instruction "%s" not defined.\n' % (linepos, linelist[0])
continue
except:
error += 'Line %d: Instruction error.\n' % linepos
continue
else:
try:
if linelist[0] == '.pos':
pass
elif linelist[0] == '.align':
alignment = int(linelist[1], 0)
elif linelist[0] in ('.long', '.word', '.byte'):
if alignment != 0:
length = alignment
else:
length = self.bytelen[linelist[0]]
if linelist[1] in labels:
resbin = self.endianStr(labels[linelist[1]], length, self.bigendian)
else:
resbin = self.endianStr(int(linelist[1], 0), length, self.bigendian)
else:
error += 'Line %d: Alignment error.\n' % linepos
continue
except:
error += 'Line %d: Alignment error.\n' % linepos
continue
if resbin != '':
yasbin[linepos] = resbin
# Write to files
binpos = 0
linepos = 0
maxaddrlen = 3
if self.largemem:
maxaddrlen = len("%x" % (max(yaslineno.values())))
if maxaddrlen < 3:
maxaddrlen = 3
if error != '':
self.printError(error)
else:
prefixName = os.path.splitext(inputName)[0]
outputName = prefixName + '.yo'
outbinName = prefixName + '.ybo'
outascName = prefixName + '.yao'
try:
fout = open(outputName, 'w')
fbout = open(outbinName, 'wb')
if self.asciibin:
faout = open(outascName, 'w')
except IOError:
print('Error: cannot create output files')
sys.exit(1)
for line in origline:
linepos += 1
if (linepos in yasbin) and (linepos in yaslineno):
ystr = yasbin[linepos]
nowaddr = yaslineno[linepos]
if binpos != nowaddr:
blank = '0' * (2 * (nowaddr - binpos))
if self.asciibin:
faout.write(blank)
fbout.write(binascii.a2b_hex(blank))
binpos = nowaddr
binpos += len(ystr) // 2
fout.write(' 0x%.*x: %-12s | %s' % (maxaddrlen, nowaddr, ystr, line))
if self.asciibin:
faout.write(ystr)
fbout.write(binascii.a2b_hex(ystr))
elif linepos in yaslineno:
nowaddr = yaslineno[linepos]
fout.write(' 0x%.*x: | %s' % (maxaddrlen, nowaddr, line))
else:
fout.write((' ' * (maxaddrlen + 19)) + '| %s' % line)
try:
if self.asciibin:
faout.close()
fout.close()
fbout.close()
except IOError:
pass
print('Assembled file: %s' % os.path.basename(inputName))
def showUsage():
print('''Usage: %s [options] [assembly file]
Options:
-h, --help show this help message and exit
-l, --largemem support code generation for more than 4096 bytes. (default
is enabled)
-b, --bigendian code generation using big-endian. (default is little-
endian)
-s, --second using generation rules in csapp 2nd edition. (default using
                    1st edition rules)
-a, --asciibin enable conversion binary object to ASCII digits. (default
is disabled)
''' % os.path.basename(sys.argv[0]))
sys.exit(1)
def main():
print('Y86 Assembler %s\nCopyright (c) 2012 Linus Yang\n' % __ver__)
largemem = True
bigendian = False
second = False
asciibin = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'lbsah', ['largemem', 'bigendian', 'second', 'asciibin', 'help'])
if len(opts) == 0 and len(args) != 1:
if len(args) == 0:
print("Error: missing input file")
else:
print("Error: only one input file allowed")
showUsage()
for o, a in opts:
if o in ('-l', '--largemem'):
largemem = True
if o in ('-b', '--bigendian'):
bigendian = True
print('Warning: generation using big-endian')
if o in ('-s', '--second'):
second = True
print('Warning: using csapp 2nd edition rules')
if o in ('-a', '--asciibin'):
asciibin = True
if o in ('-h', '--help'):
showUsage()
except getopt.GetoptError:
print("Error: illegal option")
showUsage()
assembler = YAssembler(largemem, bigendian, second, asciibin)
assembler.runAssembler(args[0])
if __name__ == '__main__':
main()
| linusyang/python-y86 | yas.py | Python | gpl-3.0 | 15,193 | ["VisIt"] | 567e61e82864f02ec596563829baca40cce51a986abc0a79ba70a4776cc3f8d5 |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from compmod.distributions import Rayleigh, Triangular, Rectangular
from compmod.rheology import SaintVenant, Bilinear
E = 1.
sigmay = .01
n = .1
sigma_sat = .02
epsilon = np.linspace(0., 0.2, 1000)
sigmay_mean = sigmay
ray = Rayleigh(sigmay_mean)
std = ray.stats()[1]**.5
tri = Triangular(sigmay_mean, std)
rect = Rectangular(sigmay_mean, std)
grid = np.linspace(0., 0.06, 10000)
cell= lambda eps, sy: Bilinear(eps, E, sy, n, sigma_sat)
sigma = cell(epsilon, sigmay)
sv_ray = SaintVenant(epsilon, cell, grid, ray)
sv_tri = SaintVenant(epsilon, cell, grid, tri)
sv_rect = SaintVenant(epsilon, cell, grid, rect)
sigma_ray = sv_ray.sigma()
sigma_tri = sv_tri.sigma()
sigma_rect = sv_rect.sigma()
prob_ray = sv_ray.Dist
prob_tri = sv_tri.Dist
prob_rect = sv_rect.Dist
fig = plt.figure(0)
plt.clf()
fig.add_subplot(2,1,1)
plt.plot(epsilon, sigma, "k-", label = "Dirac")
plt.plot(epsilon, sigma_ray, 'r-', label = "Rayleigh")
plt.plot(epsilon, sigma_tri, 'b-', label = "Triangular")
plt.plot(epsilon, sigma_rect, 'g-', label = "Rectangular")
plt.legend(loc = "lower right")
plt.grid()
plt.xlabel('Strain, $\epsilon$')
plt.ylabel('Stress, $\sigma$')
fig.add_subplot(2,1,2)
plt.plot(grid, prob_ray, 'r-', label = "Rayleigh")
plt.plot(grid, prob_tri, 'b-', label = "Triangular")
plt.plot(grid, prob_rect, 'g-', label = "Rectangular")
plt.grid()
plt.xlabel('Yield Stress, $\sigma_y$')
plt.ylabel('Probability density, $p$')
plt.legend(loc = "lower right")
plt.tight_layout()
plt.show()
| lcharleux/compmod | doc/example_code/rheology/demo.py | Python | gpl-2.0 | 1,561 | ["DIRAC"] | 15576acd69a4eb2188f6cfcb173f630aab8f03fe6422ae96459489a585352992 |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Aging Analysis of Cytokine and SNP data
# <markdowncell>
# This analysis will look at how aging affects HIV-1 disease progression. This will include looking at things like clinical parameters, LTR SNPs, Cytokine Profiling, and NeuroCog Impairment.
# <headingcell level=2>
# Data Extraction
# <codecell>
from __future__ import division
import os, os.path
import numpy as np
import pandas as pd
from patsy import dmatrices
from patsy.contrasts import Treatment
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
sys.path.append('/home/will/PySeqUtils/')
sys.path.append('/home/will/PatientPicker/')
os.chdir('/home/will/AgingAnalysis/')
# <codecell>
import LoadingTools
redcap_data = LoadingTools.load_redcap_data()
redcap_data = redcap_data.groupby(['Patient ID', 'VisitNum']).first()
# <codecell>
def count_with_skips(inser, nskips):
skips = 0
for row in inser.values:
if row:
skips = 0
else:
skips += 1
if skips > nskips:
return False
return True
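# Behaviour sketch (comment added for clarity, not in the original notebook):
# count_with_skips returns False as soon as more than `nskips` consecutive
# falsy rows occur, e.g. [True, False, False, True] with nskips=1 -> False.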
name_mappings = {'Test-Benzodiazepine':'SBe',
'Test-Cannabinoid':'SCa',
'Test-Cocaine':'PC',
'Test-Opiates':'PO',
'Test-Amphetamines':np.nan,
'Test-Barbiturates':np.nan,
'Test-Phencyclidine':np.nan
}
def niz_groupings(indf):
inds = [v for p, v in indf.index]
if len(indf.index) < 3:
return pd.Series(np.nan, index=inds)
indf = indf.dropna()
ever_used = indf.any(axis = 0)
if not ever_used.any():
return pd.Series('PN', index=inds)
all_used = indf.all(axis = 0)
if all_used.sum() == 1:
return pd.Series(name_mappings[all_used.idxmax()], index=inds)
elif all_used.sum() > 1:
return pd.Series('MDU', index=inds)
pure_cols = []
non_pure_cols = []
for col in indf.columns:
if count_with_skips(indf[col], 1):
pure_cols.append(col)
else:
non_pure_cols.append(col)
if ever_used[non_pure_cols].any():
return pd.Series(np.nan, index=inds)
if len(pure_cols) == 1:
return pd.Series(name_mappings[pure_cols[0]], index=inds)
else:
return pd.Series('MDU', index=inds)
admit_cols = [col for col in redcap_data.columns if col.startswith('Admit')]
admit_data = redcap_data[admit_cols].any(axis = 1).groupby(level = 'Patient ID').transform('any')
drug_names = ['Test-Benzodiazepine', 'Test-Cannabinoid', 'Test-Cocaine', 'Test-Opiates']
niz_groupings = redcap_data[drug_names].groupby(level = 'Patient ID').apply(niz_groupings)
niz_groupings[(niz_groupings == 'PN') & (admit_data)] = np.nan
# <codecell>
def safe_days(val):
try:
return val/np.timedelta64(1, 'D')
except:
return np.nan
def safe_float(val):
try:
return float(val)
except:
return np.nan
def get_days_since_bl(ser):
fdate = ser.dropna().min()
diff_dates = (ser-fdate).apply(safe_days)
return diff_dates
def guess_race(indf):
race_cols = [col for col in indf.columns if col.startswith('Race-')]
race = indf[race_cols].sum().idxmax()
vnums = [v for _, v in indf.index]
rser = pd.Series([race]*len(indf), index = vnums)
return rser
def convert_haart(inval):
tdict = {'on': 'cH',
'non-adherent': 'dH',
'off': 'dH',
'naive': 'nH'}
return tdict.get(inval, np.nan)
pat_groups = redcap_data.groupby(level = 0)
cols = {'Age':redcap_data.groupby(level = 0)['Age'].transform('min'),
'NumTotalVisits': redcap_data.groupby(level = 0)['Age'].transform(len).map(safe_float),
'CD4': redcap_data['Latest CD4 count (cells/uL)'].map(safe_float),
'Alcohol': redcap_data['Current Alcohol Use'],
'Tobacco': redcap_data['Current Tobacco Use'],
'DaysSinceBaseline': pat_groups['Date Of Visit'].transform(get_days_since_bl),
'Gender': redcap_data['Gender'],
'Grouping': niz_groupings,
'Race': pat_groups.apply(guess_race).map(lambda x: x.replace('-', '')),
'HAART': redcap_data['Current ART status'].map(convert_haart),
'HCV': pat_groups['Hepatitis C status (HCV)'].transform(pd.expanding_max),
'HIVD': redcap_data['TMHDS'].map(safe_float),
'HIVDI': redcap_data['TMHDS']<10,
'HBV': pat_groups['Hepatitis B status (HBV)'].transform(pd.expanding_max).map(safe_float),
'CD8': redcap_data['Latest CD8 count (cells/uL)'].map(safe_float),
'NadirCD4': pat_groups['Nadir CD4 count (cells/uL)'].transform(pd.expanding_min).map(safe_float),
'NadirCD8': pat_groups['Nadir CD8 count (cells/uL)'].transform(pd.expanding_min).map(safe_float),
'PeakLVL': pat_groups['Peak viral load (copies/mL)'].transform(pd.expanding_max).map(safe_float).map(np.log10),
'LVL': redcap_data['Latest viral load'].map(safe_float).map(np.log10),
'YearsSeropositive': redcap_data['Years Seropositive'].map(safe_float),
'TOSampleBenzodiazepines': pat_groups['Test-Benzodiazepine'].transform(pd.expanding_mean).map(safe_float),
'TOSampleCannabinoid': pat_groups['Test-Cannabinoid'].transform(pd.expanding_mean).map(safe_float),
'TOSampleCocaine': pat_groups['Test-Cocaine'].transform(pd.expanding_mean).map(safe_float),
'TOSampleOpiates': pat_groups['Test-Opiates'].transform(pd.expanding_mean).map(safe_float),
'ALLBenzodiazepines': pat_groups['Test-Benzodiazepine'].transform('mean').map(safe_float),
'ALLCannabinoid': pat_groups['Test-Cannabinoid'].transform('mean').map(safe_float),
'ALLCocaine': pat_groups['Test-Cocaine'].transform('mean').map(safe_float),
'ALLOpiates': pat_groups['Test-Opiates'].transform('mean').map(safe_float),
'ATSampleBenzodiazepines': redcap_data['Test-Benzodiazepine'].map(safe_float),
'ATSampleCannabinoid': redcap_data['Test-Cannabinoid'].map(safe_float),
'ATSampleCocaine': redcap_data['Test-Cocaine'].map(safe_float),
'ATSampleOpiates': redcap_data['Test-Opiates'].map(safe_float)}
known_pat_data = pd.DataFrame(cols)
# <codecell>
import Rtools
cytos = sorted(['IL.8','VEGF','IL.1beta',
'G.CSF','EGF','IL.10','HGF',
'FGF.basic','IFN.alpha','IL.6',
'IL.12','Rantes','Eotaxin',
'GM.CSF','MIP.1beta',
'MCP.1','IL.5','IL.13', 'IFN.gamma','TNF.alpha',
'IL.RA','IL.2','IL.7','IP.10',
'IL.2R','MIG','IL.4','IL.15',
'IL.17','MIP.1alpha']) + ['Th1', 'Th2']
raw_cyto_data = pd.read_csv('/home/will/HIVSystemsBio/NewCytokineAnalysis/CytoRawData.csv',
sep = '\t').groupby(['Patient ID', 'VisitNum', 'SampleNum']).first().applymap(safe_float)
raw_cyto_data['Th1'] = raw_cyto_data[['IFN.gamma', 'IL.2', 'TNF.alpha']].sum(axis=1)
raw_cyto_data['Th2'] = raw_cyto_data[['IL.4', 'IL.5', 'IL.10']].sum(axis=1)
#norm_cyto_data = Rtools.quantile_norm_with_R(raw_cyto_data[cytos].applymap(safe_float))
raw_cyto_data['Th1Th2'] = raw_cyto_data['Th1']/raw_cyto_data['Th2']
raw_cyto_data = raw_cyto_data.groupby(level=[0,1]).median()
# <codecell>
from functools import partial
known_pat_data['CD4CD8'] = known_pat_data['CD4'] / known_pat_data['CD8']
known_pat_data['HIVHealthy'] = (known_pat_data['LVL'] <= 2) & \
(known_pat_data['CD4'] >= 250)
known_pat_data['OlderThan50'] = known_pat_data['Age']>50
known_pat_data['OlderThan60'] = known_pat_data['Age']>60
known_pat_data['AgePast50'] = (known_pat_data['Age'] - 50).map(partial(max, 0))
known_pat_data['AgePast60'] = (known_pat_data['Age'] - 60).map(partial(max, 0))
# <codecell>
anal_data = pd.merge(raw_cyto_data.reset_index(), known_pat_data.reset_index(),
on = ['Patient ID', 'VisitNum']).set_index(['Patient ID', 'VisitNum'])
# <codecell>
anal_data['YearsSeropositive'].describe()
# <codecell>
pd.pivot_table(anal_data, rows='OlderThan50', values=cytos+['HIVD']).T
# <headingcell level=2>
# Cytokine Stats
# <codecell>
from statsmodels.graphics.boxplots import beanplot
from itertools import chain, combinations
from statsmodels.graphics.regressionplots import plot_fit
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def unique_powerset(iterable):
seen = set()
for tup in powerset(iterable):
fz = frozenset(tup)
if fz not in seen:
yield tup
seen.add(fz)
def most_common(inser):
try:
return inser.value_counts().index[0]
except IndexError:
return np.nan
def make_agg(indata, confounders):
agg_dict = {}
for col in confounders:
if indata[col].dtype == 'O':
agg_dict[col] = most_common
else:
agg_dict[col] = 'mean'
return agg_dict
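# Illustrative sketch (not in the original notebook): make_agg maps object-typed
# columns to most_common and numeric ones to 'mean', e.g. (assuming Gender is a
# string column and Age is numeric):
#   make_agg(anal_data, ['Gender', 'Age'])  # -> {'Gender': most_common, 'Age': 'mean'}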
def extract_cols(indata, cyto, cols):
extract_cols = [cyto]+cols
tmp_data = indata[extract_cols]
agg_dict = make_agg(indata, extract_cols)
tmp_agg = tmp_data.groupby(level=[0,1]).agg(agg_dict).dropna()
tmp_agg = tmp_agg.rename(columns={cyto:'y'})
eqn = 'y ~ ' + ' + '.join(cols)
return eqn, tmp_agg
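# Illustrative sketch (not in the original notebook): extract_cols builds the
# patsy formula from the requested columns, so a call like
#   eqn, frame = extract_cols(anal_data, 'IL.6', ['OlderThan50', 'YearsSeropositive'])
# yields eqn == 'y ~ OlderThan50 + YearsSeropositive' and a per-visit aggregate
# whose 'y' column holds the IL.6 values.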
def make_50_binary(indata, cyto, ax, confounders):
eqn, tmp_cyto = extract_cols(indata, cyto, ['OlderThan50']+confounders)
y, X = dmatrices(eqn, tmp_cyto, return_type='dataframe')
try:
model = sm.OLS(y, X).fit()
except:
print indata
print confounders
print eqn
print tmp_cyto
print X
print y
raise TypeError
if ax is not None:
boxes = [tmp_cyto['y'][tmp_cyto['OlderThan50']], tmp_cyto['y'][~tmp_cyto['OlderThan50']]]
try:
beanplot(boxes, labels=['>50', '<50'], ax=ax)
except:
ax.boxplot(boxes)
return model, model.f_pvalue, model.pvalues['OlderThan50[T.True]'], model.params['OlderThan50[T.True]']
def make_35_binary(indata, cyto, ax, confounders):
valid_mask = (indata['Age'] >= 50) | (indata['Age'] <= 35)
eqn, tmp_cyto = extract_cols(indata[valid_mask], cyto,
['OlderThan50']+confounders)
y, X = dmatrices(eqn, tmp_cyto, return_type='dataframe')
model = sm.OLS(y, X).fit()
boxes = [tmp_cyto['y'][tmp_cyto['OlderThan50']], tmp_cyto['y'][~tmp_cyto['OlderThan50']]]
if ax is not None:
try:
beanplot(boxes, labels=['>50', '<35'], ax=ax)
except:
ax.boxplot(boxes)
return model, model.f_pvalue, model.pvalues['OlderThan50[T.True]'], model.params['OlderThan50[T.True]']
def make_age_linear(indata, cyto, ax, confounders):
eqn, tmp_cyto = extract_cols(indata, cyto,
['Age']+confounders)
y, X = dmatrices(eqn, tmp_cyto, return_type='dataframe')
model = sm.OLS(y, X).fit()
num = (num for num, col in enumerate(X.columns) if col=='Age').next()
if ax is not None:
plot_fit(model, num, ax=ax)
return model, model.f_pvalue, model.pvalues['Age'], model.params['Age']
def check_more_data(indata, check_func, cyto, confounders, num_extras = [0.5, 1.0, 2.0, 5.0]):
for num in num_extras:
num_choose = int(num*len(indata))
pats = [p for p, v in indata.index]
tdata = indata.copy()
for e in range(num_choose):
rep_pat = np.random.choice(pats)
pat = indata.ix[rep_pat]
pat.index = pd.MultiIndex.from_tuples([(rep_pat+str(e), v) for v in pat.index])
tdata = pd.concat([tdata, pat], axis=0)
_, model_p, age_p, age_e = check_func(tdata, cyto, None, confounders)
yield num_choose, model_p, age_p, age_e
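# <codecell>
# Illustrative sketch (not in the original notebook): check_more_data can be
# run on its own to gauge how the age term behaves as the cohort is enlarged
# by resampling; the cytokine and confounder below are examples only.
#   for n_extra, model_p, age_p, age_e in check_more_data(
#           anal_data[anal_data['IL.6'].notnull()], make_age_linear,
#           'IL.6', ['YearsSeropositive']):
#       print n_extra, model_p, age_p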
# <headingcell level=3>
# Patient Groups
# <codecell>
all_selectors = set(['HCV', 'LVL', 'HAART', 'Race', 'Grouping'])
restrict_dict = {
'HCV':anal_data['HCV']==False,
'LVL':anal_data['LVL']<=2,
'HAART':anal_data['HAART']=='cH',
'Race':anal_data['Race']=='RaceBlack',
'Grouping':anal_data['Grouping']=='PN'
}
anal_methods = [('50|50 split', make_50_binary),
('35|50 split', make_35_binary),
('Linear', make_age_linear)]
checks = cytos+['CD4', 'LVL', 'HIVD']
num_subsets = len(list(unique_powerset(all_selectors)))
print num_subsets
# <codecell>
anal_data['Race']
# <codecell>
base_confounders = ['YearsSeropositive']
results = []
for cyto in checks:
print cyto
max_val = anal_data[cyto].max()*1.1
for row_num, selectors in enumerate(unique_powerset(all_selectors)):
print selectors
fig, axs = plt.subplots(1, 3, figsize=(15,5), sharey=True)
set_sels = set(selectors)
confounders = base_confounders + list(all_selectors-set_sels)
wanted_mask = anal_data[cyto].notnull()
for sel in selectors:
wanted_mask &= restrict_dict[sel]
wanted_pats = anal_data[wanted_mask]
sel_bool = [s in set_sels for s in sorted(all_selectors)]
for ax, (pname, pfunc) in zip(axs.flatten(), anal_methods):
model, model_p, age_p, age_e = pfunc(wanted_pats, cyto, ax, confounders)
results.append(sel_bool+[cyto, pname, model_p, age_p, age_e, 0])
ax.set_title(pname + ' p=%f' % age_p)
if ax.is_first_col():
ax.set_ylabel(cyto)
if ax.is_last_col():
ax.yaxis.set_label_position("right")
ax.set_ylabel(', '.join(confounders))
for num_extra, model_p, age_p, age_e in check_more_data(wanted_pats, pfunc, cyto, confounders):
results.append(sel_bool+[cyto, pname, model_p, age_p, age_e, num_extra])
if len(selectors):
sels = '_'.join(sorted(selectors))
else:
sels = 'None'
fname = cyto + '_' + sels + '.png'
fig.savefig('draft_figures/aging_cyto_dump/'+fname)
plt.close(fig)
# <codecell>
print model.params
# <codecell>
res = pd.DataFrame(results, columns = sorted(all_selectors)+['Cyto', 'AnalName', 'ModelP', 'AgeP', 'AgeE', 'NumExtra'])
mask = (res['AgeP']<0.05) & (res['ModelP']<0.05) & (res['NumExtra'] == 0)
res[mask].groupby(['Cyto', 'AnalName', 'NumExtra'])['AgeP'].min()
# <codecell>
more_counts = pd.pivot_table(res[mask], rows='Cyto', cols='AnalName',
values='NumExtra', aggfunc='min')
more_counts.to_excel('needed_pats.xlsx')
# <codecell>
import statsmodels.formula.api as smi
from scipy.stats import ttest_ind, ks_2samp
from patsy import dmatrices
from statsmodels.graphics.boxplots import beanplot, violinplot
# NOTE: the cells below call an earlier, single-argument version of the
# plotting helpers and reference frames ('wanted_cyto', 'pat_cyto_data') built
# in a previous session; they will not run against the definitions above.
fig = make_50_binary(wanted_cyto)
fig.savefig('draft_figures/Binary_50_cytos.png', dpi=500)
# <codecell>
fig = make_35_binary(wanted_cyto)
fig.savefig('draft_figures/Binary_35_cytos.png', dpi=500)
# <codecell>
fig = make_age_linear(wanted_cyto)
fig.savefig('draft_figures/Age_cytos.png', dpi=500)
# <codecell>
larger_wanted_pats = pat_cyto_data['HCV']==False
#larger_wanted_pats &= pat_cyto_data['VL']<=100
larger_wanted_pats &= pat_cyto_data['HAART']=='cH'
larger_wanted_pats &= pat_cyto_data['Race']=='Black/AA'
larger_wanted_pats &= (pat_cyto_data['Grouping']=='PN')|(pat_cyto_data['Grouping']=='PC')
print larger_wanted_pats.sum()/4
larger_wanted_cyto = pat_cyto_data[larger_wanted_pats]
# <codecell>
fig = make_50_binary(larger_wanted_cyto, confounders=['YearsSeropositive', 'Grouping'])
fig.savefig('draft_figures/Binary_50_cytos_withPC.png', dpi=500)
# <codecell>
fig = make_35_binary(larger_wanted_cyto, confounders=['YearsSeropositive', 'Grouping'])
fig.savefig('draft_figures/Binary_35_cytos_withPC.png', dpi=500)
# <codecell>
fig = make_age_linear(larger_wanted_cyto, confounders=['YearsSeropositive', 'Grouping'])
#fig.savefig('draft_figures/Age_cytos_withPC.png', dpi=500)
# <codecell>
|
JudoWill/ResearchNotebooks
|
AgingAnalysis.py
|
Python
|
mit
| 16,158
|
[
"VisIt"
] |
e2ac0fe70f298dc079223e81883a50f4b329e5d9df692e63a5516b5b5fd21ea1
|
def read_aims(filename):
"""Import FHI-aims geometry type files.
Reads unitcell, atom positions and constraints from
a geometry.in file.
"""
from ase import Atoms
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
atoms = Atoms()
fd = open(filename, 'r')
lines = fd.readlines()
fd.close()
positions = []
cell = []
symbols = []
magmoms = []
fix = []
fix_cart = []
xyz = np.array([0, 0, 0])
i = -1
n_periodic = -1
periodic = np.array([False, False, False])
for n, line in enumerate(lines):
inp = line.split()
if inp == []:
continue
if inp[0] == 'atom':
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
positions.append(floatvect)
symbols.append(inp[-1])
i += 1
xyz = np.array([0, 0, 0])
elif inp[0] == 'lattice_vector':
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
cell.append(floatvect)
n_periodic = n_periodic + 1
periodic[n_periodic] = True
elif inp[0] == 'initial_moment':
magmoms.append(float(inp[1]))
if inp[0] == 'constrain_relaxation':
if inp[1] == '.true.':
fix.append(i)
elif inp[1] == 'x':
xyz[0] = 1
elif inp[1] == 'y':
xyz[1] = 1
elif inp[1] == 'z':
xyz[2] = 1
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
atoms = Atoms(symbols, positions)
if len(magmoms) > 0:
atoms.set_initial_magnetic_moments(magmoms)
if periodic.any():
atoms.set_cell(cell)
atoms.set_pbc(periodic)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
return atoms
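# Illustrative usage sketch (not part of the module; the filename is hypothetical):
#
#   atoms = read_aims('geometry.in')
#   print(atoms.get_positions())
#   print(atoms.constraints)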
def write_aims(filename, atoms):
"""Method to write FHI-aims geometry files.
Writes the atoms positions and constraints (only FixAtoms is
supported at the moment).
"""
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
if isinstance(atoms, (list, tuple)):
if len(atoms) > 1:
raise RuntimeError("Don't know how to save more than "+
"one image to FHI-aims input")
else:
atoms = atoms[0]
fd = open(filename, 'w')
fd.write('#=======================================================\n')
fd.write('#FHI-aims file: '+filename+'\n')
fd.write('#Created using the Atomic Simulation Environment (ASE)\n')
fd.write('#=======================================================\n')
i = 0
if atoms.get_pbc().any():
for n, vector in enumerate(atoms.get_cell()):
fd.write('lattice_vector ')
for i in range(3):
fd.write('%16.16f ' % vector[i])
fd.write('\n')
fix_cart = np.zeros([len(atoms),3])
if atoms.constraints:
for constr in atoms.constraints:
if isinstance(constr, FixAtoms):
fix_cart[constr.index] = [1,1,1]
elif isinstance(constr, FixCartesian):
fix_cart[constr.a] = -constr.mask+1
for i, atom in enumerate(atoms):
fd.write('atom ')
for pos in atom.position:
fd.write('%16.16f ' % pos)
fd.write(atom.symbol)
fd.write('\n')
# (1) all coords are constrained:
if fix_cart[i].all():
fd.write('constrain_relaxation .true.\n')
# (2) some coords are constrained:
elif fix_cart[i].any():
xyz = fix_cart[i]
for n in range(3):
if xyz[n]:
fd.write('constrain_relaxation %s\n' % 'xyz'[n])
if atom.charge:
fd.write('initial_charge %16.6f\n' % atom.charge)
if atom.magmom:
fd.write('initial_moment %16.6f\n' % atom.magmom)
# except KeyError:
# continue
    fd.close()
def read_energy(filename):
for line in open(filename, 'r'):
if line.startswith(' | Total energy corrected'):
E = float(line.split()[-2])
return E
def read_aims_output(filename, index = -1):
""" Import FHI-aims output files with all data available, i.e. relaxations,
MD information, force information etc etc etc. """
from ase import Atoms, Atom
from ase.calculators.singlepoint import SinglePointCalculator
from ase.units import Ang, fs
from ase.constraints import FixAtoms, FixCartesian
molecular_dynamics = False
fd = open(filename, 'r')
cell = []
images = []
fix = []
fix_cart = []
n_periodic = -1
f = None
pbc = False
found_aims_calculator = False
v_unit = Ang/(1000.0*fs)
while True:
line = fd.readline()
if not line:
break
if "List of parameters used to initialize the calculator:" in line:
fd.readline()
calc = read_aims_calculator(fd)
calc.out = filename
found_aims_calculator = True
if "Number of atoms" in line:
inp = line.split()
n_atoms = int(inp[5])
if "| Unit cell:" in line:
if not pbc:
pbc = True
for i in range(3):
inp = fd.readline().split()
cell.append([inp[1],inp[2],inp[3]])
if "Found relaxation constraint for atom" in line:
xyz = [0, 0, 0]
ind = int(line.split()[5][:-1])-1
if "All coordinates fixed" in line:
if ind not in fix:
fix.append(ind)
if "coordinate fixed" in line:
coord = line.split()[6]
constr_ind = 0
if coord == 'x':
xyz[0] = 1
elif coord == 'y':
xyz[1] = 1
elif coord == 'z':
xyz[2] = 1
keep = True
for n,c in enumerate(fix_cart):
if ind == c.a:
keep = False
constr_ind = n
if keep:
fix_cart.append(FixCartesian(ind, xyz))
else:
                fix_cart[constr_ind].mask[xyz.index(1)] = 0
if "Atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[3],(inp[4],inp[5],inp[6])))
if "Complete information for previous time-step:" in line:
molecular_dynamics = True
if "Updated atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
if 'lattice_vector' in inp[0]:
cell = []
for i in range(3):
cell += [[float(inp[1]),float(inp[2]),float(inp[3])]]
inp = fd.readline().split()
atoms.set_cell(cell)
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
if molecular_dynamics:
inp = fd.readline().split()
if "Atomic structure (and velocities)" in line:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
inp = fd.readline().split()
velocities += [[float(inp[1])*v_unit,float(inp[2])*v_unit,float(inp[3])*v_unit]]
atoms.set_velocities(velocities)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
if "Total atomic forces" in line:
f = []
for i in range(n_atoms):
inp = fd.readline().split()
f.append([float(inp[2]),float(inp[3]),float(inp[4])])
if not found_aims_calculator:
e = images[-1].get_potential_energy()
images[-1].set_calculator(SinglePointCalculator(atoms,
energy=e,
forces=f))
e = None
f = None
if "Total energy corrected" in line:
e = float(line.split()[5])
if pbc:
atoms.set_cell(cell)
atoms.pbc = True
if not found_aims_calculator:
atoms.set_calculator(SinglePointCalculator(atoms, energy=e))
if not molecular_dynamics:
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
e = None
if found_aims_calculator:
calc.set_results(images[-1])
images[-1].set_calculator(calc)
fd.close()
if molecular_dynamics:
images = images[1:]
# return requested images, code borrowed from ase/io/trajectory.py
if isinstance(index, int):
return images[index]
else:
step = index.step or 1
if step > 0:
start = index.start or 0
if start < 0:
start += len(images)
stop = index.stop or len(images)
if stop < 0:
stop += len(images)
else:
if index.start is None:
start = len(images) - 1
else:
start = index.start
if start < 0:
start += len(images)
if index.stop is None:
stop = -1
else:
stop = index.stop
if stop < 0:
stop += len(images)
return [images[i] for i in range(start, stop, step)]
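# Illustrative usage sketch (not part of the module; the filename is hypothetical):
#
#   last = read_aims_output('aims.out')                     # final image only
#   every_2nd = read_aims_output('aims.out', slice(0, None, 2))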
|
grhawk/ASE
|
tools/ase/io/aims.py
|
Python
|
gpl-2.0
| 10,468
|
[
"ASE",
"FHI-aims"
] |
6ad083b950e9f8b3447c85084b2367b6e7d66eec1ba4c4223c63128a8033a8d5
|
"""Convolutional/Variational autoencoder, including demonstration of
training such a network on MNIST, CelebNet and the film, "Sita Sings The Blues"
using an image pipeline.
Copyright Parag K. Mital, January 2016
"""
import tensorflow as tf
import numpy as np
import os
from libs.dataset_utils import create_input_pipeline
from libs.datasets import CELEB, MNIST
from libs.batch_norm import batch_norm
from libs import utils
def VAE(input_shape=[None, 784],
n_filters=[64, 64, 64],
filter_sizes=[4, 4, 4],
n_hidden=32,
n_code=2,
activation=tf.nn.tanh,
dropout=False,
denoising=False,
convolutional=False,
variational=False):
"""(Variational) (Convolutional) (Denoising) Autoencoder.
Uses tied weights.
Parameters
----------
input_shape : list, optional
Shape of the input to the network. e.g. for MNIST: [None, 784].
n_filters : list, optional
Number of filters for each layer.
If convolutional=True, this refers to the total number of output
filters to create for each layer, with each layer's number of output
filters as a list.
If convolutional=False, then this refers to the total number of neurons
for each layer in a fully connected network.
filter_sizes : list, optional
Only applied when convolutional=True. This refers to the ksize (height
and width) of each convolutional layer.
n_hidden : int, optional
Only applied when variational=True. This refers to the first fully
connected layer prior to the variational embedding, directly after
the encoding. After the variational embedding, another fully connected
layer is created with the same size prior to decoding. Set to 0 to
not use an additional hidden layer.
n_code : int, optional
Only applied when variational=True. This refers to the number of
latent Gaussians to sample for creating the inner most encoding.
activation : function, optional
Activation function to apply to each layer, e.g. tf.nn.relu
dropout : bool, optional
Whether or not to apply dropout. If using dropout, you must feed a
value for 'keep_prob', as returned in the dictionary. 1.0 means no
dropout is used. 0.0 means every connection is dropped. Sensible
values are between 0.5-0.8.
denoising : bool, optional
Whether or not to apply denoising. If using denoising, you must feed a
value for 'corrupt_prob', as returned in the dictionary. 1.0 means no
corruption is used. 0.0 means every feature is corrupted. Sensible
values are between 0.5-0.8.
convolutional : bool, optional
        Whether or not to use a convolutional network; otherwise a fully
        connected network is created. This affects the meaning of the
        n_filters parameter.
variational : bool, optional
Whether or not to create a variational embedding layer. This will
create a fully connected layer after the encoding, if `n_hidden` is
greater than 0, then will create a multivariate gaussian sampling
layer, then another fully connected layer. The size of the fully
connected layers are determined by `n_hidden`, and the size of the
sampling layer is determined by `n_code`.
Returns
-------
model : dict
{
'cost': Tensor to optimize.
'Ws': All weights of the encoder.
'x': Input Placeholder
'z': Inner most encoding Tensor (latent features)
'y': Reconstruction of the Decoder
'keep_prob': Amount to keep when using Dropout
'corrupt_prob': Amount to corrupt when using Denoising
'train': Set to True when training/Applies to Batch Normalization.
}
"""
# network input / placeholders for train (bn) and dropout
x = tf.placeholder(tf.float32, input_shape, 'x')
phase_train = tf.placeholder(tf.bool, name='phase_train')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
corrupt_prob = tf.placeholder(tf.float32, [1])
# apply noise if denoising
x_ = (utils.corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)) if denoising else x
# 2d -> 4d if convolution
x_tensor = utils.to_tensor(x_) if convolutional else x_
current_input = x_tensor
Ws = []
shapes = []
# Build the encoder
for layer_i, n_output in enumerate(n_filters):
with tf.variable_scope('encoder/{}'.format(layer_i)):
shapes.append(current_input.get_shape().as_list())
if convolutional:
h, W = utils.conv2d(x=current_input,
n_output=n_output,
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
Ws.append(W)
current_input = h
shapes.append(current_input.get_shape().as_list())
with tf.variable_scope('variational'):
if variational:
dims = current_input.get_shape().as_list()
flattened = utils.flatten(current_input)
if n_hidden:
h = utils.linear(flattened, n_hidden, name='W_fc')[0]
h = activation(batch_norm(h, phase_train, 'fc/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = flattened
z_mu = utils.linear(h, n_code, name='mu')[0]
z_log_sigma = 0.5 * utils.linear(h, n_code, name='log_sigma')[0]
# Sample from noise distribution p(eps) ~ N(0, 1)
epsilon = tf.random_normal(
tf.stack([tf.shape(x)[0], n_code]))
# Sample from posterior
z = z_mu + tf.multiply(epsilon, tf.exp(z_log_sigma))
if n_hidden:
h = utils.linear(z, n_hidden, name='fc_t')[0]
h = activation(batch_norm(h, phase_train, 'fc_t/bn'))
if dropout:
h = tf.nn.dropout(h, keep_prob)
else:
h = z
size = dims[1] * dims[2] * dims[3] if convolutional else dims[1]
h = utils.linear(h, size, name='fc_t2')[0]
current_input = activation(batch_norm(h, phase_train, 'fc_t2/bn'))
if dropout:
current_input = tf.nn.dropout(current_input, keep_prob)
if convolutional:
current_input = tf.reshape(
current_input, tf.stack([
tf.shape(current_input)[0],
dims[1],
dims[2],
dims[3]]))
else:
z = current_input
shapes.reverse()
n_filters.reverse()
Ws.reverse()
n_filters += [input_shape[-1]]
# %%
# Decoding layers
for layer_i, n_output in enumerate(n_filters[1:]):
with tf.variable_scope('decoder/{}'.format(layer_i)):
shape = shapes[layer_i + 1]
if convolutional:
h, W = utils.deconv2d(x=current_input,
n_output_h=shape[1],
n_output_w=shape[2],
n_output_ch=shape[3],
n_input_ch=shapes[layer_i][3],
k_h=filter_sizes[layer_i],
k_w=filter_sizes[layer_i])
else:
h, W = utils.linear(x=current_input,
n_output=n_output)
h = activation(batch_norm(h, phase_train, 'dec/bn' + str(layer_i)))
if dropout:
h = tf.nn.dropout(h, keep_prob)
current_input = h
y = current_input
x_flat = utils.flatten(x)
y_flat = utils.flatten(y)
# l2 loss
loss_x = tf.reduce_sum(tf.squared_difference(x_flat, y_flat), 1)
if variational:
# variational lower bound, kl-divergence
loss_z = -0.5 * tf.reduce_sum(
1.0 + 2.0 * z_log_sigma -
tf.square(z_mu) - tf.exp(2.0 * z_log_sigma), 1)
# add l2 loss
cost = tf.reduce_mean(loss_x + loss_z)
else:
# just optimize l2 loss
cost = tf.reduce_mean(loss_x)
return {'cost': cost, 'Ws': Ws,
'x': x, 'z': z, 'y': y,
'keep_prob': keep_prob,
'corrupt_prob': corrupt_prob,
'train': phase_train}
def train_vae(files,
input_shape,
learning_rate=0.0001,
batch_size=100,
n_epochs=50,
n_examples=10,
crop_shape=[64, 64, 3],
crop_factor=0.8,
n_filters=[100, 100, 100, 100],
n_hidden=256,
n_code=50,
convolutional=True,
variational=True,
filter_sizes=[3, 3, 3, 3],
dropout=True,
keep_prob=0.8,
activation=tf.nn.relu,
img_step=100,
save_step=100,
ckpt_name="vae.ckpt"):
"""General purpose training of a (Variational) (Convolutional) Autoencoder.
Supply a list of file paths to images, and this will do everything else.
Parameters
----------
files : list of strings
List of paths to images.
input_shape : list
Must define what the input image's shape is.
learning_rate : float, optional
Learning rate.
batch_size : int, optional
Batch size.
n_epochs : int, optional
Number of epochs.
n_examples : int, optional
        Number of examples to use while demonstrating the current training
iteration's reconstruction. Creates a square montage, so make
sure int(sqrt(n_examples))**2 = n_examples, e.g. 16, 25, 36, ... 100.
crop_shape : list, optional
Size to centrally crop the image to.
crop_factor : float, optional
Resize factor to apply before cropping.
n_filters : list, optional
Same as VAE's n_filters.
n_hidden : int, optional
Same as VAE's n_hidden.
n_code : int, optional
Same as VAE's n_code.
convolutional : bool, optional
Use convolution or not.
variational : bool, optional
Use variational layer or not.
filter_sizes : list, optional
Same as VAE's filter_sizes.
dropout : bool, optional
Use dropout or not
keep_prob : float, optional
Percent of keep for dropout.
activation : function, optional
Which activation function to use.
img_step : int, optional
How often to save training images showing the manifold and
reconstruction.
save_step : int, optional
How often to save checkpoints.
ckpt_name : str, optional
Checkpoints will be named as this, e.g. 'model.ckpt'
"""
batch = create_input_pipeline(
files=files,
batch_size=batch_size,
n_epochs=n_epochs,
crop_shape=crop_shape,
crop_factor=crop_factor,
shape=input_shape)
ae = VAE(input_shape=[None] + crop_shape,
convolutional=convolutional,
variational=variational,
n_filters=n_filters,
n_hidden=n_hidden,
n_code=n_code,
dropout=dropout,
filter_sizes=filter_sizes,
activation=activation)
# Create a manifold of our inner most layer to show
# example reconstructions. This is one way to see
# what the "embedding" or "latent space" of the encoder
# is capable of encoding, though note that this is just
# a random hyperplane within the latent space, and does not
# encompass all possible embeddings.
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
# This will handle our threaded image pipeline
coord = tf.train.Coordinator()
# Ensure no more changes to graph
tf.get_default_graph().finalize()
# Start up the queues for handling the image pipeline
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
if os.path.exists(ckpt_name + '.index') or os.path.exists(ckpt_name):
saver.restore(sess, ckpt_name)
# Fit all training data
t_i = 0
batch_i = 0
epoch_i = 0
cost = 0
n_files = len(files)
test_xs = sess.run(batch) / 255.0
utils.montage(test_xs, 'test_xs.png')
try:
while not coord.should_stop() and epoch_i < n_epochs:
batch_i += 1
batch_xs = sess.run(batch) / 255.0
train_cost = sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True,
ae['keep_prob']: keep_prob})[0]
print(batch_i, train_cost)
cost += train_cost
if batch_i % n_files == 0:
print('epoch:', epoch_i)
print('average cost:', cost / batch_i)
cost = 0
batch_i = 0
epoch_i += 1
if batch_i % img_step == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
utils.montage(recon.reshape([-1] + crop_shape),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
print('reconstruction (min, max, mean):',
recon.min(), recon.max(), recon.mean())
utils.montage(recon.reshape([-1] + crop_shape),
'reconstruction_%08d.png' % t_i)
t_i += 1
if batch_i % save_step == 0:
# Save the variables to disk.
saver.save(sess, "./" + ckpt_name,
global_step=batch_i,
write_meta_graph=False)
except tf.errors.OutOfRangeError:
print('Done.')
finally:
# One of the threads has issued an exception. So let's tell all the
# threads to shutdown.
coord.request_stop()
# Wait until all threads have finished.
coord.join(threads)
# Clean up the session.
sess.close()
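# Illustrative usage sketch (not part of the library; the folder name and
# settings are hypothetical -- see test_celeb()/test_sita() below for concrete
# configurations):
#
#   files = [os.path.join('my_images', f) for f in os.listdir('my_images')]
#   train_vae(files=files, input_shape=[218, 178, 3],
#             n_filters=[64, 64, 64], filter_sizes=[3, 3, 3],
#             n_hidden=128, n_code=32)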
# %%
def test_mnist(n_epochs=10):
"""Train an autoencoder on MNIST.
This function will train an autoencoder on MNIST and also
save many image files during the training process, demonstrating
the latent space of the inner most dimension of the encoder,
as well as reconstructions of the decoder.
"""
# load MNIST
n_code = 2
mnist = MNIST(split=[0.8, 0.1, 0.1])
ae = VAE(input_shape=[None, 784], n_filters=[512, 256],
n_hidden=64, n_code=n_code, activation=tf.nn.sigmoid,
convolutional=False, variational=True)
n_examples = 100
zs = np.random.uniform(
-1.0, 1.0, [4, n_code]).astype(np.float32)
zs = utils.make_latent_manifold(zs, n_examples)
learning_rate = 0.02
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate).minimize(ae['cost'])
# We create a session to use the graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Fit all training data
t_i = 0
batch_i = 0
batch_size = 200
test_xs = mnist.test.images[:n_examples]
utils.montage(test_xs.reshape((-1, 28, 28)), 'test_xs.png')
for epoch_i in range(n_epochs):
train_i = 0
train_cost = 0
for batch_xs, _ in mnist.train.next_batch(batch_size):
train_cost += sess.run([ae['cost'], optimizer], feed_dict={
ae['x']: batch_xs, ae['train']: True, ae['keep_prob']: 1.0})[0]
train_i += 1
if batch_i % 10 == 0:
# Plot example reconstructions from latent layer
recon = sess.run(
ae['y'], feed_dict={
ae['z']: zs,
ae['train']: False,
ae['keep_prob']: 1.0})
utils.montage(recon.reshape((-1, 28, 28)),
'manifold_%08d.png' % t_i)
# Plot example reconstructions
recon = sess.run(
ae['y'], feed_dict={ae['x']: test_xs,
ae['train']: False,
ae['keep_prob']: 1.0})
utils.montage(recon.reshape(
(-1, 28, 28)), 'reconstruction_%08d.png' % t_i)
t_i += 1
batch_i += 1
valid_i = 0
valid_cost = 0
for batch_xs, _ in mnist.valid.next_batch(batch_size):
valid_cost += sess.run([ae['cost']], feed_dict={
ae['x']: batch_xs, ae['train']: False, ae['keep_prob']: 1.0})[0]
valid_i += 1
print('train:', train_cost / train_i, 'valid:', valid_cost / valid_i)
def test_celeb(n_epochs=50):
"""Train an autoencoder on Celeb Net.
"""
files = CELEB()
train_vae(
files=files,
input_shape=[218, 178, 3],
batch_size=100,
n_epochs=n_epochs,
crop_shape=[64, 64, 3],
crop_factor=0.8,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./celeb.ckpt')
def test_sita():
"""Train an autoencoder on Sita Sings The Blues.
"""
if not os.path.exists('sita'):
os.system('wget http://ossguy.com/sita/Sita_Sings_the_Blues_640x360_XviD.avi')
os.mkdir('sita')
os.system('ffmpeg -i Sita_Sings_the_Blues_640x360_XviD.avi -r 60 -f' +
' image2 -s 160x90 sita/sita-%08d.jpg')
files = [os.path.join('sita', f) for f in os.listdir('sita')]
train_vae(
files=files,
input_shape=[90, 160, 3],
batch_size=100,
n_epochs=50,
crop_shape=[90, 160, 3],
crop_factor=1.0,
convolutional=True,
variational=True,
n_filters=[100, 100, 100],
n_hidden=250,
n_code=100,
dropout=True,
filter_sizes=[3, 3, 3],
activation=tf.nn.sigmoid,
ckpt_name='./sita.ckpt')
if __name__ == '__main__':
test_celeb()
|
pkmital/CADL
|
session-3/libs/vae.py
|
Python
|
apache-2.0
| 19,312
|
[
"Gaussian"
] |
8534212f46bcf9b268738a3ec68aa15aac8cdf2a10b3c115b81d4bd401e3c730
|
# Simulation of an M/M/1 queueing system using the SimPy library
from SimPy.Simulation import *
from random import expovariate, seed
import random
from numpy.random import normal
import matplotlib.pyplot as plt
queue_size = []
service_size = []
## Model components ------------------------
class Source(Process):
""" Este codigo genera clientes aleatoriamente """
def generate(self,number,interval,resource):
for i in range(number):
c = Customer(name = "Cliente %02d"%(i,))
activate(c,c.visit(timeInBank=random.randint(1,10),
res=resource,P=0))
t = expovariate(1.0/interval)
yield hold,self,t
class Customer(Process):
def visit(self,timeInBank=0,res=None,P=0):
global queue_size, service_size
        arrive = now() # arrival time
Nwaiting = len(res.waitQ)
# queue_size_graph.append(Nwaiting)
print "%8.3f %s: Nuevo cliente. Tamanio de cola: %d"%(now(),self.name,Nwaiting)
queue_size.append(Nwaiting)
yield request,self,res,P
waitabs = now()
        wait = now()-arrive # waiting time
print "%8.3f %s: Espero %6.3f"%(now(),self.name,wait)
#waiting_graph.append(wait)
yield hold,self,timeInBank
yield release,self,res
finish = now()
service = finish-waitabs
service_size.append(service)
print "%8.3f %s: Termino"%(finish,self.name)
wait = now() - arrive
wM.observe(wait)
## Experiment data -------------------------
maxTime = 120000 # maximum simulated time, in minutes
k = Resource(name="Mostrador",unitName="Servidor1",
qType=PriorityQ, preemptable=True, monitored=True)
## Experiment ------------------------------
seed(random.random())
wM = Monitor()
initialize()
s = Source('Source')
activate(s,s.generate(number=10000, interval=5.0,
resource=k),at=0.0)
simulate(until=maxTime)
result = wM.count(), wM.mean()
queue_avg_size = sum(queue_size) / float(len(queue_size))
service_avg_size = sum(service_size) / float(len(service_size))
print("Tiempo de espera total en cola para %3d arribos fue de %5.3f minutos." % result)
print("Tiempo promedio de espera en cola: %5.3f" % (wM.mean()/wM.count()))
print("Longitud promedio de la cola: %5.3f" % queue_avg_size)
print("Longitud promedio de servicio: %5.3f" % service_avg_size)
|
milardovich/simulacion
|
mm1.py
|
Python
|
mit
| 2,636
|
[
"VisIt"
] |
29a16ddba0b3bdb5296235ef2968b89e32f708fdfc3255e25993f69459197612
|
from multiprocessing import freeze_support
from rdkit import Chem
from mordred import Calculator
from mordred.RingCount import RingCount
# for parallel calculation on windows only
if __name__ == "__main__":
freeze_support()
# Start Code 5
calc = Calculator()
calc.register(RingCount())
print(calc(Chem.MolFromSmiles("c1ccccc1")))
print(list(calc.map([Chem.MolFromSmiles("c1ccccc1"), Chem.MolFromSmiles("CCCCCC")])))
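# Further sketch (hedged; the SMILES below are illustrative): Calculator.pandas
# evaluates every registered descriptor over a list of molecules and returns a
# pandas DataFrame.
# mols = [Chem.MolFromSmiles(smi) for smi in ("c1ccccc1O", "CCCCCC")]
# print(calc.pandas(mols))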
|
mordred-descriptor/mordred
|
examples/article/Code5.py
|
Python
|
bsd-3-clause
| 444
|
[
"RDKit"
] |
2c0ac66b65103cd62032dfeb31861aabe1df10c48718a88247353fa7fe3c7f12
|
"""
Subgraph centrality and communicability betweenness.
"""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = [
"subgraph_centrality_exp",
"subgraph_centrality",
"communicability_betweenness_centrality",
"estrada_index",
]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def subgraph_centrality_exp(G):
r"""Returns the subgraph centrality for each node of G.
Subgraph centrality of a node `n` is the sum of weighted closed
walks of all lengths starting and ending at node `n`. The weights
decrease with path length. Each closed walk is associated with a
connected subgraph ([1]_).
Parameters
----------
G: graph
Returns
-------
nodes:dictionary
Dictionary of nodes with subgraph centrality as the value.
Raises
------
NetworkXError
If the graph is not undirected and simple.
See Also
--------
subgraph_centrality:
Alternative algorithm of the subgraph centrality for each node of G.
Notes
-----
This version of the algorithm exponentiates the adjacency matrix.
The subgraph centrality of a node `u` in G can be found using
the matrix exponential of the adjacency matrix of G [1]_,
.. math::
SC(u)=(e^A)_{uu} .
References
----------
.. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
"Subgraph centrality in complex networks",
Physical Review E 71, 056103 (2005).
https://arxiv.org/abs/cond-mat/0504730
Examples
--------
(Example from [1]_)
>>> G = nx.Graph(
... [
... (1, 2),
... (1, 5),
... (1, 8),
... (2, 3),
... (2, 8),
... (3, 4),
... (3, 6),
... (4, 5),
... (4, 7),
... (5, 6),
... (6, 7),
... (7, 8),
... ]
... )
>>> sc = nx.subgraph_centrality_exp(G)
>>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
"""
# alternative implementation that calculates the matrix exponential
import scipy.linalg
nodelist = list(G) # ordering of nodes in matrix
A = nx.to_numpy_array(G, nodelist)
# convert to 0-1 matrix
A[A != 0.0] = 1
expA = scipy.linalg.expm(A)
# convert diagonal to dictionary keyed by node
sc = dict(zip(nodelist, map(float, expA.diagonal())))
return sc
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def subgraph_centrality(G):
r"""Returns subgraph centrality for each node in G.
Subgraph centrality of a node `n` is the sum of weighted closed
walks of all lengths starting and ending at node `n`. The weights
decrease with path length. Each closed walk is associated with a
connected subgraph ([1]_).
Parameters
----------
G: graph
Returns
-------
nodes : dictionary
Dictionary of nodes with subgraph centrality as the value.
Raises
------
NetworkXError
If the graph is not undirected and simple.
See Also
--------
subgraph_centrality_exp:
Alternative algorithm of the subgraph centrality for each node of G.
Notes
-----
This version of the algorithm computes eigenvalues and eigenvectors
of the adjacency matrix.
Subgraph centrality of a node `u` in G can be found using
a spectral decomposition of the adjacency matrix [1]_,
.. math::
SC(u)=\sum_{j=1}^{N}(v_{j}^{u})^2 e^{\lambda_{j}},
where `v_j` is an eigenvector of the adjacency matrix `A` of G
    corresponding to the eigenvalue `\lambda_j`.
Examples
--------
(Example from [1]_)
>>> G = nx.Graph(
... [
... (1, 2),
... (1, 5),
... (1, 8),
... (2, 3),
... (2, 8),
... (3, 4),
... (3, 6),
... (4, 5),
... (4, 7),
... (5, 6),
... (6, 7),
... (7, 8),
... ]
... )
>>> sc = nx.subgraph_centrality(G)
>>> print([f"{node} {sc[node]:0.2f}" for node in sorted(sc)])
['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90']
References
----------
.. [1] Ernesto Estrada, Juan A. Rodriguez-Velazquez,
"Subgraph centrality in complex networks",
Physical Review E 71, 056103 (2005).
https://arxiv.org/abs/cond-mat/0504730
"""
import numpy as np
import numpy.linalg
nodelist = list(G) # ordering of nodes in matrix
A = nx.to_numpy_array(G, nodelist)
# convert to 0-1 matrix
A[np.nonzero(A)] = 1
w, v = numpy.linalg.eigh(A)
vsquare = np.array(v) ** 2
expw = np.exp(w)
xg = np.dot(vsquare, expw)
# convert vector dictionary keyed by node
sc = dict(zip(nodelist, map(float, xg)))
return sc
@not_implemented_for("directed")
@not_implemented_for("multigraph")
def communicability_betweenness_centrality(G, normalized=True):
r"""Returns subgraph communicability for all pairs of nodes in G.
Communicability betweenness measure makes use of the number of walks
connecting every pair of nodes as the basis of a betweenness centrality
measure.
Parameters
----------
G: graph
Returns
-------
nodes : dictionary
Dictionary of nodes with communicability betweenness as the value.
Raises
------
NetworkXError
If the graph is not undirected and simple.
Notes
-----
Let `G=(V,E)` be a simple undirected graph with `n` nodes and `m` edges,
and `A` denote the adjacency matrix of `G`.
Let `G(r)=(V,E(r))` be the graph resulting from
removing all edges connected to node `r` but not the node itself.
The adjacency matrix for `G(r)` is `A+E(r)`, where `E(r)` has nonzeros
only in row and column `r`.
    The communicability betweenness of a node `r` is [1]_
.. math::
\omega_{r} = \frac{1}{C}\sum_{p}\sum_{q}\frac{G_{prq}}{G_{pq}},
p\neq q, q\neq r,
where
    `G_{prq}=(e^{A})_{pq} - (e^{A+E(r)})_{pq}` is the number of walks
involving node r,
    `G_{pq}=(e^{A})_{pq}` is the number of walks starting
at node `p` and ending at node `q`,
and `C=(n-1)^{2}-(n-1)` is a normalization factor equal to the
number of terms in the sum.
The resulting `\omega_{r}` takes values between zero and one.
The lower bound cannot be attained for a connected
graph, and the upper bound is attained in the star graph.
References
----------
.. [1] Ernesto Estrada, Desmond J. Higham, Naomichi Hatano,
"Communicability Betweenness in Complex Networks"
Physica A 388 (2009) 764-774.
https://arxiv.org/abs/0905.4102
Examples
--------
>>> G = nx.Graph(
... [
... (0, 1),
... (1, 2),
... (1, 5),
... (5, 4),
... (2, 4),
... (2, 3),
... (4, 3),
... (3, 6),
... ]
... )
>>> cbc = nx.communicability_betweenness_centrality(G)
>>> print([f"{node} {cbc[node]:0.2f}" for node in sorted(cbc)])
['0 0.03', '1 0.45', '2 0.51', '3 0.45', '4 0.40', '5 0.19', '6 0.03']
"""
import numpy as np
import scipy.linalg
nodelist = list(G) # ordering of nodes in matrix
n = len(nodelist)
A = nx.to_numpy_array(G, nodelist)
# convert to 0-1 matrix
A[np.nonzero(A)] = 1
expA = scipy.linalg.expm(A)
mapping = dict(zip(nodelist, range(n)))
cbc = {}
for v in G:
# remove row and col of node v
i = mapping[v]
row = A[i, :].copy()
col = A[:, i].copy()
A[i, :] = 0
A[:, i] = 0
B = (expA - scipy.linalg.expm(A)) / expA
# sum with row/col of node v and diag set to zero
B[i, :] = 0
B[:, i] = 0
B -= np.diag(np.diag(B))
cbc[v] = float(B.sum())
# put row and col back
A[i, :] = row
A[:, i] = col
# rescaling
cbc = _rescale(cbc, normalized=normalized)
return cbc
def _rescale(cbc, normalized):
# helper to rescale betweenness centrality
if normalized is True:
order = len(cbc)
if order <= 2:
scale = None
else:
scale = 1.0 / ((order - 1.0) ** 2 - (order - 1.0))
if scale is not None:
for v in cbc:
cbc[v] *= scale
return cbc
def estrada_index(G):
r"""Returns the Estrada index of a the graph G.
The Estrada Index is a topological index of folding or 3D "compactness" ([1]_).
Parameters
----------
G: graph
Returns
-------
estrada index: float
Raises
------
NetworkXError
If the graph is not undirected and simple.
Notes
-----
Let `G=(V,E)` be a simple undirected graph with `n` nodes and let
`\lambda_{1}\leq\lambda_{2}\leq\cdots\lambda_{n}`
be a non-increasing ordering of the eigenvalues of its adjacency
matrix `A`. The Estrada index is ([1]_, [2]_)
.. math::
EE(G)=\sum_{j=1}^n e^{\lambda _j}.
References
----------
.. [1] E. Estrada, "Characterization of 3D molecular structure",
Chem. Phys. Lett. 319, 713 (2000).
https://doi.org/10.1016/S0009-2614(00)00158-5
.. [2] José Antonio de la Peñaa, Ivan Gutman, Juan Rada,
"Estimating the Estrada index",
Linear Algebra and its Applications. 427, 1 (2007).
https://doi.org/10.1016/j.laa.2007.06.020
Examples
--------
>>> G = nx.Graph(
... [
... (0, 1),
... (1, 2),
... (1, 5),
... (5, 4),
... (2, 4),
... (2, 3),
... (4, 3),
... (3, 6),
... ]
... )
>>> ei = nx.estrada_index(G)
>>> print(f"{ei:0.5}")
20.55
"""
return sum(subgraph_centrality(G).values())
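# Illustrative check (not part of networkx): the Estrada index is the sum of
# exp(lambda_j) over the adjacency eigenvalues, so it can be verified directly:
#
#   import numpy as np
#   A = nx.to_numpy_array(G)
#   np.isclose(nx.estrada_index(G), np.exp(np.linalg.eigvalsh(A)).sum())  # True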
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/algorithms/centrality/subgraph_alg.py
|
Python
|
gpl-3.0
| 10,125
|
[
"Desmond"
] |
af1c0d1fac2d9457c59fa0d9626a7178b80fb5f7ce3ea86a6fd52ccd0691c018
|
# Copyright 2012, 2013 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
from .vtk import *
|
MicroMagnum/MicroMagnum
|
src/magnum/micromagnetics/io/vtk/__init__.py
|
Python
|
gpl-3.0
| 742
|
[
"VTK"
] |
6c0c62e2733cc7eacd708df3a7e34290d622bf2ffb8f0ad2dc0451876af8d413
|
import numpy as np
from alis import almsgs
from alis import alfunc_base
from scipy.signal import convolve
msgs=almsgs.msgs()
class APOD(alfunc_base.Base) :
"""
    Convolves the spectrum with a Gaussian with full-width at half-maximum AFWHM (in Angstroms):
p[0] = AFWHM
"""
def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
self._idstr = 'apod' # ID string for this class
self._pnumr = 1 # Total number of parameters fed in
self._keywd = dict({'kind':'uniform', 'blind':False}) # Additional arguments to describe the model --- 'input' cannot be used as a keyword
self._keych = dict({'kind':1, 'blind':0}) # Require keywd to be changed (1 for yes, 0 for no)
        self._keyfm = dict({'kind':"", 'blind':""}) # Format strings used when saving each keyword
self._parid = ['scale'] # Name of each parameter
self._defpar = [ 1.0 ] # Default values for parameters that are not provided
self._fixpar = [ True ] # By default, should these parameters be fixed?
self._limited = [ [0 ,0 ] ] # Should any of these parameters be limited from below or above
self._limits = [ [0.0,0.0] ] # What should these limiting values be
self._svfmt = [ "{0:s}" ] # Specify the format used to print or save output
self._prekw = [] # Specify the keywords to print out before the parameters
# DON'T CHANGE THE FOLLOWING --- it tells ALIS what parameters are provided by the user.
tempinput = self._parid+list(self._keych.keys()) #
self._keywd['input'] = dict(zip((tempinput),([0]*np.size(tempinput)))) #
########################################################################
self._verbose = verbose
# Set the atomic data
self._atomic = atomic
if getinst: return
def call_CPU(self, x, y, p, ncpus=1):
"""
Define the functional form of the model
--------------------------------------------------------
x : array of wavelengths
y : model flux array
p : array of parameters for this model
--------------------------------------------------------
"""
midval = x[x.size // 2]
deltaF = np.mean(x[1:]-x[:-1])
kvals = (x - midval) / deltaF
# Determine which apodization function to use
kind = self._keywd['kind']
# Calculate the apodization function
if kind == 'uniform':
instfunc = np.sinc(kvals)
elif kind == 'hanning':
instfunc = (np.sinc(kvals) + 0.5 * np.sinc(kvals - 1.0) + 0.5 * np.sinc(kvals + 1.0))
elif kind == 'hamming':
fact = (27 - 16 * (kvals / 2.0) ** 2) / 25
instfunc = fact * (np.sinc(kvals) + 0.5 * np.sinc(kvals - 1.0) + 0.5 * np.sinc(kvals + 1.0))
elif kind == 'bartlett':
instfunc = np.sinc(kvals / 2.0) ** 2
elif kind == 'blackmann':
fact = (21 - 9 * (kvals / 2.0) ** 2) / 25
instfunc = (fact / (1.0 - (kvals / 2.0) ** 2)) * (
np.sinc(kvals) + 0.5 * np.sinc(kvals - 1.0) + 0.5 * np.sinc(kvals + 1.0))
elif kind == 'welch':
pk = np.pi * kvals
instfunc = 4.0 * (np.sin(pk) - pk * np.cos(pk)) / (2.0 * pk ** 3)
ww = np.where(pk == 0.0)
if ww[0].size != 0:
instfunc[ww] = 0.5 * (instfunc[ww[0] - 1] + instfunc[ww[0] + 1])
elif kind == 'new':
pk = np.pi * kvals
instfunc = np.cos(pk) / pk ** 2
ww = np.where(pk == 0.0)
if ww[0].size != 0:
instfunc[ww] = 0.5 * (instfunc[ww[0] - 1] + instfunc[ww[0] + 1])
else:
return y
# Normalise
instfunc = instfunc / instfunc.sum()
# Convolve the data with the instrument function
yb = convolve(y, instfunc, mode='same', method='fft')
return yb
def getfwhm(self):
kind = self._keywd['kind']
if kind == 'uniform':
return 1.20671
elif kind == 'hanning':
return 2.0
elif kind == 'hamming':
return 1.81522
elif kind == 'bartlett':
return 1.77179
elif kind == 'blackmann':
return 2.29880
elif kind == 'welch':
return 1.59044
elif kind == 'new':
return 0.0
def getminmax(self, par, fitrng, Nsig=20.0):
"""
This definition is only used for specifying the
FWHM Resolution of the data.
--------------------------------------------------------
This definition will return the additional wavelength range
        of the data to be extracted around the user-specified fitrange
to ensure the edges of the model near the min and max of
fitrange aren't affected.
--------------------------------------------------------
par : The input parameters which defines the FWHM of this
function
fitrng : The fitrange specified by the user at input
Nsig : Width in number of sigma to extract either side of
fitrange
"""
        # Use the parameters to now calculate the sigma width
        # (note: Nsig is currently unused; a fixed 10% wavelength buffer is applied)
        frac = 0.1
# Calculate the min and max extraction wavelengths
wmin = fitrng[0]*(1.0 - frac)
wmax = fitrng[1]*(1.0 + frac)
return wmin, wmax
def load(self, instr, cntr, mp, specid, forcefix=False):
"""
Load the parameters in the input model file
--------------------------------------------------------
instr: input string for the parameters and keywords of
model to be loaded (ignoring the identifier at
the beginning of the model line).
cntr : The line number of the model (e.g. if it's the
first model line, cntr=0)
mp : modpass --> A dictionary with the details of all
models read in so far.
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
def check_tied_param(ival, cntr, mps, iind):
havtie = False
tieval=ival.lstrip('+-.0123456789')
if tieval[0:2] in ['E+', 'e+', 'E-', 'e-']: # Scientific Notation is used.
tieval=tieval[2:].lstrip('.0123456789')
inval=float(ival.rstrip(tieval))
if len(tieval) == 0: # Parameter is not tied
mps['mtie'][cntr].append(-1)
if forcefix:
mps['mfix'][cntr].append(1)
else:
mps['mfix'][cntr].append(0)
else: # parameter is tied
# Determine if this parameter is fixed
if tieval[0].isupper() or forcefix: mps['mfix'][cntr].append(1)
else: mps['mfix'][cntr].append(0)
# Check if this tieval has been used before
if len(mps['tpar']) == 0: # If it's the first known tied parameter in the model
mps['tpar'].append([])
mps['tpar'][0].append(tieval)
mps['tpar'][0].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
else:
for j in range(0,len(mps['tpar'])):
if mps['tpar'][j][0] == tieval:
mps['mtie'][cntr].append(j)
havtie = True
                if havtie == False: # create a new tied parameter
mps['tpar'].append([])
mps['tpar'][-1].append(tieval)
mps['tpar'][-1].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
if havtie == False: mps['p0'].append(inval)
mps['mpar'][cntr].append(inval)
mps['mlim'][cntr].append([self._limits[iind][i] if self._limited[iind][i]==1 else None for i in range(2)])
return mps
################
# Convert colon back to equals so that it's interpreted as a keyword
instr = instr.replace(":", "=")
isspl = instr.split(",")
        # Separate the parameters from the keywords
kywrd = []
keywdk = list(self._keywd.keys())
keywdk[:] = (kych for kych in keywdk if kych[:] != 'input') # Remove the keyword 'input'
param = [str(self._defpar[all]) for all in range(self._pnumr)]
parid = [i for i in range(self._pnumr)]
for i in range(len(isspl)):
if "=" in isspl[i]:
kwspl = isspl[i].split('=')
if kwspl[0] in keywdk:
self._keywd['input'][kwspl[0]]=1
kywrd.append(isspl[i])
else: msgs.error("Keyword '"+isspl[i]+"' is unknown for -"+msgs.newline()+self._idstr+" "+instr)
else:
param[i] = isspl[i]
self._keywd['input'][self._parid[i]]=1
# Do some quick checks
if len(param) != self._pnumr:
msgs.error("Incorrect number of parameters (should be "+str(self._pnumr)+"):"+msgs.newline()+self._idstr+" "+instr)
# Set the parameters:
mp['mtyp'].append(self._idstr)
mp['mpar'].append([])
mp['mtie'].append([])
mp['mfix'].append([])
mp['mlim'].append([])
for i in range(self._pnumr):
mp = check_tied_param(param[i], cntr, mp, i)
# Now load the keywords:
for i in range(len(kywrd)):
kwspl = kywrd[i].split('=')
ksspl = kwspl[1].split(',')
for j in range(len(ksspl)):
if type(self._keywd[kwspl[0]]) is int:
typeval='integer'
self._keywd[kwspl[0]] = int(kwspl[1])
elif type(self._keywd[kwspl[0]]) is str:
typeval='string'
self._keywd[kwspl[0]] = kwspl[1]
elif type(self._keywd[kwspl[0]]) is float:
typeval='float'
self._keywd[kwspl[0]] = float(kwspl[1])
elif type(self._keywd[kwspl[0]]) is list:
typeval='list'
self._keywd[kwspl[0]].append(kwspl[1])
elif type(self._keywd[kwspl[0]]) is bool:
if kwspl[1] in ['True', 'False']:
typeval='boolean'
self._keywd[kwspl[0]] = kwspl[1] in ['True']
else:
typeval='string'
self._keywd[kwspl[0]] = kwspl[1]
msgs.warn(kwspl[0]+" should be of type boolean (True/False)", verbose=self._verbose)
elif self._keywd[kwspl[0]] is None:
typeval='None'
self._keywd[kwspl[0]] = None
else:
msgs.error("I don't understand the format on line:"+msgs.newline()+self._idstr+" "+instr)
self._keych[kwspl[0]] = 0 # Set keych for this keyword to zero to show that this has been changed
# Check that all required keywords were changed
for i in range(len(keywdk)):
if self._keych[keywdk[i]] == 1: msgs.error(keywdk[i]+" must be set for -"+msgs.newline()+self._idstr+" "+instr)
# Append the final set of keywords
mp['mkey'].append(self._keywd.copy())
return mp, parid
def parin(self, i, par):
"""
This routine converts a parameter in the input model file
to the parameter used in 'call'
--------------------------------------------------------
When writing a new function, one should change how each
input parameter 'par' is converted into a parameter used
in the function specified by 'call'
--------------------------------------------------------
"""
if i == 0: pin = par
return pin
def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, ddpid=None, getinfl=False, getstdd=None):
"""
Return the parameters for a Gaussian function to be used by 'call'
The only thing that should be changed here is the parb values
"""
levadd=0
params=np.zeros(self._pnumr)
parinf=[]
for i in range(self._pnumr):
lnkprm = None
if mp['mtie'][ival][i] >= 0:
getid = mp['tpar'][mp['mtie'][ival][i]][1]
elif mp['mtie'][ival][i] <= -2:
if len(mp['mlnk']) == 0:
lnkprm = mp['mpar'][ival][i]
else:
for j in range(len(mp['mlnk'])):
if mp['mlnk'][j][0] == mp['mtie'][ival][i]:
cmd = 'lnkprm = ' + mp['mlnk'][j][1]
namespace = dict({'p': p})
exec(cmd, namespace)
lnkprm = namespace['lnkprm']
levadd += 1
else:
getid = level+levadd
levadd+=1
if lnkprm is None:
params[i] = self.parin(i, p[getid])
if mp['mfix'][ival][i] == 0: parinf.append(getid) # If parameter not fixed, append it to the influence array
else:
params[i] = lnkprm
if ddpid is not None:
if ddpid not in parinf: return []
if nexbin is not None:
if params[0] == 0: return params, 1
if nexbin[0] == "km/s": msgs.error("bintype is set to 'km/s', when FWHM is specified in Hz.")
elif nexbin[0] == "A" : msgs.error("bintype is set to 'A', when FWHM is specified in Hz.")
elif nexbin[0] == "Hz" : return params, nexbin[1]
else: msgs.bug("bintype "+nexbin[0]+" should not have been specified in model function: "+self._idstr, verbose=self._verbose)
elif getstdd is not None:
fact = 2.0*np.sqrt(2.0*np.log(2.0))
return getstdd[1]*(1.0+getstdd[0]*self.getfwhm()/fact), getstdd[2]*(1.0-getstdd[0]*self.getfwhm()/fact)
elif getinfl: return params, parinf
else: return params
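# Illustrative check (not part of ALIS): the constants returned by getfwhm()
# can be verified numerically from the apodization kernels. For the Hanning
# window, sinc(k) + 0.5*sinc(k-1) + 0.5*sinc(k+1) falls to half its peak value
# at |k| = 1, giving a FWHM of 2.0 in units of the channel spacing:
#
#   k = np.linspace(-3.0, 3.0, 60001)
#   f = np.sinc(k) + 0.5 * np.sinc(k - 1.0) + 0.5 * np.sinc(k + 1.0)
#   half = k[f >= 0.5 * f.max()]
#   print(half.max() - half.min())   # ~2.0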
|
rcooke-ast/ALIS
|
alis/alfunc_apod.py
|
Python
|
gpl-3.0
| 14,420
|
[
"Gaussian"
] |
54cf4bd3ecbc92cc3a74029f8e229cb05a4d53ea30411e72b384064935df923e
|
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, _
class GroupVisitPaymentForm(models.AbstractModel):
_name = "cms.form.event.group.visit.payment"
_inherit = "cms.form.payment"
_display_type = "full"
event_id = fields.Many2one("crm.event.compassion", readonly=False)
registration_id = fields.Many2one("event.registration", readonly=False)
partner_name = fields.Char("Participant", readonly=True)
@property
def _payment_success_redirect(self):
return f"/event/payment/gpv_payment_validate/{self.invoice_id.id}"
@property
def _form_fieldsets(self):
return [
{
"id": "payment",
"fields": ["partner_name"],
},
]
@property
def form_title(self):
if self.event_id:
return self.event_id.name + " " + _("payment")
else:
return _("Travel payment")
@property
def submit_text(self):
return _("Proceed with payment")
@property
def form_widgets(self):
# Hide fields
res = super().form_widgets
res["partner_name"] = "cms_form_compassion.form.widget.readonly"
return res
def form_init(self, request, main_object=None, **kw):
form = super().form_init(request, main_object, **kw)
# Store ambassador and event in model to use it in properties
registration = kw.get("registration")
if registration:
form.event_id = registration.compassion_event_id
form.partner_id = registration.partner_id
form.registration_id = registration
return form
def _form_load_partner_name(self, fname, field, value, **req_values):
return self.partner_id.sudo().name
def generate_invoice(self):
        # modify the open invoice and add a credit-card fee line
group_visit_invoice = self.registration_id.sudo().group_visit_invoice_id
# Admin
analytic_account = (
self.env["account.analytic.account"]
.sudo()
.search([("code", "=", "ATT_ADM")])
)
# Financial Expenses
account = self.env["account.account"].sudo().search([("code", "=", "4200")])
existing_tax = group_visit_invoice.invoice_line_ids.filtered(
lambda l: l.account_id == account
)
if not existing_tax:
group_visit_invoice.with_delay().modify_open_invoice(
{
"invoice_line_ids": [
(
0,
0,
{
"quantity": 1.0,
"price_unit": group_visit_invoice.amount_total
* 0.019,
"account_id": account.id,
"name": "Credit card tax",
"account_analytic_id": analytic_account.id,
},
)
]
}
)
return group_visit_invoice
|
eicher31/compassion-switzerland
|
website_event_compassion/forms/group_visit_payment.py
|
Python
|
agpl-3.0
| 3,408
|
[
"VisIt"
] |
c12e9fb4d7b60513010b48f1557ea228b2e446ad58d83261b33b49bab6586ff2
|
"""
Kernel Density Estimation
-------------------------
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
import numpy as np
from scipy.special import gammainc
from ..base import BaseEstimator
from ..utils import check_array, check_random_state, check_consistent_length
from ..utils.extmath import row_norms
from .ball_tree import BallTree, DTYPE
from .kd_tree import KDTree
VALID_KERNELS = ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear',
'cosine']
TREE_DICT = {'ball_tree': BallTree, 'kd_tree': KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: bandwidth estimation
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation
Read more in the :ref:`User Guide <kernel_density>`.
Parameters
----------
bandwidth : float
The bandwidth of the kernel.
algorithm : string
The tree algorithm to use. Valid options are
['kd_tree'|'ball_tree'|'auto']. Default is 'auto'.
kernel : string
The kernel to use. Valid kernels are
['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Default is 'gaussian'.
metric : string
The distance metric to use. Note that not all metrics are
valid with all algorithms. Refer to the documentation of
:class:`BallTree` and :class:`KDTree` for a description of
available algorithms. Note that the normalization of the density
output is correct only for the Euclidean distance metric. Default
is 'euclidean'.
atol : float
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution. Default is 0.
rtol : float
The desired relative tolerance of the result. A larger tolerance will
        generally lead to faster execution. Default is 0.
breadth_first : boolean
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details. Default is 40.
metric_params : dict
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
"""
def __init__(self, bandwidth=1.0, algorithm='auto',
kernel='gaussian', metric="euclidean", atol=0, rtol=0,
breadth_first=True, leaf_size=40, metric_params=None):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
# run the choose algorithm code so that exceptions will happen here
# we're using clone() in the GenerativeBayes classifier,
# so we can't do this kind of logic in __init__
self._choose_algorithm(self.algorithm, self.metric)
if bandwidth <= 0:
raise ValueError("bandwidth must be positive")
if kernel not in VALID_KERNELS:
raise ValueError("invalid kernel: '{0}'".format(kernel))
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == 'auto':
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return 'kd_tree'
elif metric in BallTree.valid_metrics:
return 'ball_tree'
else:
raise ValueError("invalid metric: '{0}'".format(metric))
elif algorithm in TREE_DICT:
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError("invalid metric for {0}: "
"'{1}'".format(TREE_DICT[algorithm],
metric))
return algorithm
else:
raise ValueError("invalid algorithm: '{0}'".format(algorithm))
def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
sample_weight : array_like, shape (n_samples,), optional
List of sample weights attached to the data X.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
X = check_array(X, order='C', dtype=DTYPE)
if sample_weight is not None:
sample_weight = check_array(sample_weight, order='C', dtype=DTYPE,
ensure_2d=False)
if sample_weight.ndim != 1:
raise ValueError("the shape of sample_weight must be ({0},),"
" but was {1}".format(X.shape[0],
sample_weight.shape))
check_consistent_length(X, sample_weight)
if sample_weight.min() <= 0:
raise ValueError("sample_weight must have positive values")
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](X, metric=self.metric,
leaf_size=self.leaf_size,
sample_weight=sample_weight,
**kwargs)
return self
def score_samples(self, X):
"""Evaluate the density model on the data.
Parameters
----------
X : array_like, shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray, shape (n_samples,)
The array of log(density) evaluations.
"""
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = check_array(X, order='C', dtype=DTYPE)
if self.tree_.sample_weight is None:
N = self.tree_.data.shape[0]
else:
N = self.tree_.sum_weight
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X, h=self.bandwidth, kernel=self.kernel, atol=atol_N,
rtol=self.rtol, breadth_first=self.breadth_first, return_log=True)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Total log-likelihood of the data in X.
"""
return np.sum(self.score_samples(X))
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Currently, this is implemented only for gaussian and tophat kernels.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
random_state : int, RandomState instance or None. default to None
If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random
number generator; If None, the random number generator is the
RandomState instance used by `np.random`.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples.
"""
# TODO: implement sampling for other valid kernel shapes
if self.kernel not in ['gaussian', 'tophat']:
raise NotImplementedError()
data = np.asarray(self.tree_.data)
rng = check_random_state(random_state)
u = rng.uniform(0, 1, size=n_samples)
if self.tree_.sample_weight is None:
i = (u * data.shape[0]).astype(np.int64)
else:
cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
sum_weight = cumsum_weight[-1]
i = np.searchsorted(cumsum_weight, u * sum_weight)
if self.kernel == 'gaussian':
return np.atleast_2d(rng.normal(data[i], self.bandwidth))
elif self.kernel == 'tophat':
# we first draw points from a d-dimensional normal distribution,
# then use an incomplete gamma function to map them to a uniform
# d-dimensional tophat distribution.
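            # (Concretely: for X ~ N(0, I_d), gammainc(d/2, ||X||^2 / 2) is the
            # chi^2_d CDF evaluated at ||X||^2 and is therefore Uniform(0, 1);
            # scaling the direction X/||X|| by U**(1/d) times the bandwidth
            # gives points distributed uniformly in the d-dimensional ball.)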
dim = data.shape[1]
X = rng.normal(size=(n_samples, dim))
s_sq = row_norms(X, squared=True)
correction = (gammainc(0.5 * dim, 0.5 * s_sq) ** (1. / dim)
* self.bandwidth / np.sqrt(s_sq))
return data[i] + X * correction[:, np.newaxis]
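# Minimal usage sketch (illustrative, not part of the original module):
#
#   import numpy as np
#   from sklearn.neighbors import KernelDensity
#   X = np.random.RandomState(0).normal(size=(100, 1))
#   kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
#   log_dens = kde.score_samples(np.array([[0.0], [1.0]]))  # log p(x) at query points
#   draws = kde.sample(n_samples=5, random_state=0)         # new points from the model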
|
vortex-ape/scikit-learn
|
sklearn/neighbors/kde.py
|
Python
|
bsd-3-clause
| 9,452
|
[
"Gaussian"
] |
2ab5fecccebed2fac0240780cf1e667805a91554295fa54351ebe076d1db3355
|
#!/usr/bin/python3
# Copyright 2020 Uraniborg authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Package Whitelists Module.
This module defines a generic class that encapsulates the necessary attributes
of what package whitelists would be like. Package whitelists and baseline may
differ according to OEM and API level - thus the need for specificity.
Currently, we are exempting safety information packages.
Google Mobile Service (GMS) packages are also exempted.
"""
import json
import os
class BaselinePackages:
"""A set of packages used as point of reference based on API level.
The package names are observed from either AOSP or GSI builds.
"""
data_filename = ""
instance = None
instances = {}
api_level_to_filename = {
24: "data/N-AOSP.json",
25: "data/N-AOSP.json",
26: "data/O-AOSP.json",
27: "data/O-AOSP.json",
28: "data/P-AOSP.json",
29: "data/Q-GSI.json",
30: "data/R-GSI.json",
31: "data/S-GSI.json",
}
@staticmethod
def get_instance(api_level):
"""Obtains a usable instance of BaselinePackages according to API Level.
Args:
api_level: An int representing the api level.
Returns:
An instance of BaselinePackages that is tailored for the API level.
"""
instance = BaselinePackages.instances.get(api_level)
if not instance:
instance = BaselinePackages()
curr_dir = os.path.dirname(os.path.abspath(__file__))
data_filepath = os.path.join(curr_dir,
BaselinePackages.api_level_to_filename.get(
api_level))
instance.load_data_files(data_filepath)
BaselinePackages.instances[api_level] = instance
return instance
def load_data_files(self, data_filepath):
"""Reads in data files & converts JSON objects into instance attributes.
Args:
data_filepath: the filepath of the data file that is to be read in.
"""
in_json = ""
with open(data_filepath, "r") as f_in:
in_json = f_in.read()
in_json = json.loads(in_json)
self.packages_all = in_json["packagesAll"]
self.packages_no_code = in_json["packagesNoCode"]
self.packages_platform_signed = in_json["platformAppsAll"]
self.packages_platform_no_code = in_json["platformAppsNoCode"]
self.packages_shared_uid = in_json["packagesSharedUid"]
def get_all_packages(self, no_code=False):
if no_code:
return self.packages_no_code
return self.packages_all
def get_platform_signed_packages(self, no_code=False):
if no_code:
return self.packages_platform_no_code
return self.packages_platform_signed
def get_shared_uid_packages(self):
return self.packages_shared_uid
class GMS:
"""A class defining Google Mobile Services (GMS) apps.
  Maintenance: This class may need to be updated in the future should the
signing certificates be rotated/changed.
"""
PACKAGES = {
# The actual gmscore APK
"com.google.android.gms":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Drive
"com.google.android.apps.docs":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Youtube
"com.google.android.youtube":
"3d7a1223019aa39d9ea0e3436ab7c0896bfb4fb679f4de5fe7c23f326c8f994a",
# Google Play Client
"com.android.vending":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Chrome browser
"com.android.chrome":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Photos
"com.google.android.apps.photos":
"3d7a1223019aa39d9ea0e3436ab7c0896bfb4fb679f4de5fe7c23f326c8f994a",
# Google Maps
"com.google.android.apps.maps":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Gmail
"com.google.android.gm":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Services Framework
"com.google.android.gsf":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google
"com.google.android.googlequicksearchbox":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Setup Wizard
"com.google.android.setupwizard":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Calendar
"com.google.android.calendar":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
# Google Movies
"com.google.android.videos":
"3d7a1223019aa39d9ea0e3436ab7c0896bfb4fb679f4de5fe7c23f326c8f994a",
}
@staticmethod
def is_gms_package(package):
"""Checks if a package is a legitimate GMS APK based on signer cert.
Args:
package: A JSON object representing info about an APK.
Returns:
True if the package's signature matches the correct one.
"""
package_name = package["name"]
if package_name not in GMS.PACKAGES:
return False
package_signature = package["certIds"][0]
if package_signature == GMS.PACKAGES.get(package_name):
return True
return False
class PackageWhitelists:
"""The main class.
This class defines globally excluded packages. It can be inherited to create
OEM-specific excluded packages.
As of now, we found 3 categories of what we think can legitimately be
excluded:
1. The "android" APK, which every Android device must have.
2. Regulatory APK that displays safety or other legal information.
3. Installer APKs in the form of 1P OEM app store. Note that 3P installers
should never be included in the exception list, and are considered as
adding risk to the build!
Categories 1 & 2 are listed using EXCLUDED_PACKAGES set, and the 3rd is listed
using INSTALLER_PACKAGES dict.
More categories may be added as necessary in the future.
"""
PREFIXES = {
"com.android.",
"android."
}
EXCLUDED_PACKAGES = {
# These are globally excluded packages. In OEM-specific instances,
# regulatory safety/legal APKs can be included here.
"android", # the framework package
"com.uraniborg.hubble", # our observer app
}
INSTALLER_PACKAGES = {
# The other exception we are making is for 1P app stores. We are listing
# Google Play Store client as the global whitelist as it is a required app
# in MADA.
"com.android.vending":
"f0fd6c5b410f25cb25c3b53346c8972fae30f8ee7411df910480ad6b2d60db83",
}
@staticmethod
def get_whitelist(oem):
"""Gets an oem-specific whitelist.
Args:
oem: A string describing the manufacturer.
Returns:
A specific PackageWhitelists instance that contains OEM-specific excluded
packages.
"""
if oem == "Google":
return Google()
elif oem == "samsung":
return Samsung()
elif oem == "HUAWEI":
return Huawei()
elif oem == "HMD Global":
return Nokia()
elif oem == "Sony":
return Sony()
elif oem == "motorola":
return Motorola()
elif oem == "asus":
return Asus()
elif oem == "OnePlus":
return OnePlus()
else:
return PackageWhitelists()
@staticmethod
def package_name_fuzzy_match(logger, package_name, whitelist):
"""Performs package name fuzzy matching.
We are doing some sort of prefix matching currently.
Args:
logger: a logger object
package_name: the name of the package to be matched
whitelist: a list containing package names to be matched with.
Returns:
The corresponding package name within the whitelist that the package_name
was matched to. In case of no match, <code>None</code> is returned.
"""
if package_name in whitelist:
logger.debug("%s is direct match!", package_name)
return package_name
for whitelisted_package in whitelist:
for prefix in PackageWhitelists.PREFIXES:
truncated_package_name = ""
if whitelisted_package.startswith(prefix):
truncated_package_name = "." + whitelisted_package[len(prefix):]
if package_name.endswith(truncated_package_name):
return whitelisted_package
return None
class Samsung(PackageWhitelists):
EXCLUDED_PACKAGES = PackageWhitelists.EXCLUDED_PACKAGES | {
# Safety information
"com.samsung.safetyinformation",
}
INSTALLER_PACKAGES = {
**PackageWhitelists.INSTALLER_PACKAGES,
# Galaxy Store with signer
"com.sec.android.app.samsungapps":
"fba3af4e7757d9016e953fb3ee4671ca2bd9af725f9a53d52ed4a38eaaa08901",
}
class Google(PackageWhitelists):
EXCLUDED_PACKAGES = PackageWhitelists.EXCLUDED_PACKAGES | {
"com.android.safetyregulatoryinfo",
}
class Nokia(PackageWhitelists):
EXCLUDED_PACKAGES = PackageWhitelists.EXCLUDED_PACKAGES | {
"com.hmdglobal.app.legalinformation",
}
class Huawei(PackageWhitelists):
INSTALLER_PACKAGES = {
**PackageWhitelists.INSTALLER_PACKAGES,
# AppGallery (Huawei's App Store)
"com.huawei.appmarket":
"ffe391e0ea186d0734ed601e4e70e3224b7309d48e2075bac46d8c667eae7212",
}
class Sony(PackageWhitelists):
pass
class Motorola(PackageWhitelists):
pass
class Asus(PackageWhitelists):
pass
class OnePlus(PackageWhitelists):
pass
# NOTE: Create new oem-specific classes here:
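# Illustrative usage sketch (not part of the original module; the logger is
# any object with a debug() method):
#
#   import logging
#   logger = logging.getLogger(__name__)
#   wl = PackageWhitelists.get_whitelist("samsung")
#   match = PackageWhitelists.package_name_fuzzy_match(
#       logger, "com.sec.android.app.samsungapps", wl.EXCLUDED_PACKAGES)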
|
android/security-certification-resources
|
ioXt/uraniborg/scripts/python/package_whitelists.py
|
Python
|
apache-2.0
| 9,989
|
[
"Galaxy"
] |
9561fbc0187312dbedd91b1114a2668d16cd0eaacf1415f9429986203fbe888e
|
# $HeadURL$
__RCSID__ = "$Id$"
import types
import os
from DIRAC import gLogger, gConfig, rootPath, S_OK, S_ERROR
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import DEncode, Time
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.ConfigurationSystem.Client import PathFinder
from DIRAC.FrameworkSystem.private.monitoring.ServiceInterface import gServiceInterface
def initializeMonitoringHandler( serviceInfo ):
#Check that the path is writable
monitoringSection = PathFinder.getServiceSection( "Framework/Monitoring" )
#Get data location
dataPath = gConfig.getValue( "%s/DataLocation" % monitoringSection, "data/monitoring" )
dataPath = dataPath.strip()
if "/" != dataPath[0]:
dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
gLogger.info( "Data will be written into %s" % dataPath )
try:
os.makedirs( dataPath )
except:
pass
try:
testFile = "%s/mon.jarl.test" % dataPath
fd = file( testFile, "w" )
fd.close()
os.unlink( testFile )
except IOError:
gLogger.fatal( "Can't write to %s" % dataPath )
return S_ERROR( "Data location is not writable" )
#Define globals
gServiceInterface.initialize( dataPath )
if not gServiceInterface.initializeDB():
return S_ERROR( "Can't start db engine" )
gMonitor.registerActivity( "cachedplots", "Cached plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
gMonitor.registerActivity( "drawnplots", "Drawn plot images", "Monitoring plots", "plots", gMonitor.OP_SUM )
return S_OK()
class MonitoringHandler( RequestHandler ):
types_registerActivities = [ types.DictType, types.DictType ]
def export_registerActivities( self, sourceDict, activitiesDict, componentExtraInfo = {} ):
"""
Registers new activities
"""
return gServiceInterface.registerActivities( sourceDict, activitiesDict, componentExtraInfo )
types_commitMarks = [ types.IntType, types.DictType ]
def export_commitMarks( self, sourceId, activitiesDict, componentExtraInfo = {} ):
"""
Adds marks for activities
"""
nowEpoch = Time.toEpoch()
maxEpoch = nowEpoch + 7200
minEpoch = nowEpoch - 86400
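    #Marks are accepted only if timestamped at most 24h in the past or 2h in the future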
invalidActivities = []
for acName in activitiesDict:
for time in activitiesDict[ acName ]:
if time > maxEpoch or time < minEpoch:
gLogger.info( "Time %s ( [%s,%s] ) is invalid for activity %s" % ( time, minEpoch, maxEpoch, acName ) )
invalidActivities.append( acName )
break
for acName in invalidActivities:
gLogger.info( "Not commiting activity %s" % acName )
del( activitiesDict[ acName ] )
return gServiceInterface.commitMarks( sourceId, activitiesDict, componentExtraInfo )
types_queryField = [ types.StringType, types.DictType ]
def export_queryField( self, field, definedFields ):
"""
    Returns the available values for a field, given a set of defined fields and values.
"""
definedFields[ 'sources.setup' ] = self.serviceInfoDict[ 'clientSetup' ]
return gServiceInterface.fieldValue( field, definedFields )
types_tryView = [ types.IntType, types.IntType, types.StringType ]
def export_tryView( self, fromSecs, toSecs, viewDescriptionStub ):
"""
Generates plots based on a DEncoded view description
"""
viewDescription, stubLength = DEncode.decode( viewDescriptionStub )
if not 'definition' in viewDescription:
return S_ERROR( "No plot definition given" )
defDict = viewDescription[ 'definition' ]
defDict[ 'sources.setup' ] = self.serviceInfoDict[ 'clientSetup' ]
return gServiceInterface.generatePlots( fromSecs, toSecs, viewDescription )
types_saveView = [ types.StringType, types.StringType ]
def export_saveView( self, viewName, viewDescriptionStub ):
"""
Saves a view
"""
if len( viewName ) == 0:
return S_OK( "View name not valid" )
viewDescription, stubLength = DEncode.decode( viewDescriptionStub )
if not 'definition' in viewDescription:
return S_ERROR( "No plot definition given" )
defDict = viewDescription[ 'definition' ]
defDict[ 'sources.setup' ] = self.serviceInfoDict[ 'clientSetup' ]
return gServiceInterface.saveView( viewName, viewDescription )
types_getViews = []
def export_getViews( self, onlyStatic = True ):
"""
Returns a list of stored views
"""
return gServiceInterface.getViews( onlyStatic )
types_plotView = [ types.DictType ]
def export_plotView( self, viewRequest ):
"""
Generates plots for a view
"""
for required in ( "fromSecs", "toSecs", "id" ):
if required not in viewRequest:
return S_ERROR( "Missing %s field in request" % required )
for intFields in ( "fromSecs", "toSecs" ):
viewRequest[ intFields ] = int( viewRequest[ intFields ] )
if not "size" in viewRequest:
viewRequest[ 'size' ] = 1
if viewRequest[ 'size' ] not in ( 0, 1, 2, 3 ):
return S_ERROR( "Invalid size" )
return gServiceInterface.plotView( viewRequest )
types_deleteView = [ types.IntType ]
def export_deleteView( self, viewId ):
"""
Deletes a view
"""
return gServiceInterface.deleteView( viewId )
types_deleteViews = [ types.ListType ]
def export_deleteViews( self, viewList ):
"""
Deletes a view
"""
for viewId in viewList:
result = gServiceInterface.deleteView( viewId )
if not result[ 'OK' ]:
return result
return S_OK()
types_getActivities = []
def export_getActivities( self ):
"""
Returns a list of defined activities
"""
dbCondition = { 'sources.setup' : self.serviceInfoDict[ 'clientSetup' ] }
return S_OK( gServiceInterface.getActivities( dbCondition ) )
types_getActivitiesContents = [ types.DictType, ( types.ListType, types.TupleType ),
( types.IntType, types.LongType ), ( types.IntType, types.LongType ) ]
def export_getActivitiesContents( self, selDict, sortList, start, limit ):
"""
Retrieve the contents of the activity db
"""
setupCond = {'sources.setup' : self.serviceInfoDict[ 'clientSetup' ] }
selDict.update( setupCond )
result = gServiceInterface.getActivitiesContents( selDict, sortList, start, limit )
if not result[ 'OK' ]:
return result
resultTuple = result[ 'Value' ]
result = { 'Records' : resultTuple[0], 'Fields' : resultTuple[1]}
result[ 'TotalRecords' ] = gServiceInterface.getNumberOfActivities( setupCond )
return S_OK( result )
types_deleteActivity = [ types.IntType, types.IntType ]
def export_deleteActivity( self, sourceId, activityId ):
"""
Deletes an activity
"""
return gServiceInterface.deleteActivity( sourceId, activityId )
types_deleteActivities = [ types.ListType ]
def export_deleteActivities( self, deletionList ):
"""
Deletes a list of activities
"""
failed = []
for acList in deletionList:
retVal = gServiceInterface.deleteActivity( acList[0], acList[1] )
if not retVal[ 'OK' ]:
failed.append( retVal[ 'Value' ] )
if failed:
return S_ERROR( "\n".join( failed ) )
return S_OK()
#Component monitoring functions
types_getComponentsStatus = [ types.DictType ]
def export_getComponentsStatus( self, condDict ):
if 'Setup' not in condDict:
condDict[ 'Setup' ] = self.serviceInfoDict[ 'clientSetup' ]
return gServiceInterface.getComponentsStatus( condDict )
#Transfer files
def transfer_toClient( self, fileId, token, fileHelper ):
retVal = gServiceInterface.getGraphData( fileId )
if not retVal[ 'OK' ]:
return retVal
retVal = fileHelper.sendData( retVal[ 'Value' ] )
if not retVal[ 'OK' ]:
return retVal
fileHelper.sendEOF()
return S_OK()
|
Sbalbp/DIRAC
|
FrameworkSystem/Service/MonitoringHandler.py
|
Python
|
gpl-3.0
| 7,836
|
[
"DIRAC"
] |
b52e163e75140fa1271a716bde42fcbcb0006d600ee930f733f422ec74371040
|
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Test pickling of Iris objects.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
import six.moves.cPickle as pickle
import io
import cf_units
import iris
from iris._lazy_data import as_concrete_data
class TestPickle(tests.IrisTest):
def pickle_then_unpickle(self, obj):
"""
        Returns a generator of (pickle protocol number, reconstructed object) tuples.
"""
for protocol in range(1 + pickle.HIGHEST_PROTOCOL):
bio = io.BytesIO()
pickle.dump(obj, bio, protocol)
# move the bio back to the start and reconstruct
bio.seek(0)
reconstructed_obj = pickle.load(bio)
yield protocol, reconstructed_obj
@staticmethod
def _real_data(cube):
# Get the concrete data of the cube for performing data values
# comparison checks.
return as_concrete_data(cube.core_data())
def assertCubeData(self, cube1, cube2):
self.assertArrayEqual(self._real_data(cube1), self._real_data(cube2))
@tests.skip_data
def test_cube_pickle(self):
cube = iris.load_cube(tests.get_data_path(('PP',
'globClim1',
'theta.pp')))
self.assertTrue(cube.has_lazy_data())
self.assertCML(cube, ('cube_io', 'pickling', 'theta.cml'),
checksum=False)
for p, recon_cube in self.pickle_then_unpickle(cube):
self.assertTrue(recon_cube.has_lazy_data())
self.assertCML(recon_cube, ('cube_io', 'pickling', 'theta.cml'),
checksum=False)
self.assertCubeData(cube, recon_cube)
@tests.skip_data
def test_cube_with_coord_points(self):
filename = tests.get_data_path(('NetCDF',
'rotated',
'xy',
'rotPole_landAreaFraction.nc'))
cube = iris.load_cube(filename)
# Pickle and unpickle. Do not perform any CML tests
# to avoid side effects.
_, recon_cube = next(self.pickle_then_unpickle(cube))
self.assertEqual(recon_cube, cube)
@tests.skip_data
def test_cubelist_pickle(self):
cubelist = iris.load(tests.get_data_path(('PP', 'COLPEX',
'theta_and_orog_subset.pp')))
single_cube = cubelist[0]
self.assertCML(cubelist, ('cube_io', 'pickling', 'cubelist.cml'))
self.assertCML(single_cube, ('cube_io', 'pickling', 'single_cube.cml'))
for _, reconstructed_cubelist in self.pickle_then_unpickle(cubelist):
self.assertCML(reconstructed_cubelist, ('cube_io', 'pickling',
'cubelist.cml'))
self.assertCML(reconstructed_cubelist[0], ('cube_io', 'pickling',
'single_cube.cml'))
for cube_orig, cube_reconstruct in zip(cubelist,
reconstructed_cubelist):
self.assertArrayEqual(cube_orig.data, cube_reconstruct.data)
self.assertEqual(cube_orig, cube_reconstruct)
    def test_pickling_equality_misc(self):
items_to_test = [
cf_units.Unit("hours since 2007-01-15 12:06:00",
calendar=cf_units.CALENDAR_STANDARD),
cf_units.as_unit('1'),
cf_units.as_unit('meters'),
cf_units.as_unit('no-unit'),
cf_units.as_unit('unknown')
]
for orig_item in items_to_test:
for protocol, reconst_item in self.pickle_then_unpickle(orig_item):
fail_msg = ('Items are different after pickling '
'at protocol {}.\nOrig item: {!r}\nNew item: {!r}')
fail_msg = fail_msg.format(protocol, orig_item, reconst_item)
self.assertEqual(orig_item, reconst_item, fail_msg)
if __name__ == "__main__":
tests.main()
|
duncanwp/iris
|
lib/iris/tests/test_pickling.py
|
Python
|
lgpl-3.0
| 5,105
|
[
"NetCDF"
] |
74c34731adcf96404ccf5938c4b052bb29c56bb5f12572e8839da440e441b4ee
|
#!/usr/bin/env python
"""
Copyright 2014 Jirafe, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Visit:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'visit_id': 'str',
'visitor_id': 'str',
'pageview_id': 'str',
'last_pageview_id': 'str'
}
self.visit_id = None # str
self.visitor_id = None # str
self.pageview_id = None # str
self.last_pageview_id = None # str
|
concerned3rdparty/jirafe-python
|
jirafe/models/Visit.py
|
Python
|
mit
| 1,119
|
[
"VisIt"
] |
fc272538d2b7c0f5fe3b9f3482b3f4d0350cc583119124ae0dc3bed6c0eb6467
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PipelineRunner, an abstract base runner object."""
from __future__ import absolute_import
import logging
import os
import shelve
import shutil
import tempfile
def _get_runner_map(runner_names, module_path):
"""Create a map of runner name in lower case to full import path to the
runner class.
"""
return {runner_name.lower(): module_path + runner_name
for runner_name in runner_names}
_DIRECT_RUNNER_PATH = 'apache_beam.runners.direct.direct_runner.'
_DATAFLOW_RUNNER_PATH = (
'apache_beam.runners.google_cloud_dataflow.dataflow_runner.')
_TEST_RUNNER_PATH = 'apache_beam.runners.test.'
_KNOWN_DIRECT_RUNNERS = ('DirectRunner', 'EagerRunner')
_KNOWN_DATAFLOW_RUNNERS = ('DataflowRunner',)
_KNOWN_TEST_RUNNERS = ('TestDataflowRunner',)
_RUNNER_MAP = {}
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DIRECT_RUNNERS,
_DIRECT_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_DATAFLOW_RUNNERS,
_DATAFLOW_RUNNER_PATH))
_RUNNER_MAP.update(_get_runner_map(_KNOWN_TEST_RUNNERS,
_TEST_RUNNER_PATH))
_ALL_KNOWN_RUNNERS = (
_KNOWN_DIRECT_RUNNERS + _KNOWN_DATAFLOW_RUNNERS + _KNOWN_TEST_RUNNERS)
def create_runner(runner_name):
"""Creates a runner instance from a runner class name.
Args:
runner_name: Name of the pipeline runner. Possible values are:
DirectRunner, DataflowRunner and TestDataflowRunner.
Returns:
A runner object.
Raises:
RuntimeError: if an invalid runner name is used.
"""
# Get the qualified runner name by using the lower case runner name. If that
# fails try appending the name with 'runner' and check if it matches.
# If that also fails, use the given runner name as is.
runner_name = _RUNNER_MAP.get(
runner_name.lower(),
_RUNNER_MAP.get(runner_name.lower() + 'runner', runner_name))
if '.' in runner_name:
module, runner = runner_name.rsplit('.', 1)
return getattr(__import__(module, {}, {}, [runner], -1), runner)()
else:
raise ValueError(
'Unexpected pipeline runner: %s. Valid values are %s '
'or the fully qualified name of a PipelineRunner subclass.' % (
runner_name, ', '.join(_ALL_KNOWN_RUNNERS)))
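# Illustrative calls (names resolved through _RUNNER_MAP above):
#   create_runner('DirectRunner')         # known class name
#   create_runner('direct')               # lower case; 'runner' suffix implied
#   create_runner('pkg.module.MyRunner')  # fully qualified custom runner path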
class PipelineRunner(object):
"""A runner of a pipeline object.
The base runner provides a run() method for visiting every node in the
pipeline's DAG and executing the transforms computing the PValue in the node.
It also provides a clear() method for visiting every node and clearing out
the values contained in PValue objects produced during a run.
A custom runner will typically provide implementations for some of the
transform methods (ParDo, GroupByKey, Create, etc.). It may also
provide a new implementation for clear_pvalue(), which is used to wipe out
materialized values in order to reduce footprint.
"""
def run(self, pipeline):
"""Execute the entire pipeline or the sub-DAG reachable from a node."""
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
class RunVisitor(PipelineVisitor):
def __init__(self, runner):
self.runner = runner
def visit_transform(self, transform_node):
try:
self.runner.run_transform(transform_node)
except:
logging.error('Error while visiting %s', transform_node.full_label)
raise
pipeline.visit(RunVisitor(self))
def clear(self, pipeline, node=None):
"""Clear all nodes or nodes reachable from node of materialized values.
Args:
pipeline: Pipeline object containing PValues to be cleared.
node: Optional node in the Pipeline processing DAG. If specified only
nodes reachable from this node will be cleared (ancestors of the node).
This method is not intended (for now) to be called by users of Runner
objects. It is a hook for future layers on top of the current programming
model to control how much of the previously computed values are kept
around. Presumably an interactivity layer will use it. The simplest way
to change the behavior would be to define a runner that overwrites the
clear_pvalue() method since this method (runner.clear) will visit all
relevant nodes and call clear_pvalue on them.
"""
# Imported here to avoid circular dependencies.
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.pipeline import PipelineVisitor
class ClearVisitor(PipelineVisitor):
def __init__(self, runner):
self.runner = runner
def visit_value(self, value, _):
self.runner.clear_pvalue(value)
pipeline.visit(ClearVisitor(self), node=node)
def apply(self, transform, input):
"""Runner callback for a pipeline.apply call.
Args:
transform: the transform to apply.
input: transform's input (typically a PCollection).
A concrete implementation of the Runner class may want to do custom
pipeline construction for a given transform. To override the behavior
for a transform class Xyz, implement an apply_Xyz method with this same
signature.
"""
for cls in transform.__class__.mro():
m = getattr(self, 'apply_%s' % cls.__name__, None)
if m:
return m(transform, input)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (transform, self))
def apply_PTransform(self, transform, input):
# The base case of apply is to call the transform's expand.
return transform.expand(input)
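  # A custom runner could special-case one transform type by defining, e.g.
  # (hypothetical override; dispatch is by class name through mro() above):
  #   def apply_CombinePerKey(self, transform, input):
  #       ...custom expansion...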
def run_transform(self, transform_node):
"""Runner callback for a pipeline.run call.
Args:
transform_node: transform node for the transform to run.
A concrete implementation of the Runner class must implement run_Abc for
some class Abc in the method resolution order for every non-composite
transform Xyz in the pipeline.
"""
for cls in transform_node.transform.__class__.mro():
m = getattr(self, 'run_%s' % cls.__name__, None)
if m:
return m(transform_node)
raise NotImplementedError(
'Execution of [%s] not implemented in runner %s.' % (
transform_node.transform, self))
class PValueCache(object):
"""Local cache for arbitrary information computed for PValue objects."""
def __init__(self, use_disk_backed_cache=False):
# Cache of values computed while a runner executes a pipeline. This is a
# dictionary of PValues and their computed values. Note that in principle
# the runner could contain PValues from several pipelines without clashes
# since a PValue is associated with one and only one pipeline. The keys of
# the dictionary are tuple of PValue instance addresses obtained using id()
# and tag names converted to strings.
self._use_disk_backed_cache = use_disk_backed_cache
if use_disk_backed_cache:
self._tempdir = tempfile.mkdtemp()
self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))
else:
self._cache = {}
def __del__(self):
if self._use_disk_backed_cache:
self._cache.close()
shutil.rmtree(self._tempdir)
def __len__(self):
return len(self._cache)
def to_cache_key(self, transform, tag):
return str((id(transform), tag))
def _ensure_pvalue_has_real_producer(self, pvalue):
"""Ensure the passed-in PValue has the real_producer attribute.
Args:
pvalue: A PValue instance whose cached value is requested.
During the runner's execution only the results of the primitive transforms
are cached. Whenever we are looking for a PValue that is the output of a
composite transform we need to find the output of its rightmost transform
part.
"""
if not hasattr(pvalue, 'real_producer'):
real_producer = pvalue.producer
while real_producer.parts:
real_producer = real_producer.parts[-1]
pvalue.real_producer = real_producer
def is_cached(self, pobj):
from apache_beam.pipeline import AppliedPTransform
if isinstance(pobj, AppliedPTransform):
transform = pobj
tag = None
else:
self._ensure_pvalue_has_real_producer(pobj)
transform = pobj.real_producer
tag = pobj.tag
return self.to_cache_key(transform, tag) in self._cache
def cache_output(self, transform, tag_or_value, value=None):
if value is None:
value = tag_or_value
tag = None
else:
tag = tag_or_value
self._cache[
self.to_cache_key(transform, tag)] = [value, transform.refcounts[tag]]
def get_pvalue(self, pvalue):
"""Gets the value associated with a PValue from the cache."""
self._ensure_pvalue_has_real_producer(pvalue)
try:
value_with_refcount = self._cache[self.key(pvalue)]
value_with_refcount[1] -= 1
logging.debug('PValue computed by %s (tag %s): refcount: %d => %d',
pvalue.real_producer.full_label, self.key(pvalue)[1],
value_with_refcount[1] + 1, value_with_refcount[1])
if value_with_refcount[1] <= 0:
self.clear_pvalue(pvalue)
return value_with_refcount[0]
except KeyError:
if (pvalue.tag is not None
and self.to_cache_key(pvalue.real_producer, None) in self._cache):
# This is an undeclared, empty side output of a DoFn executed
        # in the local runner before this side output was referenced.
return []
else:
raise
def get_unwindowed_pvalue(self, pvalue):
return [v.value for v in self.get_pvalue(pvalue)]
def clear_pvalue(self, pvalue):
"""Removes a PValue from the cache."""
if self.is_cached(pvalue):
del self._cache[self.key(pvalue)]
def key(self, pobj):
self._ensure_pvalue_has_real_producer(pobj)
return self.to_cache_key(pobj.real_producer, pobj.tag)
class PipelineState(object):
"""State of the Pipeline, as returned by PipelineResult.state.
This is meant to be the union of all the states any runner can put a
pipeline in. Currently, it represents the values of the dataflow
API JobState enum.
"""
UNKNOWN = 'UNKNOWN' # not specified
STOPPED = 'STOPPED' # paused or not yet started
RUNNING = 'RUNNING' # currently running
DONE = 'DONE' # successfully completed (terminal state)
FAILED = 'FAILED' # failed (terminal state)
CANCELLED = 'CANCELLED' # explicitly cancelled (terminal state)
UPDATED = 'UPDATED' # replaced by another job (terminal state)
DRAINING = 'DRAINING' # still processing, no longer reading data
DRAINED = 'DRAINED' # draining completed (terminal state)
class PipelineResult(object):
"""A PipelineResult provides access to info about a pipeline."""
def __init__(self, state):
self._state = state
@property
def state(self):
"""Return the current state of the pipeline execution."""
return self._state
def wait_until_finish(self, duration=None):
"""Waits until the pipeline finishes and returns the final status.
Args:
duration: The time to wait (in milliseconds) for job to finish. If it is
set to None, it will wait indefinitely until the job is finished.
Raises:
IOError: If there is a persistent problem getting job information.
NotImplementedError: If the runner does not support this operation.
Returns:
The final state of the pipeline, or None on timeout.
"""
raise NotImplementedError
def cancel(self):
"""Cancels the pipeline execution.
Raises:
IOError: If there is a persistent problem getting job information.
NotImplementedError: If the runner does not support this operation.
Returns:
The final state of the pipeline.
"""
raise NotImplementedError
def metrics(self):
"""Returns MetricsResult object to query metrics from the runner.
Raises:
NotImplementedError: If the runner does not support this operation.
"""
raise NotImplementedError
# pylint: disable=unused-argument
def aggregated_values(self, aggregator_or_name):
"""Return a dict of step names to values of the Aggregator."""
logging.warn('%s does not implement aggregated_values',
self.__class__.__name__)
return {}
|
jasonkuster/incubator-beam
|
sdks/python/apache_beam/runners/runner.py
|
Python
|
apache-2.0
| 13,090
|
[
"VisIt"
] |
44b56a7aec5d526647a0f847600563bb40c50c21dff0dbf6eeedf7d86af6a4df
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
import theano
import theano.tensor as T
from theano import tensor
#-----------------------------------------------------------------------------
def my_batched_dot(A, B):
"""Batched version of dot-product.
For A[dim_1, dim_2, dim_3] and B[dim_1, dim_3, dim_4] this
is \approx equal to:
for i in range(dim_1):
C[i] = tensor.dot(A[i], B[i])
Returns
-------
C : shape (dim_1 \times dim_2 \times dim_4)
"""
C = A.dimshuffle([0,1,2,'x']) * B.dimshuffle([0,'x',1,2])
return C.sum(axis=-2)
#-----------------------------------------------------------------------------
class ZoomableAttentionWindow(object):
def __init__(self, channels, img_height, img_width, N):
"""A zoomable attention window for images.
Parameters
----------
channels : int
        img_height, img_width : int
shape of the images
N :
$N \times N$ attention window size
"""
self.channels = channels
self.img_height = img_height
self.img_width = img_width
self.N = N
def filterbank_matrices(self, center_y, center_x, delta, sigma):
"""Create a Fy and a Fx
Parameters
----------
center_y : T.vector (shape: batch_size)
center_x : T.vector (shape: batch_size)
Y and X center coordinates for the attention window
delta : T.vector (shape: batch_size)
sigma : T.vector (shape: batch_size)
Returns
-------
        FY : T.tensor3 (shape: batch_size x N x img_height)
        FX : T.tensor3 (shape: batch_size x N x img_width)
"""
tol = 1e-4
N = self.N
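        # Following the DRAW-style filterbank construction: the N Gaussian
        # filter centres are spaced delta apart around (center_x, center_y),
        # and each filter row is (approximately) L1-normalised over the
        # corresponding image axis.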
muX = center_x.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(N)-N/2-0.5)
muY = center_y.dimshuffle([0, 'x']) + delta.dimshuffle([0, 'x'])*(T.arange(N)-N/2-0.5)
a = tensor.arange(self.img_width)
b = tensor.arange(self.img_height)
FX = tensor.exp( -(a-muX.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 )
FY = tensor.exp( -(b-muY.dimshuffle([0,1,'x']))**2 / 2. / sigma.dimshuffle([0,'x','x'])**2 )
FX = FX / (FX.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
FY = FY / (FY.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
return FY, FX
def read(self, images, center_y, center_x, delta, sigma):
"""Extract a batch of attention windows from the given images.
Parameters
----------
images : :class:`~tensor.TensorVariable`
Batch of images with shape (batch_size x img_size). Internally it
will be reshaped to a (batch_size, img_height, img_width)-shaped
stack of images.
center_y : :class:`~tensor.TensorVariable`
Center coordinates for the attention window.
Expected shape: (batch_size,)
center_x : :class:`~tensor.TensorVariable`
Center coordinates for the attention window.
Expected shape: (batch_size,)
delta : :class:`~tensor.TensorVariable`
Distance between extracted grid points.
Expected shape: (batch_size,)
sigma : :class:`~tensor.TensorVariable`
Std. dev. for Gaussian readout kernel.
Expected shape: (batch_size,)
Returns
-------
windows : :class:`~tensor.TensorVariable`
extracted windows of shape: (batch_size x N**2)
"""
N = self.N
channels = self.channels
batch_size = images.shape[0]
# Reshape input into proper 2d images
I = images.reshape( (batch_size*channels, self.img_height, self.img_width) )
# Get separable filterbank
FY, FX = self.filterbank_matrices(center_y, center_x, delta, sigma)
FY = T.repeat(FY, channels, axis=0)
FX = T.repeat(FX, channels, axis=0)
# apply to the batch of images
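        # i.e. W = FY . I . FX^T per image; any gamma scaling returned by
        # nn2att is applied by the caller, not here.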
W = my_batched_dot(my_batched_dot(FY, I), FX.transpose([0,2,1]))
return W.reshape((batch_size, channels*N*N))
def write(self, windows, center_y, center_x, delta, sigma):
"""Write a batch of windows into full sized images.
Parameters
----------
windows : :class:`~tensor.TensorVariable`
Batch of images with shape (batch_size x N*N). Internally it
will be reshaped to a (batch_size, N, N)-shaped
stack of images.
center_y : :class:`~tensor.TensorVariable`
Center coordinates for the attention window.
Expected shape: (batch_size,)
center_x : :class:`~tensor.TensorVariable`
Center coordinates for the attention window.
Expected shape: (batch_size,)
delta : :class:`~tensor.TensorVariable`
Distance between extracted grid points.
Expected shape: (batch_size,)
sigma : :class:`~tensor.TensorVariable`
Std. dev. for Gaussian readout kernel.
Expected shape: (batch_size,)
Returns
-------
images : :class:`~tensor.TensorVariable`
extracted windows of shape: (batch_size x img_height*img_width)
"""
N = self.N
channels = self.channels
batch_size = windows.shape[0]
# Reshape input into proper 2d windows
W = windows.reshape( (batch_size*channels, N, N) )
# Get separable filterbank
FY, FX = self.filterbank_matrices(center_y, center_x, delta, sigma)
FY = T.repeat(FY, channels, axis=0)
FX = T.repeat(FX, channels, axis=0)
# apply...
I = my_batched_dot(my_batched_dot(FY.transpose([0,2,1]), W), FX)
return I.reshape( (batch_size, channels*self.img_height*self.img_width) )
def nn2att(self, l):
"""Convert neural-net outputs to attention parameters
Parameters
----------
layer : :class:`~tensor.TensorVariable`
A batch of neural net outputs with shape (batch_size x 5)
Returns
-------
center_y : :class:`~tensor.TensorVariable`
center_x : :class:`~tensor.TensorVariable`
delta : :class:`~tensor.TensorVariable`
sigma : :class:`~tensor.TensorVariable`
gamma : :class:`~tensor.TensorVariable`
"""
center_y = l[:,0]
center_x = l[:,1]
log_delta = l[:,2]
log_sigma = l[:,3]
log_gamma = l[:,4]
delta = T.exp(log_delta)
sigma = T.exp(log_sigma/2.)
gamma = T.exp(log_gamma).dimshuffle(0, 'x')
# normalize coordinates
center_x = (center_x+1.)/2. * self.img_width
center_y = (center_y+1.)/2. * self.img_height
delta = (max(self.img_width, self.img_height)-1) / (self.N-1) * delta
return center_y, center_x, delta, sigma, gamma
#=============================================================================
if __name__ == "__main__":
from PIL import Image
N = 40
channels = 3
height = 480
width = 640
#------------------------------------------------------------------------
att = ZoomableAttentionWindow(channels, height, width, N)
I_ = T.matrix()
center_y_ = T.vector()
center_x_ = T.vector()
delta_ = T.vector()
sigma_ = T.vector()
W_ = att.read(I_, center_y_, center_x_, delta_, sigma_)
do_read = theano.function(inputs=[I_, center_y_, center_x_, delta_, sigma_],
outputs=W_, allow_input_downcast=True)
W_ = T.matrix()
center_y_ = T.vector()
center_x_ = T.vector()
delta_ = T.vector()
sigma_ = T.vector()
I_ = att.write(W_, center_y_, center_x_, delta_, sigma_)
do_write = theano.function(inputs=[W_, center_y_, center_x_, delta_, sigma_],
outputs=I_, allow_input_downcast=True)
#------------------------------------------------------------------------
I = Image.open("cat.jpg")
I = I.resize((640, 480)) #.convert('L')
I = np.asarray(I).transpose([2, 0, 1])
I = I.reshape( (channels*width*height) )
I = I / 255.
center_y = 200.5
center_x = 330.5
delta = 5.
sigma = 2.
def vectorize(*args):
return [a.reshape((1,)+a.shape) for a in args]
I, center_y, center_x, delta, sigma = \
vectorize(I, np.array(center_y), np.array(center_x), np.array(delta), np.array(sigma))
#import ipdb; ipdb.set_trace()
W = do_read(I, center_y, center_x, delta, sigma)
I2 = do_write(W, center_y, center_x, delta, sigma)
def imagify(flat_image, h, w):
image = flat_image.reshape([channels, h, w])
image = image.transpose([1, 2, 0])
return image / image.max()
import pylab
pylab.figure()
pylab.gray()
pylab.imshow(imagify(I, height, width), interpolation='nearest')
pylab.figure()
pylab.gray()
pylab.imshow(imagify(W, N, N), interpolation='nearest')
pylab.figure()
pylab.gray()
pylab.imshow(imagify(I2, height, width), interpolation='nearest')
pylab.show(block=True)
import ipdb; ipdb.set_trace()
|
langholz/draw
|
draw/attention.py
|
Python
|
mit
| 9,303
|
[
"Gaussian"
] |
650e8ded1758e070a184e4a980cc8db37f3e30a59b1ca224f86e6ffd58a03208
|
"""
API operations on Group objects.
"""
import logging
from galaxy.web.base.controller import BaseAPIController, url_for
from galaxy import web
log = logging.getLogger( __name__ )
class GroupAPIController( BaseAPIController ):
@web.expose_api
@web.require_admin
def index( self, trans, **kwd ):
"""
GET /api/groups
Displays a collection (list) of groups.
"""
rval = []
for group in trans.sa_session.query( trans.app.model.Group ).filter( trans.app.model.Group.table.c.deleted == False ):
if trans.user_is_admin():
item = group.to_dict( value_mapper={ 'id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( group.id )
item['url'] = url_for( 'group', id=encoded_id )
rval.append( item )
return rval
@web.expose_api
def create( self, trans, payload, **kwd ):
"""
POST /api/groups
Creates a new group.
"""
log.info("groups payload%s\n" % (payload))
if not trans.user_is_admin():
trans.response.status = 403
return "You are not authorized to create a new group."
name = payload.get( 'name', None )
if not name:
trans.response.status = 400
return "Enter a valid name"
if trans.sa_session.query( trans.app.model.Group ).filter( trans.app.model.Group.table.c.name==name ).first():
trans.response.status = 400
return "A group with that name already exists"
group = trans.app.model.Group( name=name )
trans.sa_session.add( group )
user_ids = payload.get( 'user_ids', [] )
for i in user_ids:
log.info("user_id: %s\n" % (i ))
log.info("%s %s\n" % (i, trans.security.decode_id( i ) ))
users = [ trans.sa_session.query( trans.model.User ).get( trans.security.decode_id( i ) ) for i in user_ids ]
role_ids = payload.get( 'role_ids', [] )
roles = [ trans.sa_session.query( trans.model.Role ).get( trans.security.decode_id( i ) ) for i in role_ids ]
trans.app.security_agent.set_entity_group_associations( groups=[ group ], roles=roles, users=users )
"""
# Create the UserGroupAssociations
for user in users:
trans.app.security_agent.associate_user_group( user, group )
# Create the GroupRoleAssociations
for role in roles:
trans.app.security_agent.associate_group_role( group, role )
"""
trans.sa_session.flush()
encoded_id = trans.security.encode_id( group.id )
item = group.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'group', id=encoded_id )
return [ item ]
@web.expose_api
@web.require_admin
def show( self, trans, id, **kwd ):
"""
GET /api/groups/{encoded_group_id}
Displays information about a group.
"""
group_id = id
try:
decoded_group_id = trans.security.decode_id( group_id )
except TypeError:
trans.response.status = 400
return "Malformed group id ( %s ) specified, unable to decode." % str( group_id )
try:
group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
except:
group = None
if not group:
trans.response.status = 400
return "Invalid group id ( %s ) specified." % str( group_id )
item = group.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'group', id=group_id )
item['users_url'] = url_for( 'group_users', group_id=group_id )
item['roles_url'] = url_for( 'group_roles', group_id=group_id )
return item
@web.expose_api
@web.require_admin
def update( self, trans, id, payload, **kwd ):
"""
PUT /api/groups/{encoded_group_id}
Modifies a group.
"""
group_id = id
try:
decoded_group_id = trans.security.decode_id( group_id )
except TypeError:
trans.response.status = 400
return "Malformed group id ( %s ) specified, unable to decode." % str( group_id )
try:
group = trans.sa_session.query( trans.app.model.Group ).get( decoded_group_id )
except:
group = None
if not group:
trans.response.status = 400
return "Invalid group id ( %s ) specified." % str( group_id )
name = payload.get( 'name', None )
if name:
group.name = name
trans.sa_session.add(group)
user_ids = payload.get( 'user_ids', [] )
users = [ trans.sa_session.query( trans.model.User ).get( trans.security.decode_id( i ) ) for i in user_ids ]
role_ids = payload.get( 'role_ids', [] )
roles = [ trans.sa_session.query( trans.model.Role ).get( trans.security.decode_id( i ) ) for i in role_ids ]
        trans.app.security_agent.set_entity_group_associations( groups=[ group ], roles=roles, users=users, delete_existing_assocs=False )
trans.sa_session.flush()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/webapps/galaxy/api/groups.py
|
Python
|
gpl-3.0
| 5,237
|
[
"Galaxy"
] |
2f6b3441978a3b853643b79ea6539dcefeb0df7e126a9e4d46d6479aef051e63
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements an interface to Henkelman et al.'s excellent
Fortran code for calculating a Bader charge analysis.
This module depends on a compiled bader executable available in the path.
Please download the library at http://theory.cm.utexas.edu/vasp/bader/ and
follow the instructions to compile the executable.
If you use this module, please cite the following:
G. Henkelman, A. Arnaldsson, and H. Jonsson, "A fast and robust algorithm for
Bader decomposition of charge density", Comput. Mater. Sci. 36, 354-360 (2006).
"""
from six.moves import map
from six.moves import zip
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Beta"
__date__ = "4/5/13"
import os
import subprocess
import shutil
from pymatgen.io.vasp.outputs import Chgcar
from pymatgen.io.vasp.inputs import Potcar
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
@requires(which("bader") or which("bader.exe"),
"BaderAnalysis requires the executable bader to be in the path."
" Please download the library at http://theory.cm.utexas"
".edu/vasp/bader/ and compile the executable.")
class BaderAnalysis(object):
"""
Bader analysis for a CHGCAR.
.. attribute: data
Atomic data parsed from bader analysis. Essentially a list of dicts
of the form::
[
{
"dist": 8.769,
"min": 0.8753,
"charge": 7.4168,
"y": 1.1598,
"x": 0.0079,
"z": 0.8348
},
...
]
.. attribute: vacuum_volume
Vacuum volume of the Bader analysis.
.. attribute: vacuum_charge
Vacuum charge of the Bader analysis.
.. attribute: nelectrons
Number of electrons of the Bader analysis.
.. attribute: chgcar
Chgcar object associated with input CHGCAR file.
.. attribute: potcar
Potcar object associated with POTCAR used for calculation (used for
calculating charge transferred).
"""
def __init__(self, chgcar_filename, potcar_filename=None):
"""
Initializes the Bader caller.
Args:
chgcar_filename: The filename of the CHGCAR.
potcar_filename: Optional: the filename of the corresponding
POTCAR file. Used for calculating the charge transfer. If
None, the get_charge_transfer method will raise a ValueError.
"""
self.chgcar = Chgcar.from_file(chgcar_filename)
self.potcar = Potcar.from_file(potcar_filename) \
if potcar_filename is not None else None
self.natoms = self.chgcar.poscar.natoms
chgcarpath = os.path.abspath(chgcar_filename)
with ScratchDir(".") as temp_dir:
shutil.copy(chgcarpath, os.path.join(temp_dir, "CHGCAR"))
rs = subprocess.Popen(["bader", "CHGCAR"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("bader exited with return code %d. "
"Pls check your bader installation."
% rs.returncode)
data = []
with open("ACF.dat") as f:
raw = f.readlines()
headers = [s.lower() for s in raw.pop(0).split()]
raw.pop(0)
while True:
l = raw.pop(0).strip()
if l.startswith("-"):
break
vals = map(float, l.split()[1:])
data.append(dict(zip(headers[1:], vals)))
for l in raw:
toks = l.strip().split(":")
if toks[0] == "VACUUM CHARGE":
self.vacuum_charge = float(toks[1])
elif toks[0] == "VACUUM VOLUME":
self.vacuum_volume = float(toks[1])
elif toks[0] == "NUMBER OF ELECTRONS":
self.nelectrons = float(toks[1])
self.data = data
def get_charge(self, atom_index):
"""
Convenience method to get the charge on a particular atom.
Args:
atom_index:
Index of atom.
Returns:
Charge associated with atom from the Bader analysis.
"""
return self.data[atom_index]["charge"]
def get_charge_transfer(self, atom_index):
"""
Returns the charge transferred for a particular atom. Requires POTCAR
to be supplied.
Args:
atom_index:
Index of atom.
Returns:
Charge transfer associated with atom from the Bader analysis.
Given by final charge on atom - nelectrons in POTCAR for
associated atom.
"""
if self.potcar is None:
raise ValueError("POTCAR must be supplied in order to calculate "
"charge transfer!")
potcar_indices = []
for i, v in enumerate(self.natoms):
potcar_indices += [i] * v
nelect = self.potcar[potcar_indices[atom_index]].nelectrons
return self.data[atom_index]["charge"] - nelect
def get_oxidation_state_decorated_structure(self):
"""
Returns an oxidation state decorated structure.
Returns:
Returns an oxidation state decorated structure. Requires POTCAR
to be supplied.
"""
structure = self.chgcar.structure
charges = [self.get_charge_transfer(i) for i in range(len(structure))]
structure.add_oxidation_state_by_site(charges)
return structure
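# Hedged usage sketch (not part of the original module): assuming a completed
# VASP run left CHGCAR and POTCAR in the working directory, one might do
#
#     analysis = BaderAnalysis("CHGCAR", potcar_filename="POTCAR")
#     charge = analysis.get_charge(0)              # Bader charge on atom 0
#     transfer = analysis.get_charge_transfer(0)   # relative to POTCAR nelect
#     structure = analysis.get_oxidation_state_decorated_structure()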
|
xhqu1981/pymatgen
|
pymatgen/command_line/bader_caller.py
|
Python
|
mit
| 6,078
|
[
"VASP",
"pymatgen"
] |
26ed81fabf0ae795d92bdb5166f0552efe6c4fe65c8409a2911cd63ae93f3cb7
|
import ghmm
# example code for a continuous HMM with gaussian emissions
F = ghmm.Float() # emission domain of this model
A = [[0.0,1.0,0],[0.5,0.0,0.5],[0.3,0.3,0.4]] # transition matrix
B = [[0.0,1.0],[-1.0,0.5], [1.0,0.2]] # parameters of emission distributions in pairs of (mu, sigma)
pi = [1.0,0.0,0.0] # initial probabilities per state
# generate model from parameters
model = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B, pi)
# modify model parameters
p = model.getInitial(2)
model.setInitial(2,0.5)
model.setInitial(0,0.5)
# re-set transition from state 0 to state 1
trans = model.getTransition(0,1)
model.setTransition(0,1,0.6)
# re-setting emission of state 1
model.setEmission(1,[5.0,0.6])
# re-normalize model parameters
model.normalize()
#print model
# sample single sequence of length 50
seq = model.sampleSingle(50)
#print seq
# sample 10 sequences of length 50
seq_set = model.sample(10,50)
#print seq_set
# get log P(seq | model)
logp = model.loglikelihood(seq)
print logp
# cacluate viterbi path
path = model.viterbi(seq)
print path
# train model parameters
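# (the baumWelch arguments are presumably: training sequences, maximum number
# of iterations, and a log-likelihood convergence threshold)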
model.baumWelch(seq_set,5,0.01)
print model
|
tapomayukh/projects_in_python
|
classification/Classification_with_HMM/Single_Contact_Classification/Misc/continuousHMM.py
|
Python
|
mit
| 1,157
|
[
"Gaussian"
] |
6a417323564893bfa4f312250ad4d6c96b830302e84ff79ed6ebfa20adf07348
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The 'grit xtb' tool.
"""
import getopt
import os
from xml.sax import saxutils
from grit import grd_reader
from grit import lazy_re
from grit import tclib
from grit import util
from grit.tool import interface
from grit.tool import xmb
# Used to collapse presentable content to determine if
# xml:space="preserve" is needed.
_WHITESPACES_REGEX = lazy_re.compile(ur'\s\s*')
# See XmlEscape below.
_XML_QUOTE_ESCAPES = {
u"'": u''',
u'"': u'"',
}
_XML_BAD_CHAR_REGEX = lazy_re.compile(u'[^\u0009\u000A\u000D'
u'\u0020-\uD7FF\uE000-\uFFFD]')
def _XmlEscape(s):
"""Returns text escaped for XML in a way compatible with Google's
internal Translation Console tool. May be used for attributes as
well as for contents.
"""
if not type(s) == unicode:
s = unicode(s)
result = saxutils.escape(s, _XML_QUOTE_ESCAPES)
return _XML_BAD_CHAR_REGEX.sub(u'', result).encode('utf-8')
def _WriteAttribute(file, name, value):
"""Writes an XML attribute to the specified file.
Args:
file: file to write to
name: name of the attribute
value: (unescaped) value of the attribute
"""
if value:
file.write(' %s="%s"' % (name, _XmlEscape(value)))
def _WriteMessage(file, message):
presentable_content = message.GetPresentableContent()
assert (type(presentable_content) == unicode or
(len(message.parts) == 1 and
           isinstance(message.parts[0], tclib.Placeholder)))
preserve_space = presentable_content != _WHITESPACES_REGEX.sub(
u' ', presentable_content.strip())
file.write('<translation')
_WriteAttribute(file, 'id', message.GetId())
if preserve_space:
_WriteAttribute(file, 'xml:space', 'preserve')
file.write('>')
if not preserve_space:
file.write('\n ')
parts = message.GetContent()
for part in parts:
if isinstance(part, tclib.Placeholder):
file.write('<ph')
_WriteAttribute(file, 'name', part.GetPresentation())
file.write('/>')
else:
file.write(_XmlEscape(part))
if not preserve_space:
file.write('\n')
file.write('</translation>\n')
def WriteXtbFile(file, messages):
"""Writes the given grit.tclib.Message items to the specified open
file-like object in the XTB format.
"""
file.write("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE translationbundle [
<!ELEMENT translationbundle (translation)*>
<!ATTLIST translationbundle class CDATA #IMPLIED>
<!ELEMENT translation (#PCDATA|ph)*>
<!ATTLIST translation id CDATA #IMPLIED>
<!ATTLIST translation xml:space (default|preserve) "default">
<!ELEMENT ph (#PCDATA|ex)*>
<!ATTLIST ph name CDATA #REQUIRED>
<translationbundle>
""")
for message in messages:
_WriteMessage(file, message)
file.write('</translationbundle>')
def WriteMessagesToFile(file, messages):
file.write('// Do not translate lines that start with //, like this one.\n//\n')
for message in messages:
file.write('// ID %s\n' % message.GetId())
parts = message.GetContent()
do_not_translate = []
for part in parts:
if isinstance(part, tclib.Placeholder):
do_not_translate += [part.GetPresentation()]
if do_not_translate:
file.write('// Please do not modify the following parts of this message:\n')
for dnt in do_not_translate:
file.write('// %s\n' % dnt)
file.write(message.GetPresentableContent().encode('utf-8'))
file.write('\n\n\n')
def WriteGengoFile(file, messages):
def GengoEscape(str):
return str.encode('utf-8')
for message in messages:
description = message.GetDescription()
if description:
file.write(u'[[[ Description: %s ]]]\n' % message.GetDescription())
meaning = message.GetMeaning()
if meaning:
file.write(u'[[[ Context: %s ]]]\n' % meaning)
file.write(u'[[[.BEGIN.%s.]]]' % message.GetId())
parts = message.GetContent()
for part in parts:
if isinstance(part, tclib.Placeholder):
example = part.GetExample()
if not example:
example = GengoEscape(part.GetOriginal())
file.write(u'[[[%s|%s]]]' % (part.GetPresentation(), example))
else:
file.write(GengoEscape(part))
file.write(u'[[[.END.]]]\n\n')
class OutputXtbUntranslated(interface.Tool):
"""Outputs translateable messages in the .grd input file THAT DO NOT YET
HAVE A TRANSLATION to an .xtb file, which is the format that Google's internal
Translation Console tool outputs.
Usage: grit xtb LANG OUTPUTPATH
LANG is the language you want to use to determine whether messages already
have a translation or not.
OUTPUTPATH is the path you want to output the .xtb file to.
Other options:
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
"""
def __init__(self, defines=None):
super(OutputXtbUntranslated, self).__init__()
self.defines = defines or {}
self.include_all = False
self.output_only_translated = False
def ShortDescription(self):
return 'Exports all untranslated messages into an XTB file.'
def Run(self, opts, args):
self.SetOptions(opts)
limit_file = None
limit_is_grd = False
limit_file_dir = None
output_format = 'xtb'
include_all = False
output_only_translated = False
own_opts, args = getopt.getopt(args, 'D:E:tpgAT')
for key, val in own_opts:
if key == '-D':
name, val = util.ParseDefine(val)
self.defines[name] = val
elif key == '-E':
(env_name, env_value) = val.split('=', 1)
os.environ[env_name] = env_value
elif key == '-t':
output_format = 'text'
elif key == '-p':
output_format = 'pot'
elif key == '-g':
output_format = 'gengo'
elif key == '-A':
self.include_all = True
elif key == '-T':
self.output_only_translated = True
if not len(args) == 2:
print ('grit xtb takes exactly two arguments, LANG and OUTPUTPATH')
return 2
lang = args[0]
xmb_path = args[1]
res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)
res_tree.SetOutputLanguage(lang)
res_tree.SetDefines(self.defines)
res_tree.OnlyTheseTranslations([lang])
res_tree.RunGatherers()
with open(xmb_path, 'wb') as output_file:
self.Process(lang, res_tree, output_file, output_format)
if limit_file:
limit_file.close()
print "Wrote %s" % xmb_path
def Process(self, lang, res_tree, output_file, output_format):
"""Writes a document with the contents of res_tree into output_file,
limiting output to messages missing translations.
Args:
lang: Language to check, e.g. 'is' or 'fr'
res_tree: base.Node()
output_file: file open for writing
"""
ids_already_done = {}
cliques = []
for node in res_tree:
if not node.IsTranslateable():
continue
for clique in node.GetCliques():
if not clique.IsTranslateable():
continue
if not clique.GetMessage().GetRealContent():
continue
# Some explanation is in order here. Note that we can have
# many messages with the same ID.
#
# The way we work around this is to maintain a list of cliques
# per message ID (in the UberClique) and select the "best" one
# (the first one that has a description, or an arbitrary one
# if there is no description) for inclusion in the XMB file.
# The translations are all going to be the same for messages
# with the same ID, although the way we replace placeholders
# might be slightly different.
id = clique.GetMessage().GetId()
if id in ids_already_done:
continue
ids_already_done[id] = 1
clique = node.UberClique().BestClique(id)
# We output in three cases:
# - We were asked to output only untranslated messages and this
# message is not translated (this is the default case)
# - We were asked to output only translated messages and this
# message is translated
# - We were asked to output all messages
message_has_translation = lang in clique.clique.keys()
if self.include_all:
cliques += [clique]
elif self.output_only_translated:
if message_has_translation:
cliques += [clique]
else:
if not message_has_translation:
cliques += [clique]
# Ensure a stable order of messages, to help regression testing.
cliques.sort(key=lambda x:x.GetMessage().GetId())
messages = [c.GetMessage() for c in cliques]
if output_format == 'xtb':
WriteXtbFile(output_file, messages)
elif output_format == 'text':
WriteMessagesToFile(output_file, messages)
elif output_format == 'pot':
xmb.WritePotFile(output_file, cliques, lang=lang, include_translation=self.output_only_translated)
elif output_format == 'gengo':
WriteGengoFile(output_file, messages)
else:
print "Unknown message format."
|
CrankWheel/grit-i18n
|
grit/tool/xtb.py
|
Python
|
bsd-2-clause
| 9,411
|
[
"xTB"
] |
5b449bd9b1b41acbe64b928a58f19ee4fd3ba04f6ae6e6b6a0725f5a3b09fcad
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Set up a DPD fluid and calculate the pressure as a function of
density. The fluid is thermalized using a DPD thermostat.
"""
import espressomd
required_features = ["DPD"]
espressomd.assert_features(required_features)
import numpy as np
# Set up the box and time step
system = espressomd.System(box_l=3 * [10])
system.time_step = 0.01
system.cell_system.skin = 0.4
# DPD parameters
n_part = 200
kT = 1.
gamma = 1.5
r_cut = 1.
# Repulsive parameter
F_max = 1.
# Activate the thermostat
system.thermostat.set_dpd(kT=kT, seed=123)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
# Set up the DPD friction interaction
system.non_bonded_inter[0, 0].dpd.set_params(
weight_function=0, gamma=gamma, r_cut=r_cut,
trans_weight_function=0, trans_gamma=gamma, trans_r_cut=r_cut)
# Set up the repulsive interaction
system.non_bonded_inter[0, 0].hat.set_params(F_max=F_max, cutoff=r_cut)
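# (To our understanding, the hat potential is a linearly decaying repulsive
# force F(r) = F_max * (1 - r / r_cut) for r < r_cut, i.e. a soft repulsion
# suitable for DPD time steps.)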
# Add particles that are randomly distributed over the box
system.part.add(pos=system.box_l * np.random.random((n_part, 3)))
# As a usage example, we calculate the pressure at several
# particle densities.
sample_size = 100
int_steps = 1000
for V in range(100, 1000, 100):
# Rescale the system to the new volume
    system.change_volume_and_rescale_particles(V**(1. / 3.))
# List of samples
p_samples = []
for i in range(sample_size):
system.integrator.run(int_steps)
p_samples.append(system.analysis.pressure()['total'])
# Average pressure
p_avg = np.mean(p_samples)
# Standard deviation of pressure
p_std = np.std(p_samples)
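    # (p_std is the spread of the individual samples; the standard error of
    # the mean would be p_std / np.sqrt(sample_size))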
print('rho {:.2f} p {:.2f} ({:.2f})'
.format(float(n_part) / V, p_avg, p_std))
|
psci2195/espresso-ffans
|
samples/dpd.py
|
Python
|
gpl-3.0
| 2,464
|
[
"ESPResSo"
] |
7b9691c9bf0c7e5f05b51e9c1f944fadf4e9fd590aeab077d5fa7bd492f352bd
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptInterfaceHelper, script_interface_register
@script_interface_register
class VirtualSitesOff(ScriptInterfaceHelper):
"""Virtual sites implementation which does nothing (default)"""
_so_name = "VirtualSites::VirtualSitesOff"
@script_interface_register
class VirtualSitesInertialessTracers(ScriptInterfaceHelper):
"""Virtual sites which are advected with an lb fluid without inertia. Forces are on them are transferred to the fluid instantly.
"""
_so_name = "VirtualSites::VirtualSitesInertialessTracers"
@script_interface_register
class VirtualSitesRelative(ScriptInterfaceHelper):
"""Virtual sites implementation placing virtual sites relative to other particles.
See :ref:`Rigid arrangements of particles` for details.
Attributes
----------
have_velocity : :obj:`bool`
Determines whether the velocity of the virtual sites is calculated.
This carries a performance cost.
Attributes can be set on the instance or passed to the constructor as
keyword arguments.
"""
_so_name = "VirtualSites::VirtualSitesRelative"
@script_interface_register
class ActiveVirtualSitesHandle(ScriptInterfaceHelper):
"""Handle for the virtual sites implementation active in the core
This should not be used directly.
Attributes
----------
implementation : instance of a virtual sites implementation
"""
_so_name = "VirtualSites::ActiveVirtualSitesHandle"
|
hmenke/espresso
|
src/python/espressomd/virtual_sites.py
|
Python
|
gpl-3.0
| 2,232
|
[
"ESPResSo"
] |
1b445d346e2e5665551d94d246c767e4b100f377b4c99b4fb2f92b08ac4d02e9
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# pylint: disable=undefined-variable
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.checkpointing
import espressomd.virtual_sites
import espressomd.integrate
from espressomd.shapes import Sphere, Wall
modes = {x for mode in set("@TEST_COMBINATION@".upper().split('-'))
for x in [mode, mode.split('.')[0]]}
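# e.g. "therm.lb-int.nvt" expands to {'THERM.LB', 'THERM', 'INT.NVT', 'INT'}:
# each dash-separated mode is kept both in full and truncated at the first dot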
LB = ('LB.CPU' in modes or 'LB.GPU' in modes and espressomd.gpu_available())
EK = ('EK.GPU' in modes and espressomd.gpu_available()
and espressomd.has_features('ELECTROKINETICS'))
def skipIfMissingFeatureStokesianDynamics():
"""Specialized unittest skipIf decorator for missing Stokesian Dynamics."""
if not espressomd.has_features(["STOKESIAN_DYNAMICS"]) and (not espressomd.has_features(
["STOKESIAN_DYNAMICS_GPU"]) or not espressomd.gpu_available()):
return ut.skip("Skipping test: feature STOKESIAN_DYNAMICS unavailable")
return utx._id
class CheckpointTest(ut.TestCase):
@classmethod
def setUpClass(cls):
cls.checkpoint = espressomd.checkpointing.Checkpoint(
checkpoint_id="mycheckpoint_@TEST_COMBINATION@_@TEST_BINARY@".replace(
'.', '__'),
checkpoint_path="@CMAKE_CURRENT_BINARY_DIR@")
cls.checkpoint.load(0)
@ut.skipIf(not LB, "Skipping test due to missing mode.")
def test_LB(self):
lbf = system.actors[0]
cpt_mode = int("@TEST_BINARY@")
cpt_path = self.checkpoint.checkpoint_dir + "/lb{}.cpt"
with self.assertRaises(RuntimeError):
lbf.load_checkpoint(cpt_path.format("-corrupted"), cpt_mode)
with self.assertRaisesRegex(RuntimeError, 'grid dimensions mismatch'):
lbf.load_checkpoint(cpt_path.format("-wrong-boxdim"), cpt_mode)
lbf.load_checkpoint(cpt_path.format(""), cpt_mode)
precision = 9 if "LB.CPU" in modes else 5
m = np.pi / 12
nx = lbf.shape[0]
ny = lbf.shape[1]
nz = lbf.shape[2]
grid_3D = np.fromfunction(
lambda i, j, k: np.cos(i * m) * np.cos(j * m) * np.cos(k * m),
(nx, ny, nz), dtype=float)
for i in range(nx):
for j in range(ny):
for k in range(nz):
np.testing.assert_almost_equal(
np.copy(lbf[i, j, k].population),
grid_3D[i, j, k] * np.arange(1, 20),
decimal=precision)
state = lbf.get_params()
reference = {'agrid': 0.5, 'visc': 1.3, 'dens': 1.5, 'tau': 0.01}
for key in reference:
self.assertIn(key, state)
self.assertAlmostEqual(reference[key], state[key], delta=1E-7)
@utx.skipIfMissingFeatures('ELECTROKINETICS')
@ut.skipIf(not EK, "Skipping test due to missing mode.")
def test_EK(self):
ek = system.actors[0]
ek_species = ek.get_params()['species'][0]
cpt_path = self.checkpoint.checkpoint_dir + "/ek"
ek.load_checkpoint(cpt_path)
precision = 5
m = np.pi / 12
nx = int(np.round(system.box_l[0] / ek.get_params()["agrid"]))
ny = int(np.round(system.box_l[1] / ek.get_params()["agrid"]))
nz = int(np.round(system.box_l[2] / ek.get_params()["agrid"]))
grid_3D = np.fromfunction(
lambda i, j, k: np.cos(i * m) * np.cos(j * m) * np.cos(k * m),
(nx, ny, nz), dtype=float)
for i in range(nx):
for j in range(ny):
for k in range(nz):
np.testing.assert_almost_equal(
np.copy(ek_species[i, j, k].density),
grid_3D[i, j, k],
decimal=precision)
state = ek.get_params()
reference = {'agrid': 0.5, 'lb_density': 26.15,
'viscosity': 1.7, 'friction': 0.0,
'T': 1.1, 'prefactor': 0.88, 'stencil': "linkcentered"}
for key in reference:
self.assertIn(key, state)
self.assertAlmostEqual(reference[key], state[key], delta=1E-5)
state_species = ek_species.get_params()
reference_species = {'density': 0.4, 'D': 0.02, 'valency': 0.3}
for key in reference_species:
self.assertIn(key, state_species)
self.assertAlmostEqual(
reference_species[key],
state_species[key],
delta=1E-5)
self.assertAlmostEqual(
state_species['ext_force_density'][0],
0.01,
delta=1E-5)
self.assertAlmostEqual(
state_species['ext_force_density'][1],
-0.08,
delta=1E-5)
self.assertAlmostEqual(
state_species['ext_force_density'][2],
0.06,
delta=1E-5)
def test_variables(self):
self.assertEqual(system.cell_system.skin, 0.1)
self.assertEqual(system.time_step, 0.01)
self.assertEqual(system.min_global_cut, 2.0)
def test_part(self):
np.testing.assert_allclose(
np.copy(system.part[0].pos), np.array([1.0, 2.0, 3.0]))
np.testing.assert_allclose(
np.copy(system.part[1].pos), np.array([1.0, 1.0, 2.0]))
np.testing.assert_allclose(np.copy(system.part[0].f), particle_force0)
np.testing.assert_allclose(np.copy(system.part[1].f), particle_force1)
@ut.skipIf('THERM.LB' not in modes, 'LB thermostat not in modes')
def test_thermostat_LB(self):
thmst = system.thermostat.get_state()[0]
if 'LB.GPU' in modes and not espressomd.gpu_available():
self.assertEqual(thmst['type'], 'OFF')
else:
self.assertEqual(thmst['type'], 'LB')
# rng_counter_fluid = seed, seed is 0 because kT=0
self.assertEqual(thmst['rng_counter_fluid'], 0)
self.assertEqual(thmst['gamma'], 2.0)
@ut.skipIf('THERM.LANGEVIN' not in modes,
'Langevin thermostat not in modes')
def test_thermostat_Langevin(self):
thmst = system.thermostat.get_state()[0]
self.assertEqual(thmst['type'], 'LANGEVIN')
self.assertEqual(thmst['kT'], 1.0)
self.assertEqual(thmst['seed'], 42)
self.assertFalse(thmst['act_on_virtual'])
np.testing.assert_array_equal(thmst['gamma'], 3 * [2.0])
if espressomd.has_features('ROTATION'):
np.testing.assert_array_equal(thmst['gamma_rotation'], 3 * [2.0])
@ut.skipIf('THERM.BD' not in modes,
'Brownian thermostat not in modes')
def test_thermostat_Brownian(self):
thmst = system.thermostat.get_state()[0]
self.assertEqual(thmst['type'], 'BROWNIAN')
self.assertEqual(thmst['kT'], 1.0)
self.assertEqual(thmst['seed'], 42)
self.assertFalse(thmst['act_on_virtual'])
np.testing.assert_array_equal(thmst['gamma'], 3 * [2.0])
if espressomd.has_features('ROTATION'):
np.testing.assert_array_equal(thmst['gamma_rotation'], 3 * [2.0])
@utx.skipIfMissingFeatures('DPD')
@ut.skipIf('THERM.DPD' not in modes, 'DPD thermostat not in modes')
def test_thermostat_DPD(self):
thmst = system.thermostat.get_state()[0]
self.assertEqual(thmst['type'], 'DPD')
self.assertEqual(thmst['kT'], 1.0)
self.assertEqual(thmst['seed'], 42 + 6)
@utx.skipIfMissingFeatures('NPT')
@ut.skipIf('THERM.NPT' not in modes, 'NPT thermostat not in modes')
def test_thermostat_NPT(self):
thmst = system.thermostat.get_state()[0]
self.assertEqual(thmst['type'], 'NPT_ISO')
self.assertEqual(thmst['seed'], 42)
self.assertEqual(thmst['gamma0'], 2.0)
self.assertEqual(thmst['gammav'], 0.1)
@skipIfMissingFeatureStokesianDynamics()
@ut.skipIf('THERM.SDM' not in modes, 'SDM thermostat not in modes')
def test_thermostat_SDM(self):
thmst = system.thermostat.get_state()[0]
self.assertEqual(thmst['type'], 'SD')
self.assertEqual(thmst['kT'], 1.0)
self.assertEqual(thmst['seed'], 42)
@utx.skipIfMissingFeatures('NPT')
@ut.skipIf('INT.NPT' not in modes, 'NPT integrator not in modes')
def test_integrator_NPT(self):
integ = system.integrator.get_state()
self.assertIsInstance(
integ, espressomd.integrate.VelocityVerletIsotropicNPT)
params = integ.get_params()
self.assertEqual(params['ext_pressure'], 2.0)
self.assertEqual(params['piston'], 0.01)
self.assertEqual(params['direction'], [1, 0, 0])
self.assertEqual(params['cubic_box'], False)
@ut.skipIf('INT.SD' not in modes, 'SD integrator not in modes')
def test_integrator_SD(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.SteepestDescent)
params = integ.get_params()
self.assertEqual(params['f_max'], 2.0)
self.assertEqual(params['gamma'], 0.1)
self.assertEqual(params['max_displacement'], 0.01)
@ut.skipIf('INT.NVT' not in modes, 'NVT integrator not in modes')
def test_integrator_NVT(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.VelocityVerlet)
params = integ.get_params()
self.assertEqual(params, {})
@ut.skipIf('INT' in modes, 'VV integrator not the default')
def test_integrator_VV(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.VelocityVerlet)
params = integ.get_params()
self.assertEqual(params, {})
@ut.skipIf('INT.BD' not in modes, 'BD integrator not in modes')
def test_integrator_BD(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.BrownianDynamics)
params = integ.get_params()
self.assertEqual(params, {})
@utx.skipIfMissingFeatures('STOKESIAN_DYNAMICS')
@ut.skipIf('INT.SDM.CPU' not in modes, 'SDM CPU integrator not in modes')
def test_integrator_SDM_cpu(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.StokesianDynamics)
expected_params = {
'approximation_method': 'ft', 'device': 'cpu', 'radii': {0: 1.5},
'viscosity': 0.5, 'lubrication': False, 'pair_mobility': False,
'self_mobility': True}
params = integ.get_params()
self.assertEqual(params, expected_params)
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures('STOKESIAN_DYNAMICS_GPU')
@ut.skipIf('INT.SDM.GPU' not in modes, 'SDM GPU integrator not in modes')
def test_integrator_SDM_gpu(self):
integ = system.integrator.get_state()
self.assertIsInstance(integ, espressomd.integrate.StokesianDynamics)
expected_params = {
'approximation_method': 'fts', 'device': 'gpu', 'radii': {0: 1.0},
'viscosity': 2.0, 'lubrication': False, 'pair_mobility': True,
'self_mobility': False}
params = integ.get_params()
self.assertEqual(params, expected_params)
@utx.skipIfMissingFeatures('LENNARD_JONES')
@ut.skipIf('LJ' not in modes, "Skipping test due to missing mode.")
def test_non_bonded_inter(self):
state = system.non_bonded_inter[
0, 0].lennard_jones._get_params_from_es_core()
state2 = system.non_bonded_inter[
3, 0].lennard_jones._get_params_from_es_core()
reference = {'shift': 0.1, 'sigma': 1.3, 'epsilon': 1.2,
'cutoff': 2.0, 'offset': 0.0, 'min': 0.0}
reference2 = {'shift': 0.1, 'sigma': 1.7, 'epsilon': 1.2,
'cutoff': 2.0, 'offset': 0.0, 'min': 0.0}
self.assertEqual(
len(set(state.items()) & set(reference.items())), len(reference))
self.assertEqual(len(set(state2.items()) & set(
reference2.items())), len(reference2))
def test_bonded_inter(self):
state = system.part[1].bonds[0][0].params
reference = {'r_0': 0.0, 'k': 1.0}
for key in reference:
self.assertAlmostEqual(state[key], reference[key], delta=1E-10)
if 'THERM.LB' not in modes:
state = system.part[1].bonds[1][0].params
reference = {'temp_com': 0., 'gamma_com': 0., 'temp_distance': 0.2,
'gamma_distance': 0.5, 'r_cut': 2.0, 'seed': 51}
for key in reference:
self.assertAlmostEqual(state[key], reference[key], delta=1E-10)
@utx.skipIfMissingFeatures(['VIRTUAL_SITES', 'VIRTUAL_SITES_RELATIVE'])
def test_virtual_sites(self):
self.assertTrue(system.part[1].virtual)
self.assertIsInstance(
system.virtual_sites,
espressomd.virtual_sites.VirtualSitesRelative)
def test_mean_variance_calculator(self):
np.testing.assert_array_equal(
acc_mean_variance.get_mean(),
np.array([[1.0, 1.5, 2.0], [1.0, 1.0, 2.0]]))
np.testing.assert_array_equal(
acc_mean_variance.get_variance(),
np.array([[0., 0.5, 2.], [0., 0., 0.]]))
np.testing.assert_array_equal(
system.auto_update_accumulators[0].get_variance(),
np.array([[0., 0.5, 2.], [0., 0., 0.]]))
def test_time_series(self):
expected = [[[1, 1, 1], [1, 1, 2]], [[1, 2, 3], [1, 1, 2]]]
np.testing.assert_array_equal(acc_time_series.time_series(), expected)
np.testing.assert_array_equal(
system.auto_update_accumulators[1].time_series(),
expected)
def test_correlator(self):
expected = np.zeros((36, 2, 3))
expected[0:2] = [[[1, 2.5, 5], [1, 1, 4]], [[1, 2, 3], [1, 1, 4]]]
np.testing.assert_array_equal(acc_correlator.result(), expected)
np.testing.assert_array_equal(
system.auto_update_accumulators[2].result(),
expected)
@utx.skipIfMissingFeatures('P3M')
@ut.skipIf('P3M.CPU' not in modes,
"Skipping test due to missing combination.")
def test_p3m(self):
self.assertTrue(any(isinstance(actor, espressomd.electrostatics.P3M)
for actor in system.actors.active_actors))
@utx.skipIfMissingFeatures('COLLISION_DETECTION')
def test_collision_detection(self):
coldet = system.collision_detection
self.assertEqual(coldet.mode, "bind_centers")
self.assertAlmostEqual(coldet.distance, 0.11, delta=1E-9)
self.assertEqual(coldet.bond_centers, system.bonded_inter[0])
@utx.skipIfMissingFeatures('EXCLUSIONS')
def test_exclusions(self):
self.assertEqual(list(system.part[0].exclusions), [2])
self.assertEqual(list(system.part[1].exclusions), [2])
self.assertEqual(list(system.part[2].exclusions), [0, 1])
@ut.skipIf(not LB or EK or not (espressomd.has_features("LB_BOUNDARIES")
or espressomd.has_features("LB_BOUNDARIES_GPU")), "Missing features")
def test_lb_boundaries(self):
self.assertEqual(len(system.lbboundaries), 1)
np.testing.assert_allclose(
np.copy(system.lbboundaries[0].velocity), [1e-4, 1e-4, 0])
self.assertIsInstance(system.lbboundaries[0].shape, Wall)
def test_constraints(self):
from espressomd import constraints
self.assertEqual(len(system.constraints),
8 - int(not espressomd.has_features("ELECTROSTATICS")))
c = system.constraints
self.assertIsInstance(c[0].shape, Sphere)
self.assertAlmostEqual(c[0].shape.radius, 0.1, delta=1E-10)
self.assertEqual(c[0].particle_type, 17)
self.assertIsInstance(c[1].shape, Wall)
np.testing.assert_allclose(np.copy(c[1].shape.normal),
[1. / np.sqrt(3)] * 3)
self.assertIsInstance(c[2], constraints.Gravity)
np.testing.assert_allclose(np.copy(c[2].g), [1., 2., 3.])
self.assertIsInstance(c[3], constraints.HomogeneousMagneticField)
np.testing.assert_allclose(np.copy(c[3].H), [1., 2., 3.])
self.assertIsInstance(c[4], constraints.HomogeneousFlowField)
np.testing.assert_allclose(np.copy(c[4].u), [1., 2., 3.])
self.assertAlmostEqual(c[4].gamma, 2.3, delta=1E-10)
self.assertIsInstance(c[5], constraints.PotentialField)
self.assertEqual(c[5].field.shape, (14, 16, 18, 1))
self.assertAlmostEqual(c[5].default_scale, 1.6, delta=1E-10)
np.testing.assert_allclose(np.copy(c[5].origin), [-0.5, -0.5, -0.5])
np.testing.assert_allclose(np.copy(c[5].grid_spacing), np.ones(3))
ref_pot = constraints.PotentialField(
field=pot_field_data, grid_spacing=np.ones(3), default_scale=1.6)
np.testing.assert_allclose(np.copy(c[5].field), np.copy(ref_pot.field),
atol=1e-10)
self.assertIsInstance(c[6], constraints.ForceField)
self.assertEqual(c[6].field.shape, (14, 16, 18, 3))
self.assertAlmostEqual(c[6].default_scale, 1.4, delta=1E-10)
np.testing.assert_allclose(np.copy(c[6].origin), [-0.5, -0.5, -0.5])
np.testing.assert_allclose(np.copy(c[6].grid_spacing), np.ones(3))
ref_vec = constraints.ForceField(
field=vec_field_data, grid_spacing=np.ones(3), default_scale=1.4)
np.testing.assert_allclose(np.copy(c[6].field), np.copy(ref_vec.field),
atol=1e-10)
if espressomd.has_features("ELECTROSTATICS"):
self.assertIsInstance(c[7], constraints.ElectricPlaneWave)
np.testing.assert_allclose(np.copy(c[7].E0), [1., -2., 3.])
np.testing.assert_allclose(np.copy(c[7].k), [-.1, .2, .3])
self.assertAlmostEqual(c[7].omega, 5., delta=1E-10)
self.assertAlmostEqual(c[7].phi, 1.4, delta=1E-10)
if __name__ == '__main__':
ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/test_checkpoint.py
|
Python
|
gpl-3.0
| 18,699
|
[
"ESPResSo"
] |
34990f1faf2d49ea644baacf8e52b952e0d129d6f4dbd4f24f72ebf46abe7c70
|
"""Wrapper around different ASE calculators to perform simple QM/MM calculations"""
def __init__(self, atoms):
|
PHOTOX/fuase
|
ase/ase/calculators/qmmm.py
|
Python
|
gpl-2.0
| 112
|
[
"ASE"
] |
582c5f74e5a6481066ed0989d9cc1e5341ab04987a1eae2c81fbd94f43080f3b
|
""" The POOL XML File module provides a means to extract the GUID of a file or list
of files by searching for an appropriate POOL XML Catalog in the specified directory.
"""
import os
import glob
import tarfile
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Resources.Catalog.PoolXMLCatalog import PoolXMLCatalog
from DIRAC.Core.Utilities.List import uniqueElements
from DIRAC.Core.Utilities.File import makeGuid
__RCSID__ = "$Id$"
#############################################################################
def getGUID( fileNames, directory = '' ):
""" This function searches the directory for POOL XML catalog files and extracts the GUID.
fileNames can be a string or a list, directory defaults to PWD.
"""
if not directory:
directory = os.getcwd()
if not os.path.isdir( directory ):
return S_ERROR( '%s is not a directory' % directory )
if not isinstance( fileNames, list ):
fileNames = [fileNames]
gLogger.verbose( 'Will look for POOL XML Catalog GUIDs in %s for %s' % ( directory, ', '.join( fileNames ) ) )
finalCatList = _getPoolCatalogs( directory )
#Create POOL catalog with final list of catalog files and extract GUIDs
generated = []
pfnGUIDs = {}
catalog = PoolXMLCatalog( finalCatList )
for fname in fileNames:
guid = str( catalog.getGuidByPfn( fname ) )
if not guid:
guid = makeGuid( fname )
generated.append( fname )
pfnGUIDs[fname] = guid
if not generated:
gLogger.info( 'Found GUIDs from POOL XML Catalogue for all files: %s' % ', '.join( fileNames) )
else:
gLogger.info( 'GUIDs not found from POOL XML Catalogue (and were generated) for: %s' % ', '.join( generated) )
result = S_OK( pfnGUIDs )
result['directory'] = directory
result['generated'] = generated
return result
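# Hedged usage sketch (hypothetical file name):
#   res = getGUID( 'myFile.dst', directory = '/path/to/job' )
#   if res['OK']:
#     guid = res['Value']['myFile.dst']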
#############################################################################
def getType( fileNames, directory = '' ):
""" This function searches the directory for POOL XML catalog files and extracts the type of the pfn.
fileNames can be a string or a list, directory defaults to PWD.
"""
if not directory:
directory = os.getcwd()
if not os.path.isdir( directory ):
return S_ERROR( '%s is not a directory' % directory )
if not isinstance( fileNames, list ):
fileNames = [fileNames]
gLogger.verbose( 'Will look for POOL XML Catalog file types in %s for %s' % ( directory, ', '.join( fileNames ) ) )
finalCatList = _getPoolCatalogs( directory )
#Create POOL catalog with final list of catalog files and extract GUIDs
generated = []
pfnTypes = {}
catalog = PoolXMLCatalog( finalCatList )
for fname in fileNames:
typeFile = str( catalog.getTypeByPfn( fname ) )
if not typeFile:
typeFile = 'ROOT_All'
generated.append( fname )
pfnTypes[fname] = typeFile
if not generated:
gLogger.info( 'Found Types from POOL XML Catalogue for all files: %s' % ', '.join( fileNames ) )
else:
    gLogger.info( 'Types not found in POOL XML Catalogue (defaulted to ROOT_All) for: %s' % ', '.join( generated ) )
result = S_OK( pfnTypes )
result['directory'] = directory
result['generated'] = generated
return result
#############################################################################
def _getPoolCatalogs( directory = '' ):
patterns = ['*.xml', '*.xml*gz']
omissions = ['\.bak$'] # to be ignored for production files
#First obtain valid list of unpacked catalog files in directory
poolCatalogList = []
for pattern in patterns:
fileList = glob.glob( os.path.join( directory, pattern ) )
for fname in fileList:
if fname.endswith( '.bak' ):
gLogger.verbose( 'Ignoring BAK file: %s' % fname )
elif tarfile.is_tarfile( fname ):
gLogger.debug( 'Unpacking catalog XML file %s' % ( os.path.join( directory, fname ) ) )
with tarfile.open( os.path.join( directory, fname ), 'r' ) as tf:
for member in tf.getmembers():
tf.extract( member, directory )
poolCatalogList.append( os.path.join( directory, member.name ) )
else:
poolCatalogList.append( fname )
poolCatalogList = uniqueElements( poolCatalogList )
#Now have list of all XML files but some may not be Pool XML catalogs...
finalCatList = []
for possibleCat in poolCatalogList:
try:
_cat = PoolXMLCatalog( possibleCat )
finalCatList.append( possibleCat )
except Exception as x:
gLogger.debug( 'Ignoring non-POOL catalogue file %s' % possibleCat )
gLogger.debug( 'Final list of catalog files are: %s' % ', '.join( finalCatList ) )
return finalCatList
#############################################################################
|
chaen/DIRAC
|
Resources/Catalog/PoolXMLFile.py
|
Python
|
gpl-3.0
| 4,803
|
[
"DIRAC"
] |
a4c05b47612366747c01531f839c523026d9e47451be52e34a1519427626899c
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
import math
try:
import cPickle as pickle
except ImportError:
import pickle
import cocos
from cocos import euclid
import pyglet
from pyglet.gl import *
import copy
class Skin(cocos.cocosnode.CocosNode):
def __init__(self, skeleton):
super(Skin, self).__init__()
self.skeleton = skeleton
class ColorSkin(Skin):
def __init__(self, skeleton, color):
super(ColorSkin, self).__init__(skeleton)
self.color = color
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
self.skeleton.visit_children(lambda bone: self.draw_bone(bone))
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix * bone.matrix))
bones = dict(bones)
glPopMatrix()
def draw_bone(self, bone):
p1 = bone.get_start()
p2 = bone.get_end()
glColor4ub(*self.color)
glLineWidth(5)
glBegin(GL_LINES)
glVertex2f(*p1)
glVertex2f(*p2)
glEnd()
class BitmapSkin(Skin):
skin_parts = []
def __init__(self, skeleton, skin_def, alpha=255):
super(BitmapSkin, self).__init__(skeleton)
self.alpha = alpha
self.skin_parts = skin_def
self.regenerate()
def move(self, idx, dx, dy):
sp = self.skin_parts
pos = sp[idx][1]
sp[idx] = sp[idx][0], (pos[0] + dx, pos[1] + dy), sp[idx][2], \
sp[idx][3], sp[idx][4], sp[idx][5]
self.regenerate()
def get_control_points(self):
return [(i, p[0]) for i, p in enumerate(self.skin_parts)]
def regenerate(self):
# print self.skin_parts
self.parts = [(name, position, scale,
pyglet.resource.image(image, flip_y=flip_y, flip_x=flip_x))
for name, position, image, flip_x, flip_y, scale
in self.skin_parts]
def draw(self):
self.skeleton.propagate_matrix()
glPushMatrix()
self.transform()
bones = self.skeleton.visit_children(
lambda bone: (bone.label, bone.parent_matrix * bone.matrix))
bones = dict(bones)
for bname, position, scale, image in self.parts:
matrix = bones[bname]
self.blit_image(matrix, position, scale, image)
glPopMatrix()
def blit_image(self, matrix, position, scale, image):
x, y = image.width * scale, image.height * scale
# dx = self.x + position[0]
# dy = self.y + position[1]
dx, dy = position
glEnable(image.target)
glBindTexture(image.target, image.id)
glPushAttrib(GL_COLOR_BUFFER_BIT)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
# blit img
points = [
(-dx, -dy),
(x - dx, -dy),
(x - dx, y - dy),
(-dx, y - dy)
]
a, b, _, c, d, _, e, f, _, g, h, _ = image.texture.tex_coords
textures = [a, b, c, d, e, f, g, h]
np = [matrix * euclid.Point2(*p) for p in points]
glColor4ub(255, 255, 255, self.alpha)
glBegin(GL_QUADS)
glTexCoord2f(a, b)
glVertex2f(*np[0])
glTexCoord2f(c, d)
glVertex2f(*np[1])
glTexCoord2f(e, f)
glVertex2f(*np[2])
glTexCoord2f(g, h)
glVertex2f(*np[3])
glEnd()
glColor4ub(255, 255, 255, 255)
# pyglet.graphics.draw(4, GL_QUADS,
# ("v2f", new_points),
# ("t2f", textures),
# ("c4B", [255, 255, 255, self.alpha] * 4),
# )
glPopAttrib()
glDisable(image.target)
def flip(self):
nsp = []
for name, position, image, flip_x, flip_y, scale in self.skin_parts:
im = pyglet.resource.image(image, flip_y=flip_y, flip_x=flip_x)
x = im.width*scale - position[0]
y = position[1]
nsp.append((name, (x, y), image, not flip_x, flip_y, scale))
self.skin_parts = nsp
self.regenerate()
self.skeleton = self.skeleton.flipped()
class Animate(cocos.actions.IntervalAction):
def init(self, animation, recenter=False, recenter_x=False, recenter_y=False):
if recenter:
recenter_x = recenter_y = True
self.recenter_x = recenter_x
self.recenter_y = recenter_y
self.duration = animation.get_duration()
self.animation = animation
def start(self):
nsk = copy.deepcopy(self.target.skeleton)
if self.recenter_x:
self.target.x += nsk.translation.x
nsk.translation.x = 0
if self.recenter_y:
self.target.y += nsk.translation.y
nsk.translation.y = 0
self.start_skeleton = nsk
def update(self, t):
self.animation.pose(self.target.skeleton, t, self.start_skeleton)
def __reversed__(self):
raise NotImplementedError("gimme some time")
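# Hedged usage sketch (names illustrative): given a Skin node whose skeleton
# matches the animation's, playback could be scheduled with
#     skin_node.do(Animate(animation, recenter_x=True))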
class Skeleton(object):
def __init__(self, bone):
super(Skeleton, self).__init__()
self.bone = bone
self.matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Vector2(0, 0)
def flipped(self):
sk = Skeleton(self.bone.flipped())
sk.translation.x = -self.translation.x
sk.translation.y = self.translation.y
sk.matrix = euclid.Matrix3.new_translate(*sk.translation)
return sk
def save(self, name):
f = open(name, "wb")
pickle.dump(self, f)
f.close()
def move(self, dx, dy):
self.matrix.translate(dx, dy)
self.translation.x += dx
self.translation.y += dy
def propagate_matrix(self):
def visit(matrix, child):
child.parent_matrix = matrix
matrix = matrix * child.matrix
for c in child.children:
visit(matrix, c)
visit(self.matrix, self.bone)
def visit_children(self, func):
result = []
def inner(bone):
result.append(func(bone))
for b in bone.children:
inner(b)
inner(self.bone)
return result
def get_control_points(self):
points = [self]
self.propagate_matrix()
points += self.visit_children(lambda bone: bone)
return points
def interpolated_to(self, next, delta):
sk = Skeleton(self.bone.interpolated_to(next.bone, delta))
sk.translation = (next.translation - self.translation) * delta + self.translation
sk.matrix = euclid.Matrix3.new_translate(*sk.translation)
return sk
def pose_from(self, other):
self.matrix = other.matrix
self.translation = other.translation
self.bone = copy.deepcopy(other.bone)
class Bone(object):
def __init__(self, label, size, rotation, translation):
self.size = size
self.label = label
self.children = []
self.matrix = euclid.Matrix3.new_translate(*translation) * \
euclid.Matrix3.new_rotate(math.radians(rotation))
self.parent_matrix = euclid.Matrix3.new_identity()
self.translation = euclid.Point2(*translation)
self.rotation = math.radians(rotation)
def move(self, dx, dy):
self.translation.x += dx
self.translation.y += dy
self.matrix = euclid.Matrix3.new_translate(*self.translation) * \
euclid.Matrix3.new_rotate(self.rotation)
def flipped(self):
bone = Bone(self.label, self.size, -math.degrees(self.rotation),
(-self.translation[0], self.translation[1]))
for b in self.children:
bone.add(b.flipped())
return bone
def rotate(self, angle):
self.rotation += angle
self.matrix.rotate(angle)
def add(self, bone):
self.children.append(bone)
return self
def get_end(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, -self.size)
def get_start(self):
return self.parent_matrix * self.matrix * euclid.Point2(0, 0)
def interpolated_to(self, next, delta):
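        # interpolate the rotation along the shortest angular path: wrap both
        # angles into [0, 2*pi), then shift their difference into [-pi, pi]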
ea = next.rotation % (math.pi*2)
sa = self.rotation % (math.pi*2)
angle = ((ea % (math.pi*2)) - (sa % (math.pi*2)))
if angle > math.pi:
angle += -math.pi * 2
if angle < -math.pi:
angle += math.pi * 2
nr = (sa + angle*delta) % (math.pi*2)
nr = math.degrees(nr)
bone = Bone(self.label, self.size, nr, self.translation)
for i, c in enumerate(self.children):
nc = c.interpolated_to(next.children[i], delta)
bone.add(nc)
return bone
def dump(self, depth=0):
print("-" * depth, self)
for c in self.children:
c.dump(depth + 1)
def repr(self, depth=0):
repr = " "*depth*4 + "Bone('%s', %s, %s, %s)" % (
self.label, self.size, math.degrees(self.rotation), self.translation)
for c in self.children:
repr += " "*depth*4 + ".add(\n" + c.repr(depth+1) + ")"
repr += "\n"
return repr
class Animation(object):
def __init__(self, skeleton):
self.frames = {}
self.position = 0
self.skeleton = skeleton
def flipped(self):
c = copy.deepcopy(self)
for t, sk in c.frames.items():
c.frames[t] = sk.flipped()
return c
def pose(self, who, t, start):
dt = t * self.get_duration()
self.position = dt
ct, curr = self.get_keyframe()
        # print who.translation
# if we are in a keyframe, pose that
if curr:
who.pose_from(curr)
return
# find previous, if not, use start
pt, prev = self.get_keyframe(-1)
if not prev:
prev = start
pt = 0
# find next, if not, pose at prev
nt, next = self.get_keyframe(1)
if not next:
who.pose_from(prev)
return
        # we find the dt between prev and next and pose from it
ft = (nt-dt) / (nt-pt)
who.pose_from(next.interpolated_to(prev, ft))
def get_duration(self):
if self.frames:
return max(max(self.frames), self.position)
else:
return self.position
def get_markers(self):
return self.frames.keys()
def get_position(self):
return self.position
def get_keyframe(self, offset=0):
if offset == 0:
if self.position in self.frames:
return self.position, self.frames[self.position]
else:
return None, None
elif offset < 0:
prevs = [t for t in self.frames if t < self.position]
prevs.sort()
if abs(offset) <= len(prevs):
return prevs[offset], self.frames[prevs[offset]]
else:
return None, None
elif offset > 0:
next = [t for t in self.frames if t > self.position]
next.sort()
if abs(offset) <= len(next):
return next[offset - 1], self.frames[next[offset - 1]]
else:
return None, None
def next_keyframe(self):
next = [t for t in self.frames if t > self.position]
if not next:
return False
self.position = min(next)
return True
def prev_keyframe(self):
prevs = [t for t in self.frames if t < self.position]
if not prevs:
return False
self.position = max(prevs)
return True
def move_position(self, delta):
self.position = max(self.position + delta, 0)
return True
def move_start(self):
self.position = 0
return True
def move_end(self):
if self.frames:
self.position = max(self.frames)
else:
self.position = 0
return True
def insert_keyframe(self):
if self.position not in self.frames:
t, sk = self.get_keyframe(-1)
if not sk:
sk = self.skeleton
self.frames[self.position] = copy.deepcopy(sk)
return True
return False
def remove_keyframe(self):
if self.position in self.frames:
del self.frames[self.position]
return True
return False
def insert_time(self, delta):
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t >= self.position:
t += delta
new_frames[t] = sk
self.frames = new_frames
def delete_time(self, delta):
for t in self.frames:
if self.position <= t < self.position + delta:
return False
new_frames = {}
for t, sk in sorted(self.frames.items()):
if t > self.position:
t -= delta
new_frames[t] = sk
self.frames = new_frames
|
dangillet/cocos
|
cocos/skeleton.py
|
Python
|
bsd-3-clause
| 14,881
|
[
"VisIt"
] |
c2533f2f9617f8ce69d661f9e0e35d57206d2084b00c429dacc797a6aecc6aa0
|
from django.apps import apps as django_apps
from django.db import models
from edc_visit_schedule.site_visit_schedules import site_visit_schedules
from ...constants import REQUIRED, NOT_REQUIRED
class MetadataError(Exception):
pass
class UpdatesMetadataModelMixin(models.Model):
updater_cls = None
metadata_category = None
def metadata_update(self, entry_status=None):
"""Updates metatadata.
"""
self.metadata_updater.update(entry_status=entry_status)
def run_metadata_rules_for_crf(self):
"""Runs all the rule groups for this app label.
"""
self.visit.run_metadata_rules(visit=self.visit)
@property
def metadata_updater(self):
"""Returns an instance of MetadataUpdater.
"""
return self.updater_cls(
visit=self.visit,
target_model=self._meta.label_lower)
def metadata_reset_on_delete(self):
"""Sets the metadata instance to its original state.
"""
obj = self.metadata_model.objects.get(**self.metadata_query_options)
try:
obj.entry_status = self.metadata_default_entry_status
except IndexError:
# means crf is not listed in visit schedule, so remove it.
# for example, this is a PRN form
obj.delete()
else:
obj.entry_status = self.metadata_default_entry_status or REQUIRED
obj.report_datetime = None
obj.save()
@property
def metadata_default_entry_status(self):
"""Returns a string that represents the default entry status
of the crf in the visit schedule.
"""
crfs_prn = self.metadata_visit_object.crfs_prn
if self.visit.visit_code_sequence != 0:
crfs = self.metadata_visit_object.crfs_unscheduled + crfs_prn
else:
crfs = self.metadata_visit_object.crfs + crfs_prn
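        # The [0] lookup below raises IndexError when the CRF is not listed
        # for this visit; metadata_reset_on_delete() relies on that to detect
        # PRN-only forms.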
crf = [c for c in crfs if c.model == self._meta.label_lower][0]
return REQUIRED if crf.required else NOT_REQUIRED
@property
def metadata_visit_object(self):
visit_schedule = site_visit_schedules.get_visit_schedule(
visit_schedule_name=self.visit.visit_schedule_name)
schedule = visit_schedule.schedules.get(self.visit.schedule_name)
return schedule.visits.get(self.visit.visit_code)
@property
def metadata_query_options(self):
options = self.visit.metadata_query_options
options.update({
'subject_identifier': self.visit.subject_identifier,
'model': self._meta.label_lower})
return options
@property
def metadata_model(self):
"""Returns the metadata model associated with self.
"""
app_config = django_apps.get_app_config('edc_metadata')
return app_config.get_metadata_model(self.metadata_category)
class Meta:
abstract = True
|
botswana-harvard/edc-meta-data
|
edc_metadata/model_mixins/updates/updates_metadata_model_mixin.py
|
Python
|
gpl-2.0
| 2,915
|
[
"VisIt"
] |
464ffc24ad16b7cf9ccd992e238626ac37e6b78bd08e84607fefcd124f7652f1
|
"""
.. module: FSRStools.uvvis
:platform: Windows
.. moduleauthor:: Daniel Dietze <daniel.dietze@berkeley.edu>
Load and process UV/VIS spectra and related formats. Supports PerkinElmer's old Lambda format.
Provides also some short cuts to useful functions from the :py:mod:`FSRStools.raman` module.
..
This file is part of the FSRStools python module.
The FSRStools python module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The FSRStools python module is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the FSRStools python module. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Daniel Dietze <daniel.dietze@berkeley.edu>.
"""
import numpy as np
import pylab as pl
import glob
import struct
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import FSRStools.fitting as ft
import FSRStools.raman as fs
# ----------------
# Function shortcuts to raman module..
def cut(x, y, x0, x1):
"""Cut a slice from x and y using the values in x. See :py:mod:`FSRStools.raman` for details.
"""
return fs.cut(x, y, x0, x1)
def at(x, y, x0):
"""Return the value of y with x-coordinate closest to x0. See :py:mod:`FSRStools.raman` for details.
"""
return fs.at(x, y, x0)
def denoise(y):
"""Optimized smoothing of data based on Appl. Spectrosc. 62, 1160 (2008). See :py:mod:`FSRStools.raman` for details.
"""
return fs.denoise(y)
def get_OD(wl, spectrum, wl0):
"""Returns the interpolated value of a spectrum at a wavelength wl0 using the wavelength axis wl.
"""
return interp1d(wl, spectrum, 'cubic')(wl0)
def getOD(wl, spectrum, wl0):
"""Same as :py:func:`get_OD`.
"""
return get_OD(wl, spectrum, wl0)
# returns a binned version of x, y where d is the bin width (odd number)
# each bin is assigned the mean value of y, also returns the std over the bin width
# keeps the first and last point
def binning(x, y, d):
"""Return a binned version of x and y, where the new y values are calculated as the average over the bin width at the center of the bin.
:param array x: x-axis.
:param array y: Data (same shape as x).
:param int d: Bin width in indices. Should be an odd number.
:returns: bin centers, binned value and standard deviation over the binned values.
"""
if d % 2 == 0:
d += 1
N = len(x)
NN = int(N/d)
xtmp = np.zeros(NN+2)
ytmp = np.zeros(NN+2)
etmp = np.zeros(NN+2)
xtmp[0] = x[0]
ytmp[0] = y[0]
xtmp[-1] = x[-1]
ytmp[-1] = y[-1]
for i in range(NN):
xtmp[i+1] = np.mean(x[d*i:d*(i+1)])
ytmp[i+1] = np.mean(y[d*i:d*(i+1)])
etmp[i+1] = np.std(y[d*i:d*(i+1)])
return [xtmp, ytmp, etmp]
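# Hedged usage sketch (names illustrative): bin a noisy spectrum into 5-point
# bins and plot with error bars:
#   xb, yb, eb = binning(wl, od, 5)
#   pl.errorbar(xb, yb, yerr=eb)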
# returns a smoothed version of x, y
# using binning to generate interpolation points
# d is the bin width (odd number)
def binsmooth(x, y, d):
"""Returned smoothed version of x, y based on binning and cubic interpolation.
:param array x: x-axis.
:param array y: Data (same shape as x).
:param int d: Bin width in indices. Should be an odd number.
:returns: Smoothed data array (same shape as x).
"""
xtmp, ytmp, _ = binning(x, y, d)
return interp1d(xtmp, ytmp, 'cubic')(x)
# ----------------
# convenience functions to read numeric values from binary file at given position relative to beginning of file stream
def readCHAR(stream, position):
stream.seek(position, 0)
return int.from_bytes(stream.read(1), byteorder = 'little', signed = True)
def readUCHAR(stream, position):
stream.seek(position, 0)
return int.from_bytes(stream.read(1), byteorder = 'little', signed = False)
def readINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(2), byteorder = 'little', signed = True)
def readUINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(2), byteorder = 'little', signed = False)
def readDINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(4), byteorder = 'little', signed = True)
def readUDINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(4), byteorder = 'little', signed = False)
def readLINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(8), byteorder = 'little', signed = True)
def readULINT(stream, position):
    stream.seek(position, 0)
    return int.from_bytes(stream.read(8), byteorder = 'little', signed = False)
def readFLOAT(stream, position):
    stream.seek(position, 0)
    return struct.unpack("<f", stream.read(4))[0]
def readDOUBLE(stream, position):
    stream.seek(position, 0)
    return struct.unpack("<d", stream.read(8))[0]
# actual load function
# very simple, just read out the data and create a x-axis according to xmin and xmax values
# if filename contains wildcards, return the average over all matching files
def load_sp(filename, lmin = -1, lmax = -1, smooth=True):
"""Basic support for PerkinElmer's Lambda-series UV/VIS file format (`*.sp`).
Reads out a single spectrum and generates a x-axis according to the xmin and xmax values stored in the file.
No support for multiple spectra per file.
:param str filename: Filename(s) of the file(s) to load. Supports wildcards via glob. If several files match the pattern, the returned data is the average over these files.
:param float lmin: Set minimum wavelength of returned data (same units as stored wavelength). Return all when -1 (default).
:param float lmax: Set maximum wavelength of returned data (same units as stored wavelength). Return all when -1 (default).
:param bool smooth: If True, apply :py:func:`denoise` to data before returning (default).
:returns: Wavelength axis and spectrum.
"""
files = glob.glob(filename)
if(len(files) == 0):
print("ERROR: No file found!")
return np.array([[],[]])
elif(len(files) == 1):
fp = open(filename, "rb")
# read header information
numPoints = readUDINT(fp, 10)
xMax = readUDINT(fp, 14) / 100.0
xMin = readUDINT(fp, 18) / 100.0
dataInterval = readUINT(fp, 22) / 100.0
scaleFactor = readUDINT(fp, 28)
scaleDef = readUDINT(fp, 252)
fp.seek(512, 0)
raw = fp.read()
fp.close()
# extract data
data = np.array(struct.unpack("<" + "i" * int((len(raw) / 4)), raw), dtype='float') * scaleDef / 100.0 / scaleFactor
data = np.nan_to_num(data)
if smooth:
data = denoise(data)
# make x-axis
xaxis = np.linspace(xMin, xMax, len(data))
# slicing?
        if lmax < 0:
            lmax = np.amax(xaxis) + 1
return cut(xaxis, data, lmin, lmax)
else:
data = 0
for f in files:
            x, tmp = load_sp(f, lmin, lmax, smooth)
data = data + tmp
data = data / float(len(files))
return x, data
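# Example usage (a minimal sketch; the file pattern is hypothetical):
#   wl, spec = load_sp("scans/sample_*.sp", lmin=300, lmax=800)
#   # data is averaged over all matching files and cut to 300..800 nm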
def load(filename, lmin=-1, lmax=-1, smooth=True, delim=None):
"""Loads a spectrum / set of spectra from ASCII file(s). Expects the first column to be wavelength. Text lines are ignored, i.e., this function can be used to load Ocean Optics text files, for instance.
:param str filename: Filename(s) of the file(s) to load. Supports wildcards via glob. If several files match the pattern, the returned data is the average over these files.
:param float lmin: Set minimum wavelength of returned data (same units as stored wavelength). Return all when -1 (default).
:param float lmax: Set maximum wavelength of returned data (same units as stored wavelength). Return all when -1 (default).
:param bool smooth: If True, apply :py:func:`denoise` to data before returning (default).
:param str delim: Delimiter argument to be passed to :py:func:`numpy.loadtxt`. Set to None to use any whitespace (default).
:returns: Wavelength axis and spectrum.
"""
files = glob.glob(filename)
if len(files) > 1:
out = 0
for f in files:
x, y = load(f, lmin, lmax, smooth, delim)
out = out + y
out = out / float(len(files))
        return x, out
elif len(files) == 1:
        try:
            # try to load data with loadtxt; if there are text lines, this will fail
            tmp = np.loadtxt(files[0], unpack=True, delimiter=delim)
            x = tmp[0, :]
            y = tmp[1:, :]
        except ValueError:
            # there are text lines, so load the file manually and convert line by line
            # maybe there is a faster way using numpy native functions??
            tmp = []
            fp = open(files[0], "r")
            for line in fp:
                try:
                    if delim is not None:  # str.split works slightly differently than np.loadtxt regarding the delimiter
                        c = line.split(delim)
                    else:
                        c = line.split()
                    c = [float(v) for v in c]  # convert str to float; raises ValueError on a text line
                    tmp.append(c)  # append as new row
                except ValueError:
                    pass
            fp.close()
            # convert to numpy array; transpose so that, as in the loadtxt branch,
            # row 0 is the wavelength column
            tmp = np.array(tmp).T
            x = tmp[0, :]
            y = tmp[1:, :]
if y.shape[0] == 1:
y = y[0]
if smooth:
y = denoise(y)
if lmax < 0:
lmax = np.amax(x) + 1
return cut(x, y, lmin, lmax)
# if no file is found, return an empty array
print("ERROR: No file found!")
return np.array([]), np.array([])
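# Example usage (a minimal sketch; the file name is hypothetical):
#   wl, spec = load("OOspectrum.txt", lmin=400, lmax=700)
#   # text header lines, e.g. from Ocean Optics files, are skipped automatically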
# ##################################################################################################################
# shortcut to fit a single peak spectrum with a single gaussian
# returns the fit parameters
# if show=True, displays the results
# if delta=True, returns parameters and errors in second list
def fit_OO_spectrum(wl, spectrum, show=False, delta=False):
"""Fit a single peaked spectrum with a gaussian and a constant offset.
:param array wl: Wavelength axis.
:param array spectrum: Spectrum to fit (same shape as wl).
:param bool show: If True, show the fitted spectrum in a new figure (False by default).
:param bool delta: If True, return the errors along with the fit parameters.
:returns: Fit parameters [offset, amplitude, center, fwhm].
"""
popt = [ np.amin(spectrum), np.amax(spectrum) - np.amin(spectrum), wl[np.argmax(spectrum)], 5 ]
popt, pcov = curve_fit(ft.gaussians_const, wl, spectrum, popt)
if show:
pl.figure()
pl.plot(wl, spectrum, "r-")
pl.plot(wl, ft.gaussians_const(wl, *popt), ":k")
pl.xlabel("Wavelength (nm)")
pl.ylabel("Spectrum (arb. units)")
if delta:
err = np.sqrt(np.diag(pcov))
return popt, err
return popt
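# Example usage (a minimal sketch, on data loaded with load() above):
#   offset, amplitude, center, fwhm = fit_OO_spectrum(wl, spec, show=True)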
# ##################################################################################################################
def get_concentration(spectrum, epspeak, cuvL = 0.1, refspectrum = None, maxOD = 3, retfit = False, withoffset = True):
"""Extract concentration of solute from UV/VIS spectrum and peak absorptivity of solute.
If a reference spectrum is given, apply peak epsilon to this spectrum and then fit it to the data (this method is suited for very small and very large ODs, especially exceeding the dynamic range of the instrument).
:param array spectrum: UV/VIS data to analyze (OD). If spectrum is list of spectra, get concentration for every one.
:param float epspeak: Peak molar extinction (cm-1 M-1).
:param float cuvL: Length of cuvette (cm) (default 1mm).
:param array refspectrum: Reference spectrum (optional).
:param float maxOD: Maximum OD that is taken into account for fitting to reference spectrum (default = 3.0).
:param bool retfit: If True, return also fit parameters and uncertainties (default = False).
:param bool withoffset: If True, use a constant offset/background when fitting to the reference spectrum (default).
:returns: Solute concentration (and fit parameters and errors if retfit = True).
"""
    if refspectrum is not None:
        cref = np.amax(refspectrum) / (epspeak * cuvL)
        N = len(refspectrum)  # spectra are assumed to share the wavelength axis of the reference
        if withoffset:
            fitfunc = lambda x, a, b: np.minimum(np.absolute(a) * refspectrum + b, np.ones(N) * maxOD)
        else:
            fitfunc = lambda x, a: np.minimum(np.absolute(a) * refspectrum, np.ones(N) * maxOD)
        x = np.arange(N)
fitpars = []
fitsigma = []
if(isinstance(spectrum, list) or spectrum.ndim > 1):
c = []
for y in spectrum:
            if refspectrum is None:
c.append(np.amax(y) / (epspeak * cuvL))
else:
if withoffset:
popt = [(np.amax(y)-np.amin(y))/(np.amax(refspectrum)-np.amin(refspectrum)), np.amin(y)-np.amin(refspectrum)]
else:
popt = [(np.amax(y)-np.amin(y))/(np.amax(refspectrum)-np.amin(refspectrum))]
popt, pcov = curve_fit(fitfunc, x, np.minimum(y, np.ones(N) * maxOD), popt)
c.append(cref * abs(popt[0]))
fitpars.append(popt)
try:
fitsigma.append(np.sqrt(np.diag(pcov)))
except:
fitsigma.append(np.zeros(len(popt)))
if not isinstance(spectrum, list):
c = np.array(c)
else:
        if refspectrum is None:
c = np.amax(spectrum) / (epspeak * cuvL)
else:
if withoffset:
popt = [(np.amax(spectrum)-np.amin(spectrum))/(np.amax(refspectrum)-np.amin(refspectrum)), np.amin(spectrum)-np.amin(refspectrum)]
else:
popt = [(np.amax(spectrum)-np.amin(spectrum))/(np.amax(refspectrum)-np.amin(refspectrum))]
popt, pcov = curve_fit(fitfunc, x, np.minimum(spectrum, np.ones(N) * maxOD), popt)
c = cref * abs(popt[0])
fitpars = popt
try:
fitsigma = np.sqrt(np.diag(pcov))
except:
fitsigma = np.zeros(len(popt))
    if retfit:
return [c, fitpars, fitsigma]
else:
return c
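# Example usage (a minimal sketch; the extinction coefficient is hypothetical):
#   c = get_concentration(spec, epspeak=5.5e4, cuvL=0.1)
#   # or, fitting against a reference spectrum of known shape:
#   # c = get_concentration(spec, epspeak=5.5e4, cuvL=0.1, refspectrum=ref)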
|
ddietze/FSRStools
|
uvvis/__init__.py
|
Python
|
gpl-3.0
| 15,078
|
[
"Gaussian"
] |
a0e201b8d1a3bed417aebc0739809511754332929d10690d511d148c6a190be9
|
import models
import util
class ReportLine():
''' Generic report line'''
def __init__(self, **kwds):
self.__dict__.update(kwds)
class PatientReport():
def __init__(self, org, visit_date_from, visit_date_to, default_country, residence):
self.org = org
self.visit_date_from = visit_date_from
self.visit_date_to = visit_date_to
self.country = default_country
self.residence = residence
def _get_visits(self, ordered = False):
'''Get visits from our org in our date range.
TODO(dan): Move to Visit.'''
query = "WHERE organization = :1 and visit_date >= :2 and visit_date <= :3"
if ordered: query += " ORDER BY visit_date DESC"
return models.Visit.gql(query, self.org, self.visit_date_from, self.visit_date_to)
def get_screening_data(self):
''' Return # of visits and # unique patients within a date range.
'''
report_data = []
visit_count = 0
visits = self._get_visits()
patients = []
# Get patients and count
        if self.residence > '':
            for visit in visits:
                if util.string_eq_nocase(visit.get_patient().residence, self.residence):
                    (patients, visit_count) = add_screening_record(visit, patients, visit_count)
            patient_count = len(set(patients))
        elif self.country > '':
            for visit in visits:
                if visit.get_patient().country == self.country:
                    (patients, visit_count) = add_screening_record(visit, patients, visit_count)
            patient_count = len(set(patients))
else:
patient_count = len(set([visit.get_patient() for visit in visits]))
visit_count = visits.count()
# Build report
report_data.append(ReportLine(visitcount = visit_count, patientcount=patient_count))
return report_data
def get_screening_details(self):
''' Retrieve patients that have visits within a date range in order to build a report
'''
# TODO(dan): Factor get_screening_data and get_screening_details
report_detail = []
visits = self._get_visits(ordered = True)
for visit in visits:
if self.residence > '':
if util.string_eq_nocase(visit.get_patient().residence, self.residence):
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
elif self.country > '':
if visit.get_patient().country == self.country:
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
else:
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
return report_detail
def get_undernutrition_data(self, zscore):
''' Retrieve undernutrition data based on zscore in order to build a report
'''
# TODO(dan): Factor get_undernutrition_data and get_undernutrition_detail
report_data = []
patients = []
visits_latest = []
visits = self._get_visits()
stunted_children_counter = 0
underweight_children_counter = 0
wasting_children_counter = 0
visit_counter = 0
visit_latest_counter = 0
# Get latest visit per patient
        for visit in visits:
            if self.residence > '':
                if util.string_eq_nocase(visit.get_patient().residence, self.residence):
                    (patients, visits_latest, visit_counter) = add_undernutrition_record(visit, patients, visits_latest, visit_counter)
            elif self.country > '':
                if visit.get_patient().country == self.country:
                    (patients, visits_latest, visit_counter) = add_undernutrition_record(visit, patients, visits_latest, visit_counter)
            else:
                (patients, visits_latest, visit_counter) = add_undernutrition_record(visit, patients, visits_latest, visit_counter)
visits_latest = list(set(visits_latest))
for visit in visits_latest:
visit_latest_counter += 1
            # If the statistics do not exist, ignore the visit in the counts
try:
stats = visit.get_visit_statistics()
if stats.length_or_height_for_age.zscore:
if stats.length_or_height_for_age.zscore < float(zscore):
stunted_children_counter += 1
if stats.weight_for_age.zscore:
if stats.weight_for_age.zscore < float(zscore):
underweight_children_counter += 1
if stats.weight_for_length_or_height.zscore:
if stats.weight_for_length_or_height.zscore < float(zscore):
wasting_children_counter += 1
except:
pass
# Remove duplicate patients
patients = list(set(patients))
percent_stunted = 0
percent_underweight = 0
percent_wasting = 0
total_undernourished = 0
percent_undernourished = 0
if visit_counter > 0:
# Calculate percentages
percent_stunted = stunted_children_counter * 100.00 / visit_counter
percent_underweight = underweight_children_counter * 100.00 / visit_counter
percent_wasting = wasting_children_counter * 100.00 / visit_counter
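            # NOTE: a child meeting several criteria is counted once per
            # criterion, so total_undernourished can exceed the number of
            # unique undernourished children.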
total_undernourished = stunted_children_counter + underweight_children_counter + wasting_children_counter
percent_undernourished = total_undernourished * 100.00 / visit_counter
# Build report
report_data.append(ReportLine(stunted=stunted_children_counter, percentagestunted=percent_stunted,
underweight=underweight_children_counter, percentageunderweight=percent_underweight,
wasting=wasting_children_counter, percentagewasting=percent_wasting,
totalundernourished = total_undernourished, percentundernourished = percent_undernourished,
visitcount=visit_counter, patientcount=len(patients)))
return report_data
def get_undernutrition_detail(self, zscore_type, zscore):
''' Retrieve patients with undernutrition zscores within a date range in order to build a report
'''
report_detail = []
visits = self._get_visits(ordered = True)
for visit in visits:
try:
stats = visit.get_visit_statistics()
stat = float(zscore)
if zscore_type == 'stunted':
stat = float(stats.length_or_height_for_age.zscore)
elif zscore_type == 'underweight':
stat = float(stats.weight_for_age.zscore)
elif zscore_type == 'wasting':
stat = float(stats.weight_for_length_or_height.zscore)
else:
stat = float(zscore)
if stat < float(zscore):
if self.residence > '':
if util.string_eq_nocase(visit.get_patient().residence, self.residence):
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
elif self.country > '':
if visit.get_patient().country == self.country:
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
else:
report_detail.append(ReportLine(patient=visit.get_patient(), visit=visit))
except:
pass
return report_detail
def add_screening_record(visit, patients, visit_count):
'''helper function
'''
patients.append(visit.get_patient())
visit_count += 1
return (patients, visit_count)
def add_undernutrition_record(visit, patients, visits_latest, visit_counter):
'''helper function
'''
patients.append(visit.get_patient())
visits_latest.append(visit.get_patient().get_latest_visit())
visit_counter += 1
return (patients, visits_latest, visit_counter)
|
avastjohn/maventy_new
|
healthdb/reports.py
|
Python
|
bsd-3-clause
| 8,101
|
[
"VisIt"
] |
d441e749c9f6a5e95c9b23b98c70f5cc1cc32336488b0b9e1c5b2f5d62be531f
|
import numpy as np
import inspect
import sys
small = .000000000001
class NeighborhoodFactory(object):
    @staticmethod
    def build(neighborhood_func):
        for name, obj in inspect.getmembers(sys.modules[__name__]):
            if inspect.isclass(obj) and hasattr(obj, 'name') and neighborhood_func == obj.name:
                return obj()
        # raise only after every module member has been checked
        raise Exception(
            "Unsupported neighborhood function '%s'" % neighborhood_func)
class GaussianNeighborhood(object):
name = 'gaussian'
@staticmethod
def calculate(distance_matrix, radius, dim):
return np.exp(-1.0*distance_matrix/(2.0*radius**2)).reshape(dim, dim)
def __call__(self, *args, **kwargs):
return self.calculate(*args)
class BubbleNeighborhood(object):
name = 'bubble'
@staticmethod
def calculate(distance_matrix, radius, dim):
def l(a, b):
c = np.zeros(b.shape)
c[a-b >= 0] = 1
return c
return l(radius,
np.sqrt(distance_matrix.flatten())).reshape(dim, dim) + small
def __call__(self, *args, **kwargs):
return self.calculate(*args)
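# Example usage (a minimal sketch; the distance matrix is synthetic):
#   import numpy as np
#   neigh = NeighborhoodFactory.build('gaussian')
#   dist = np.arange(9, dtype=float)      # flattened 3x3 map distances
#   h = neigh.calculate(dist, 1.0, 3)     # 3x3 array of weights in (0, 1]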
|
sevamoo/SOMPY
|
sompy/neighborhood.py
|
Python
|
apache-2.0
| 1,239
|
[
"Gaussian"
] |
832447f1ed86346970cbb5334d7e0f225b1c62b513f178064671ded322017801
|
"""
Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
def _hist_bin_sqrt(x):
"""
Square root histogram bin estimator.
Bin width is inversely proportional to the data size. Used by many
programs for its simplicity.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / np.sqrt(x.size)
def _hist_bin_sturges(x):
"""
Sturges histogram bin estimator.
A very simplistic estimator based on the assumption of normality of
the data. This estimator has poor performance for non-normal data,
which becomes especially obvious for large data sets. The estimate
depends only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (np.log2(x.size) + 1.0)
def _hist_bin_rice(x):
"""
Rice histogram bin estimator.
Another simple estimator with no normality assumption. It has better
performance for large data than Sturges, but tends to overestimate
the number of bins. The number of bins is proportional to the cube
root of data size (asymptotically optimal). The estimate depends
only on size of the data.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
def _hist_bin_scott(x):
"""
Scott histogram bin estimator.
The binwidth is proportional to the standard deviation of the data
and inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
def _hist_bin_doane(x):
"""
Doane's histogram bin estimator.
Improved version of Sturges' formula which works better for
non-normal data. See
stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
if sigma > 0.0:
# These three operations add up to
# g1 = np.mean(((x - np.mean(x)) / sigma)**3)
# but use only one temp array instead of three
temp = x - np.mean(x)
np.true_divide(temp, sigma, temp)
np.power(temp, 3, temp)
g1 = np.mean(temp)
return x.ptp() / (1.0 + np.log2(x.size) +
np.log2(1.0 + np.absolute(g1) / sg1))
return 0.0
def _hist_bin_fd(x):
"""
The Freedman-Diaconis histogram bin estimator.
The Freedman-Diaconis rule uses interquartile range (IQR) to
estimate binwidth. It is considered a variation of the Scott rule
with more robustness as the IQR is less affected by outliers than
the standard deviation. However, the IQR depends on fewer points
than the standard deviation, so it is less accurate, especially for
long tailed distributions.
If the IQR is 0, this function returns 1 for the number of bins.
Binwidth is inversely proportional to the cube root of data size
(asymptotically optimal).
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
"""
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
def _hist_bin_auto(x):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
and the Sturges estimator if the FD bandwidth is 0.
The FD estimator is usually the most robust method, but its width
estimate tends to be too large for small `x` and bad for data with limited
variance. The Sturges estimator is quite good for small (<1000) datasets
and is the default in the R language. This method gives good off the shelf
behaviour.
.. versionchanged:: 1.15.0
If there is limited variance the IQR can be 0, which results in the
FD bin width being 0 too. This is not a valid bin width, so
``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
If the IQR is 0, it's unlikely any variance based estimators will be of
use, so we revert to the sturges estimator, which only uses the size of the
dataset in its calculation.
Parameters
----------
x : array_like
Input data that is to be histogrammed, trimmed to range. May not
be empty.
Returns
-------
h : An estimate of the optimal bin width for the given data.
See Also
--------
_hist_bin_fd, _hist_bin_sturges
"""
fd_bw = _hist_bin_fd(x)
sturges_bw = _hist_bin_sturges(x)
if fd_bw:
return min(fd_bw, sturges_bw)
else:
# limited variance, so we return a len dependent bw estimator
return sturges_bw
# Private dict initialized at module load time
_hist_bin_selectors = {'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
'scott': _hist_bin_scott,
'sqrt': _hist_bin_sqrt,
'sturges': _hist_bin_sturges}
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
return a, weights
def _get_outer_edges(a, range):
"""
Determine the outer bin edges to use, from either the data or the range
argument
"""
if range is not None:
first_edge, last_edge = range
if first_edge > last_edge:
raise ValueError(
'max must be larger than min in range parameter.')
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
elif a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = a.min(), a.max()
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
"autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
# expand empty range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
The upper bound, lowerbound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name))
if weights is not None:
raise TypeError("Automated estimation of the number of "
"bins is not supported for weighted data")
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = (a >= first_edge)
keep &= (a <= last_edge)
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError(
'`bins` must be an integer, a string, or an array')
if n_equal_bins < 1:
raise ValueError('`bins` must be positive, when an integer')
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError(
'`bins` must increase monotonically, when an array')
else:
raise ValueError('`bins` must be 1d, when an array')
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1,
endpoint=True, dtype=bin_type)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
def _search_sorted_inclusive(a, v):
"""
Like `searchsorted`, but where the last item in `v` is placed on the right.
In the context of a histogram, this makes the last bin edge inclusive
"""
return np.concatenate((
a.searchsorted(v[:-1], 'left'),
a.searchsorted(v[-1:], 'right')
))
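# Example (a sketch): with sorted data sa = np.array([0., 1., 2., 2.]) and
# edges v = np.array([0., 1., 2.]), the left-searches give [0, 1] while the
# right-search of the last edge gives 4, so samples equal to the last edge
# are counted in the final bin.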
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram` function.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
If `bins` is a string from the list below, `histogram_bin_edges` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). This is currently not used by any of the bin estimators,
but may be in the future.
Returns
-------
bin_edges : array of dtype float
The edges to pass into `histogram`
See Also
--------
histogram
Notes
-----
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
>>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> np.histogram_bin_edges(arr, bins=2)
array([0. , 2.5, 5. ])
For consistency with histogram, an array of pre-computed bins is
passed through unmodified:
>>> np.histogram_bin_edges(arr, [1, 2])
array([1, 2])
This function allows one set of bins to be computed, and reused across
multiple histograms:
>>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
>>> shared_bins
array([0., 1., 2., 3., 4., 5.])
>>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
>>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
>>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
>>> hist_0; hist_1
array([1, 1, 0, 1, 0])
array([2, 0, 1, 1, 2])
Which gives more easily comparable results than using separate bins for
each histogram:
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
array([0., 1., 2., 3.])
array([0. , 1.25, 2.5 , 3.75, 5. ])
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, _ = _get_bin_edges(a, bins, range, weights)
return bin_edges
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
.. deprecated:: 1.6.0
This is equivalent to the `density` argument, but produces incorrect
results for unequal bin widths. It should not be used.
.. versionchanged:: 1.15.0
DeprecationWarnings are actually emitted.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None or
np.can_cast(weights.dtype, np.double) or
np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / (last_edge - first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i:i + BLOCK]
# Only include values in the right range
keep = (tmp_a >= first_edge)
keep &= (tmp_a <= last_edge)
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = (tmp_a - first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = ((tmp_a >= bin_edges[indices + 1])
& (indices != n_equal_bins - 1))
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == 'c':
n.real += np.bincount(indices, weights=tmp_w.real,
minlength=n_equal_bins)
n.imag += np.bincount(indices, weights=tmp_w.imag,
minlength=n_equal_bins)
else:
n += np.bincount(indices, weights=tmp_w,
minlength=n_equal_bins).astype(ntype)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in _range(0, len(a), BLOCK):
sa = np.sort(a[i:i+BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in _range(0, len(a), BLOCK):
tmp_a = a[i:i+BLOCK]
tmp_w = weights[i:i+BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
DeprecationWarning, stacklevel=2)
normed = False
if density:
db = np.array(np.diff(bin_edges), float)
return n/db/n.sum(), bin_edges
elif normed:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing `normed=True` on non-uniform bins has always been "
"broken, and computes neither the probability density "
"function nor the probability mass function. "
"The result is only correct if the bins are uniform, when "
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
np.VisibleDeprecationWarning, stacklevel=2)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
return n/(n*db).sum(), bin_edges
else:
if normed is not None:
# 2018-06-13, numpy 1.15.0 (this was not noisily deprecated in 1.6)
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
DeprecationWarning, stacklevel=2)
return n, bin_edges
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : (N, D) array, or (D, N) array_like
The data to be histogrammed.
Note the unusual interpretation of sample when an array_like:
* When an array, each row is a coordinate in a D-dimensional space -
      such as ``histogramdd(np.array([p1, p2, p3]))``.
    * When an array_like, each element is the list of values for a single
      coordinate - such as ``histogramdd((X, Y, Z))``.
The first form should be preferred.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the monotonically increasing bin
edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of length D, each an optional (lower, upper) tuple giving
the outer bin edges to be used if the edges are not given explicitly in
`bins`.
An entry of None in the sequence results in the minimum and maximum
values being used for the corresponding dimension.
The default, None, is equivalent to passing a tuple of D None values.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_volume``.
weights : (N,) array_like, optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False,
the values of the returned histogram are equal to the sum of the
weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights
for the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
N, D = sample.shape
nbin = np.empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = np.asarray(weights)
try:
M = len(bins)
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# normalize the range argument
if range is None:
range = (None,) * D
elif len(range) != D:
raise ValueError('range argument must have one entry per dimension')
# Create edge arrays
for i in _range(D):
if np.ndim(bins[i]) == 0:
if bins[i] < 1:
raise ValueError(
'`bins[{}]` must be positive, when an integer'.format(i))
smin, smax = _get_outer_edges(sample[:,i], range[i])
edges[i] = np.linspace(smin, smax, bins[i] + 1)
elif np.ndim(bins[i]) == 1:
edges[i] = np.asarray(bins[i])
if np.any(edges[i][:-1] > edges[i][1:]):
raise ValueError(
'`bins[{}]` must be monotonically increasing, when an array'
.format(i))
else:
raise ValueError(
'`bins[{}]` must be a scalar or 1d array'.format(i))
nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
dedges[i] = np.diff(edges[i])
# Compute the bin number each sample falls into.
Ncount = tuple(
# avoid np.digitize to work around gh-11022
np.searchsorted(edges[i], sample[:, i], side='right')
for i in _range(D)
)
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right edge to be
# counted in the last bin, and not as an outlier.
for i in _range(D):
# Find which points are on the rightmost edge.
on_edge = (sample[:, i] == edges[i][-1])
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Compute the sample indices in the flattened histogram matrix.
# This raises an error if the array is too large.
xy = np.ravel_multi_index(Ncount, nbin)
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
hist = np.bincount(xy, weights, minlength=nbin.prod())
# Shape into a proper matrix
hist = hist.reshape(nbin)
# This preserves the (bad) behavior observed in gh-7845, for now.
hist = hist.astype(float, casting='safe')
# Remove outliers (indices 0 and -1 for each dimension).
core = D*(slice(1, -1),)
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in _range(D):
shape = np.ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
|
Eric89GXL/numpy
|
numpy/lib/histograms.py
|
Python
|
bsd-3-clause
| 35,040
|
[
"Gaussian"
] |
06e575642b0e7c8f54e741a2eb178a927f643bd09d10e811e9c307e0d2754c3b
|
# coding: utf-8
"""
The initial version of this module was based on a similar implementation
present in FireWorks (https://pypi.python.org/pypi/FireWorks).
Work done by D. Waroquiers, A. Jain, and M. Kocher.
The main difference wrt the Fireworks implementation is that the QueueAdapter
objects provide a programmatic interface for setting important attributes
such as the number of MPI nodes, the number of OMP threads and the memory requirements.
This programmatic interface is used by the `TaskManager` for optimizing the parameters
of the run before submitting the job (Abinit provides the autoparal option that
allows one to get a list of parallel configuration and their expected efficiency).
"""
from __future__ import print_function, division, unicode_literals
import sys
import os
import abc
import string
import copy
import getpass
import six
import json
import math
from . import qutils as qu
from collections import namedtuple
from subprocess import Popen, PIPE
from atomicfile import AtomicFile
from monty.string import is_string, list_strings
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.inspect import all_subclasses
from monty.io import FileLock
from pymatgen.core.units import Memory
from .utils import Condition
from .launcher import ScriptEditor
from .qjobs import QueueJob
import logging
logger = logging.getLogger(__name__)
__all__ = [
"MpiRunner",
"make_qadapter",
]
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
class SubmitResults(namedtuple("SubmitResult", "qid, out, err, process")):
    """
    Named tuple created by the concrete implementations of _submit_to_queue to pass
    around the results of submitting the job file to the queue.
    qid: queue id of the submission
    out: stdout of the submission
    err: stderr of the submission
    process: process object of the submission
    """
class MpiRunner(object):
    """
    This object provides an abstraction for the mpirunner provided
    by the different MPI libraries. Its main task is handling the
    different syntax and options supported by the different mpirunners.
    """
    def __init__(self, name, type=None, options=""):
        self.name = name
        self.type = type  # previously hard-coded to None, silently ignoring the argument
        self.options = options
def string_to_run(self, executable, mpi_procs, stdin=None, stdout=None, stderr=None, exec_args=None):
stdin = "< " + stdin if stdin is not None else ""
stdout = "> " + stdout if stdout is not None else ""
stderr = "2> " + stderr if stderr is not None else ""
if exec_args:
executable = executable + " " + " ".join(list_strings(exec_args))
if self.has_mpirun:
if self.type is None:
# TODO: better treatment of mpirun syntax.
#se.add_line('$MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR')
num_opt = "-n " + str(mpi_procs)
cmd = " ".join([self.name, num_opt, executable, stdin, stdout, stderr])
else:
                raise NotImplementedError("type %s is not supported!" % self.type)
else:
#assert mpi_procs == 1
cmd = " ".join([executable, stdin, stdout, stderr])
return cmd
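    # Example (a sketch): MpiRunner("mpirun").string_to_run("abinit", 4, stdin="run.files")
    # yields "mpirun -n 4 abinit < run.files" (plus blanks for the empty redirections).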
@property
def has_mpirun(self):
"""True if we are running via mpirun, mpiexec ..."""
return self.name is not None
class OmpEnv(AttrDict):
"""
Dictionary with the OpenMP environment variables
see https://computing.llnl.gov/tutorials/openMP/#EnvironmentVariables
"""
    _KEYS = [
        "OMP_SCHEDULE",
        "OMP_NUM_THREADS",
        "OMP_DYNAMIC",
        "OMP_PROC_BIND",
        "OMP_NESTED",
        "OMP_STACKSIZE",
        "OMP_WAIT_POLICY",
        "OMP_MAX_ACTIVE_LEVELS",
        "OMP_THREAD_LIMIT",
    ]
@classmethod
def as_ompenv(cls, obj):
"""Convert an object into a OmpEnv"""
if isinstance(obj, cls): return obj
if obj is None: return cls()
return cls(**obj)
def __init__(self, *args, **kwargs):
"""
Constructor method inherited from dictionary:
        >>> assert OmpEnv(OMP_NUM_THREADS=1).OMP_NUM_THREADS == "1"  # values are stored as strings
To create an instance from an INI file, use:
OmpEnv.from_file(filename)
"""
super(OmpEnv, self).__init__(*args, **kwargs)
err_msg = ""
for key, value in self.items():
self[key] = str(value)
if key not in self._KEYS:
err_msg += "unknown option %s\n" % key
if err_msg:
raise ValueError(err_msg)
def export_str(self):
"""Return a string with the bash statements needed to setup the OMP env."""
return "\n".join("export %s=%s" % (k, v) for k, v in self.items())
class Hardware(object):
"""
This object collects information on the hardware available in a given queue.
Basic definitions:
- A node refers to the physical box, i.e. cpu sockets with north/south switches connecting memory systems
and extension cards, e.g. disks, nics, and accelerators
- A cpu socket is the connector to these systems and the cpu cores
    - A cpu core is an independent computing unit with its own computing pipeline, logical units, and memory controller.
Each cpu core will be able to service a number of cpu threads, each having an independent instruction stream
but sharing the cores memory controller and other logical units.
"""
def __init__(self, **kwargs):
self.num_nodes = int(kwargs.pop("num_nodes"))
self.sockets_per_node = int(kwargs.pop("sockets_per_node"))
self.cores_per_socket = int(kwargs.pop("cores_per_socket"))
# Convert memory to megabytes.
m = str(kwargs.pop("mem_per_node"))
self.mem_per_node = int(Memory.from_string(m).to("Mb"))
if self.mem_per_node <= 0 or self.sockets_per_node <= 0 or self.cores_per_socket <= 0:
raise ValueError("invalid parameters: %s" % kwargs)
if kwargs:
raise ValueError("Found invalid keywords in the partition section:\n %s" % kwargs.keys())
def __str__(self):
"""String representation."""
lines = []
app = lines.append
app(" num_nodes: %d, sockets_per_node: %d, cores_per_socket: %d, mem_per_node %s," %
(self.num_nodes, self.sockets_per_node, self.cores_per_socket, self.mem_per_node))
return "\n".join(lines)
@property
def num_cores(self):
"""Total number of cores available"""
return self.cores_per_socket * self.sockets_per_node * self.num_nodes
@property
def cores_per_node(self):
"""Number of cores per node."""
return self.cores_per_socket * self.sockets_per_node
@property
def mem_per_core(self):
"""Memory available on a single node."""
return self.mem_per_node / self.cores_per_node
def can_use_omp_threads(self, omp_threads):
"""True if omp_threads fit in a node."""
return self.cores_per_node >= omp_threads
def divmod_node(self, mpi_procs, omp_threads):
"""Use divmod to compute (num_nodes, rest_cores)"""
return divmod(mpi_procs * omp_threads, self.cores_per_node)
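# Example (a sketch): on a queue with 2 sockets x 8 cores per node,
# cores_per_node == 16 and divmod_node(32, 2) -> (4, 0), i.e. 32 MPI procs
# with 2 OMP threads each fill exactly 4 nodes with no cores left over.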
class _ExcludeNodesFile(object):
"""
This file contains the list of nodes to be excluded.
Nodes are indexed by queue name.
"""
DIRPATH = os.path.join(os.getenv("HOME"), ".abinit", "abipy")
FILEPATH = os.path.join(DIRPATH, "exclude_nodes.json")
def __init__(self):
if not os.path.exists(self.FILEPATH):
if not os.path.exists(self.DIRPATH): os.makedirs(self.DIRPATH)
with FileLock(self.FILEPATH):
with open(self.FILEPATH, "w") as fh:
json.dump({}, fh)
    def read_nodes(self, qname):
        with open(self.FILEPATH, "r") as fh:  # "w" would truncate the file before reading
            return json.load(fh).get(qname, [])
    def add_nodes(self, qname, nodes):
        nodes = (nodes,) if not isinstance(nodes, (tuple, list)) else nodes
        with FileLock(self.FILEPATH):
            # read the current content first; AtomicFile("w+") starts from an empty file
            with open(self.FILEPATH, "r") as fh:
                d = json.load(fh)
            if qname in d:
                d[qname] = list(set(d[qname]) | set(nodes))
            else:
                d[qname] = list(nodes)
            with AtomicFile(self.FILEPATH, mode="w") as fh:
                json.dump(d, fh)
_EXCL_NODES_FILE = _ExcludeNodesFile()
def show_qparams(qtype, stream=sys.stdout):
"""Print to the given stream the template of the :class:`QueueAdapter` of type `qtype`."""
for cls in all_subclasses(QueueAdapter):
if cls.QTYPE == qtype: return stream.write(cls.QTEMPLATE)
raise ValueError("Cannot find class associated to qtype %s" % qtype)
def all_qtypes():
"""List of all qtypes supported."""
return [cls.QTYPE for cls in all_subclasses(QueueAdapter)]
def make_qadapter(**kwargs):
"""
Return the concrete :class:`QueueAdapter` class from a string.
Note that one can register a customized version with:
.. example::
from qadapters import SlurmAdapter
class MyAdapter(SlurmAdapter):
QTYPE = "myslurm"
# Add your customized code here
# Register your class.
SlurmAdapter.register(MyAdapter)
make_qadapter(qtype="myslurm", **kwargs)
.. warning::
MyAdapter should be pickleable, hence one should declare it
at the module level so that pickle can import it at run-time.
"""
# Get all known subclasses of QueueAdapter.
d = {c.QTYPE: c for c in all_subclasses(QueueAdapter)}
# Preventive copy before pop
kwargs = copy.deepcopy(kwargs)
qtype = kwargs["queue"].pop("qtype")
return d[qtype](**kwargs)
class QueueAdapterError(Exception):
"""Base Error class for exceptions raise by QueueAdapter."""
class MaxNumLaunchesError(QueueAdapterError):
"""Raised by `submit_to_queue` if we try to submit more than `max_num_launches` times."""
class QueueAdapter(six.with_metaclass(abc.ABCMeta, object)):
"""
The `QueueAdapter` is responsible for all interactions with a specific queue management system.
This includes handling all details of queue script format as well as queue submission and management.
This is the **abstract** base class defining the methods that must be implemented by the concrete classes.
Concrete classes should extend this class with implementations that work on specific queue systems.
.. note::
A `QueueAdapter` has a handler (:class:`QueueJob`) defined in qjobs.py that allows one
to contact the resource manager to get info about the status of the job.
Each concrete implementation of `QueueAdapter` should have a corresponding `QueueJob`.
"""
Error = QueueAdapterError
MaxNumLaunchesError = MaxNumLaunchesError
@classmethod
def autodoc(cls):
return """
# dictionary with info on the hardware available on this particular queue.
hardware:
num_nodes: # Number of nodes available on this queue. Mandatory
sockets_per_node: # Mandatory.
cores_per_socket: # Mandatory.
        # dictionary with the options used to prepare the environment before submitting the job
job:
setup: # List of commands (str) executed before running (default empty)
omp_env: # Dictionary with OpenMP env variables (default empty i.e. no OpenMP)
modules: # List of modules to be imported (default empty)
shell_env: # Dictionary with shell env variables.
mpi_runner: # MPI runner i.e. mpirun, mpiexec, Default is None i.e. no mpirunner
pre_run: # List of commands executed before the run (default: empty)
post_run: # List of commands executed after the run (default: empty)
# dictionary with the name of the queue and optional parameters
# used to build/customize the header of the submission script.
queue:
qname: # Name of the queue (mandatory)
qparams: # Dictionary with values used to generate the header of the job script
# See pymatgen.io.abinitio.qadapters.py for the list of supported values.
# dictionary with the constraints that must be fulfilled in order to run on this queue.
limits:
min_cores: # Minimum number of cores (default 1)
max_cores: # Maximum number of cores (mandatory),
# hard limit to hint_cores, the limit beyond which the scheduler will not accept the job (mandatory)
hint_cores: # the limit used in the first setup of jobs,
# Fix_Critical method may increase this number until max_cores is reached
min_mem_per_proc: # Minimum memory per MPI process in Mb, units can be specified e.g. 1.4 Gb
# (default hardware.mem_per_core)
max_mem_per_proc: # Maximum memory per MPI process in Mb, units can be specified e.g. `1.4Gb`
# (default hardware.mem_per_node)
            timelimit: # Initial time-limit
            timelimit_hard: # The hard time-limit for this queue.
# Error handlers could try to submit jobs with increased timelimit
# up to timelimit_hard. If not specified, timelimit_hard == timelimit
condition: # MongoDB-like condition (default empty, i.e. not used)
allocation: # String defining the policy used to select the optimal number of CPUs.
# possible values are ["nodes", "force_nodes", "shared"]
# "nodes" means that we should try to allocate entire nodes if possible.
# This is a soft limit, in the sense that the qadapter may use a configuration
# that does not fulfill this requirement. If failing, it will try to use the
# smallest number of nodes compatible with the optimal configuration.
                        # Use `force_nodes` to enforce entire nodes allocation.
# `shared` mode does not enforce any constraint (default).
            max_num_launches: # limit on the number of times a specific task can be restarted (default 10)
"""
def __init__(self, **kwargs):
"""
Args:
qname: Name of the queue.
qparams: Dictionary with the parameters used in the template.
setup: String or list of commands to execute during the initial setup.
modules: String or list of modules to load before running the application.
shell_env: Dictionary with the environment variables to export before running the application.
omp_env: Dictionary with the OpenMP variables.
pre_run: String or list of commands to execute before launching the calculation.
post_run: String or list of commands to execute once the calculation is completed.
mpi_runner: Path to the MPI runner or :class:`MpiRunner` instance. None if not used
max_num_launches: Maximum number of submissions that can be done for a specific task. Defaults to 10
qverbatim:
min_cores, max_cores, hint_cores: Minimum, maximum, and hint limits of number of cores that can be used
min_mem_per_proc: Minimum memory per process in megabytes.
max_mem_per_proc: Maximum memory per process in megabytes.
timelimit: initial time limit in seconds
timelimit_hard: hard time limit for this queue
priority: Priority level, integer number > 0
condition: Condition object (dictionary)
.. note::
priority is a positive integer used to order the qadapters. The :class:`TaskManager` will
try to run jobs on the qadapter with the highest priority if possible
"""
# TODO
#task_classes
# Make defensive copies so that we can change the values at runtime.
kwargs = copy.deepcopy(kwargs)
self.priority = int(kwargs.pop("priority"))
self.hw = Hardware(**kwargs.pop("hardware"))
self._parse_queue(kwargs.pop("queue"))
self._parse_limits(kwargs.pop("limits"))
self._parse_job(kwargs.pop("job"))
# List of dictionaries with the parameters used to submit jobs
# The launcher will use this information to increase the resources
self.launches = []
if kwargs:
raise ValueError("Found unknown keywords:\n%s" % list(kwargs.keys()))
self.validate_qparams()
# Initialize some values from the info reported in the partition.
self.set_mpi_procs(self.min_cores)
self.set_mem_per_proc(self.min_mem_per_proc)
# Final consistency check.
self.validate_qparams()
def validate_qparams(self):
"""
Check if the keys specified by the user in qparams are supported.
Raise:
`ValueError` if errors.
"""
# No validation for ShellAdapter.
if isinstance(self, ShellAdapter): return
# Parse the template so that we know the list of supported options.
err_msg = ""
for param in self.qparams:
if param not in self.supported_qparams:
err_msg += "Unsupported QUEUE parameter name %s\n" % param
err_msg += "Supported parameters:\n"
for param_sup in self.supported_qparams:
err_msg += " %s \n" % param_sup
if err_msg:
raise ValueError(err_msg)
def _parse_limits(self, d):
# Time limits.
self.set_timelimit(qu.timelimit_parser(d.pop("timelimit")))
tl_hard = d.pop("timelimit_hard", None)
tl_hard = qu.timelimit_parser(tl_hard) if tl_hard is not None else self.timelimit
self.set_timelimit_hard(tl_hard)
# Cores
self.min_cores = int(d.pop("min_cores", 1))
self.max_cores = int(d.pop("max_cores"))
self.hint_cores = int(d.pop("hint_cores", self.max_cores))
if self.min_cores > self.max_cores:
raise ValueError("min_cores %s cannot be greater than max_cores %s" % (self.min_cores, self.max_cores))
# Memory
# FIXME: Needed because autoparal 1 with paral_kgb 1 is not able to estimate memory
self.min_mem_per_proc = qu.any2mb(d.pop("min_mem_per_proc", self.hw.mem_per_core))
self.max_mem_per_proc = qu.any2mb(d.pop("max_mem_per_proc", self.hw.mem_per_node))
# Misc
self.max_num_launches = int(d.pop("max_num_launches", 10))
self.condition = Condition(d.pop("condition", {}))
self.allocation = d.pop("allocation", "shared")
if self.allocation not in ("nodes", "force_nodes", "shared"):
raise ValueError("Wrong value for `allocation` option")
if d:
raise ValueError("Found unknown keyword(s) in limits section:\n %s" % d.keys())
def _parse_job(self, d):
setup = d.pop("setup", None)
if is_string(setup): setup = [setup]
self.setup = setup[:] if setup is not None else []
omp_env = d.pop("omp_env", None)
self.omp_env = omp_env.copy() if omp_env is not None else {}
modules = d.pop("modules", None)
if is_string(modules): modules = [modules]
self.modules = modules[:] if modules is not None else []
shell_env = d.pop("shell_env", None)
self.shell_env = shell_env.copy() if shell_env is not None else {}
self.mpi_runner = d.pop("mpi_runner", None)
if not isinstance(self.mpi_runner, MpiRunner):
self.mpi_runner = MpiRunner(self.mpi_runner)
pre_run = d.pop("pre_run", None)
if is_string(pre_run): pre_run = [pre_run]
self.pre_run = pre_run[:] if pre_run is not None else []
post_run = d.pop("post_run", None)
if is_string(post_run): post_run = [post_run]
self.post_run = post_run[:] if post_run is not None else []
if d:
raise ValueError("Found unknown keyword(s) in job section:\n %s" % d.keys())
def _parse_queue(self, d):
# Init params
qparams = d.pop("qparams", None)
self._qparams = copy.deepcopy(qparams) if qparams is not None else {}
self.set_qname(d.pop("qname", ""))
if d:
raise ValueError("Found unknown keyword(s) in queue section:\n %s" % d.keys())
def __str__(self):
lines = ["%s:%s" % (self.__class__.__name__, self.qname)]
app = lines.append
app("Hardware:\n" + str(self.hw))
#lines.extend(["qparams:\n", str(self.qparams)])
if self.has_omp: app(str(self.omp_env))
return "\n".join(lines)
@property
def qparams(self):
"""Dictionary with the parameters used to construct the header."""
return self._qparams
@lazy_property
def supported_qparams(self):
"""
List of the supported parameters that can be passed to the
queue manager (obtained by parsing QTEMPLATE).
"""
import re
return re.findall(r"\$\$\{(\w+)\}", self.QTEMPLATE)
@property
def has_mpi(self):
"""True if we are using MPI"""
return bool(self.mpi_runner)
@property
def has_omp(self):
"""True if we are using OpenMP threads"""
return hasattr(self, "omp_env") and bool(getattr(self, "omp_env"))
@property
def num_cores(self):
"""Total number of cores employed"""
return self.mpi_procs * self.omp_threads
@property
def omp_threads(self):
"""Number of OpenMP threads."""
if self.has_omp:
return self.omp_env["OMP_NUM_THREADS"]
else:
return 1
@property
def pure_mpi(self):
"""True if only MPI is used."""
return self.has_mpi and not self.has_omp
@property
def pure_omp(self):
"""True if only OpenMP is used."""
return self.has_omp and not self.has_mpi
@property
def hybrid_mpi_omp(self):
"""True if we are running in MPI+Openmp mode."""
return self.has_omp and self.has_mpi
@property
def run_info(self):
"""String with info on the run."""
return "MPI: %d, OMP: %d" % (self.mpi_procs, self.omp_threads)
def deepcopy(self):
"""Deep copy of the object."""
return copy.deepcopy(self)
def record_launch(self, queue_id):
"""Save submission"""
self.launches.append(
AttrDict(queue_id=queue_id, mpi_procs=self.mpi_procs, omp_threads=self.omp_threads,
mem_per_proc=self.mem_per_proc, timelimit=self.timelimit))
return len(self.launches)
def remove_launch(self, index):
"""Remove launch with the given index."""
self.launches.pop(index)
@property
def num_launches(self):
"""Number of submission tried with this adapter so far."""
return len(self.launches)
@property
def last_launch(self):
"""Return the last launch."""
if len(self.launches) > 0:
return self.launches[-1]
else:
return None
def validate(self):
"""Validate the parameters of the run. Raises self.Error if invalid parameters."""
errors = []
app = errors.append
if not self.hint_cores >= self.mpi_procs * self.omp_threads >= self.min_cores:
app("self.hint_cores >= mpi_procs * omp_threads >= self.min_cores not satisfied")
if self.omp_threads > self.hw.cores_per_node:
app("omp_threads > hw.cores_per_node")
if self.mem_per_proc > self.hw.mem_per_node:
app("mem_mb >= self.hw.mem_per_node")
if not self.max_mem_per_proc >= self.mem_per_proc >= self.min_mem_per_proc:
app("self.max_mem_per_proc >= mem_mb >= self.min_mem_per_proc not satisfied")
if self.priority <= 0:
app("priority must be > 0")
if not (1 <= self.min_cores <= self.hw.num_cores >= self.hint_cores):
app("1 <= min_cores <= hardware num_cores >= hint_cores not satisfied")
if errors:
raise self.Error(str(self) + "\n".join(errors))
def set_omp_threads(self, omp_threads):
"""Set the number of OpenMP threads."""
self.omp_env["OMP_NUM_THREADS"] = omp_threads
@property
def mpi_procs(self):
"""Number of CPUs used for MPI."""
return self._mpi_procs
def set_mpi_procs(self, mpi_procs):
"""Set the number of MPI processes to mpi_procs"""
self._mpi_procs = mpi_procs
@property
def qname(self):
"""The name of the queue."""
return self._qname
def set_qname(self, qname):
"""Set the name of the queue."""
self._qname = qname
# todo this assumes only one wall time, i.e. the one in the manager file is the one always used
# we should use the standard walltime to start with but also allow to increase the walltime
@property
def timelimit(self):
"""Returns the walltime in seconds."""
return self._timelimit
@property
def timelimit_hard(self):
"""Returns the walltime in seconds."""
return self._timelimit_hard
def set_timelimit(self, timelimit):
"""Set the start walltime in seconds, fix method may increase this one until timelimit_hard is reached."""
self._timelimit = timelimit
def set_timelimit_hard(self, timelimit_hard):
"""Set the maximal possible walltime in seconds."""
self._timelimit_hard = timelimit_hard
@property
def mem_per_proc(self):
"""The memory per process in megabytes."""
return self._mem_per_proc
def set_mem_per_proc(self, mem_mb):
"""
Set the memory per process in megabytes. If mem_mb <= min_mem_per_proc, min_mem_per_proc is used.
"""
# Hack needed because abinit is still not able to estimate memory.
if mem_mb <= self.min_mem_per_proc: mem_mb = self.min_mem_per_proc
self._mem_per_proc = int(mem_mb)
@property
def total_mem(self):
"""Total memory required by the job in megabytes."""
return Memory(self.mem_per_proc * self.mpi_procs, "Mb")
@abc.abstractmethod
def cancel(self, job_id):
"""
Cancel the job.
Args:
job_id: Job identifier.
Returns:
Exit status.
"""
def can_run_pconf(self, pconf):
"""True if the qadapter in principle is able to run the :class:`ParalConf` pconf"""
if not self.hint_cores >= pconf.num_cores >= self.min_cores: return False
if not self.hw.can_use_omp_threads(self.omp_threads): return False
if pconf.mem_per_proc > self.hw.mem_per_node: return False
if self.allocation == "force_nodes" and pconf.num_cores % self.hw.cores_per_node != 0:
return False
return self.condition(pconf)
def distribute(self, mpi_procs, omp_threads, mem_per_proc):
"""
Returns (num_nodes, mpi_per_node)
Aggressive: When Open MPI thinks that it is in an exactly- or under-subscribed mode
(i.e., the number of running processes is equal to or less than the number of available processors),
MPI processes will automatically run in aggressive mode, meaning that they will never voluntarily give
up the processor to other processes. With some network transports, this means that Open MPI will spin
in tight loops attempting to make message passing progress, effectively causing other processes to not get
any CPU cycles (and therefore never make any progress).
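Example (a sketch for hypothetical hardware with 8 cores and 16000 Mb of
memory per node): distribute(mpi_procs=16, omp_threads=1, mem_per_proc=1000)
would return Distrib(num_nodes=2, mpi_per_node=8, exact=True), since the
sixteen processes fill two nodes exactly and fit within the per-node memory.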
"""
class Distrib(namedtuple("Distrib", "num_nodes mpi_per_node exact")):
pass
#@property
#def mem_per_node
# return self.mpi_per_node * mem_per_proc
#def set_nodes(self, nodes):
hw = self.hw
# TODO: Add check on user-memory
if mem_per_proc <= 0:
logger.warning("mem_per_proc <= 0")
mem_per_proc = hw.mem_per_core
if mem_per_proc > hw.mem_per_node:
raise self.Error(
"mem_per_proc > mem_per_node.\n Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
# Try to use all the cores in the node.
num_nodes, rest_cores = hw.divmod_node(mpi_procs, omp_threads)
if num_nodes == 0 and mpi_procs * mem_per_proc <= hw.mem_per_node:
# One node is enough
return Distrib(num_nodes=1, mpi_per_node=mpi_procs, exact=True)
if num_nodes == 0: num_nodes = 2
mpi_per_node = mpi_procs // num_nodes
if mpi_per_node * mem_per_proc <= hw.mem_per_node and rest_cores == 0:
# Commensurate with nodes.
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=True)
#if mode == "block", "cyclic"
# Try first to pack MPI processors in a node as much as possible
mpi_per_node = int(hw.mem_per_node / mem_per_proc)
assert mpi_per_node != 0
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
print("exact --> false", num_nodes, mpi_per_node)
if mpi_per_node * omp_threads <= hw.cores_per_node and mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
if (mpi_procs * omp_threads) % mpi_per_node != 0:
# Have to reduce the number of MPI procs per node
for mpi_per_node in reversed(range(1, mpi_per_node)):
if mpi_per_node > hw.cores_per_node: continue
num_nodes = (mpi_procs * omp_threads) // mpi_per_node
if (mpi_procs * omp_threads) % mpi_per_node == 0 and mpi_per_node * mem_per_proc <= hw.mem_per_node:
return Distrib(num_nodes=num_nodes, mpi_per_node=mpi_per_node, exact=False)
else:
raise self.Error("Cannot distribute mpi_procs %d, omp_threads %d, mem_per_proc %s" %
(mpi_procs, omp_threads, mem_per_proc))
def optimize_params(self):
"""
This method is called in get_subs_dict. Return a dict with parameters to be added to qparams
Subclasses may provide a specialized version.
"""
logger.debug("optimize_params of baseclass --> no optimization available!!!")
return {}
def get_subs_dict(self):
"""
Return substitution dict for replacements into the template
Subclasses may want to customize this method.
"""
#d = self.qparams.copy()
d = self.qparams
d.update(self.optimize_params())
# clean null values
subs_dict = {k: v for k, v in d.items() if v is not None}
#print("subs_dict:", subs_dict)
return subs_dict
def _make_qheader(self, job_name, qout_path, qerr_path):
"""Return a string with the options that are passed to the resource manager."""
# get substitution dict for replacements into the template
subs_dict = self.get_subs_dict()
# Set job_name and the names for the stderr and stdout of the
# queue manager (note the use of the extensions .qout and .qerr
# so that we can easily locate these files).
subs_dict['job_name'] = job_name.replace('/', '_')
subs_dict['_qout_path'] = qout_path
subs_dict['_qerr_path'] = qerr_path
qtemplate = QScriptTemplate(self.QTEMPLATE)
# might contain unused parameters as leftover $$.
unclean_template = qtemplate.safe_substitute(subs_dict)
# Remove lines with leftover $$.
clean_template = []
for line in unclean_template.split('\n'):
if '$$' not in line:
clean_template.append(line)
return '\n'.join(clean_template)
def get_script_str(self, job_name, launch_dir, executable, qout_path, qerr_path,
stdin=None, stdout=None, stderr=None, exec_args=None):
"""
Returns a (multi-line) String representing the queue script, e.g. PBS script.
Uses the template_file along with internal parameters to create the script.
Args:
job_name: Name of the job.
launch_dir: (str) The directory the job will be launched in.
executable: String with the name of the executable to be executed or list of commands
qout_path: Path of the Queue manager output file.
qerr_path: Path of the Queue manager error file.
exec_args: List of arguments passed to executable (used only if executable is a string, default: empty)
"""
# PbsPro does not accept job_names longer than 15 chars; truncate to 14 to be safe.
if len(job_name) > 14 and isinstance(self, PbsProAdapter):
job_name = job_name[:14]
# Construct the header for the Queue Manager.
qheader = self._make_qheader(job_name, qout_path, qerr_path)
# Add the bash section.
se = ScriptEditor()
if self.setup:
se.add_comment("Setup section")
se.add_lines(self.setup)
se.add_emptyline()
if self.modules:
se.add_comment("Load Modules")
se.add_line("module purge")
se.load_modules(self.modules)
se.add_emptyline()
if self.has_omp:
se.add_comment("OpenMp Environment")
se.declare_vars(self.omp_env)
se.add_emptyline()
if self.shell_env:
se.add_comment("Shell Environment")
se.declare_vars(self.shell_env)
se.add_emptyline()
# Cd to launch_dir
se.add_line("cd " + os.path.abspath(launch_dir))
if self.pre_run:
se.add_comment("Commands before execution")
se.add_lines(self.pre_run)
se.add_emptyline()
# Construct the string to run the executable with MPI and mpi_procs.
if is_string(executable):
line = self.mpi_runner.string_to_run(executable, self.mpi_procs,
stdin=stdin, stdout=stdout, stderr=stderr, exec_args=exec_args)
se.add_line(line)
else:
assert isinstance(executable, (list, tuple))
se.add_lines(executable)
if self.post_run:
se.add_emptyline()
se.add_comment("Commands after execution")
se.add_lines(self.post_run)
return qheader + se.get_script_str() + "\n"
def submit_to_queue(self, script_file):
"""
Public API: wraps the concrete implementation _submit_to_queue
Raises:
`self.MaxNumLaunchesError` if we have already tried to submit the job max_num_launches
`self.Error` if generic error
"""
if not os.path.exists(script_file):
raise self.Error('Cannot find script file located at: {}'.format(script_file))
if self.num_launches == self.max_num_launches:
raise self.MaxNumLaunchesError("num_launches %s == max_num_launches %s" % (self.num_launches, self.max_num_launches))
# Call the concrete implementation.
s = self._submit_to_queue(script_file)
if s.qid is None:
raise self.Error("Error in job submission with %s. file %s \n" %
(self.__class__.__name__, script_file) +
"The error response reads:\n %s \n " % s.err +
"The out response reads:\n %s \n" % s.out)
self.record_launch(s.qid)
# Here we create a concrete instance of QueueJob
return QueueJob.from_qtype_and_id(self.QTYPE, s.qid, self.qname), s.process
@abc.abstractmethod
def _submit_to_queue(self, script_file):
"""
Submits the job to the queue, probably using subprocess or shutil
This method must be provided by the concrete classes and will be called by submit_to_queue
Args:
script_file: (str) name of the script file to use (String)
Returns:
queue_id, process
"""
def get_njobs_in_queue(self, username=None):
"""
Returns the number of jobs in the queue, probably using subprocess or shutil to
call a command like 'qstat'. Returns None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
if username is None: username = getpass.getuser()
njobs, process = self._get_njobs_in_queue(username=username)
if process is not None and process.returncode != 0:
# there's a problem talking to squeue server?
err_msg = ('Error trying to get the number of jobs in the queue. ' +
'The error response reads:\n {}'.format(process.stderr.read()))
logger.critical(err_msg)
if not isinstance(self, ShellAdapter):
logger.info('The number of jobs currently in the queue is: {}'.format(njobs))
return njobs
@abc.abstractmethod
def _get_njobs_in_queue(self, username):
"""
Concrete Subclasses must implement this method. Return (njobs, process)
"""
# Methods to fix problems
def add_exclude_nodes(self, nodes):
return _EXCL_NODES_FILE.add_nodes(self.qname, nodes)
def get_exclude_nodes(self):
return _EXCL_NODES_FILE.read_nodes(self.qname)
@abc.abstractmethod
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation. Return True if nodes have been excluded"""
def more_mem_per_proc(self, factor=1):
"""
Method to increase the amount of memory asked for, by factor.
Return the new memory in megabytes if successful; raise self.Error otherwise.
"""
base_increase = 2000
old_mem = self.mem_per_proc
new_mem = old_mem + factor*base_increase
if new_mem < self.hw.mem_per_node:
self.set_mem_per_proc(new_mem)
return new_mem
raise self.Error('could not increase mem_per_proc further')
def more_cores(self, factor=1):
"""
Method to increase the number of MPI procs.
Return the new number of cores if successful; raise self.Error otherwise.
"""
# TODO : find a formula that works for all max_cores
if self.max_cores > 40:
base_increase = 4 * int(self.max_cores / 40)
else:
base_increase = 4
new_cores = self.hint_cores + factor * base_increase
if new_cores < self.max_cores:
self.hint_cores = new_cores
return new_cores
raise self.Error('hint_cores %s reached the limit max_cores %s' % (new_cores, self.max_cores))
def more_time(self, factor=1):
"""
Method to increase the wall time
"""
base_increase = int(self.timelimit_hard / 10)
new_time = self.timelimit + base_increase*factor
if new_time < self.timelimit_hard:
self.set_timelimit(new_time)
return new_time
self.priority = -1
raise self.Error("increasing time is not possible, the hard limit has been raised")
####################
# Concrete classes #
####################
class ShellAdapter(QueueAdapter):
"""Simple Adapter used to submit runs through the shell."""
QTYPE = "shell"
QTEMPLATE = """\
#!/bin/bash
$${qverbatim}
"""
def cancel(self, job_id):
return os.system("kill -9 %d" % job_id)
def _submit_to_queue(self, script_file):
# submit the job, return process and pid.
process = Popen(("/bin/bash", script_file), stderr=PIPE)
return SubmitResults(qid=process.pid, out='no out in shell submission', err='no err in shell submission', process=process)
def _get_njobs_in_queue(self, username):
return None, None
def exclude_nodes(self, nodes):
return False
class SlurmAdapter(QueueAdapter):
"""Adapter for SLURM."""
QTYPE = "slurm"
QTEMPLATE = """\
#!/bin/bash
#SBATCH --partition=$${partition}
#SBATCH --job-name=$${job_name}
#SBATCH --nodes=$${nodes}
#SBATCH --total_tasks=$${total_tasks}
#SBATCH --ntasks=$${ntasks}
#SBATCH --ntasks-per-node=$${ntasks_per_node}
#SBATCH --cpus-per-task=$${cpus_per_task}
#####SBATCH --mem=$${mem}
#SBATCH --mem-per-cpu=$${mem_per_cpu}
#SBATCH --hint=$${hint}
#SBATCH --time=$${time}
#SBATCH --exclude=$${exclude_nodes}
#SBATCH --account=$${account}
#SBATCH --mail-user=$${mail_user}
#SBATCH --mail-type=$${mail_type}
#SBATCH --constraint=$${constraint}
#SBATCH --gres=$${gres}
#SBATCH --requeue=$${requeue}
#SBATCH --nodelist=$${nodelist}
#SBATCH --propagate=$${propagate}
#SBATCH --licenses=$${licenses}
#SBATCH --output=$${_qout_path}
#SBATCH --error=$${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SlurmAdapter, self).set_qname(qname)
if qname:
self.qparams["partition"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SlurmAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ntasks"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SlurmAdapter, self).set_omp_threads(omp_threads)
self.qparams["cpus_per_task"] = omp_threads
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SlurmAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_cpu"] = self.mem_per_proc
# Remove mem if it's defined.
#self.qparams.pop("mem", None)
def set_timelimit(self, timelimit):
super(SlurmAdapter, self).set_timelimit(timelimit)
self.qparams["time"] = qu.time2slurm(timelimit)
def cancel(self, job_id):
return os.system("scancel %d" % job_id)
def optimize_params(self):
params = {}
if self.allocation == "nodes":
# run on the smallest number of nodes compatible with the configuration
params["nodes"] = max(int(math.ceil(self.mpi_procs / self.hw.cores_per_node)),
int(math.ceil(self.total_mem / self.hw.mem_per_node)))
return params
#dist = self.distribute(self.mpi_procs, self.omp_threads, self.mem_per_proc)
##print(dist)
#if False and dist.exact:
# # Can optimize parameters
# self.qparams["nodes"] = dist.num_nodes
# self.qparams.pop("ntasks", None)
# self.qparams["ntasks_per_node"] = dist.mpi_per_node
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem"] = dist.mpi_per_node * self.mem_per_proc
# self.qparams.pop("mem_per_cpu", None)
#else:
# # Delegate to slurm.
# self.qparams["ntasks"] = self.mpi_procs
# self.qparams.pop("nodes", None)
# self.qparams.pop("ntasks_per_node", None)
# self.qparams["cpus_per_task"] = self.omp_threads
# self.qparams["mem_per_cpu"] = self.mem_per_proc
# self.qparams.pop("mem", None)
#return {}
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['sbatch', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the returncode. SLURM returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should be of the form 'Submitted batch job 2561553' - grab the fourth token for the job id
queue_id = int(out.split()[3])
logger.info('Job submission was successful and queue_id is {}'.format(queue_id))
except (IndexError, ValueError):
# probably error parsing job code
logger.critical('Could not parse job id following slurm...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
try:
if 'exclude_nodes' not in self.qparams:
self.qparams.update({'exclude_nodes': 'node' + nodes[0]})
print('excluded node %s' % nodes[0])
for node in nodes[1:]:
self.qparams['exclude_nodes'] += ',node' + node
print('excluded node %s' % node)
return True
except (KeyError, IndexError):
raise self.Error('qadapter failed to exclude nodes')
def _get_njobs_in_queue(self, username):
process = Popen(['squeue', '-o', '%u', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result. lines should have this form:
# username
# count lines that include the username in it
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class PbsProAdapter(QueueAdapter):
"""Adapter for PbsPro"""
QTYPE = "pbspro"
#PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
#PBS -l select=$${select}:ncpus=1:vmem=$${vmem}mb:mpiprocs=1:ompthreads=$${ompthreads}
####PBS -l select=$${select}:ncpus=$${ncpus}:vmem=$${vmem}mb:mpiprocs=$${mpiprocs}:ompthreads=$${ompthreads}
####PBS -l pvmem=$${pvmem}mb
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l select=$${select}
#PBS -l pvmem=$${pvmem}mb
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(PbsProAdapter, self).set_qname(qname)
if qname:
self.qparams["queue"] = qname
def set_timelimit(self, timelimit):
super(PbsProAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(PbsProAdapter, self).set_mem_per_proc(mem_mb)
#self.qparams["vmem"] = self.mem_per_proc
self.qparams["pvmem"] = self.mem_per_proc
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def optimize_params(self):
return {"select": self.get_select()}
def get_select(self, ret_dict=False):
"""
Select is not the most intuitive command. For more info see:
* http://www.cardiff.ac.uk/arcca/services/equipment/User-Guide/pbs.html
* https://portal.ivec.org/docs/Supercomputers/PBS_Pro
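Example (hypothetical values): a pure-MPI run with 16 processes and 2000 Mb
per process produces the select string "16:ncpus=1:vmem=2000mb:mpiprocs=1".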
"""
hw, mem_per_proc = self.hw, int(self.mem_per_proc)
#dist = self.distribute(self.mpi_procs, self.omp_threads, mem_per_proc)
"""
if self.pure_mpi:
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
if num_nodes == 0:
logger.info("IN_CORE PURE MPI: %s" % self.run_info)
chunks = 1
ncpus = rest_cores
mpiprocs = rest_cores
vmem = mem_per_proc * ncpus
ompthreads = 1
elif rest_cores == 0:
# Can allocate entire nodes because self.mpi_procs is divisible by cores_per_node.
logger.info("PURE MPI run commensurate with cores_per_node %s" % self.run_info)
chunks = num_nodes
ncpus = hw.cores_per_node
mpiprocs = hw.cores_per_node
vmem = ncpus * mem_per_proc
ompthreads = 1
else:
logger.info("OUT-OF-CORE PURE MPI (not commensurate with cores_per_node): %s" % self.run_info)
chunks = self.mpi_procs
ncpus = 1
mpiprocs = 1
vmem = mem_per_proc
ompthreads = 1
elif self.pure_omp:
# Pure OMP run.
logger.info("PURE OPENMP run: %s" % self.run_info)
assert hw.can_use_omp_threads(self.omp_threads)
chunks = 1
ncpus = self.omp_threads
mpiprocs = 1
vmem = mem_per_proc
ompthreads = self.omp_threads
elif self.hybrid_mpi_omp:
assert hw.can_use_omp_threads(self.omp_threads)
num_nodes, rest_cores = hw.divmod_node(self.mpi_procs, self.omp_threads)
#print(num_nodes, rest_cores)
# TODO: test this
if rest_cores == 0 or num_nodes == 0:
logger.info("HYBRID MPI-OPENMP run, perfectly divisible among nodes: %s" % self.run_info)
chunks = max(num_nodes, 1)
mpiprocs = self.mpi_procs // chunks
chunks = chunks
ncpus = mpiprocs * self.omp_threads
mpiprocs = mpiprocs
vmem = mpiprocs * mem_per_proc
ompthreads = self.omp_threads
else:
logger.info("HYBRID MPI-OPENMP, NOT commensurate with nodes: %s" % self.run_info)
chunks=self.mpi_procs
ncpus=self.omp_threads
mpiprocs=1
vmem= mem_per_proc
ompthreads=self.omp_threads
else:
raise RuntimeError("You should not be here")
"""
if not self.has_omp:
chunks, ncpus, vmem, mpiprocs = self.mpi_procs, 1, self.mem_per_proc, 1
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem))
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}".format(**select_params)
else:
chunks, ncpus, vmem, mpiprocs, ompthreads = self.mpi_procs, self.omp_threads, self.mem_per_proc, 1, self.omp_threads
select_params = AttrDict(chunks=chunks, ncpus=ncpus, mpiprocs=mpiprocs, vmem=int(vmem), ompthreads=ompthreads)
s = "{chunks}:ncpus={ncpus}:vmem={vmem}mb:mpiprocs={mpiprocs}:ompthreads={ompthreads}".format(**select_params)
if ret_dict:
return s, select_params
return s
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the return code. PBS returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should be of the form '2561553.sdb' or '352353.jessup' - just grab the first part for the job id
queue_id = int(out.split('.')[0])
except (IndexError, ValueError):
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
process = Popen(['qstat', '-a', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form
# '1339044.sdb username queuename 2012-02-29-16-43 20460 -- -- -- 00:20 C 00:09'
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = out.split('\n')
njobs = len([line.split() for line in outs if username in line])
return njobs, process
def exclude_nodes(self, nodes):
"""No meaning for Shell"""
return False
class TorqueAdapter(PbsProAdapter):
"""Adapter for Torque."""
QTYPE = "torque"
QTEMPLATE = """\
#!/bin/bash
#PBS -q $${queue}
#PBS -N $${job_name}
#PBS -A $${account}
#PBS -l pmem=$${pmem}mb
####PBS -l mppwidth=$${mppwidth}
#PBS -l nodes=$${nodes}:ppn=$${ppn}
#PBS -l walltime=$${walltime}
#PBS -l model=$${model}
#PBS -l place=$${place}
#PBS -W group_list=$${group_list}
#PBS -M $${mail_user}
#PBS -m $${mail_type}
# Submission environment
#PBS -V
#PBS -o $${_qout_path}
#PBS -e $${_qerr_path}
$${qverbatim}
"""
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
QueueAdapter.set_mem_per_proc(self, mem_mb)
self.qparams["pmem"] = self.mem_per_proc
#self.qparams["mem"] = self.mem_per_proc
#@property
#def mpi_procs(self):
# """Number of MPI processes."""
# return self.qparams.get("nodes", 1) * self.qparams.get("ppn", 1)
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
QueueAdapter.set_mpi_procs(self, mpi_procs)
self.qparams["nodes"] = 1
self.qparams["ppn"] = mpi_procs
def exclude_nodes(self, nodes):
raise self.Error('qadapter failed to exclude nodes, not implemented yet in torque')
class SGEAdapter(QueueAdapter):
"""
Adapter for Sun Grid Engine (SGE) task submission software.
See also:
* https://www.wiki.ed.ac.uk/display/EaStCHEMresearchwiki/How+to+write+a+SGE+job+submission+script
* http://www.uibk.ac.at/zid/systeme/hpc-systeme/common/tutorials/sge-howto.html
"""
QTYPE = "sge"
QTEMPLATE = """\
#!/bin/bash
#$ -account_name $${account_name}
#$ -N $${job_name}
#$ -q $${queue_name}
#$ -pe $${parallel_environment} $${ncpus}
#$ -l h_rt=$${walltime}
# request a per slot memory limit of size bytes.
##$ -l h_vmem=$${mem_per_slot}
##$ -l mf=$${mem_per_slot}
###$ -j no
#$ -M $${mail_user}
#$ -m $${mail_type}
# Submission environment
##$ -S /bin/bash
###$ -cwd # Change to current working directory
###$ -V # Export environment variables into script
#$ -e $${_qerr_path}
#$ -o $${_qout_path}
$${qverbatim}
"""
def set_qname(self, qname):
super(SGEAdapter, self).set_qname(qname)
if qname:
self.qparams["queue_name"] = qname
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(SGEAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["ncpus"] = mpi_procs
def set_omp_threads(self, omp_threads):
super(SGEAdapter, self).set_omp_threads(omp_threads)
logger.warning("Cannot use omp_threads with SGE")
def set_mem_per_proc(self, mem_mb):
"""Set the memory per process in megabytes"""
super(SGEAdapter, self).set_mem_per_proc(mem_mb)
self.qparams["mem_per_slot"] = str(int(self.mem_per_proc)) + "M"
def set_timelimit(self, timelimit):
super(SGEAdapter, self).set_timelimit(timelimit)
# Same convention as pbspro e.g. [hours:minutes:]seconds
self.qparams["walltime"] = qu.time2pbspro(timelimit)
def cancel(self, job_id):
return os.system("qdel %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['qsub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
# grab the returncode. SGE returns 0 if the job was successful
queue_id = None
if process.returncode == 0:
try:
# output should be of the form
# Your job 1659048 ("NAME_OF_JOB") has been submitted
queue_id = int(out.split(' ')[2])
except (IndexError, ValueError):
# probably error parsing job code
logger.critical("Could not parse job id following qsub...")
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def exclude_nodes(self, nodes):
"""Method to exclude nodes in the calculation"""
raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')
def _get_njobs_in_queue(self, username):
process = Popen(['qstat', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should contain username
# count lines that include the username in it
# TODO: only count running or queued jobs. or rather, *don't* count jobs that are 'C'.
outs = out.splitlines()
njobs = len([line.split() for line in outs if username in line])
return njobs, process
class MOABAdapter(QueueAdapter):
"""Adapter for MOAB. See https://computing.llnl.gov/tutorials/moab/"""
QTYPE = "moab"
QTEMPLATE = """\
#!/bin/bash
#MSUB -a $${eligible_date}
#MSUB -A $${account}
#MSUB -c $${checkpoint_interval}
#MSUB -l feature=$${feature}
#MSUB -l gres=$${gres}
#MSUB -l nodes=$${nodes}
#MSUB -l partition=$${partition}
#MSUB -l procs=$${procs}
#MSUB -l ttc=$${ttc}
#MSUB -l walltime=$${walltime}
#MSUB -l $${resources}
#MSUB -p $${priority}
#MSUB -q $${queue}
#MSUB -S $${shell}
#MSUB -N $${job_name}
#MSUB -v $${variable_list}
#MSUB -o $${_qout_path}
#MSUB -e $${_qerr_path}
$${qverbatim}
"""
def set_mpi_procs(self, mpi_procs):
"""Set the number of CPUs used for MPI."""
super(MOABAdapter, self).set_mpi_procs(mpi_procs)
self.qparams["procs"] = mpi_procs
def set_timelimit(self, timelimit):
super(MOABAdapter, self).set_timelimit(timelimit)
self.qparams["walltime"] = qu.time2slurm(timelimit)
def set_mem_per_proc(self, mem_mb):
super(MOABAdapter, self).set_mem_per_proc(mem_mb)
#TODO
#raise NotImplementedError("set_mem_per_cpu")
def exclude_nodes(self, nodes):
raise self.Error('qadapter failed to exclude nodes, not implemented yet in moab')
def cancel(self, job_id):
return os.system("canceljob %d" % job_id)
def _submit_to_queue(self, script_file):
"""Submit a job script to the queue."""
process = Popen(['msub', script_file], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
queue_id = None
if process.returncode == 0:
# grab the returncode. MOAB returns 0 if the job was successful
try:
# output should be the queue_id
queue_id = int(out.split()[0])
except (IndexError, ValueError):
# probably error parsing job code
logger.critical('Could not parse job id following msub...')
return SubmitResults(qid=queue_id, out=out, err=err, process=process)
def _get_njobs_in_queue(self, username):
process = Popen(['showq', '-s', '-u', username], stdout=PIPE, stderr=PIPE)
out, err = process.communicate()
njobs = None
if process.returncode == 0:
# parse the result
# lines should have this form:
##
## active jobs: N eligible jobs: M blocked jobs: P
##
## Total job: 1
##
# Split the output string and return the last element.
out = out.splitlines()[-1]
njobs = int(out.split()[-1])
return njobs, process
class QScriptTemplate(string.Template):
delimiter = '$$'
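# A quick sketch of how the $$-delimited template is used (template and value
# below are hypothetical):
#
# QScriptTemplate("#SBATCH --time=$${time}").safe_substitute({"time": "1:0:0"})
# returns "#SBATCH --time=1:0:0"; parameters that are not substituted keep a
# leftover "$$" and are stripped line-by-line in _make_qheader.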
|
rousseab/pymatgen
|
pymatgen/io/abinitio/qadapters.py
|
Python
|
mit
| 59,656
|
[
"ABINIT",
"pymatgen"
] |
3be5107dd560c54ea6c6c624838b78ba9fca95599f372df00bc111276dc36afc
|
"""
RDKit Utilities.
This file contains utilities that compute useful properties of
molecules. Some of these are simple cleanup utilities, and
others are more sophisticated functions that detect chemical
properties of molecules.
"""
import os
import logging
import itertools
import numpy as np
from io import StringIO
from deepchem.utils.pdbqt_utils import pdbqt_to_pdb
from deepchem.utils.pdbqt_utils import convert_mol_to_pdbqt
from deepchem.utils.pdbqt_utils import convert_protein_to_pdbqt
from deepchem.utils.geometry_utils import compute_pairwise_distances
from deepchem.utils.geometry_utils import compute_centroid
from deepchem.utils.fragment_utils import MolecularFragment
from deepchem.utils.fragment_utils import MoleculeLoadException
from typing import Any, List, Tuple, Set, Optional, Dict
from deepchem.utils.typing import OneOrMany, RDKitMol
logger = logging.getLogger(__name__)
def get_xyz_from_mol(mol):
"""Extracts a numpy array of coordinates from a molecules.
Returns a `(N, 3)` numpy array of 3d coords of given rdkit molecule
Parameters
----------
mol: rdkit Molecule
Molecule to extract coordinates for
Returns
-------
Numpy ndarray of shape `(N, 3)` where `N = mol.GetNumAtoms()`.
"""
xyz = np.zeros((mol.GetNumAtoms(), 3))
conf = mol.GetConformer()
for i in range(conf.GetNumAtoms()):
position = conf.GetAtomPosition(i)
xyz[i, 0] = position.x
xyz[i, 1] = position.y
xyz[i, 2] = position.z
return (xyz)
def add_hydrogens_to_mol(mol, is_protein=False):
"""
Add hydrogens to a molecule object
Parameters
----------
mol: Rdkit Mol
Molecule to hydrogenate
is_protein: bool, optional (default False)
Whether this molecule is a protein.
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
return apply_pdbfixer(mol, hydrogenate=True, is_protein=is_protein)
def apply_pdbfixer(mol,
add_missing=True,
hydrogenate=True,
pH=7.4,
remove_heterogens=True,
is_protein=True):
"""
Apply PDBFixer to a molecule to try to clean it up.
Parameters
----------
mol: Rdkit Mol
Molecule to clean up.
add_missing: bool, optional
If true, add in missing residues and atoms
hydrogenate: bool, optional
If true, add hydrogens at specified pH
pH: float, optional
The pH at which hydrogens will be added if `hydrogenate==True`. Set to 7.4 by default.
remove_heterogens: bool, optional
Oftentimes, PDB files come with extra waters and salts attached.
If this field is set, remove these heterogens.
is_protein: bool, optional
If false, then don't remove heterogens (since this molecule is
itself a heterogen).
Returns
-------
Rdkit Mol
Note
----
This function requires RDKit and PDBFixer to be installed.
"""
molecule_file = None
try:
from rdkit import Chem
pdbblock = Chem.MolToPDBBlock(mol)
pdb_stringio = StringIO()
pdb_stringio.write(pdbblock)
pdb_stringio.seek(0)
import pdbfixer
fixer = pdbfixer.PDBFixer(pdbfile=pdb_stringio)
if add_missing:
fixer.findMissingResidues()
fixer.findMissingAtoms()
fixer.addMissingAtoms()
if hydrogenate:
fixer.addMissingHydrogens(pH)
if is_protein and remove_heterogens:
# False here specifies that water is to be removed
fixer.removeHeterogens(False)
hydrogenated_io = StringIO()
import simtk
simtk.openmm.app.PDBFile.writeFile(fixer.topology, fixer.positions,
hydrogenated_io)
hydrogenated_io.seek(0)
return Chem.MolFromPDBBlock(
hydrogenated_io.read(), sanitize=False, removeHs=False)
except ValueError as e:
logger.warning("Unable to add hydrogens %s", e)
raise MoleculeLoadException(e)
finally:
try:
os.remove(molecule_file)
except (OSError, TypeError):
pass
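# A minimal usage sketch (the PDB file name is hypothetical; RDKit, PDBFixer
# and OpenMM must be installed):
#
# from rdkit import Chem
# raw = Chem.MolFromPDBFile("protein.pdb", sanitize=False, removeHs=False)
# fixed = apply_pdbfixer(raw, hydrogenate=True, pH=7.4, is_protein=True)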
def compute_charges(mol):
"""Attempt to compute Gasteiger Charges on Mol
This also has the side effect of calculating charges on mol. The
mol passed into this function has to already have been sanitized
Parameters
----------
mol: rdkit molecule
Returns
-------
No return since updates in place.
Note
----
This function requires RDKit to be installed.
"""
from rdkit.Chem import AllChem
try:
# Updates charges in place
AllChem.ComputeGasteigerCharges(mol)
except Exception as e:
logging.exception("Unable to compute charges for mol")
raise MoleculeLoadException(e)
def load_complex(molecular_complex: OneOrMany[str],
add_hydrogens: bool = True,
calc_charges: bool = True,
sanitize: bool = True) -> List[Tuple[np.ndarray, RDKitMol]]:
"""Loads a molecular complex.
Given some representation of a molecular complex, returns a list of
tuples, where each tuple contains (xyz coords, rdkit object) for
that constituent molecule in the complex.
For now, assumes that molecular_complex is a tuple of filenames.
Parameters
----------
molecular_complex: list or str
If list, each entry should be a filename for a constituent
molecule in complex. If str, should be the filename of a file that
holds the full complex.
add_hydrogens: bool, optional
If true, add hydrogens via pdbfixer
calc_charges: bool, optional
If true, add charges via rdkit
sanitize: bool, optional
If true, sanitize molecules via rdkit
Returns
-------
List of tuples (xyz, mol)
Note
----
This function requires RDKit to be installed.
"""
if isinstance(molecular_complex, str):
molecular_complex = [molecular_complex]
fragments = []
for mol in molecular_complex:
loaded = load_molecule(
mol,
add_hydrogens=add_hydrogens,
calc_charges=calc_charges,
sanitize=sanitize)
if isinstance(loaded, list):
fragments += loaded
else:
fragments.append(loaded)
return fragments
def load_molecule(molecule_file,
add_hydrogens=True,
calc_charges=True,
sanitize=True,
is_protein=False):
"""Converts molecule file to (xyz-coords, obmol object)
Given molecule_file, returns a tuple of xyz coords of molecule
and an rdkit object representing that molecule in that order `(xyz,
rdkit_mol)`. This ordering convention is used in the code in a few
places.
Parameters
----------
molecule_file: str
filename for molecule
add_hydrogens: bool, optional (default True)
If True, add hydrogens via pdbfixer
calc_charges: bool, optional (default True)
If True, add charges via rdkit
sanitize: bool, optional (default True)
If True, sanitize molecules via rdkit
is_protein: bool, optional (default False)
If True, this molecule is loaded as a protein. This flag will
affect some of the cleanup procedures applied.
Returns
-------
Tuple (xyz, mol) if file contains single molecule. Else returns a
list of the tuples for the separate molecules in this list.
Note
----
This function requires RDKit to be installed.
"""
from rdkit import Chem
from_pdb = False
if ".mol2" in molecule_file:
my_mol = Chem.MolFromMol2File(molecule_file, sanitize=False, removeHs=False)
elif ".sdf" in molecule_file:
suppl = Chem.SDMolSupplier(str(molecule_file), sanitize=False)
# TODO: This is wrong. Should return all molecules
my_mol = suppl[0]
elif ".pdbqt" in molecule_file:
pdb_block = pdbqt_to_pdb(molecule_file)
my_mol = Chem.MolFromPDBBlock(
str(pdb_block), sanitize=False, removeHs=False)
from_pdb = True
elif ".pdb" in molecule_file:
my_mol = Chem.MolFromPDBFile(
str(molecule_file), sanitize=False, removeHs=False)
from_pdb = True # noqa: F841
else:
raise ValueError("Unrecognized file type for %s" % str(molecule_file))
if my_mol is None:
raise ValueError("Unable to read non None Molecule Object")
if add_hydrogens or calc_charges:
my_mol = apply_pdbfixer(
my_mol, hydrogenate=add_hydrogens, is_protein=is_protein)
if sanitize:
try:
Chem.SanitizeMol(my_mol)
# Ideally we should catch AtomValenceException but Travis seems to choke on it for some reason.
except:
logger.warning("Mol %s failed sanitization" % Chem.MolToSmiles(my_mol))
if calc_charges:
# This updates in place
compute_charges(my_mol)
xyz = get_xyz_from_mol(my_mol)
return xyz, my_mol
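# A minimal usage sketch (the file name is hypothetical): load an SDF ligand
# without hydrogenation or charge calculation and inspect the coordinates.
#
# xyz, mol = load_molecule("ligand.sdf", add_hydrogens=False, calc_charges=False)
# assert xyz.shape == (mol.GetNumAtoms(), 3)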
def write_molecule(mol, outfile, is_protein=False):
"""Write molecule to a file
This function writes a representation of the provided molecule to
the specified `outfile`. Doesn't return anything.
Parameters
----------
mol: rdkit Mol
Molecule to write
outfile: str
Filename to write mol to
is_protein: bool, optional
Is this molecule a protein?
Note
----
This function requires RDKit to be installed.
Raises
------
ValueError: if `outfile` isn't of a supported format.
"""
from rdkit import Chem
if ".pdbqt" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
if is_protein:
convert_protein_to_pdbqt(mol, outfile)
else:
convert_mol_to_pdbqt(mol, outfile)
elif ".pdb" in outfile:
writer = Chem.PDBWriter(outfile)
writer.write(mol)
writer.close()
elif ".sdf" in outfile:
writer = Chem.SDWriter(outfile)
writer.write(mol)
writer.close()
else:
raise ValueError("Unsupported Format")
def merge_molecules_xyz(xyzs):
"""Merges coordinates of multiple molecules.
Parameters
----------
xyzs: List
List of numpy arrays each of shape `(N_i, 3)` where `N_i` is the number of atoms in the i-th molecule.
"""
return np.vstack(xyzs)
def merge_molecules(molecules):
"""Helper method to merge two molecules.
Parameters
----------
molecules: list
List of rdkit molecules
Returns
-------
merged: rdkit molecule
"""
from rdkit.Chem import rdmolops
if len(molecules) == 0:
return None
elif len(molecules) == 1:
return molecules[0]
else:
combined = molecules[0]
for nextmol in molecules[1:]:
combined = rdmolops.CombineMols(combined, nextmol)
return combined
def compute_all_ecfp(mol: RDKitMol,
indices: Optional[Set[int]] = None,
degree: int = 2) -> Dict[int, str]:
"""Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to
an int. Return a dictionary mapping atom index to hashed
SMILES.
Parameters
----------
mol: rdkit Molecule
Molecule to compute ecfp fragments on
indices: Optional[Set[int]]
List of atom indices for molecule. Default is all indices. If
specified will only compute fragments for specified atoms.
degree: int
Graph degree to use when computing ECFP fingerprints
Returns
-------
dict
Dictionary mapping atom index to hashed smiles.
"""
ecfp_dict = {}
from rdkit import Chem
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
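# A small sanity sketch (requires RDKit; the SMILES string is an arbitrary
# example): every heavy-atom index maps to "atomic_num,fragment_smiles".
#
# from rdkit import Chem
# frags = compute_all_ecfp(Chem.MolFromSmiles("CCO"), degree=2)
# len(frags) == 3  # one entry per heavy atom of ethanol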
def compute_contact_centroid(molecular_complex: Any,
cutoff: float = 4.5) -> np.ndarray:
"""Computes the (x,y,z) centroid of the contact regions of this molecular complex.
For a molecular complex, it's necessary for various featurizations
that compute voxel grids to find a reasonable center for the
voxelization. This function computes the centroid of all the contact
atoms, defined as an atom that's within `cutoff` Angstroms of an
atom from a different molecule.
Parameters
----------
molecular_complex: Object
A representation of a molecular complex, produced by
`rdkit_util.load_complex`.
cutoff: float, optional
The distance in Angstroms considered for computing contacts.
"""
fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff)
coords = [frag[0] for frag in fragments]
contact_coords = merge_molecules_xyz(coords)
centroid = np.mean(contact_coords, axis=0)
return (centroid)
def reduce_molecular_complex_to_contacts(fragments: List,
cutoff: float = 4.5) -> List:
"""Reduce a molecular complex to only those atoms near a contact.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function takes in a molecular complex and returns a new molecular
complex representation that contains only contact atoms. The contact
atoms are computed by calling `get_contact_atom_indices` under the
hood.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
is a tuple of `(coords, MolecularShim)`. The coords is stripped down
to `(N_contact_atoms, 3)` where `N_contact_atoms` is the number of
contact atoms for this complex. `MolecularShim` is used since it's
tricky to make a RDKit sub-molecule.
"""
atoms_to_keep = get_contact_atom_indices(fragments, cutoff)
reduced_complex = []
for frag, keep in zip(fragments, atoms_to_keep):
contact_frag = get_mol_subset(frag[0], frag[1], keep)
reduced_complex.append(contact_frag)
return reduced_complex
def compute_ring_center(mol, ring_indices):
"""Computes 3D coordinates of a center of a given ring.
Parameters
----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns
-------
ring_centroid: np.ndarray
Position of a ring center
"""
conformer = mol.GetConformer()
ring_xyz = np.zeros((len(ring_indices), 3))
for i, atom_idx in enumerate(ring_indices):
atom_position = conformer.GetAtomPosition(atom_idx)
ring_xyz[i] = np.array(atom_position)
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def get_contact_atom_indices(fragments: List, cutoff: float = 4.5) -> List:
"""Compute the atoms close to contact region.
Molecular complexes can get very large. This can make it unwieldy to
compute functions on them. To improve memory usage, it can be very
useful to trim out atoms that aren't close to contact regions. This
function computes pairwise distances between all pairs of molecules
in the molecular complex. If an atom is within cutoff distance of
any atom on another molecule in the complex, it is regarded as a
contact atom. Otherwise it is trimmed.
Parameters
----------
fragments: List
As returned by `rdkit_util.load_complex`, a list of tuples of
`(coords, mol)` where `coords` is a `(N_atoms, 3)` array and `mol`
is the rdkit molecule object.
cutoff: float
The cutoff distance in angstroms.
Returns
-------
A list of length `len(molecular_complex)`. Each entry in this list
is a list of atom indices from that molecule which should be kept, in
sorted order.
"""
# indices of atoms to keep
keep_inds: List[Set] = [set([]) for _ in fragments]
for (ind1, ind2) in itertools.combinations(range(len(fragments)), 2):
frag1, frag2 = fragments[ind1], fragments[ind2]
pairwise_distances = compute_pairwise_distances(frag1[0], frag2[0])
# contacts is of form (x_coords, y_coords), a tuple of 2 lists
contacts = np.nonzero((pairwise_distances < cutoff))
# contacts[0] is the x_coords, that is the frag1 atoms that have
# nonzero contact.
frag1_atoms = set([int(c) for c in contacts[0].tolist()])
# contacts[1] is the y_coords, the frag2 atoms with nonzero contacts
frag2_atoms = set([int(c) for c in contacts[1].tolist()])
keep_inds[ind1] = keep_inds[ind1].union(frag1_atoms)
keep_inds[ind2] = keep_inds[ind2].union(frag2_atoms)
keep_ind_lists = [sorted(list(keep)) for keep in keep_inds]
return keep_ind_lists
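# A toy sketch with synthetic coordinates (only fragment[0] is used here, so
# the mol entries can be None): two single-atom fragments 1 Angstrom apart are
# within the default 4.5 Angstrom cutoff, so both atoms are kept.
#
# frag_a = (np.array([[0.0, 0.0, 0.0]]), None)
# frag_b = (np.array([[1.0, 0.0, 0.0]]), None)
# get_contact_atom_indices([frag_a, frag_b])  # -> [[0], [0]]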
def get_mol_subset(coords, mol, atom_indices_to_keep):
"""Strip a subset of the atoms in this molecule
Parameters
----------
coords: Numpy ndarray
Must be of shape (N, 3) and correspond to coordinates of mol.
mol: Rdkit mol or `MolecularFragment`
The molecule to strip
atom_indices_to_keep: list
List of the indices of the atoms to keep. Each index is a unique
number between `[0, N)`.
Returns
-------
A tuple of (coords, mol_frag) where coords is a Numpy array of
coordinates with hydrogen coordinates. mol_frag is a
`MolecularFragment`.
"""
from rdkit import Chem
indexes_to_keep = []
atoms_to_keep = []
#####################################################
# Compute partial charges on molecule if rdkit
if isinstance(mol, Chem.Mol):
compute_charges(mol)
#####################################################
atoms = list(mol.GetAtoms())
for index in atom_indices_to_keep:
indexes_to_keep.append(index)
atoms_to_keep.append(atoms[index])
coords = coords[indexes_to_keep]
mol_frag = MolecularFragment(atoms_to_keep, coords)
return coords, mol_frag
def compute_ring_normal(mol, ring_indices):
"""Computes normal to a plane determined by a given ring.
Parameters
----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns
-------
normal: np.ndarray
Normal vector
"""
conformer = mol.GetConformer()
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring_indices[:3]):
atom_position = conformer.GetAtomPosition(atom_idx)
points[i] = np.array(atom_position)
v1 = points[1] - points[0]
v2 = points[2] - points[0]
normal = np.cross(v1, v2)
return normal
|
lilleswing/deepchem
|
deepchem/utils/rdkit_utils.py
|
Python
|
mit
| 18,305
|
[
"OpenMM",
"RDKit"
] |
7f79355d2e4525a61c41bdedbe40aed20dc6a93558103b7eafa8f28412bc9380
|
from abc import abstractmethod, ABCMeta
import logging
import os
import urlparse
logger = logging.getLogger(__name__)
class ProductInstance(object):
"""
Configuration object for a product's instance.
"""
def __init__(self, instance_id, base_url):
"""
Constructor.
@type instance_id: str
@param instance_id: Instance id to use
@type base_url: str
@param base_url: Base URL of the instance
"""
self.instance_id = instance_id
self.base_url = base_url
class Product(object):
"""
Product to test.
"""
__metaclass__ = ABCMeta
def __init__(self, driver_manager, instance):
"""
Constructor.
@type driver_manager: DriverManager
@type instance: ProductInstance
@param instance: Settings for the instance
"""
self._driver_manager = driver_manager
self._instance = instance
@property
def driver(self):
return self._driver_manager.get_driver()
@abstractmethod
def visit(self):
pass
class ProductManager(object):
"""
Instantiate a product object from a given class.
"""
def __init__(self, driver_manager=None, settings=None):
if driver_manager:
self._driver_manager = driver_manager
else:
from friendly.pageobjects.driver import driver_manager
self._driver_manager = driver_manager
if settings:
self._settings = settings
else:
from friendly.pageobjects.settings import settings
self._settings = settings
self._instances = {}
def get_instance(self, *args, **kwargs):
"""
@deprecated
"""
return self.get_product(*args, **kwargs)
def get_product(self, instance_id=None):
# Use default instance id if we didn't get one
if not instance_id:
instance_id = self._settings['to_test.id']
# Check for existing instance
try:
return self._instances[instance_id]
except KeyError:
pass
# Look-up instance data from settings
instance_data = None
for i in self._settings['instances']:
if i['instance']['id'] == instance_id:
instance_data = i['instance']
break
if not instance_data:
raise ValueError('Product-instance {0} not found in settings.'.format(instance_id))
# Load product class
product_classname = instance_data['product']['class']
klass = self._get_product_class(product_classname)
# Look-up instance_url override at the ENV
instance_url = instance_data['url']
instance_url_envvar = (instance_id + '_url').upper()
if instance_url_envvar in os.environ:
instance_url = os.environ[instance_url_envvar]
# Compose base_url
url = urlparse.urlparse(instance_url)
base_url = '%(scheme)s://%(netloc)s' % dict((s, getattr(url, s)) for s in url._fields)
instance = klass(self._driver_manager, ProductInstance(instance_id, base_url))
self._instances[instance_id] = instance
return instance
def _get_product_class(self, class_path):
module_name, class_name = class_path.rsplit('.', 1)
mod = __import__(module_name, fromlist=[class_name])
return getattr(mod, class_name)
product_manager = ProductManager()
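# A minimal usage sketch (the instance id is hypothetical and must match an
# entry under 'instances' in the settings):
#
# product = product_manager.get_product('my_wiki')
# product.visit()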
|
butfriendly/friendly-pageobjects
|
friendly/pageobjects/product.py
|
Python
|
bsd-3-clause
| 3,450
|
[
"VisIt"
] |
e1b6b69a47dbd1004bf72551c7770fa258bd9f9f7e04d15f8f390105fd554f0a
|
from .core import OneHaloTerm, TwoHaloTerm
from .core import GalaxyPowerTerm, DampedGalaxyPowerTerm
class ZeroShotNoise(object):
"""
Class to manage the handling of `N` when computing component spectra
"""
def __init__(self, model):
self.model = model
self._N = self.model.N
def __enter__(self):
self.model.N = 0.
def __exit__(self, *args):
self.model.N = self._N
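# A small usage sketch (`model` is any object with an `N` attribute): inside
# the block the shot noise is zeroed; the original value is restored on exit,
# even if an exception is raised.
#
# with ZeroShotNoise(model):
#     pk = evaluate_component(k, mu)  # hypothetical call; sees model.N == 0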
from .Pcc import Pcc
from .Pcs import Pcs
from .Pss import Pss
class Pgal(GalaxyPowerTerm):
"""
The auto spectrum of central + satellite galaxies
"""
name = "Pgal"
def __init__(self, model):
super(Pgal, self).__init__(model, Pcc, Pcs, Pss)
def __call__(self, k, mu):
"""
The total galaxy auto spectrum
"""
with ZeroShotNoise(self.model):
toret = super(Pgal, self).__call__(k, mu)
return toret
from .power_gal import GalaxySpectrum
|
nickhand/pyRSD
|
pyRSD/rsd/power/gal/__init__.py
|
Python
|
gpl-3.0
| 996
|
[
"Galaxy"
] |
18f6873962f0cebd09c0c5520ec0af93213f3df7ffdb4af179540477ae3990a8
|
# -*- coding: utf-8 -*-
'''python3.3/html/entities.py, plus HTMLParser.decode(), for Python 2 compat'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import re
def unescape(s):
'''Decode HTML entities.'''
if '&' not in s:
return s
return UNESCAPE_RE.sub(_replace_entities, s)
# re.ASCII exists only on Python 3; on Python 2, fall back to a bytes pattern,
# which is ASCII-only by default.
try:
    UNESCAPE_RE = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                             flags=re.ASCII)
except AttributeError:
    UNESCAPE_RE = re.compile(br"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))")
def _replace_entities(s):
    # `s` is a regex match; its single group is the candidate reference body.
    s = s.groups()[0]
    try:
        if s[0] == "#":
            # Numeric character reference, decimal (&#65;) or hex (&#x41;).
            s = s[1:]
            if s[0] in ['x', 'X']:
                c = int(s[1:].rstrip(';'), 16)
            else:
                c = int(s.rstrip(';'))
            return chr(c)
    except ValueError:
        # Malformed numeric reference: put it back verbatim.
        return '&#' + s
    else:
        # Named reference: exact match first; names lacking a trailing
        # semicolon are then matched against prefixes of increasing length.
        if s in html5:
            return html5[s]
        elif s.endswith(';'):
            return '&' + s
        for x in range(2, len(s)):
            if s[:x] in html5:
                return html5[s[:x]] + s[x:]
        else:
            # for/else: no prefix matched, keep the text unchanged.
            return '&' + s
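# Illustrative examples of the decoding rules above (assumed inputs, not part
# of the original file):
#
#   unescape('fish &amp; chips')  # -> 'fish & chips'  (named reference)
#   unescape('&#65;&#x42;')       # -> 'AB'            (decimal, then hex)
#   unescape('&bogus;')           # -> '&bogus;'       (unknown name kept as-is)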
# Maps HTML5 named character references to the equivalent Unicode character(s)
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
|
ollyc/kajiki
|
kajiki/entities.py
|
Python
|
mit
| 58,439
|
[
"Bowtie"
] |
ba8ac0221a50e13053c5fb759f5fdc3f3ca1a490b8be353bdc9cc29cd6215c6f
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_elb_lb
description:
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
short_description: Creates or destroys Amazon ELB.
version_added: "1.5"
author:
- "Jim Dalton (@jsdalton)"
options:
state:
description:
- Create or destroy the ELB
choices: ["present", "absent"]
required: true
name:
description:
- The name of the ELB
required: true
listeners:
description:
- List of ports/protocols for this ELB to listen on (see example)
required: false
purge_listeners:
description:
- Purge existing listeners on ELB that are not found in listeners
required: false
default: true
instance_ids:
description:
- List of instance ids to attach to this ELB
required: false
default: false
version_added: "2.1"
purge_instance_ids:
description:
- Purge existing instance ids on ELB that are not found in instance_ids
required: false
default: false
version_added: "2.1"
zones:
description:
- List of availability zones to enable on this ELB
required: false
purge_zones:
description:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
security_group_ids:
description:
- A list of security groups to apply to the elb
    required: false
default: None
version_added: "1.6"
security_group_names:
description:
- A list of security group names to apply to the elb
    required: false
default: None
version_added: "2.0"
health_check:
description:
- An associative array of health check configuration settings (see example)
    required: false
default: None
access_logs:
description:
- An associative array of access logs configuration settings (see example)
    required: false
default: None
version_added: "2.0"
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
required: false
default: None
aliases: []
version_added: "1.7"
purge_subnets:
description:
- Purge existing subnet on ELB that are not found in subnets
required: false
default: false
version_added: "1.7"
scheme:
description:
- The scheme to use when creating the ELB. For a private VPC-visible ELB use 'internal'.
required: false
default: 'internet-facing'
version_added: "1.7"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
connection_draining_timeout:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
version_added: "1.8"
stickiness:
description:
      - An associative array of stickiness policy settings. Policy will be applied to all listeners (see example)
required: false
version_added: "2.0"
wait:
description:
- When specified, Ansible will check the status of the load balancer to ensure it has been successfully
removed from AWS.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.1"
wait_timeout:
description:
- Used in conjunction with wait. Number of seconds to wait for the elb to be terminated.
A maximum of 600 seconds (10 minutes) is allowed.
required: false
default: 60
version_added: "2.1"
tags:
description:
- An associative array of tags. To delete all tags, supply an empty dict.
required: false
version_added: "2.1"
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic provisioning example (non-VPC)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
proxy_protocol: True
- protocol: https
load_balancer_port: 443
instance_protocol: http # optional, defaults to value of protocol setting
instance_port: 80
# ssl certificate required for https or ssl
ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/company/servercerts/ProdServerCert"
# Internal ELB example
- local_action:
module: ec2_elb_lb
name: "test-vpc"
scheme: internal
state: present
instance_ids:
- i-abcd1234
purge_instance_ids: true
subnets:
- subnet-abcd1234
- subnet-1a2b3c4d
listeners:
- protocol: http # options are http, https, ssl, tcp
load_balancer_port: 80
instance_port: 80
# Configure a health check and the access logs
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
health_check:
ping_protocol: http # options are http, https, ssl, tcp
ping_port: 80
ping_path: "/index.html" # not required for tcp or ssl
response_timeout: 5 # seconds
interval: 30 # seconds
unhealthy_threshold: 2
healthy_threshold: 10
access_logs:
interval: 5 # minutes (defaults to 60)
s3_location: "my-bucket" # This value is required if access_logs is set
s3_prefix: "logs"
# Ensure ELB is gone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
# Ensure ELB is gone and wait for check (for default timeout)
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
# Ensure ELB is gone and wait for check with timeout value
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: absent
wait: yes
wait_timeout: 600
# Normally, this module will purge any listeners that exist on the ELB
# but aren't specified in the listeners parameter. If purge_listeners is
# false it leaves them alone
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_listeners: no
# Normally, this module will leave availability zones that are enabled
# on the ELB alone. If purge_zones is true, then any extraneous zones
# will be removed
- local_action:
module: ec2_elb_lb
name: "test-please-delete"
state: present
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
purge_zones: yes
# Creates a ELB and assigns a list of subnets to it.
- local_action:
module: ec2_elb_lb
state: present
name: 'New ELB'
security_group_ids: 'sg-123456, sg-67890'
region: us-west-2
subnets: 'subnet-123456,subnet-67890'
purge_subnets: yes
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
# Create an ELB with load balancer stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: loadbalancer
enabled: yes
expiration: 300
# Create an ELB with application stickiness enabled
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
stickiness:
type: application
enabled: yes
cookie: SESSIONID
# Create an ELB and add tags
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags:
Name: "New ELB"
stack: "production"
client: "Bob"
# Delete all tags from an ELB
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
region: us-east-1
zones:
- us-east-1a
- us-east-1d
listeners:
- protocol: http
load_balancer_port: 80
instance_port: 80
tags: {}
"""
try:
import boto
import boto.ec2.elb
import boto.ec2.elb.attributes
import boto.vpc
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.tag import Tag
from boto.regioninfo import RegionInfo
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
import time
import random
def _throttleable_operation(max_retries):
def _operation_wrapper(op):
def _do_op(*args, **kwargs):
retry = 0
while True:
try:
return op(*args, **kwargs)
except boto.exception.BotoServerError as e:
if retry < max_retries and e.code in \
("Throttling", "RequestLimitExceeded"):
retry = retry + 1
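# Back off exponentially with random jitter (capped at 5 minutes) so retries spread out under sustained API throttling.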
time.sleep(min(random.random() * (2 ** retry), 300))
continue
else:
raise
return _do_op
return _operation_wrapper
def _get_vpc_connection(module, region, aws_connect_params):
try:
return connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
_THROTTLING_RETRIES = 5
class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, wait=None, wait_timeout=None, tags=None,
region=None,
instance_ids=None, purge_instance_ids=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.instance_ids = instance_ids
self.purge_instance_ids = purge_instance_ids
self.zones = zones
self.purge_zones = purge_zones
self.security_group_ids = security_group_ids
self.health_check = health_check
self.subnets = subnets
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
self.wait = wait
self.wait_timeout = wait_timeout
self.tags = tags
self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
self.status = 'gone'
self.elb_conn = self._get_elb_connection()
self.elb = self._get_elb()
self.ec2_conn = self._get_ec2_connection()
@_throttleable_operation(_THROTTLING_RETRIES)
def ensure_ok(self):
"""Create the ELB"""
if not self.elb:
# Zones and listeners will be added at creation
self._create_elb()
else:
self._set_zones()
self._set_security_groups()
self._set_elb_listeners()
self._set_subnets()
self._set_health_check()
# boto has introduced support for some ELB attributes in
# different versions, so we check first before trying to
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
self._set_access_log()
# add sticky options
self.select_stickiness_policy()
# ensure backend server policies are correct
self._set_backend_policies()
# set/remove instance ids
self._set_instance_ids()
self._set_tags()
def ensure_gone(self):
"""Destroy the ELB"""
if self.elb:
self._delete_elb()
if self.wait:
elb_removed = self._wait_for_elb_removed()
# Unfortunately even though the ELB itself is removed quickly
# the interfaces take longer so reliant security groups cannot
# be deleted until the interface has registered as removed.
elb_interface_removed = self._wait_for_elb_interface_removed()
if not (elb_removed and elb_interface_removed):
self.module.fail_json(msg='Timed out waiting for removal of load balancer.')
def get_info(self):
try:
check_elb = self.elb_conn.get_all_load_balancers(self.name)[0]
except Exception:
check_elb = None
if not check_elb:
info = {
'name': self.name,
'status': self.status,
'region': self.region
}
else:
try:
lb_cookie_policy = check_elb.policies.lb_cookie_stickiness_policies[0].__dict__['policy_name']
except Exception:
lb_cookie_policy = None
try:
app_cookie_policy = check_elb.policies.app_cookie_stickiness_policies[0].__dict__['policy_name']
except Exception:
app_cookie_policy = None
info = {
'name': check_elb.name,
'dns_name': check_elb.dns_name,
'zones': check_elb.availability_zones,
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'proxy_policy': self._get_proxy_protocol_policy(),
'backends': self._get_backend_policies(),
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0,
'region': self.region
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
'interval': check_elb.health_check.interval,
'timeout': check_elb.health_check.timeout,
'healthy_threshold': check_elb.health_check.healthy_threshold,
'unhealthy_threshold': check_elb.health_check.unhealthy_threshold,
}
if check_elb.listeners:
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
# immediately returned result, so just include the
# ones that were added
info['listeners'] = [self._listener_as_tuple(l)
for l in self.listeners]
else:
info['listeners'] = []
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
info['cross_az_load_balancing'] = 'yes'
else:
info['cross_az_load_balancing'] = 'no'
# return stickiness info?
info['tags'] = self.tags
return info
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
try:
result = self.elb_conn.get_all_lb_attributes(self.name)
except (boto.exception.BotoServerError, StandardError) as e:
if "LoadBalancerNotFound" in e.code:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _wait_for_elb_interface_removed(self):
polling_increment_secs = 15
max_retries = (self.wait_timeout / polling_increment_secs)
status_achieved = False
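# ELB-managed network interfaces are owned by 'amazon-elb' and carry an 'ELB <name>' description, so they can be located with a filter.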
elb_interfaces = self.ec2_conn.get_all_network_interfaces(
filters={'attachment.instance-owner-id': 'amazon-elb',
'description': 'ELB {0}'.format(self.name) })
for x in range(0, max_retries):
for interface in elb_interfaces:
try:
result = self.ec2_conn.get_all_network_interfaces(interface.id)
if result == []:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except (boto.exception.BotoServerError, StandardError) as e:
if 'InvalidNetworkInterfaceID' in e.code:
status_achieved = True
break
else:
self.module.fail_json(msg=str(e))
return status_achieved
@_throttleable_operation(_THROTTLING_RETRIES)
def _get_elb(self):
elbs = self.elb_conn.get_all_load_balancers()
for elb in elbs:
if self.name == elb.name:
self.status = 'ok'
return elb
def _get_elb_connection(self):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
self.module.fail_json(msg=str(e))
def _get_ec2_connection(self):
try:
return connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError) as e:
self.module.fail_json(msg=str(e))
@_throttleable_operation(_THROTTLING_RETRIES)
def _delete_elb(self):
# True if succeeds, exception raised if not
result = self.elb_conn.delete_load_balancer(name=self.name)
if result:
self.changed = True
self.status = 'deleted'
def _create_elb(self):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
security_groups=self.security_group_ids,
complex_listeners=listeners,
subnets=self.subnets,
scheme=self.scheme)
if self.elb:
# HACK: Work around a boto bug in which the listeners attribute is
# always set to the listeners argument to create_load_balancer, and
# not the complex_listeners
# We're not doing a self.elb = self._get_elb here because there
# might be eventual consistency issues and it doesn't necessarily
# make sense to wait until the ELB gets returned from the EC2 API.
# This is necessary in the event we hit throttling errors and
# need to retry ensure_ok
# See https://github.com/boto/boto/issues/3526
self.elb.listeners = self.listeners
self.changed = True
self.status = 'created'
def _create_elb_listeners(self, listeners):
"""Takes a list of listener tuples and creates them"""
# True if succeeds, exception raised if not
self.changed = self.elb_conn.create_load_balancer_listeners(self.name,
complex_listeners=listeners)
def _delete_elb_listeners(self, listeners):
"""Takes a list of listener tuples and deletes them from the elb"""
ports = [l[0] for l in listeners]
# True if succeeds, exception raised if not
self.changed = self.elb_conn.delete_load_balancer_listeners(self.name,
ports)
def _set_elb_listeners(self):
"""
Creates listeners specified by self.listeners; overwrites existing
listeners on these ports; removes extraneous listeners
"""
listeners_to_add = []
listeners_to_remove = []
listeners_to_keep = []
# Check for any listeners we need to create or overwrite
for listener in self.listeners:
listener_as_tuple = self._listener_as_tuple(listener)
# First we loop through existing listeners to see if one is
# already specified for this port
existing_listener_found = None
for existing_listener in self.elb.listeners:
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == int(listener['load_balancer_port']):
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
if existing_listener_found:
# Does it match exactly?
if listener_as_tuple != existing_listener_found:
# The ports are the same but something else is different,
# so we'll remove the existing one and add the new one
listeners_to_remove.append(existing_listener_found)
listeners_to_add.append(listener_as_tuple)
else:
# We already have this listener, so we're going to keep it
listeners_to_keep.append(existing_listener_found)
else:
# We didn't find an existing listener, so just add the new one
listeners_to_add.append(listener_as_tuple)
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
if existing_listener_tuple in listeners_to_keep:
# Keep this one around
continue
# Since we're not already removing it and we don't need to keep
# it, let's get rid of it
listeners_to_remove.append(existing_listener_tuple)
if listeners_to_remove:
self._delete_elb_listeners(listeners_to_remove)
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""
# N.B. string manipulations on protocols below (str(), upper()) are to
# ensure format matches output from ELB API
listener_list = [
int(listener['load_balancer_port']),
int(listener['instance_port']),
str(listener['protocol'].upper()),
]
# Instance protocol is not required by ELB API; it defaults to match
# load balancer protocol. We'll mimic that behavior here
if 'instance_protocol' in listener:
listener_list.append(str(listener['instance_protocol'].upper()))
else:
listener_list.append(str(listener['protocol'].upper()))
if 'ssl_certificate_id' in listener:
listener_list.append(str(listener['ssl_certificate_id']))
return tuple(listener_list)
def _enable_zones(self, zones):
try:
self.elb.enable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _disable_zones(self, zones):
try:
self.elb.disable_zones(zones)
except boto.exception.BotoServerError as e:
if "Invalid Availability Zone" in e.error_message:
self.module.fail_json(msg=e.error_message)
else:
self.module.fail_json(msg="an unknown server error occurred, please try again later")
self.changed = True
def _attach_subnets(self, subnets):
self.elb_conn.attach_lb_to_subnets(self.name, subnets)
self.changed = True
def _detach_subnets(self, subnets):
self.elb_conn.detach_lb_from_subnets(self.name, subnets)
self.changed = True
def _set_subnets(self):
"""Determine which subnets need to be attached or detached on the ELB"""
if self.subnets:
if self.purge_subnets:
subnets_to_detach = list(set(self.elb.subnets) - set(self.subnets))
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
else:
subnets_to_detach = None
subnets_to_attach = list(set(self.subnets) - set(self.elb.subnets))
if subnets_to_attach:
self._attach_subnets(subnets_to_attach)
if subnets_to_detach:
self._detach_subnets(subnets_to_detach)
def _set_zones(self):
"""Determine which zones need to be enabled or disabled on the ELB"""
if self.zones:
if self.purge_zones:
zones_to_disable = list(set(self.elb.availability_zones) -
set(self.zones))
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
else:
zones_to_disable = None
zones_to_enable = list(set(self.zones) -
set(self.elb.availability_zones))
if zones_to_enable:
self._enable_zones(zones_to_enable)
# N.B. This must come second, in case it would have removed all zones
if zones_to_disable:
self._disable_zones(zones_to_disable)
def _set_security_groups(self):
if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
self.changed = True
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
# This just makes it easier to compare each of the attributes
# and look for changes. Keys are attributes of the current
# health_check; values are desired values of new health_check
health_check_config = {
"target": self._get_health_check_target(),
"timeout": self.health_check['response_timeout'],
"interval": self.health_check['interval'],
"unhealthy_threshold": self.health_check['unhealthy_threshold'],
"healthy_threshold": self.health_check['healthy_threshold'],
}
update_health_check = False
# The health_check attribute is *not* set on newly created
# ELBs! So we have to create our own.
if not self.elb.health_check:
self.elb.health_check = HealthCheck()
for attr, desired_value in health_check_config.iteritems():
if getattr(self.elb.health_check, attr) != desired_value:
setattr(self.elb.health_check, attr, desired_value)
update_health_check = True
if update_health_check:
self.elb.configure_health_check(self.elb.health_check)
self.changed = True
def _check_attribute_support(self, attr):
return hasattr(boto.ec2.elb.attributes.LbAttributes(), attr)
def _set_cross_az_load_balancing(self):
attributes = self.elb.get_attributes()
if self.cross_az_load_balancing:
if not attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = True
else:
if attributes.cross_zone_load_balancing.enabled:
self.changed = True
attributes.cross_zone_load_balancing.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
attributes.cross_zone_load_balancing.enabled)
def _set_access_log(self):
attributes = self.elb.get_attributes()
if self.access_logs:
if 's3_location' not in self.access_logs:
self.module.fail_json(msg='s3_location information required')
access_logs_config = {
"enabled": True,
"s3_bucket_name": self.access_logs['s3_location'],
"s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
"emit_interval": self.access_logs.get('interval', 60),
}
update_access_logs_config = False
for attr, desired_value in access_logs_config.iteritems():
if getattr(attributes.access_log, attr) != desired_value:
setattr(attributes.access_log, attr, desired_value)
update_access_logs_config = True
if update_access_logs_config:
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
self.changed = True
elif attributes.access_log.enabled:
attributes.access_log.enabled = False
self.changed = True
self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
def _set_connection_draining_timeout(self):
attributes = self.elb.get_attributes()
if self.connection_draining_timeout is not None:
if not attributes.connection_draining.enabled or \
attributes.connection_draining.timeout != self.connection_draining_timeout:
self.changed = True
attributes.connection_draining.enabled = True
attributes.connection_draining.timeout = self.connection_draining_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
else:
if attributes.connection_draining.enabled:
self.changed = True
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
if attributes.connecting_settings.idle_timeout != self.idle_timeout:
self.changed = True
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
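# Derive a policy name prefix from this module's filename, e.g. 'ec2-elb-lb-LBCookieStickinessPolicyType'.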
return __file__.split('/')[-1].split('.')[0].replace('_', '-') + '-' + policy_type
def _create_policy(self, policy_param, policy_meth, policy):
getattr(self.elb_conn, policy_meth )(policy_param, self.elb.name, policy)
def _delete_policy(self, elb_name, policy):
self.elb_conn.delete_lb_policy(elb_name, policy)
def _update_policy(self, policy_param, policy_meth, policy_attr, policy):
self._delete_policy(self.elb.name, policy)
self._create_policy(policy_param, policy_meth, policy)
def _set_listener_policy(self, listeners_dict, policy=[]):
for listener_port in listeners_dict:
if listeners_dict[listener_port].startswith('HTTP'):
self.elb_conn.set_lb_policies_of_listener(self.elb.name, listener_port, policy)
def _set_stickiness_policy(self, elb_info, listeners_dict, policy, **policy_attrs):
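# If a policy with this name already exists but its stored attribute differs, replace it in place; otherwise create it, then (re)attach it to the HTTP(S) listeners.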
for p in getattr(elb_info.policies, policy_attrs['attr']):
if str(p.__dict__['policy_name']) == str(policy[0]):
if str(p.__dict__[policy_attrs['dict_key']]) != str(policy_attrs['param_value'] or 0):
self._set_listener_policy(listeners_dict)
self._update_policy(policy_attrs['param_value'], policy_attrs['method'], policy_attrs['attr'], policy[0])
self.changed = True
break
else:
self._create_policy(policy_attrs['param_value'], policy_attrs['method'], policy[0])
self.changed = True
self._set_listener_policy(listeners_dict, policy)
def select_stickiness_policy(self):
if self.stickiness:
if 'cookie' in self.stickiness and 'expiration' in self.stickiness:
self.module.fail_json(msg='\'cookie\' and \'expiration\' can not be set at the same time')
elb_info = self.elb_conn.get_all_load_balancers(self.elb.name)[0]
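# Map each load balancer port to its protocol so policies are only applied to HTTP(S) listeners later on.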
d = {}
for listener in elb_info.listeners:
d[listener[0]] = listener[2]
listeners_dict = d
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')
expiration = self.stickiness['expiration'] if self.stickiness['expiration'] != 0 else None
policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
else:
self.changed = False
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.module.boolean(self.stickiness['enabled']) == True:
if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')
policy_attrs = {
'type': policy_type,
'attr': 'app_cookie_stickiness_policies',
'method': 'create_app_cookie_stickiness_policy',
'dict_key': 'cookie_name',
'param_value': self.stickiness['cookie']
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True
self._set_listener_policy(listeners_dict)
self._delete_policy(self.elb.name, self._policy_name(policy_type))
else:
self._set_listener_policy(listeners_dict)
def _get_backend_policies(self):
"""Get a list of backend policies"""
policies = []
if self.elb.backends is not None:
for backend in self.elb.backends:
if backend.policies is not None:
for policy in backend.policies:
policies.append(str(backend.instance_port) + ':' + policy.policy_name)
return policies
def _set_backend_policies(self):
"""Sets policies for all backends"""
ensure_proxy_protocol = False
replace = []
backend_policies = self._get_backend_policies()
# Find out what needs to be changed
for listener in self.listeners:
want = False
if 'proxy_protocol' in listener and listener['proxy_protocol']:
ensure_proxy_protocol = True
want = True
if str(listener['instance_port']) + ':ProxyProtocol-policy' in backend_policies:
if not want:
replace.append({'port': listener['instance_port'], 'policies': []})
elif want:
replace.append({'port': listener['instance_port'], 'policies': ['ProxyProtocol-policy']})
# enable or disable proxy protocol
if ensure_proxy_protocol:
self._set_proxy_protocol_policy()
# Make the backend policies so
for item in replace:
self.elb_conn.set_lb_policies_of_backend_server(self.elb.name, item['port'], item['policies'])
self.changed = True
def _get_proxy_protocol_policy(self):
"""Find out if the elb has a proxy protocol enabled"""
if self.elb.policies is not None and self.elb.policies.other_policies is not None:
for policy in self.elb.policies.other_policies:
if policy.policy_name == 'ProxyProtocol-policy':
return policy.policy_name
return None
def _set_proxy_protocol_policy(self):
"""Install a proxy protocol policy if needed"""
proxy_policy = self._get_proxy_protocol_policy()
if proxy_policy is None:
self.elb_conn.create_lb_policy(
self.elb.name, 'ProxyProtocol-policy', 'ProxyProtocolPolicyType', {'ProxyProtocol': True}
)
self.changed = True
# TODO: remove proxy protocol policy if not needed anymore? There is no side effect to leaving it there
def _diff_list(self, a, b):
"""Find the entries in list a that are not in list b"""
b = set(b)
return [aa for aa in a if aa not in b]
def _get_instance_ids(self):
"""Get the current list of instance ids installed in the elb"""
instances = []
if self.elb.instances is not None:
for instance in self.elb.instances:
instances.append(instance.id)
return instances
def _set_instance_ids(self):
"""Register or deregister instances from an lb instance"""
assert_instances = self.instance_ids or []
has_instances = self._get_instance_ids()
add_instances = self._diff_list(assert_instances, has_instances)
if add_instances:
self.elb_conn.register_instances(self.elb.name, add_instances)
self.changed = True
if self.purge_instance_ids:
remove_instances = self._diff_list(has_instances, assert_instances)
if remove_instances:
self.elb_conn.deregister_instances(self.elb.name, remove_instances)
self.changed = True
def _set_tags(self):
"""Add/Delete tags"""
if self.tags is None:
return
params = {'LoadBalancerNames.member.1': self.name}
tagdict = dict()
# get the current list of tags from the ELB, if ELB exists
if self.elb:
current_tags = self.elb_conn.get_list('DescribeTags', params,
[('member', Tag)])
tagdict = dict((tag.Key, tag.Value) for tag in current_tags
if hasattr(tag, 'Key'))
# Add missing tags
dictact = dict(set(self.tags.items()) - set(tagdict.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
params['Tags.member.%d.Value' % (i + 1)] = dictact[key]
self.elb_conn.make_request('AddTags', params)
self.changed = True
# Remove extra tags
dictact = dict(set(tagdict.items()) - set(self.tags.items()))
if dictact:
for i, key in enumerate(dictact):
params['Tags.member.%d.Key' % (i + 1)] = key
self.elb_conn.make_request('RemoveTags', params)
self.changed = True
def _get_health_check_target(self):
"""Compose target string from healthcheck parameters"""
protocol = self.health_check['ping_protocol'].upper()
path = ""
if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check:
path = self.health_check['ping_path']
return "%s:%s%s" % (protocol, self.health_check['ping_port'], path)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
purge_listeners={'default': True, 'required': False, 'type': 'bool'},
instance_ids={'default': None, 'required': False, 'type': 'list'},
purge_instance_ids={'default': False, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
purge_zones={'default': False, 'required': False, 'type': 'bool'},
security_group_ids={'default': None, 'required': False, 'type': 'list'},
security_group_names={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
subnets={'default': None, 'required': False, 'type': 'list'},
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
idle_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'},
wait={'default': False, 'type': 'bool', 'required': False},
wait_timeout={'default': 60, 'type': 'int', 'required': False},
tags={'default': None, 'required': False, 'type': 'dict'}
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['security_group_ids', 'security_group_names']]
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
listeners = module.params['listeners']
purge_listeners = module.params['purge_listeners']
instance_ids = module.params['instance_ids']
purge_instance_ids = module.params['purge_instance_ids']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
security_group_ids = module.params['security_group_ids']
security_group_names = module.params['security_group_names']
health_check = module.params['health_check']
access_logs = module.params['access_logs']
subnets = module.params['subnets']
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
wait = module.params['wait']
wait_timeout = module.params['wait_timeout']
tags = module.params['tags']
if state == 'present' and not listeners:
module.fail_json(msg="At least one listener is required for ELB creation")
if state == 'present' and not (zones or subnets):
module.fail_json(msg="At least one availability zone or subnet is required for ELB creation")
if wait_timeout > 600:
module.fail_json(msg='wait_timeout maximum is 600 seconds')
if security_group_names:
security_group_ids = []
try:
ec2 = ec2_connect(module)
if subnets: # We have at least one subnet, ergo this is a VPC
vpc_conn = _get_vpc_connection(module=module, region=region, aws_connect_params=aws_connect_params)
vpc_id = vpc_conn.get_all_subnets([subnets[0]])[0].vpc_id
filters = {'vpc_id': vpc_id}
else:
filters = None
grp_details = ec2.get_all_security_groups(filters=filters)
for group_name in security_group_names:
if isinstance(group_name, basestring):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
security_group_ids.extend(group_id)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness, wait, wait_timeout, tags,
region=region, instance_ids=instance_ids, purge_instance_ids=purge_instance_ids,
**aws_connect_params)
# check for unsupported attributes for this version of boto
if cross_az_load_balancing and not elb_man._check_attribute_support('cross_zone_load_balancing'):
module.fail_json(msg="You must install boto >= 2.18.0 to use the cross_az_load_balancing attribute")
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':
elb_man.ensure_gone()
ansible_facts = {'ec2_elb': 'info'}
ec2_facts_result = dict(changed=elb_man.changed,
elb=elb_man.get_info(),
ansible_facts=ansible_facts)
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
mkrupcale/ansible
|
lib/ansible/modules/cloud/amazon/ec2_elb_lb.py
|
Python
|
gpl-3.0
| 53,270
|
[
"Dalton"
] |
7f825946e8d28b1aaa2eeb707d04b8a748c8e7c1a9f330a09a152da248ca6288
|
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, sys
def open_in_browser(link):
browser = os.environ.get('BROWSER', 'firefox')
child = os.fork()
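# The child starts the browser with P_NOWAIT and exits immediately; the parent only reaps this short-lived child, so no zombie process is left behind.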
if child == 0:
# We are the child
try:
os.spawnlp(os.P_NOWAIT, browser, browser, link)
os._exit(0)
except Exception, ex:
print >>sys.stderr, "Error", ex
os._exit(1)
os.waitpid(child, 0)
|
pombredanne/zero-install
|
zeroinstall/0launch-gui/browser.py
|
Python
|
lgpl-2.1
| 419
|
[
"VisIt"
] |
ac0a460501b2c4709bdacb3f3844d65733e993467a46a18e5f94c78f5e965fa8
|
#!/usr/bin/env python2
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import sys
import time
import espressopp
import mpi4py.MPI as MPI
import unittest
class TestLangevinThermostat(unittest.TestCase):
global box
box = (10, 10, 10)
def setUp(self):
# set up system
system = espressopp.System()
rng = espressopp.esutil.RNG()
rng.seed(1)
system.rng = rng
system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
system.skin = 0.3
system.comm = MPI.COMM_WORLD
self.system = system
def test_normal(self):
# set up normal domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=0.3)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=0.3)
self.system.storage = espressopp.storage.DomainDecomposition(self.system, nodeGrid, cellGrid)
# add some particles (normal, coarse-grained particles only)
particle_list = [
(1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
self.system.storage.decompose()
# generate a verlet list
vl = espressopp.VerletList(self.system, cutoff=1.5)
# integrator
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
# Langevin Thermostat
langevin = espressopp.integrator.LangevinThermostat(self.system)
langevin.gamma = 1.0
langevin.temperature = 1.0
langevin.adress = False
langevin.addExclusions([1])
integrator.addExtension(langevin)
# coordinates of particles before integration
before =[self.system.storage.getParticle(i).pos[j] for i in range(1,4) for j in range(3)]
# run ten steps
integrator.run(10)
# coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,4) for j in range(3)]
# run checks (first particle excluded, hence it should not move. The other should have moved, however, as they feel the thermostat)
self.assertEqual(before[0], after[0])
self.assertEqual(before[1], after[1])
self.assertEqual(before[2], after[2])
self.assertNotEqual(before[3], after[3])
self.assertNotEqual(before[4], after[4])
self.assertNotEqual(before[5], after[5])
self.assertNotEqual(before[6], after[6])
self.assertNotEqual(before[7], after[7])
self.assertNotEqual(before[8], after[8])
def test_AdResS(self):
# set up AdResS domain decomposition
nodeGrid = espressopp.tools.decomp.nodeGrid(espressopp.MPI.COMM_WORLD.size,box,rc=1.5,skin=0.3)
cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc=1.5, skin=0.3)
self.system.storage = espressopp.storage.DomainDecompositionAdress(self.system, nodeGrid, cellGrid)
# add some particles (atomistic and coarse-grained particles now)
particle_list = [
(1, 1, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 0),
(2, 1, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 0),
(3, 1, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 0),
(4, 0, 0, espressopp.Real3D(5.5, 5.0, 5.0), 1.0, 1),
(5, 0, 0, espressopp.Real3D(6.5, 5.0, 5.0), 1.0, 1),
(6, 0, 0, espressopp.Real3D(7.5, 5.0, 5.0), 1.0, 1),
]
tuples = [(1,4),(2,5),(3,6)]
self.system.storage.addParticles(particle_list, 'id', 'type', 'q', 'pos', 'mass','adrat')
ftpl = espressopp.FixedTupleListAdress(self.system.storage)
ftpl.addTuples(tuples)
self.system.storage.setFixedTuplesAdress(ftpl)
self.system.storage.decompose()
# generate a verlet list
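# dEx and dHy set the widths of the explicit (atomistic) and hybrid regions around adrCenter.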
vl = espressopp.VerletListAdress(self.system, cutoff=1.5, adrcut=1.5,
dEx=1.0, dHy=1.0, adrCenter=[5.0, 5.0, 5.0], sphereAdr=False)
# integrator including AdResS
integrator = espressopp.integrator.VelocityVerlet(self.system)
integrator.dt = 0.01
adress = espressopp.integrator.Adress(self.system, vl, ftpl)
integrator.addExtension(adress)
# Langevin Thermostat
langevin = espressopp.integrator.LangevinThermostat(self.system)
langevin.gamma = 1.0
langevin.temperature = 1.0
langevin.adress = True
langevin.addExclusions([4])
integrator.addExtension(langevin)
# coordinates of particles before integration
before = [self.system.storage.getParticle(i).pos[j] for i in range(1,4) for j in range(3)]
# run ten steps
integrator.run(10)
# coordinates of particles after integration
after = [self.system.storage.getParticle(i).pos[j] for i in range(1,4) for j in range(3)]
# run checks (same as test before)
self.assertEqual(before[0], after[0])
self.assertEqual(before[1], after[1])
self.assertEqual(before[2], after[2])
self.assertNotEqual(before[3], after[3])
self.assertNotEqual(before[4], after[4])
self.assertNotEqual(before[5], after[5])
self.assertNotEqual(before[6], after[6])
self.assertNotEqual(before[7], after[7])
self.assertNotEqual(before[8], after[8])
if __name__ == '__main__':
unittest.main()
|
govarguz/espressopp
|
testsuite/LangevinThermostat/test_LangevinThermostat.py
|
Python
|
gpl-3.0
| 6,355
|
[
"ESPResSo"
] |
2f45fa3ed1311ca4826a855ebe443f83300560917d652d2483d4da64ef5b4ed4
|
# Dual4BitVerticalBargraph3x.py
#
# A DEMO DUAL 4 bit slow analogue bargraph generator in colour for STANDARD Python 3.x.x
# and Linux... This is a DUAL vertical version of the SINGLE one also given away by myself.
# It is written so that anyone can understand how it works.
#
# (Original copyright, (C)2011, B.Walker, G0LCU.)
#
# Saved as Dual4BitVerticalBargraph3x.py wherever you like.
#
# This DEMO goes from safe green, to warning amber, to danger red, with a critical
# error beep above 14 on both the vertical displays...
# It is a slow "AT A GLANCE" display for quick assessments, not for accuracy.
#
# Two system commands are required, "clear" and "setterm", for this to work.
# I assume that these are available on all recent and current Linux distros.
# The device /dev/audio is used so this must be free also.
#
# It is useful for quick "AT A GLANCE" readings from say two 8 bit ADCs used as simple
# voltmeters, ammeters, etc...
#
# To run use the following from inside a Python prompt...
# >>> exec(open("/full/path/to/code/Dual4BitVerticalBargraph3x.py").read())
#
# This looks like two "LED" style "VU" displays side by side...
# Add the required imports for this DEMO.
import os
import random
import time
# Just for this DEMO set up variables as global...
global count
global byteone
global bytetwo
global blank
global greenlines
global yellowlines
global redlines
global waveform
global unichar
global spacer
# Startup variable values here.
count=0
byteone=0
bytetwo=0
blank="(C)2011, B.Walker, G0LCU."
greenlines=blank
yellowlines=blank
redlines=blank
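# Two full-block characters (U+2588) form one solid bargraph segment.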
unichar=chr(0x2588)+chr(0x2588)
spacer=" ____ "
# This is a squarewave binary for the critical error beep(s).
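# Eight bytes per cycle gives a 1 kHz square wave at the 8 kHz sample rate that /dev/audio typically defaults to.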
waveform=b"\x00\x00\x00\x00\xff\xff\xff\xff"
def main():
# Disable the cursor as it looks much nicer... ;o)
os.system("setterm -cursor off")
while 1:
# Run continuously and use Ctrl-C to STOP!
blank="\033[0m "
# Generate two byte values as though grabbed from a serial, parallel or USB port.
# E.G... The Arduino Diecimila Dev Board as a multiple analogue source.
byteone=int(random.random()*256)
bytetwo=int(random.random()*256)
# Now divide by 16 to simulate 4 bit values.
byteone=int(byteone/16)
bytetwo=int(bytetwo/16)
# Although this should never occur, don't allow any errors.
if byteone>=15: byteone=15
if byteone<=0: byteone=0
if bytetwo>=15: bytetwo=15
if bytetwo<=0: bytetwo=0
# Do a full, clean, clear screen and start looping.
os.system("clear"),chr(13)," ",chr(13),
print("\033[0mDual Four Bit Level Vertical Analogue Bar Graph Display...")
print()
print("Original copyright, (C)2011, B.Walker, G0LCU.")
print()
print(blank+"\033[1;31m15 __ __ ____ __ __ 15")
redlines=blank+"\033[1;31m14 __ "
if byteone>=15: redlines=redlines+unichar+spacer
else: redlines=redlines+" "+spacer
if bytetwo>=15: redlines=redlines+unichar+" __ 14"
else: redlines=redlines+" __ 14"
print(redlines)
redlines=blank+"\033[1;31m13 __ "
if byteone>=14: redlines=redlines+unichar+spacer
else: redlines=redlines+" "+spacer
if bytetwo>=14: redlines=redlines+unichar+" __ 13"
else: redlines=redlines+" __ 13"
print(redlines)
yellowlines=blank+"\033[1;33m12 __ "
if byteone>=13: yellowlines=yellowlines+unichar+spacer
else: yellowlines=yellowlines+" "+spacer
if bytetwo>=13: yellowlines=yellowlines+unichar+" __ 12"
else: yellowlines=yellowlines+" __ 12"
print(yellowlines)
yellowlines=blank+"\033[1;33m11 __ "
if byteone>=12: yellowlines=yellowlines+unichar+spacer
else: yellowlines=yellowlines+" "+spacer
if bytetwo>=12: yellowlines=yellowlines+unichar+" __ 11"
else: yellowlines=yellowlines+" __ 11"
print(yellowlines)
yellowlines=blank+"\033[1;33m10 __ "
if byteone>=11: yellowlines=yellowlines+unichar+spacer
else: yellowlines=yellowlines+" "+spacer
if bytetwo>=11: yellowlines=yellowlines+unichar+" __ 10"
else: yellowlines=yellowlines+" __ 10"
print(yellowlines)
greenlines=blank+"\033[1;32m 9 __ "
if byteone>=10: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=10: greenlines=greenlines+unichar+" __ 9"
else: greenlines=greenlines+" __ 9"
print(greenlines)
greenlines=blank+"\033[1;32m 8 __ "
if byteone>=9: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=9: greenlines=greenlines+unichar+" __ 8"
else: greenlines=greenlines+" __ 8"
print(greenlines)
greenlines=blank+"\033[1;32m 7 __ "
if byteone>=8: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=8: greenlines=greenlines+unichar+" __ 7"
else: greenlines=greenlines+" __ 7"
print(greenlines)
greenlines=blank+"\033[1;32m 6 __ "
if byteone>=7: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=7: greenlines=greenlines+unichar+" __ 6"
else: greenlines=greenlines+" __ 6"
print(greenlines)
greenlines=blank+"\033[1;32m 5 __ "
if byteone>=6: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=6: greenlines=greenlines+unichar+" __ 5"
else: greenlines=greenlines+" __ 5"
print(greenlines)
greenlines=blank+"\033[1;32m 4 __ "
if byteone>=5: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=5: greenlines=greenlines+unichar+" __ 4"
else: greenlines=greenlines+" __ 4"
print(greenlines)
greenlines=blank+"\033[1;32m 3 __ "
if byteone>=4: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=4: greenlines=greenlines+unichar+" __ 3"
else: greenlines=greenlines+" __ 3"
print(greenlines)
greenlines=blank+"\033[1;32m 2 __ "
if byteone>=3: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=3: greenlines=greenlines+unichar+" __ 2"
else: greenlines=greenlines+" __ 2"
print(greenlines)
greenlines=blank+"\033[1;32m 1 __ "
if byteone>=2: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+" "+spacer
if bytetwo>=2: greenlines=greenlines+unichar+" __ 1"
else: greenlines=greenlines+" __ 1"
print(greenlines)
greenlines=blank+"\033[1;32m 0 __ "
if byteone>=1: greenlines=greenlines+unichar+spacer
else: greenlines=greenlines+"__"+spacer
if bytetwo>=1: greenlines=greenlines+unichar+" __ 0"
else: greenlines=greenlines+"__ __ 0"
print(greenlines)
# Print the two byte values onto the screen...
print("\033[1;34mByteone =",byteone,"\b, bytetwo =",bytetwo,"\b... ")
# Now reset to the default colours, etc...
print("\033[0mPress Ctrl-C to stop...")
time.sleep(1)
# Use two different beeps for the two displays.
# Both are different frequency squarewaves.
if byteone==15 or bytetwo==15:
# Select an error beep for each display...
if byteone==15: waveform=b"\x00\x00\x00\x00\xff\xff\xff\xff"
if bytetwo==15: waveform=b"\x00\x00\xff\xff\x00\x00\xff\xff"
# Set audio timing to zero, "0".
count=0
# Open up the audio device to write to.
# This could be /dev/dsp also...
audio=open("/dev/audio", "wb")
# A "count" value of 1 = 1mS, so 1000 = 1S.
while count<=1000:
# Send 8 bytes of data to the audio device 1000 times.
audio.write(waveform)
count=count+1
# Close the audio device access.
audio.close()
# Enable the cursor again if it ever gets here... ;oO
os.system("setterm -cursor on")
main()
# End of DEMO...
# Enjoy finding simple solutions to often very difficult problems...
|
ActiveState/code
|
recipes/Python/577685_DUAL_4_Bit_Vertical_Coloured_Analogue_Bar_Graph_/recipe-577685.py
|
Python
|
mit
| 7,602
|
[
"Amber"
] |
6036e84656480d28ec39678a0cb10c14d8479354a89b5d82310a0e9f5992ff43
|
from __future__ import print_function
import sys
import random
import os
import numpy as np
from builtins import range
import time
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_grid_search:
"""
PUBDEV-1843: Grid testing. Subtask 7,8.
This class is created to test the gridsearch with the GLM algo using Gaussian, Binomial or
Multinomial family. Three tests are written to test the following conditions:
1. For hyper-parameters containing all legal parameter names and parameter value lists with legal
and illegal values, grid-models should be built for all combinations of legal parameter values. For
illegal parameter values, a warning/error message should be printed out to warn the user but the
program should not throw an exception;
2. For hyper-parameters with illegal names, an exception should be thrown and no models should be built;
3. For parameters that are specified both in the hyper-parameters and model parameters, unless the values
specified in the model parameters are set to default values, an exception will be thrown since parameters are
not supposed to be specified in both places.
Test Descriptions:
test1_glm_grid_search_over_params: test for condition 1 and performs the following:
a. grab all truly griddable parameters and randomly or manually set the parameter values.
b. Next, build H2O GLM models using grid search. Count and make sure models
are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameters
values. We should instead get a warning/error message printed out.
c. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O GLM model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
for it as well. If max_runtime_secs was exceeded, declare test failure.
test2_illegal_name_value: test for condition 1 and 2. Randomly go into the hyper_parameters that we
have specified, either
a. randomly alter the name of a hyper-parameter name (fatal, exception will be thrown)
b. randomly choose a hyper-parameter and remove all elements in its list (fatal)
c. add randomly generated new hyper-parameter names with random list (fatal)
d: randomly choose a hyper-parameter and insert an illegal type into it (non fatal, model built with
legal hyper-parameters settings only and error messages printed out for illegal hyper-parameters
settings)
test3_duplicated_parameter_specification: test for condition 3. Go into our hyper_parameters list, randomly
choose some hyper-parameters to specify and specify it as part of the model parameters. Hence, the same
parameter is specified both in the model parameters and hyper-parameters. Make sure the test failed with
error messages when the parameter values are not set to default if they are specified in the model parameters
as well as in the hyper-parameters.
"""
# parameters set by users, change with care
max_grid_model = 200
curr_time = str(round(time.time())) # store current timestamp, used as part of filenames.
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training1_filename = ["smalldata/gridsearch/gaussian_training1_set.csv",
"smalldata/gridsearch/binomial_training1_set.csv",
"smalldata/gridsearch/multinomial_training1_set.csv"]
training2_filename = ["smalldata/gridsearch/gaussian_training2_set.csv",
"smalldata/gridsearch/binomial_training2_set.csv",
"smalldata/gridsearch/multinomial_training2_set.csv"]
json_filename = "gridsearch_hyper_parameter_" + curr_time + ".json"
json_filename_bad = "gridsearch_hyper_parameter_bad_" + curr_time + ".json"
weight_filename = "gridsearch_" + curr_time + "_weight.csv"
allowed_diff = 1e-5 # value of p-values difference allowed between theoretical and h2o p-values
allowed_runtime_diff = 0.15 # runtime difference allowed before comparing manually built model and
# gridsearch model metrics.
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
# following parameters are used to generate hyper-parameters
max_int_val = 10 # maximum size of random integer values
min_int_val = -10 # minimum size of random integer values
max_int_number = 3 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = -0.1 # minimum size of random float values
max_real_number = 3 # maximum number of real grid values to generate
lambda_scale = 50 # scale the lambda values above the usual 0 to 1 range
alpha_scale = 1.2 # scale alpha into bad ranges
time_scale = 3 # maximum runtime scale
extra_time_fraction = 0.5 # since timing is never perfect, give some extra time on top of maximum runtime limit
min_runtime_per_epoch = 0 # minimum run time found. Determined later
families = ['gaussian', 'binomial', 'multinomial'] # distribution family to perform grid search over
family = 'gaussian' # choose default family to be gaussian
test_name = "pyunit_glm_gridsearch_over_all_params_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
total_test_number = 3 # number of tests carried out
test_failed = 0 # count total number of tests that have failed
test_failed_array = [0] * total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params_bad = dict()
hyper_params_bad["fold_assignment"] = ['AUTO', 'Random', 'Modulo', "Stratified"]
hyper_params_bad["missing_values_handling"] = ['MeanImputation', 'Skip']
hyper_params = dict()
hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo', "Stratified"]
hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']
final_hyper_params_bad = dict() # actual hyper_params used to limit number of models built with bad values
final_hyper_params = dict() # actual hyper_params used to limit number of models built with only good values
scale_model = 1
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power', 'seed'] # do not need these
# these are supposed to be gridable but are not really
exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])
# these are excluded for extracting parameters to manually build H2O GLM models
exclude_parameter_lists.extend(['model_id'])
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
gridable_defaults = [] # store the gridable parameter default values
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
correct_model_number = 0 # count number of models built with bad hyper-parameter specification
true_correct_model_number = 0 # count number of models built with good hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
# denote legal values for certain parameters. May include other parameters for other algos.
params_zero_one = ['alpha', 'stopping_tolerance']
params_more_than_zero = []
params_more_than_one = []
params_zero_positive = ['max_runtime_secs', 'stopping_rounds', "lambda"] # >= 0
def __init__(self):
self.setup_data()
self.setup_model()
def setup_data(self):
"""
This function performs all initializations necessary:
1. Randomly choose which distribution family to use
2. load the correct data sets and set the training set indices and response column index
"""
# create and clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# randomly choose which family of GLM algo to use
self.family = self.families[random.randint(0, len(self.families) - 1)]
# set class number for classification
if 'binomial' in self.family:
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename[1]))
self.training2_data = h2o.import_file(path=pyunit_utils.locate(self.training2_filename[1]))
elif 'multinomial' in self.family:
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename[2]))
self.training2_data = h2o.import_file(path=pyunit_utils.locate(self.training2_filename[2]))
else:
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename[0]))
self.training2_data = h2o.import_file(path=pyunit_utils.locate(self.training2_filename[0]))
self.scale_model = 0.75
self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol - 1
self.x_indices = list(range(self.y_index))
# set response to be categorical for classification tasks
if ('binomial' in self.family) or ('multinomial' in self.family):
self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
self.training2_data[self.y_index] = self.training2_data[self.y_index].round().asfactor()
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_model(self):
"""
        This function sets up the gridsearch hyper-parameters that will be used later on:
        1. It will first try to grab all the parameters that are griddable and parameters used by GLM.
        2. It will find the intersection of parameters that are both griddable and used by GLM.
        3. There are several extra parameters that are used by GLM that are denoted as griddable but actually are not.
        These parameters have to be discovered manually and are captured in self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
"""
# build bare bone model to get all parameters
model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
run_time = pyunit_utils.find_grid_runtime([model]) # find model train time
print("Time taken to build a base barebone model is {0}".format(run_time))
summary_list = model._model_json["output"]["model_summary"]
num_iteration = summary_list.cell_values[0][summary_list.col_header.index("number_of_iterations")]
if num_iteration == 0:
self.min_runtime_per_epoch = run_time
else:
self.min_runtime_per_epoch = run_time / num_iteration
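        # e.g. (hypothetical numbers): a barebone build that took 2.0 seconds
        # over 10 iterations gives min_runtime_per_epoch = 0.2 seconds.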
# grab all gridable parameters and its type
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
# randomly generate griddable parameters including values outside legal range, like setting alpha values to
# be outside legal range of 0 and 1 and etc
(self.hyper_params_bad, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params_bad,
self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number),
self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number),
self.max_real_val*self.alpha_scale, self.min_real_val*self.alpha_scale)
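        # For illustration only (hypothetical values): after this call
        # self.hyper_params_bad may look like
        #   {'fold_assignment': ['AUTO', 'Random', 'Modulo', 'Stratified'],
        #    'missing_values_handling': ['MeanImputation', 'Skip'],
        #    'alpha': [-0.3, 0.7, 1.8], 'lambda': [0.05, 3.1],
        #    'max_runtime_secs': [-1.0, 0.6]}
        # where some alpha values deliberately fall outside the legal range [0, 1].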
# scale the value of lambda parameters
if "lambda" in list(self.hyper_params_bad):
self.hyper_params_bad["lambda"] = [self.lambda_scale * x for x in self.hyper_params_bad["lambda"]]
# scale the max_runtime_secs parameters
time_scale = self.time_scale * run_time
if "max_runtime_secs" in list(self.hyper_params_bad):
self.hyper_params_bad["max_runtime_secs"] = [time_scale * x for x
in self.hyper_params_bad["max_runtime_secs"]]
[self.possible_number_models, self.final_hyper_params_bad] = \
pyunit_utils.check_and_count_models(self.hyper_params_bad, self.params_zero_one, self.params_more_than_zero,
self.params_more_than_one, self.params_zero_positive,
self.max_grid_model)
if ("max_runtime_secs" not in list(self.final_hyper_params_bad)) and \
("max_runtime_secs" in list(self.hyper_params_bad)):
self.final_hyper_params_bad["max_runtime_secs"] = self.hyper_params_bad["max_runtime_secs"]
len_good_time = len([x for x in self.hyper_params_bad["max_runtime_secs"] if (x >= 0)])
self.possible_number_models = self.possible_number_models * len_good_time
# Stratified is illegal for Gaussian GLM
self.possible_number_models = self.possible_number_models * self.scale_model
# randomly generate griddable parameters with only good values
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number),
self.max_int_val, 0,
random.randint(1, self.max_real_number),
self.max_real_val, 0)
# scale the value of lambda parameters
if "lambda" in list(self.hyper_params):
self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]
# scale the max_runtime_secs parameters
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x
in self.hyper_params["max_runtime_secs"]]
[self.true_correct_model_number, self.final_hyper_params] = \
pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
self.params_more_than_one, self.params_zero_positive,
self.max_grid_model)
if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
("max_runtime_secs" in list(self.hyper_params)):
self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
self.true_correct_model_number = \
self.true_correct_model_number * len(self.final_hyper_params["max_runtime_secs"])
# write out the hyper-parameters used into json files.
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename_bad,
self.final_hyper_params_bad)
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.final_hyper_params)
def tear_down(self):
pyunit_utils.remove_files(os.path.join(self.current_dir, self.json_filename))
pyunit_utils.remove_files(os.path.join(self.current_dir, self.json_filename_bad))
def test1_glm_grid_search_over_params(self):
"""
test1_glm_grid_search_over_params: test for condition 1 and performs the following:
        a. grab all truly griddable parameters and randomly or manually set the parameter values.
b. Next, build H2O GLM models using grid search. Count and make sure models
        are only built for hyper-parameters set to legal values. No model is built for bad hyper-parameter
        values. We should instead get a warning/error message printed out.
c. For each model built using grid search, we will extract the parameters used in building
that model and manually build a H2O GLM model. Training metrics are calculated from the
gridsearch model and the manually built model. If their metrics
differ by too much, print a warning message but don't fail the test.
        d. we will check and make sure the models are built within the max_runtime_secs time limit that was set
        for them as well. If max_runtime_secs was exceeded, declare test failure.
"""
print("*******************************************************************************************")
print("test1_glm_grid_search_over_params for GLM " + self.family)
h2o.cluster_info()
try:
print("Hyper-parameters used here is {0}".format(self.final_hyper_params_bad))
# start grid search
grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=self.final_hyper_params_bad)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.correct_model_number = len(grid_model) # store number of models built
# make sure the correct number of models are built by gridsearch
if (self.correct_model_number - self.possible_number_models)>0.9: # wrong grid model number
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test_glm_search_over_params for GLM failed: number of models built by gridsearch: {0} "
"does not equal to all possible combinations of hyper-parameters: "
"{1}".format(self.correct_model_number, self.possible_models))
else:
# add parameters into params_dict. Use this to manually build model
params_dict = dict()
params_dict["family"] = self.family
params_dict["nfolds"] = self.nfolds
total_run_time_limits = 0.0 # calculate upper bound of max_runtime_secs
true_run_time_limits = 0.0
manual_run_runtime = 0.0
# compare MSE performance of model built by gridsearch with manually built model
for each_model in grid_model:
# grab parameters used by grid search and build a dict out of it
params_list = grid_model.get_hyperparams_dict(each_model._id)
params_list.update(params_dict)
model_params = dict() # some parameters are to be added in .train()
if "lambda" in list(params_list):
params_list["Lambda"] = params_list["lambda"]
del params_list["lambda"]
                    # need to take max_runtime_secs, stopping_rounds, stopping_tolerance
                    # out of the model parameters; they are now set in .train()
if "max_runtime_secs" in params_list:
model_params["max_runtime_secs"] = params_list["max_runtime_secs"]
del params_list["max_runtime_secs"]
if "stopping_rounds" in params_list:
model_params["stopping_rounds"] = params_list["stopping_rounds"]
del params_list["stopping_rounds"]
if "stopping_tolerance" in params_list:
model_params["stopping_tolerance"] = params_list["stopping_tolerance"]
del params_list["stopping_tolerance"]
manual_model = H2OGeneralizedLinearEstimator(**params_list)
manual_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data,
**model_params)
# collect the time taken to manually built all models
model_runtime = pyunit_utils.find_grid_runtime([manual_model]) # time taken to build this model
manual_run_runtime += model_runtime
summary_list = manual_model._model_json['output']['model_summary']
                    iteration_num = summary_list.cell_values[0][summary_list.col_header.index("number_of_iterations")]
                    if model_params.get("max_runtime_secs", 0) > 0:
# shortest possible time it takes to build this model
if (model_params["max_runtime_secs"] < self.min_runtime_per_epoch) or (iteration_num <= 1):
total_run_time_limits += model_runtime
else:
total_run_time_limits += model_params["max_runtime_secs"]
true_run_time_limits += model_params["max_runtime_secs"]
# compute and compare test metrics between the two models
grid_model_metrics = each_model.model_performance(test_data=self.training2_data)
manual_model_metrics = manual_model.model_performance(test_data=self.training2_data)
# just compare the mse in this case within tolerance:
if not((type(grid_model_metrics.mse()) == str) or (type(manual_model_metrics.mse()) == str)):
mse = grid_model_metrics.mse()
if abs(mse) > 0 and abs(mse - manual_model_metrics.mse()) / mse > self.allowed_diff:
print("test1_glm_grid_search_over_params for GLM warning: grid search model metric ({0}) "
"and manually built H2O model metric ({1}) differ too much"
"!".format(grid_model_metrics.mse(), manual_model_metrics.mse()))
total_run_time_limits = max(total_run_time_limits, true_run_time_limits) * (1+self.extra_time_fraction)
# make sure the correct number of models are built by gridsearch
if not (self.correct_model_number == self.possible_number_models): # wrong grid model number
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test1_glm_grid_search_over_params for GLM failed: number of models built by gridsearch "
"does not equal to all possible combinations of hyper-parameters")
# make sure the max_runtime_secs is working to restrict model built time, GLM does not respect that.
if not(manual_run_runtime <= total_run_time_limits):
# self.test_failed += 1
# self.test_failed_array[self.test_num] = 1
print("test1_glm_grid_search_over_params for GLM warning: allow time to build models: {0}, actual "
"time taken: {1}".format(total_run_time_limits, manual_run_runtime))
self.test_num += 1
if self.test_failed == 0:
print("test1_glm_grid_search_over_params for GLM has passed!")
except:
if self.possible_number_models > 0:
print("test1_glm_grid_search_over_params for GLM failed: exception was thrown for no reason.")
def test2_illegal_name_value(self):
"""
test2_illegal_name_value: test for condition 1 and 2. Randomly go into the hyper_parameters that we
have specified, either
a. randomly alter the name of a hyper-parameter name (fatal, exception will be thrown)
b. randomly choose a hyper-parameter and remove all elements in its list (fatal)
c. add randomly generated new hyper-parameter names with random list (fatal)
        d. randomly choose a hyper-parameter and insert an illegal type into it (non fatal, model built with
legal hyper-parameters settings only and error messages printed out for illegal hyper-parameters
settings)
The following error conditions will be created depending on the error_number generated:
error_number = 0: randomly alter the name of a hyper-parameter name;
error_number = 1: randomly choose a hyper-parameter and remove all elements in its list
error_number = 2: add randomly generated new hyper-parameter names with random list
error_number = 3: randomly choose a hyper-parameter and insert an illegal type into it
:return: None
"""
print("*******************************************************************************************")
print("test2_illegal_name_value for GLM " + self.family)
h2o.cluster_info()
error_number = np.random.random_integers(0, 3, 1) # randomly choose an error
error_hyper_params = \
pyunit_utils.insert_error_grid_search(self.final_hyper_params, self.gridable_parameters,
self.gridable_types, error_number[0])
print("test2_illegal_name_value: the bad hyper-parameters are: ")
print(error_hyper_params)
# copied from Eric to catch execution run errors and not quit
try:
grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
hyper_params=error_hyper_params)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
if error_number[0] > 2:
                # grid search should not fail in this case; check the number of models built.
if not (len(grid_model) == self.true_correct_model_number):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test2_illegal_name_value failed. Number of model generated is "
"incorrect.")
else:
print("test2_illegal_name_value passed.")
else: # other errors should cause exceptions being thrown and if not, something is wrong.
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test2_illegal_name_value failed: exception should have been thrown for illegal"
"parameter name or empty hyper-parameter parameter list but did not!")
except:
if (error_number[0] <= 2) and (error_number[0] >= 0):
print("test2_illegal_name_value passed: exception is thrown for illegal parameter name or empty"
"hyper-parameter parameter list.")
else:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test2_illegal_name_value failed: exception should not have been thrown but did!")
self.test_num += 1
def test3_duplicated_parameter_specification(self):
"""
This function will randomly choose a parameter in hyper_parameters and specify it as a parameter in the
model. Depending on the random error_number generated, the following is being done to the model parameter
and hyper-parameter:
error_number = 0: set model parameter to be a value in the hyper-parameter value list, should
generate error;
error_number = 1: set model parameter to be default value, should not generate error in this case;
error_number = 2: make sure model parameter is not set to default and choose a value not in the
hyper-parameter value list.
:return: None
"""
print("*******************************************************************************************")
print("test3_duplicated_parameter_specification for GLM " + self.family)
error_number = np.random.random_integers(0, 2, 1) # randomly choose an error
print("error_number is {0}".format(error_number[0]))
params_dict, error_hyper_params = \
pyunit_utils.generate_redundant_parameters(self.final_hyper_params, self.gridable_parameters,
self.gridable_defaults, error_number[0])
params_dict["family"] = self.family
params_dict["nfolds"] = self.nfolds
# remove stopping_rounds, stopping_tolerance if included
if "stopping_rounds" in list(params_dict):
del params_dict["stopping_rounds"]
if "stopping_tolerance" in list(params_dict):
del params_dict["stopping_tolerance"]
print("Your hyper-parameter dict is: ")
print(error_hyper_params)
print("Your model parameters are: ")
print(params_dict)
# copied from Eric to catch execution run errors and not quit
try:
grid_model = H2OGridSearch(H2OGeneralizedLinearEstimator(**params_dict),
hyper_params=error_hyper_params)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
# if error_number = 1, it is okay. Else it should fail.
if not (error_number[0] == 1):
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_duplicated_parameter_specification failed: Java error exception should have been "
"thrown but did not!")
else:
print("test3_duplicated_parameter_specification passed: Java error exception should not have "
"been thrown and did not!")
except Exception as e:
if error_number[0] == 1:
self.test_failed += 1
self.test_failed_array[self.test_num] = 1
print("test3_duplicated_parameter_specification failed: Java error exception ({0}) should not "
"have been thrown! ".format(e))
else:
print("test3_duplicated_parameter_specification passed: Java error exception ({0}) should "
"have been thrown and did.".format(e))
def test_grid_search_for_glm_over_all_params():
"""
Create and instantiate class and perform tests specified for GLM
:return: None
"""
test_glm_grid = Test_glm_grid_search()
test_glm_grid.test1_glm_grid_search_over_params()
test_glm_grid.test2_illegal_name_value()
test_glm_grid.test3_duplicated_parameter_specification()
sys.stdout.flush()
if test_glm_grid.test_failed: # exit with error if any tests have failed
sys.exit(1)
else: # remove json files if everything passes
test_glm_grid.tear_down()
if __name__ == "__main__":
pyunit_utils.standalone_test(test_grid_search_for_glm_over_all_params)
else:
test_grid_search_for_glm_over_all_params()
|
mathemage/h2o-3
|
h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gridsearch_over_all_params_large.py
|
Python
|
apache-2.0
| 33,225
|
[
"Gaussian"
] |
189b9386cc2c7dec30982da7e38a238f358e067b3ae58388bd5bde5e14db8b94
|
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.graph.commonsense import Commonsense
# A semantic network is a graph in which each node represents a concept
# (e.g., flower, red, rose) and each edge represents a relation between
# concepts, for example rose is-a flower, red is-property-of rose.
# Module pattern.graph.commonsense implements a semantic network of commonsense.
# It contains a Concept class (Node subclass), Relation class (Edge subclass),
# and a Commonsense class (Graph subclass).
# It contains about 10,000 manually annotated relations between mundane concepts,
# for example gondola is-related-to romance, or spoon is-related-to soup.
# This is the PERCEPTION dataset. See the visualizer at:
# http://nodebox.net/perception/
# Relation.type can be:
# - is-a,
# - is-part-of,
# - is-opposite-of,
# - is-property-of,
# - is-related-to,
# - is-same-as,
# - is-effect-of.
g = Commonsense()
g.add_node("spork")
g.add_edge("spork", "spoon", type="is-a")
# Concept.halo is a list of concepts surrounding the given concept,
# and as such they reinforce its meaning:
print()
print(g["spoon"].halo) # fork, etiquette, slurp, hot, soup, mouth, etc.
# Concept.properties is a list of properties (= adjectives) in the halo,
# sorted by betweenness centrality:
print()
print(g["spoon"].properties) # hot
# Commonsense.field() returns a list of concepts
# that belong to the given class (or "semantic field"):
print()
print(g.field("color", depth=3, fringe=2)) # brown, orange, blue, ...
#print g.field("person") # Leonard Nimoy, Al Capone, ...
#print g.field("building") # opera house, supermarket, ...
# Commonsense.similarity() calculates the similarity between two concepts,
# based on common properties between both
# (e.g., tigers and zebras are both striped).
print()
print(g.similarity("tiger", "zebra"))
print(g.similarity("tiger", "amoeba"))
# Commonsense.nearest_neighbors() compares the properties of a given concept
# to a list of other concepts, and selects the concept from the list that
# is most similar to the given concept.
# This will take some time to calculate (thinking is hard).
print()
print("Creepy animals:")
print(g.nearest_neighbors("creepy", g.field("animal"))[:10])
print()
print("Party animals:")
print(g.nearest_neighbors("party", g.field("animal"))[:10])
# Creepy animals are: owl, vulture, octopus, bat, raven, ...
# Party animals are: puppy, grasshopper, reindeer, dog, ...
|
clips/pattern
|
examples/06-graph/06-commonsense.py
|
Python
|
bsd-3-clause
| 2,601
|
[
"Octopus"
] |
d5dca4dc1ef32280aa3a298e5a311b696222d6faa57c31c996b5425feaae78b3
|
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorClient import SystemAdministratorClient
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.FrameworkSystem.Client.SystemAdministratorIntegrator import SystemAdministratorIntegrator
from WebAppDIRAC.Lib.WebHandler import WebHandler, WErr, WOK, asyncGen
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC import gConfig, S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import Time
from DIRAC.Core.Utilities.List import uniqueElements
import json
import datetime
import DIRAC.ConfigurationSystem.Client.Helpers.Registry as Registry
class SystemAdministrationHandler( WebHandler ):
AUTH_PROPS = "authenticated"
@asyncGen
def web_getSysInfo( self ):
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
callback = []
import pprint
# self.finish( { "success" : "false" , "error" : "No system information found" } )
# return
client = SystemAdministratorIntegrator( delegatedDN = DN ,
delegatedGroup = group )
resultHosts = yield self.threadTask( client.getHostInfo )
if resultHosts[ "OK" ]:
hosts = resultHosts['Value']
for i in hosts:
if hosts[i]['OK']:
host = hosts[i]['Value']
host['Host'] = i
callback.append( host )
else:
callback.append( {'Host':i} )
else:
self.finish( { "success" : "false" , "error" : resultHosts['Message']} )
total = len( callback )
if not total > 0:
self.finish( { "success" : "false" , "error" : "No system information found" } )
return
callback = sorted(callback, key=lambda i: i['Host'])
self.finish( { "success" : "true" , "result" : callback , "total" : total } )
@asyncGen
def web_getHostData( self ):
"""
    Returns a flattened list of components (services, agents) installed on the
    hosts returned by the getHosts function
"""
# checkUserCredentials()
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
callback = list()
    if not ( ( "hostname" in self.request.arguments ) and ( len( self.request.arguments["hostname"][0] ) > 0 ) ):
self.finish( { "success" : "false" , "error" : "Name of the host is absent" } )
return
host = self.request.arguments["hostname"][0]
client = SystemAdministratorClient( host , None , delegatedDN = DN ,
delegatedGroup = group )
result = yield self.threadTask( client.getOverallStatus )
gLogger.debug( "Result of getOverallStatus(): %s" % result )
if not result[ "OK" ]:
self.finish( { "success" : "false" , "error" : result[ "Message" ] } )
return
overall = result[ "Value" ]
for record in self.flatten( overall ):
record[ "Host" ] = host
callback.append( record )
self.finish( { "success" : "true" , "result" : callback } )
def flatten( self , dataDict ):
"""
    Flatten the dict-of-dicts structure returned by the getOverallStatus() method
    of the SystemAdministrator client
"""
for kind , a in dataDict.items():
for system , b in a.items():
for name , c in b.items():
if ( "Installed" in c ) and ( c[ "Installed" ] ):
c[ "Type" ] = kind
c[ "System" ] = system
c[ "Name" ] = name
yield c
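  # For illustration (hypothetical data): given
  #   { "Services" : { "Framework" : { "Monitoring" : { "Installed" : True } } } }
  # flatten() yields
  #   { "Installed" : True , "Type" : "Services" , "System" : "Framework" , "Name" : "Monitoring" }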
@asyncGen
def web_getHostErrors( self ):
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
if not "host" in self.request.arguments:
self.finish( { "success" : "false" , "error" : "Name of the host is missing or not defined" } )
return
host = str( self.request.arguments[ "host" ][0] )
client = SystemAdministratorClient( host , None , delegatedDN = DN , delegatedGroup = group )
result = yield self.threadTask( client.checkComponentLog, "*" )
gLogger.debug( result )
if not result[ "OK" ]:
self.finish( { "success" : "false" , "error" : result[ "Message" ] } )
return
result = result[ "Value" ]
callback = list()
for key, value in result.items():
system, component = key.split( "/" )
value[ "System" ] = system
value[ "Name" ] = component
value[ "Host" ] = host
callback.append( value )
total = len( callback )
self.finish( { "success" : "true" , "result" : callback , "total" : total } )
@asyncGen
def web_getHostLog( self ):
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
if not "host" in self.request.arguments:
self.finish( { "success" : "false" , "error":"Name of the host is missing or not defined"} )
return
host = str( self.request.arguments[ "host" ][0] )
if not "system" in self.request.arguments:
self.finish( { "success" : "false" , "error":"Name of the system is missing or not defined"} )
return
system = str( self.request.arguments[ "system" ][0] )
if not "component" in self.request.arguments:
self.finish( { "success" : "false" , "error":"Name of component is missing or not defined"} )
return
name = str( self.request.arguments[ "component" ][0] )
client = SystemAdministratorClient( host , None , delegatedDN = DN , delegatedGroup = group )
result = yield self.threadTask( client.getLogTail, system , name )
gLogger.debug( result )
if not result[ "OK" ]:
self.finish( { "success" : "false" , "error":result[ "Message" ]} )
return
result = result[ "Value" ]
key = system + "_" + name
if not key in result:
self.finish( { "success" : "false" , "error":"%s key is absent in service response" % key} )
return
log = result[ key ]
self.finish( { "success" : "true" , "result":log.replace( "\n" , "<br>" )} )
@asyncGen
def web_hostAction( self ):
"""
    Perform the requested action (restart or revert) on all DIRAC components of the given host(s)
"""
if not "host" in self.request.arguments:
self.finish( { "success" : "false" , "error" : "No hostname defined" } )
return
if not "action" in self.request.arguments:
self.finish( { "success" : "false" , "error" : "No action defined" } )
return
action = str( self.request.arguments[ "action" ][0] )
hosts = self.request.arguments[ "host" ][0].split( "," )
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
actionSuccess = list()
actionFailed = list()
for i in hosts:
client = SystemAdministratorClient( str( i ) , None , delegatedDN = DN ,
delegatedGroup = group )
if action is "restart":
result = yield self.threadTask( client.restartComponent, str( "*" ) , str( "*" ) )
elif action is "revert":
result = yield self.threadTask( client.revertSoftware )
else:
error = i + ": Action %s is not defined" % action
actionFailed.append( error )
continue
gLogger.always( result )
if not result[ "OK" ]:
if result[ "Message" ].find( "Unexpected EOF" ) > 0:
msg = "Signal 'Unexpected EOF' received. Most likely DIRAC components"
msg = i + ": " + msg + " were successfully restarted."
actionSuccess.append( msg )
continue
error = i + ": " + result[ "Message" ]
actionFailed.append( error )
gLogger.error( error )
else:
gLogger.info( result[ "Value" ] )
actionSuccess.append( i )
self.finish( self.aftermath( actionSuccess, actionFailed, action, "Host" ) )
@asyncGen
def web_componentAction( self ):
"""
    Actions which should be done on components. The only parameter is the action
    to perform.
    Returns the standard JSON response structure with the service response
    or error messages
"""
userData = self.getSessionData()
DN = str( userData["user"]["DN"] )
group = str( userData["user"]["group"] )
if not ( ( "action" in self.request.arguments ) and ( len( self.request.arguments[ "action" ][0] ) > 0 ) ):
self.finish( { "success" : "false" , "error" : "No action defined" } )
return
action = str( self.request.arguments[ "action" ][0] )
if action not in [ "restart" , "start" , "stop" ]:
error = "The request parameters action '%s' is unknown" % action
gLogger.debug( error )
self.finish( { "success" : "false" , "error" : error } )
return
result = dict()
for i in self.request.arguments:
if i == "action":
continue
target = i.split( " @ " , 1 )
if not len( target ) == 2:
continue
system = self.request.arguments[ i ][0]
gLogger.always( "System: %s" % system )
host = target[ 1 ]
gLogger.always( "Host: %s" % host )
component = target[ 0 ]
gLogger.always( "Component: %s" % component )
if not host in result:
result[ host ] = list()
result[ host ].append( [ system , component ] )
    if not len( result ) > 0:
      error = "Failed to get component(s) for %s" % action
      gLogger.debug( error )
      self.finish( { "success" : "false" , "error" : error } )
      return
gLogger.always( result )
actionSuccess = list()
actionFailed = list()
for hostname in result.keys():
if not len( result[ hostname ] ) > 0:
continue
client = SystemAdministratorClient( hostname , None , delegatedDN = DN ,
delegatedGroup = group )
for i in result[ hostname ]:
system = i[ 0 ]
component = i[ 1 ]
try:
if action == "restart":
result = yield self.threadTask( client.restartComponent, system , component )
elif action == "start":
result = yield self.threadTask( client.startComponent, system , component )
elif action == "stop":
result = yield self.threadTask( client.stopComponent, system , component )
          else:
            result = S_ERROR( "Action %s is not valid" % action )
        except Exception, x:
          result = S_ERROR( "Exception: %s" % str( x ) )
gLogger.debug( "Result: %s" % result )
if not result[ "OK" ]:
error = hostname + ": " + result[ "Message" ]
actionFailed.append( error )
gLogger.error( "Failure during component %s: %s" % ( action , error ) )
else:
gLogger.always( "Successfully %s component %s" % ( action , component ) )
actionSuccess.append( component )
self.finish( self.aftermath( actionSuccess, actionFailed, action, "Component" ) )
def aftermath( self, actionSuccess, actionFailed, action, prefix ):
success = ", ".join( actionSuccess )
failure = "\n".join( actionFailed )
if len( actionSuccess ) > 1:
sText = prefix + "s"
else:
sText = prefix
if len( actionFailed ) > 1:
fText = prefix + "s"
else:
fText = prefix
if len( success ) > 0 and len( failure ) > 0:
sMessage = "%s %sed successfully: " % ( sText , action , success )
fMessage = "Failed to %s %s:\n%s" % ( action , fText , failure )
result = sMessage + "\n\n" + fMessage
return { "success" : "true" , "result" : result }
elif len( success ) > 0 and len( failure ) < 1:
result = "%s %sed successfully: %s" % ( sText , action , success )
return { "success" : "true" , "result" : result }
elif len( success ) < 1 and len( failure ) > 0:
result = "Failed to %s %s:\n%s" % ( action , fText , failure )
gLogger.always( result )
return { "success" : "false" , "error" : result }
else:
result = "No action has performed due technical failure. Check the logs please"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
def web_getUsersGroups( self ):
result = gConfig.getSections( "/Registry/Users" )
if not result[ "OK" ]:
self.write( { "success" : "false" , "error" : result[ "Message" ] } )
return
result = result[ "Value" ]
users = map( lambda x : [x] , result )
result = gConfig.getSections( "/Registry/Groups" )
if not result[ "OK" ]:
self.write( { "success" : "false" , "error" : result[ "Message" ] } )
return
result = result[ "Value" ]
groups = map( lambda x : [x] , result )
self.write( { "success" : "true" , "users" : users, "groups" : groups, "email": self.getUserEmail() } )
def getUserEmail( self ):
userData = self.getSessionData()
user = userData["user"]["username"]
if not user:
gLogger.debug( "user value is empty" )
return None
if user == "anonymous":
gLogger.debug( "user is anonymous" )
return None
email = gConfig.getValue( "/Registry/Users/%s/Email" % user , "" )
gLogger.debug( "/Registry/Users/%s/Email - '%s'" % ( user , email ) )
    email = email.strip()
if not email:
return None
return email
def web_sendMessage( self ):
"""
    Send a message (not implemented yet) or an e-mail, getting parameters from the request
"""
email = self.getUserEmail()
if not "subject" in self.request.arguments:
result = "subject parameter is not in request... aborting"
gLogger.debug( result )
self.write( { "success" : "false" , "error" : result } )
return
subject = self.checkUnicode( self.request.arguments[ "subject" ][0] )
if not len( subject ) > 0:
subject = "Message from %s" % email
if not "message" in self.request.arguments:
result = "msg parameter is not in request... aborting"
gLogger.debug( result )
self.write( { "success" : "false" , "error" : result } )
return
body = self.checkUnicode( self.request.arguments[ "message" ][0] )
if not len( body ) > 0:
result = "Message body has zero length... aborting"
gLogger.debug( result )
self.write( { "success" : "false" , "error" : result } )
return
users = self.request.arguments[ "users" ][0].split( "," )
groups = self.request.arguments[ "groups" ][0].split( "," )
gLogger.info( "List of groups from request: %s" % groups )
if groups:
for g in groups:
userList = self.getUsersFromGroup( g )
gLogger.info( "Get users: %s from group %s" % ( userList , g ) )
if userList:
users.extend( userList )
gLogger.info( "Merged list of users from users and group %s" % users )
if not len( users ) > 0:
error = "Length of list of recipients is zero size"
gLogger.info( error )
self.write( { "success" : "false" , "error" : error } )
return
users = uniqueElements( users )
gLogger.info( "Final list of users to send message/mail: %s" % users )
sendDict = self.getMailDict( users )
self.write( self.sendMail( sendDict , subject , body , email ) )
def checkUnicode( self , text = None ):
"""
    Check if the value is unicode or not and return a properly converted string.
    The argument is a unicode or byte string; the return value is a UTF-8 encoded string.
"""
try:
text = text.decode( 'utf-8' , "replace" )
except :
pass
text = text.encode( "utf-8" )
gLogger.debug( text )
return text
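  # Example (hypothetical value): checkUnicode( u"caf\u00e9" ) returns the
  # UTF-8 encoded byte string "caf\xc3\xa9".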
def getUsersFromGroup( self , groupname = None ):
if not groupname:
gLogger.debug( "Argument groupname is missing" )
return None
users = gConfig.getValue( "/Registry/Groups/%s/Users" % groupname , [] )
gLogger.debug( "%s users: %s" % ( groupname , users ) )
if not len( users ) > 0:
gLogger.debug( "No users for group %s found" % groupname )
return None
return users
def getMailDict( self , names = None ):
"""
Convert list of usernames to dict like { e-mail : full name }
Argument is a list. Return value is a dict
"""
resultDict = dict()
if not names:
return resultDict
for user in names:
email = gConfig.getValue( "/Registry/Users/%s/Email" % user , "" )
gLogger.debug( "/Registry/Users/%s/Email - '%s'" % ( user , email ) )
      email = email.strip()
if not email:
gLogger.error( "Can't find value for option /Registry/Users/%s/Email" % user )
continue
fname = gConfig.getValue( "/Registry/Users/%s/FullName" % user , "" )
gLogger.debug( "/Registry/Users/%s/FullName - '%s'" % ( user , fname ) )
fname = fname.strip()
if not fname:
fname = user
gLogger.debug( "FullName is absent, name to be used: %s" % fname )
resultDict[ email ] = fname
return resultDict
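  # For illustration (hypothetical CS content): getMailDict( [ "jdoe" ] ) returns
  # { "jdoe@example.org" : "John Doe" } when /Registry/Users/jdoe defines
  # Email and FullName.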
def sendMail( self , sendDict = None , title = None , body = None , fromAddress = None ):
"""
    Sending an email using sendDict: { e-mail : name } as the address book.
    title and body are the e-mail's Subject and Body.
    fromAddress is an email address on behalf of whom the message is sent.
Return success/failure JSON structure
"""
if not sendDict:
result = ""
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not title:
result = "title argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not body:
result = "body argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not fromAddress:
result = "fromAddress argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
sentSuccess = list()
sentFailed = list()
gLogger.debug( "Initializing Notification client" )
ntc = NotificationClient( lambda x , timeout: RPCClient( x , timeout = timeout , static = True ) )
for email , name in sendDict.iteritems():
result = ntc.sendMail( email , title , body , fromAddress , False )
if not result[ "OK" ]:
error = name + ": " + result[ "Message" ]
sentFailed.append( error )
gLogger.error( "Sent failure: " , error )
else:
gLogger.info( "Successfully sent to %s" % name )
sentSuccess.append( name )
success = ", ".join( sentSuccess )
failure = "\n".join( sentFailed )
if len( success ) > 0 and len( failure ) > 0:
result = "Successfully sent e-mail to: "
result = result + success + "\n\nFailed to send e-mail to:\n" + failure
gLogger.debug( result )
return { "success" : "true" , "result" : result }
elif len( success ) > 0 and len( failure ) < 1:
result = "Successfully sent e-mail to: %s" % success
gLogger.debug( result )
return { "success" : "true" , "result" : result }
elif len( success ) < 1 and len( failure ) > 0:
result = "Failed to sent email to:\n%s" % failure
gLogger.debug( result )
return { "success" : "false" , "error" : result }
else:
result = "No messages were sent due technical failure"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
@asyncGen
def web_getComponentNames( self ):
result = None
userData = self.getSessionData()
setup = userData['setup'].split( '-' )[-1]
systemList = []
system = None
if "system" in self.request.arguments:
system = self.request.arguments[ 'system' ][-1]
componentTypes = ['Services', 'Agents']
if "ComponentType" in self.request.arguments:
componentTypes = self.request.arguments['ComponentType']
retVal = gConfig.getSections( '/Systems' )
if retVal['OK']:
systems = retVal['Value']
for i in systems:
for compType in componentTypes:
compPath = '/Systems/%s/%s/%s' % ( i, setup, compType )
retVal = gConfig.getSections( compPath )
if retVal['OK']:
components = retVal['Value']
systemList += [ {"Name":j} for j in components ]
result = { "success" : "true" , "result" : systemList }
else:
result = { "success" : "false" , "error" : result['Message'] }
self.finish( result )
@asyncGen
def web_getSelectionData( self ):
data = {}
userData = self.getSessionData()
setup = userData['setup'].split( '-' )[-1]
systemList = []
system = None
hosts = []
result = Registry.getHosts()
if result['OK']:
hosts = [ [i] for i in result['Value'] ]
data['Hosts'] = hosts
if "system" in self.request.arguments:
system = self.request.arguments[ 'system' ][-1]
componentTypes = ['Services', 'Agents']
if "ComponentType" in self.request.arguments:
componentTypes = self.request.arguments['ComponentType']
retVal = gConfig.getSections( '/Systems' )
components = []
componentNames = []
data['ComponentModule'] = []
data['ComponentName'] = []
if retVal['OK']:
systems = retVal['Value']
for i in systems:
for compType in componentTypes:
compPath = '/Systems/%s/%s/%s' % ( i, setup, compType )
retVal = gConfig.getSections( compPath )
if retVal['OK']:
records = retVal['Value']
componentNames += [ [cnames] for cnames in records ]
for record in records:
modulepath = "%s/%s/Module" % ( compPath, record )
module = gConfig.getValue( modulepath, '' )
if module != '' and module not in components:
components += [module]
data['ComponentModule'].append( [module] )
elif record not in components and module == '':
data['ComponentModule'].append( [record] )
components += [record]
data['ComponentName'] = componentNames
data['ComponentName'].sort()
data['ComponentModule'].sort()
else:
data = { "success" : "false" , "error" : result['Message'] }
self.finish( data )
@asyncGen
def web_ComponentLocation( self ):
rpcClient = RPCClient( "Framework/Monitoring" )
userData = self.getSessionData()
setup = userData['setup'].split( '-' )[-1]
hosts = []
result = Registry.getHosts()
if result['OK']:
hosts = result['Value']
componentTypes = ['Services', 'Agents']
if "ComponentType" in self.request.arguments:
componentTypes = self.request.arguments['ComponentType']
componentNames = []
if "ComponentName" in self.request.arguments:
componentNames = list( json.loads( self.request.arguments[ 'ComponentName' ][-1] ) )
componentModules = []
if "ComponentModule" in self.request.arguments:
componentModules = list( json.loads( self.request.arguments[ 'ComponentModule' ][-1] ) )
showAll = 0
if "showAll" in self.request.arguments:
showAll = int( self.request.arguments[ 'showAll' ][-1] )
selectedHosts = []
if "Hosts" in self.request.arguments: # we only use the selected host(s)
selectedHosts = list( json.loads( self.request.arguments[ 'Hosts' ][-1] ) )
retVal = gConfig.getSections( '/Systems' )
compMatching = {}
fullNames = []
if retVal['OK']:
systems = retVal['Value']
for i in systems:
for compType in componentTypes:
compPath = '/Systems/%s/%s/%s' % ( i, setup, compType )
retVal = gConfig.getSections( compPath )
if retVal['OK']:
components = retVal['Value']
for j in components:
path = '%s/%s' % ( i, j )
if j in componentNames:
fullNames += [path]
compMatching[path] = path
modulepath = "%s/%s/Module" % ( compPath, j )
module = gConfig.getValue( modulepath, '' )
if module != '' and module in componentModules:
fullNames += [path]
elif module == '' and j in componentModules:
fullNames += [path]
compMatching[path] = module if module != '' else path
records = []
if len( fullNames ) > 0:
condDict = {'Setup':userData['setup'], 'ComponentName':fullNames}
else:
if len( componentTypes ) < 2:
type = 'agent' if componentTypes[-1] == 'Agents' else 'service'
condDict = {'Setup':userData['setup'], 'Type':type}
else:
condDict = {'Setup':userData['setup']}
gLogger.debug( "condDict" + str( condDict ) )
retVal = rpcClient.getComponentsStatus( condDict )
today = datetime.datetime.today()
if retVal['OK']:
components = retVal['Value'][0]
for setup in components:
for type in components[ setup ]:
for name in components[ setup ][ type ]:
for component in components[ setup ][ type ][ name ]:
if len( selectedHosts ) > 0 and 'Host' in component and component['Host'] not in selectedHosts:
continue
elif 'Host' in component and component['Host'] not in hosts:
continue
if 'LastHeartbeat' in component:
dateDiff = today - component['LastHeartbeat']
else:
dateDiff = today - today
if showAll == 0 and dateDiff.days >= 2 and 'Host' in component:
continue
for conv in component:
component[conv] = str( component[conv] )
component['ComponentModule'] = compMatching[component['ComponentName']] if component['ComponentName'] in compMatching else component['ComponentName']
records += [component]
result = { "success" : "true" , "result" : records }
else:
result = { "success" : "false" , "error" : result['Message'] }
self.finish( result )
|
chaen/WebAppDIRAC
|
WebApp/handler/SystemAdministrationHandler.py
|
Python
|
gpl-3.0
| 26,553
|
[
"DIRAC"
] |
5fcd7beae06c68162212b440960959897ff7419d552845000e13b0151771b0bd
|
"""Support for Konnected devices."""
import copy
import hmac
from http import HTTPStatus
import json
import logging
from aiohttp.hdrs import AUTHORIZATION
from aiohttp.web import Request, Response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ACCESS_TOKEN,
CONF_BINARY_SENSORS,
CONF_DEVICES,
CONF_DISCOVERY,
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PIN,
CONF_PORT,
CONF_REPEAT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_TYPE,
CONF_ZONE,
STATE_OFF,
STATE_ON,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .config_flow import ( # Loading the config flow file will register the flow
CONF_DEFAULT_OPTIONS,
CONF_IO,
CONF_IO_BIN,
CONF_IO_DIG,
CONF_IO_SWI,
OPTIONS_SCHEMA,
)
from .const import (
CONF_ACTIVATION,
CONF_API_HOST,
CONF_BLINK,
CONF_INVERSE,
CONF_MOMENTARY,
CONF_PAUSE,
CONF_POLL_INTERVAL,
DOMAIN,
PIN_TO_ZONE,
STATE_HIGH,
STATE_LOW,
UNDO_UPDATE_LISTENER,
UPDATE_ENDPOINT,
ZONE_TO_PIN,
ZONES,
)
from .handlers import HANDLERS
from .panel import AlarmPanel
_LOGGER = logging.getLogger(__name__)
def ensure_pin(value):
"""Check if valid pin and coerce to string."""
if value is None:
raise vol.Invalid("pin value is None")
if PIN_TO_ZONE.get(str(value)) is None:
raise vol.Invalid("pin not valid")
return str(value)
def ensure_zone(value):
"""Check if valid zone and coerce to string."""
if value is None:
raise vol.Invalid("zone value is None")
    if str(value) not in ZONES:
raise vol.Invalid("zone not valid")
return str(value)
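# A minimal usage sketch (hypothetical values; assumes "1" is a key of
# PIN_TO_ZONE and a member of ZONES):
#   ensure_pin(1)    -> "1"
#   ensure_zone(1)   -> "1"
#   ensure_pin(None) raises vol.Invalid("pin value is None")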
def import_device_validator(config):
"""Validate zones and reformat for import."""
config = copy.deepcopy(config)
io_cfgs = {}
# Replace pins with zones
for conf_platform, conf_io in (
(CONF_BINARY_SENSORS, CONF_IO_BIN),
(CONF_SENSORS, CONF_IO_DIG),
(CONF_SWITCHES, CONF_IO_SWI),
):
for zone in config.get(conf_platform, []):
if zone.get(CONF_PIN):
zone[CONF_ZONE] = PIN_TO_ZONE[zone[CONF_PIN]]
del zone[CONF_PIN]
io_cfgs[zone[CONF_ZONE]] = conf_io
# Migrate config_entry data into default_options structure
config[CONF_IO] = io_cfgs
config[CONF_DEFAULT_OPTIONS] = OPTIONS_SCHEMA(config)
# clean up fields migrated to options
config.pop(CONF_BINARY_SENSORS, None)
config.pop(CONF_SENSORS, None)
config.pop(CONF_SWITCHES, None)
config.pop(CONF_BLINK, None)
config.pop(CONF_DISCOVERY, None)
config.pop(CONF_API_HOST, None)
config.pop(CONF_IO, None)
return config
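# For illustration (hypothetical config): a legacy device entry such as
#   {"id": "aabbccddeeff", "binary_sensors": [{"pin": "1", "type": "door"}]}
# comes out with the sensor keyed by zone ({"zone": PIN_TO_ZONE["1"], ...})
# and the per-platform lists folded into CONF_DEFAULT_OPTIONS.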
def import_validator(config):
"""Reformat for import."""
config = copy.deepcopy(config)
# push api_host into device configs
for device in config.get(CONF_DEVICES, []):
device[CONF_API_HOST] = config.get(CONF_API_HOST, "")
return config
# configuration.yaml schemas (legacy)
BINARY_SENSOR_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Required(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INVERSE, default=False): cv.boolean,
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
SENSOR_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Required(CONF_TYPE): vol.All(vol.Lower, vol.In(["dht", "ds18b20"])),
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_POLL_INTERVAL, default=3): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
SWITCH_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Exclusive(CONF_ZONE, "s_io"): ensure_zone,
vol.Exclusive(CONF_PIN, "s_io"): ensure_pin,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACTIVATION, default=STATE_HIGH): vol.All(
vol.Lower, vol.Any(STATE_HIGH, STATE_LOW)
),
vol.Optional(CONF_MOMENTARY): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_PAUSE): vol.All(vol.Coerce(int), vol.Range(min=10)),
vol.Optional(CONF_REPEAT): vol.All(vol.Coerce(int), vol.Range(min=-1)),
}
),
cv.has_at_least_one_key(CONF_PIN, CONF_ZONE),
)
DEVICE_SCHEMA_YAML = vol.All(
vol.Schema(
{
vol.Required(CONF_ID): cv.matches_regex("[0-9a-f]{12}"),
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSOR_SCHEMA_YAML]
),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSOR_SCHEMA_YAML]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCH_SCHEMA_YAML]),
vol.Inclusive(CONF_HOST, "host_info"): cv.string,
vol.Inclusive(CONF_PORT, "host_info"): cv.port,
vol.Optional(CONF_BLINK, default=True): cv.boolean,
vol.Optional(CONF_API_HOST, default=""): vol.Any("", cv.url),
vol.Optional(CONF_DISCOVERY, default=True): cv.boolean,
}
),
import_device_validator,
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
import_validator,
vol.Schema(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_API_HOST): vol.Url(),
vol.Optional(CONF_DEVICES): vol.All(
cv.ensure_list, [DEVICE_SCHEMA_YAML]
),
}
),
)
},
extra=vol.ALLOW_EXTRA,
)
YAML_CONFIGS = "yaml_configs"
PLATFORMS = [Platform.BINARY_SENSOR, Platform.SENSOR, Platform.SWITCH]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Konnected platform."""
if (cfg := config.get(DOMAIN)) is None:
cfg = {}
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {
CONF_ACCESS_TOKEN: cfg.get(CONF_ACCESS_TOKEN),
CONF_API_HOST: cfg.get(CONF_API_HOST),
CONF_DEVICES: {},
}
hass.http.register_view(KonnectedView)
# Check if they have yaml configured devices
if CONF_DEVICES not in cfg:
return True
for device in cfg.get(CONF_DEVICES, []):
        # Attempt to import the cfg. Use
        # hass.async_create_task to avoid a deadlock.
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=device
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up panel from a config entry."""
client = AlarmPanel(hass, entry)
# creates a panel data store in hass.data[DOMAIN][CONF_DEVICES]
await client.async_save_data()
# if the cfg entry was created we know we could connect to the panel at some point
# async_connect will handle retries until it establishes a connection
await client.async_connect()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# config entry specific data to enable unload
hass.data[DOMAIN][entry.entry_id] = {
UNDO_UPDATE_LISTENER: entry.add_update_listener(async_entry_updated)
}
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN][CONF_DEVICES].pop(entry.data[CONF_ID])
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def async_entry_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload the config entry when options change."""
await hass.config_entries.async_reload(entry.entry_id)
class KonnectedView(HomeAssistantView):
"""View creates an endpoint to receive push updates from the device."""
url = UPDATE_ENDPOINT
name = "api:konnected"
requires_auth = False # Uses access token from configuration
def __init__(self):
"""Initialize the view."""
@staticmethod
def binary_value(state, activation):
"""Return binary value for GPIO based on state and activation."""
if activation == STATE_HIGH:
return 1 if state == STATE_ON else 0
return 0 if state == STATE_ON else 1
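    # For example: binary_value(STATE_ON, STATE_HIGH) returns 1, while
    # binary_value(STATE_ON, STATE_LOW) returns 0 (active-low wiring inverts).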
async def update_sensor(self, request: Request, device_id) -> Response:
"""Process a put or post."""
hass = request.app["hass"]
data = hass.data[DOMAIN]
auth = request.headers.get(AUTHORIZATION)
tokens = []
if hass.data[DOMAIN].get(CONF_ACCESS_TOKEN):
tokens.extend([hass.data[DOMAIN][CONF_ACCESS_TOKEN]])
tokens.extend(
[
entry.data[CONF_ACCESS_TOKEN]
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data.get(CONF_ACCESS_TOKEN)
]
)
if auth is None or not next(
(True for token in tokens if hmac.compare_digest(f"Bearer {token}", auth)),
False,
):
return self.json_message(
"unauthorized", status_code=HTTPStatus.UNAUTHORIZED
)
try: # Konnected 2.2.0 and above supports JSON payloads
payload = await request.json()
except json.decoder.JSONDecodeError:
            _LOGGER.error(
                "Your Konnected device software may be out of "
                "date. Visit https://help.konnected.io for "
                "updating instructions"
            )
            # without a JSON payload there is nothing to process
            return self.json_message(
                "malformed JSON", status_code=HTTPStatus.BAD_REQUEST
            )
if (device := data[CONF_DEVICES].get(device_id)) is None:
return self.json_message(
"unregistered device", status_code=HTTPStatus.BAD_REQUEST
)
if (panel := device.get("panel")) is not None:
# connect if we haven't already
hass.async_create_task(panel.async_connect())
try:
zone_num = str(payload.get(CONF_ZONE) or PIN_TO_ZONE[payload[CONF_PIN]])
payload[CONF_ZONE] = zone_num
zone_data = (
device[CONF_BINARY_SENSORS].get(zone_num)
or next(
(s for s in device[CONF_SWITCHES] if s[CONF_ZONE] == zone_num), None
)
or next(
(s for s in device[CONF_SENSORS] if s[CONF_ZONE] == zone_num), None
)
)
except KeyError:
zone_data = None
if zone_data is None:
return self.json_message(
"unregistered sensor/actuator", status_code=HTTPStatus.BAD_REQUEST
)
zone_data["device_id"] = device_id
for attr in ("state", "temp", "humi", "addr"):
value = payload.get(attr)
handler = HANDLERS.get(attr)
if value is not None and handler:
hass.async_create_task(handler(hass, zone_data, payload))
return self.json_message("ok")
async def get(self, request: Request, device_id) -> Response:
"""Return the current binary state of a switch."""
hass = request.app["hass"]
data = hass.data[DOMAIN]
if not (device := data[CONF_DEVICES].get(device_id)):
return self.json_message(
f"Device {device_id} not configured", status_code=HTTPStatus.NOT_FOUND
)
if (panel := device.get("panel")) is not None:
# connect if we haven't already
hass.async_create_task(panel.async_connect())
# Our data model is based on zone ids but we convert from/to pin ids
# based on whether they are specified in the request
try:
zone_num = str(
request.query.get(CONF_ZONE) or PIN_TO_ZONE[request.query[CONF_PIN]]
)
zone = next(
switch
for switch in device[CONF_SWITCHES]
if switch[CONF_ZONE] == zone_num
)
except StopIteration:
zone = None
except KeyError:
zone = None
zone_num = None
if not zone:
target = request.query.get(
CONF_ZONE, request.query.get(CONF_PIN, "unknown")
)
return self.json_message(
f"Switch on zone or pin {target} not configured",
status_code=HTTPStatus.NOT_FOUND,
)
resp = {}
if request.query.get(CONF_ZONE):
resp[CONF_ZONE] = zone_num
else:
resp[CONF_PIN] = ZONE_TO_PIN[zone_num]
# Make sure entity is setup
if zone_entity_id := zone.get(ATTR_ENTITY_ID):
resp["state"] = self.binary_value(
hass.states.get(zone_entity_id).state, zone[CONF_ACTIVATION]
)
return self.json(resp)
_LOGGER.warning("Konnected entity not yet setup, returning default")
resp["state"] = self.binary_value(STATE_OFF, zone[CONF_ACTIVATION])
return self.json(resp)
async def put(self, request: Request, device_id) -> Response:
"""Receive a sensor update via PUT request and async set state."""
return await self.update_sensor(request, device_id)
async def post(self, request: Request, device_id) -> Response:
"""Receive a sensor update via POST request and async set state."""
return await self.update_sensor(request, device_id)
|
rohitranjan1991/home-assistant
|
homeassistant/components/konnected/__init__.py
|
Python
|
mit
| 14,301
|
[
"VisIt"
] |
204aef4d4fa00d45f5226b7a735533ae6f31943ae0b3b7e857e84b2822e14453
|
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_rf_gridsearch_sorting_metrics:
"""
PUBDEV-2967: gridsearch sorting metric with cross-validation.
This class is created to test that when cross-validation is enabled, the gridsearch models are returned sorted
according to the cross-validation metrics.
Test Descriptions:
    a. grab all truly griddable parameters and randomly or manually set the parameter values.
    b. Next, build H2O random forest models using grid search. No model is built for bad hyper-parameter
    values. We should instead get a warning/error message printed out.
c. Check and make sure that the models are returned sorted with the correct cross-validation metrics.
Note that for hyper-parameters containing all legal parameter names and parameter value lists with legal
and illegal values, grid-models should be built for all combinations of legal parameter values. For
illegal parameter values, a warning/error message should be printed out to warn the user but the
program should not throw an exception;
We will re-use the dataset generation methods for GLM. There will be only one data set for classification.
"""
# parameters set by users, change with care
max_grid_model = 25 # maximum number of grid models generated before adding max_runtime_secs
diff = 1e-10 # comparison threshold
curr_time = str(round(time.time())) # store current timestamp, used as part of filenames.
seed = int(round(time.time()))
    # parameters denoting filenames of interest that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/multinomial_training1_set.csv"
json_filename = "gridsearch_rf_hyper_parameter_" + curr_time + ".json"
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
# following parameters are used to generate hyper-parameters
max_int_val = 10 # maximum size of random integer values
min_int_val = 0 # minimum size of random integer values
max_int_number = 2 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = 0 # minimum size of random float values
max_real_number = 2 # maximum number of real grid values to generate
time_scale = 2 # maximum runtime scale
    family = 'multinomial'  # choose family to be multinomial for this classification test
training_metric = 'logloss' # metrics by which we evaluate model performance
test_name = "pyunit_rf_gridsearch_sorting_metrics.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
test_failed = 0 # count total number of tests that have failed
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = dict()
hyper_params["balance_classes"] = [True, False]
hyper_params["fold_assignment"] = ["AUTO", "Random", "Modulo", "Stratified"]
hyper_params["stopping_metric"] = ['logloss']
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['validation_frame', 'response_column', 'fold_column', 'offset_column',
'col_sample_rate_change_per_level', 'sample_rate_per_class', 'col_sample_rate_per_tree',
'nbins', 'nbins_top_level', 'nbins_cats', 'seed', 'class_sampling_factors',
'max_after_balance_size', 'min_split_improvement', 'histogram_type', 'mtries',
'weights_column', 'min_rows', 'r2_stopping', 'score_tree_interval']
params_zero_one = ["sample_rate"]
params_more_than_zero = ['ntrees', 'max_depth']
params_more_than_one = []
params_zero_positive = ['max_runtime_secs', 'stopping_rounds', 'stopping_tolerance'] # >= 0
final_hyper_params = dict() # store the final hyper-parameters that we are going to use
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
    gridable_defaults = []      # store the griddable parameter default values
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
correct_model_number = 0 # count number of models built with bad hyper-parameter specification
true_correct_model_number = 0 # count number of models built with good hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
def __init__(self):
self.setup_data()
self.setup_model()
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
# create and clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# preload data sets
self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
# set data set indices for predictors and response
self.y_index = self.training1_data.ncol-1
self.x_indices = list(range(self.y_index))
# set response to be categorical for classification tasks
self.training1_data[self.y_index] = self.training1_data[self.y_index].round().asfactor()
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_model(self):
"""
        This function sets up the gridsearch hyper-parameters that will be used later on:
1. It will first try to grab all the parameters that are griddable and parameters used by random forest.
2. It will find the intersection of parameters that are both griddable and used by random forest.
3. There are several extra parameters that are used by random forest that are denoted as griddable but actually
are not. These parameters have to be discovered manually and they are captured in
self.exclude_parameter_lists.
        4. We generate the gridsearch hyper-parameters. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
"""
# build bare bone model to get all parameters
model = H2ORandomForestEstimator(ntrees=self.max_int_val, nfolds=self.nfolds, score_tree_interval=0)
model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
self.model_run_time = pyunit_utils.find_grid_runtime([model]) # find model train time
print("Time taken to build a base barebone model is {0}".format(self.model_run_time))
summary_list = model._model_json["output"]["model_summary"]
num_trees = summary_list["number_of_trees"][0]
if num_trees == 0:
self.min_runtime_per_tree = self.model_run_time
else:
self.min_runtime_per_tree = self.model_run_time/num_trees
        # grab all griddable parameters and their types
(self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.get_gridables(model._model_json["parameters"])
# randomly generate griddable parameters including values outside legal range, like setting alpha values to
        # be outside the legal range of 0 and 1, etc.
(self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params,
self.exclude_parameter_lists,
self.gridable_parameters, self.gridable_types, self.gridable_defaults,
random.randint(1, self.max_int_number),
self.max_int_val, self.min_int_val,
random.randint(1, self.max_real_number),
self.max_real_val, self.min_real_val)
# scale the max_runtime_secs parameter and others as well to make sure they make sense
time_scale = self.time_scale * self.model_run_time
if "max_runtime_secs" in list(self.hyper_params):
self.hyper_params["max_runtime_secs"] = [time_scale * x for x
in self.hyper_params["max_runtime_secs"]]
# generate a new final_hyper_params which only takes a subset of all griddable parameters while
# hyper_params take all griddable parameters and generate the grid search hyper-parameters
[self.possible_number_models, self.final_hyper_params] = \
pyunit_utils.check_and_count_models(self.hyper_params, self.params_zero_one, self.params_more_than_zero,
self.params_more_than_one, self.params_zero_positive,
self.max_grid_model)
# must add max_runtime_secs to restrict unit test run time and as a promise to Arno to test for this
if ("max_runtime_secs" not in list(self.final_hyper_params)) and \
("max_runtime_secs" in list(self.hyper_params)):
self.final_hyper_params["max_runtime_secs"] = self.hyper_params["max_runtime_secs"]
len_good_time = len([x for x in self.hyper_params["max_runtime_secs"] if (x >= 0)])
self.possible_number_models = self.possible_number_models*len_good_time
# write out the hyper-parameters used into json files.
pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
self.final_hyper_params)
def test_rf_gridsearch_sorting_metrics(self):
"""
test_rf_gridsearch_sorting_metrics performs the following:
        b. build H2O random forest models using grid search. No model is built for bad hyper-parameter
        values. We should instead get a warning/error message printed out.
c. Check and make sure that the models are returned sorted with the correct cross-validation metrics.
"""
if self.possible_number_models > 0:
print("*******************************************************************************************")
print("test_rf_gridsearch_sorting_metrics for random forest ")
h2o.cluster_info()
print("Hyper-parameters used here is {0}".format(self.final_hyper_params))
# start grid search
grid_model = H2OGridSearch(H2ORandomForestEstimator(nfolds=self.nfolds, seed=self.seed,
score_tree_interval=0),
hyper_params=self.final_hyper_params)
grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
result_table = grid_model._grid_json["summary_table"]
model_index = 0
grid_model_metrics = []
diff = 0 # calculate difference between gridsearch model metrics and manually extracted model.
diff_train = 0 # calculate difference between training and cross-validation metrics
# grab performance metric for each model of grid_model and collect correct sorting metrics by hand
for each_model in grid_model:
grid_model_metric = float(result_table[self.training_metric][model_index])
grid_model_metrics.append(grid_model_metric)
manual_metric = each_model._model_json["output"]["cross_validation_metrics"]._metric_json["logloss"]
                if not(type(grid_model_metric) == unicode) and not(type(manual_metric) == unicode):
diff += abs(grid_model_metric - manual_metric)
manual_training_metric = each_model._model_json["output"]["training_metrics"]._metric_json["logloss"]
                if not(type(grid_model_metric) == unicode) and not(type(manual_training_metric) == unicode):
diff_train += abs(grid_model_metric-manual_training_metric)
print("grid model logloss: {0}, grid model training logloss: "
"{1}".format(grid_model_metric, manual_training_metric))
model_index += 1
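            # Fail if: (1) the gridsearch-reported CV metrics disagree with the manually
            # extracted CV metrics, (2) the models are not sorted in ascending order of
            # CV logloss, or (3) the CV metrics are essentially identical to the training
            # metrics, which would suggest sorting was done on training metrics instead.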
if (diff > self.diff) or not(grid_model_metrics == sorted(grid_model_metrics)) or (diff_train < self.diff):
self.test_failed = 1
print("test_rf_gridsearch_sorting_metrics for random forest has failed!")
if self.test_failed == 0:
print("test_rf_gridsearch_sorting_metrics for random forest has passed!")
def test_gridsearch_sorting_metrics():
"""
Create and instantiate class and perform tests specified for random forest
:return: None
"""
test_rf_grid = Test_rf_gridsearch_sorting_metrics()
test_rf_grid.test_rf_gridsearch_sorting_metrics()
sys.stdout.flush()
if test_rf_grid.test_failed: # exit with error if any tests have failed
sys.exit(1)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gridsearch_sorting_metrics)
else:
test_gridsearch_sorting_metrics()
|
michalkurka/h2o-3
|
h2o-py/dynamic_tests/testdir_algos/rf/pyunit_rf_gridsearch_sorting_metrics.py
|
Python
|
apache-2.0
| 14,484
|
[
"Gaussian"
] |
5bc6d00ccef89b768c2360d2b9168d5b4ab256de5efea8f63f08b1b5b55badff
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # command line argument handling
from low import * # custom functions, written by myself
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f fasta file" )
stdout( " -i seq ID" )
stdout( " -u startpos (BLAST-like, starting at 1)" )
stdout( " -v endpos (BLAST-like)" )
stdout( " -x do not count gaps (BLAST-like)" )
stdout( " -C return complement (antisense strand sequence instead of sense)" )
stdout( " -R return reverse sequence" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:i:u:v:xCR" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {'countGaps':True, 'complement':False, 'reverse':False}
for key, value in keys:
if key == '-f': args['fastafile'] = value
if key == '-i': args['seqid'] = value
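        # -u is given 1-based (BLAST-like); store it as a 0-based index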
if key == '-u': args['startpos'] = int(value) -1
if key == '-v': args['endpos'] = int(value)
        if key == '-x': args['countGaps'] = False
if key == '-C': args['complement'] = True
if key == '-R': args['reverse'] = True
for key in ['fastafile', 'seqid']:
if key.endswith("file"):
if not args_file_exists(args, key): show_help()
elif key.endswith("dir"):
if not args_dir_exists(args, key): show_help()
elif not args.has_key(key):
print >> sys.stderr, "missing argument", key
show_help()
return args
# =============================================================================
def statusbar(current, total, message="", width=40):
progress = 1.0*current/total
if message != "": message = "[" + message + "]"
progressbar = "=" * int(progress*width)
while len(progressbar) < width: progressbar += " "
sys.stderr.write("\r 0% " + progressbar + " 100% " + message)
if progress == 1.0: sys.stderr.write("\n")
# =============================================================================
def extract_sequence(args):
fo = open(args['fastafile'])
seqid = args['seqid']
found = False
for line in fo:
if line.startswith('>'):
line = line.rstrip()
fid = line[1:].split()[0]
if fid == seqid: found = True
elif found: break
if found: print line
fo.close()
# =============================================================================
def extract_fragment(args):
fo = open(args['fastafile'])
seqid = args['seqid']
pos = 0
found = False
startpos = args.get('startpos', 0)
endpos = args.get('endpos', False)
seq = ""
for line in fo:
line = line.rstrip()
if line.startswith('>'):
fid = line[1:].split()[0]
if fid == seqid:
found = True
out = '>' + fid
if args.has_key('startpos') or args.has_key('endpos'): out += " %s:%s" %(startpos, endpos)
if args['reverse']: out += " reverse"
if args['complement']: out += " complement"
print out
elif found: break
else: continue
elif found:
if not args['countGaps']: line = line.replace('-','')
            if endpos is not False and pos > endpos: break
if pos < startpos and pos+len(line) < startpos:
pos += len(line)
continue
if pos < startpos and pos+len(line) >= startpos: out = line[startpos-pos:]
elif pos >= startpos: out = line
if not endpos == False and endpos < pos+len(line): out = out[:endpos-pos]
if not args['reverse'] and not args['complement']:
print out
else:
seq += out
pos += len(line)
fo.close()
if args['reverse'] or args['complement']:
seq = Seq(seq, IUPAC.unambiguous_dna)
if args['reverse'] and args['complement']:
seq = seq.reverse_complement()
elif args['complement']: seq = seq.complement()
elif args['reverse']: seq = seq[::-1]
seq = str(seq)
print seq
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
if args.has_key('startpos') or args.has_key('endpos'):
extract_fragment(args)
else:
extract_sequence(args)
# =============================================================================
args = handle_arguments()
main( args )
|
lotharwissler/bioinformatics
|
python/fasta/fasta-extract-fragment.py
|
Python
|
mit
| 5,139
|
[
"BLAST"
] |
e4eb8eb5883dfa7c378917ecaa1a80442848a34fbf7a5a197dc5fdaa01e654ed
|
""" Class that contains client access to the StorageManagerDB handler.
"""
import six
import random
import errno
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.DErrno import cmpError
from DIRAC.Core.Utilities.Proxy import UserProxy
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers
from DIRAC.Resources.Storage.StorageElement import StorageElement
def getFilesToStage(lfnList, jobState=None, checkOnlyTapeSEs=None, jobLog=None):
"""Utility that returns out of a list of LFNs those files that are offline,
and those for which at least one copy is online
"""
if not lfnList:
return S_OK({"onlineLFNs": [], "offlineLFNs": {}, "failedLFNs": [], "absentLFNs": {}})
dm = DataManager()
if isinstance(lfnList, six.string_types):
lfnList = [lfnList]
lfnListReplicas = dm.getReplicasForJobs(lfnList, getUrl=False)
if not lfnListReplicas["OK"]:
return lfnListReplicas
offlineLFNsDict = {}
onlineLFNs = {}
offlineLFNs = {}
absentLFNs = {}
failedLFNs = set()
if lfnListReplicas["Value"]["Failed"]:
# Check if files are not existing
for lfn, reason in lfnListReplicas["Value"]["Failed"].items():
# FIXME: awful check until FC returns a proper error
if cmpError(reason, errno.ENOENT) or "No such file" in reason:
# The file doesn't exist, job must be Failed
# FIXME: it is not possible to return here an S_ERROR(), return the message only
absentLFNs[lfn] = S_ERROR(errno.ENOENT, "File not in FC")["Message"]
if absentLFNs:
return S_OK(
{
"onlineLFNs": list(onlineLFNs),
"offlineLFNs": offlineLFNsDict,
"failedLFNs": list(failedLFNs),
"absentLFNs": absentLFNs,
}
)
return S_ERROR("Failures in getting replicas")
lfnListReplicas = lfnListReplicas["Value"]["Successful"]
# If a file is reported here at a tape SE, it is not at a disk SE as we use disk in priority
    # We shall check all files anyway in order to make sure they exist
seToLFNs = dict()
for lfn, ses in lfnListReplicas.items():
for se in ses:
seToLFNs.setdefault(se, list()).append(lfn)
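    # seToLFNs now maps each SE name to the list of LFNs that have a replica there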
if seToLFNs:
if jobState:
# Get user name and group from the job state
userName = jobState.getAttribute("Owner")
if not userName["OK"]:
return userName
userName = userName["Value"]
userGroup = jobState.getAttribute("OwnerGroup")
if not userGroup["OK"]:
return userGroup
userGroup = userGroup["Value"]
else:
userName = None
userGroup = None
# Check whether files are Online or Offline, or missing at SE
result = _checkFilesToStage(
seToLFNs,
onlineLFNs,
offlineLFNs,
absentLFNs, # pylint: disable=unexpected-keyword-arg
checkOnlyTapeSEs=checkOnlyTapeSEs,
jobLog=jobLog,
proxyUserName=userName,
proxyUserGroup=userGroup,
executionLock=True,
)
if not result["OK"]:
return result
failedLFNs = set(lfnList) - set(onlineLFNs) - set(offlineLFNs) - set(absentLFNs)
# Get the online SEs
dmsHelper = DMSHelpers()
onlineSEs = set(se for ses in onlineLFNs.values() for se in ses)
onlineSites = set(dmsHelper.getLocalSiteForSE(se).get("Value") for se in onlineSEs) - {None}
for lfn in offlineLFNs:
ses = offlineLFNs[lfn]
if len(ses) == 1:
# No choice, let's go
offlineLFNsDict.setdefault(ses[0], list()).append(lfn)
continue
# Try and get an SE at a site already with online files
found = False
if onlineSites:
# If there is at least one online site, select one
for se in ses:
site = dmsHelper.getLocalSiteForSE(se)
if site["OK"]:
if site["Value"] in onlineSites:
offlineLFNsDict.setdefault(se, list()).append(lfn)
found = True
break
# No online site found in common, select randomly
if not found:
offlineLFNsDict.setdefault(random.choice(ses), list()).append(lfn)
return S_OK(
{
"onlineLFNs": list(onlineLFNs),
"offlineLFNs": offlineLFNsDict,
"failedLFNs": list(failedLFNs),
"absentLFNs": absentLFNs,
"onlineSites": onlineSites,
}
)
def _checkFilesToStage(
seToLFNs,
onlineLFNs,
offlineLFNs,
absentLFNs,
checkOnlyTapeSEs=None,
jobLog=None,
proxyUserName=None,
proxyUserGroup=None,
executionLock=None,
):
"""
Checks on SEs whether the file is NEARLINE or ONLINE
onlineLFNs, offlineLFNs and absentLFNs are modified to contain the files found online
If checkOnlyTapeSEs is True, disk replicas are not checked
As soon as a replica is found Online for a file, no further check is made
"""
# Only check on storage if it is a tape SE
if jobLog is None:
logger = gLogger
else:
logger = jobLog
if checkOnlyTapeSEs is None:
# Default value is True
checkOnlyTapeSEs = True
failed = {}
for se, lfnsInSEList in seToLFNs.items():
# If we have found already all files online at another SE, no need to check the others
# but still we want to set the SE as Online if not a TapeSE
vo = getVOForGroup(proxyUserGroup)
seObj = StorageElement(se, vo=vo)
status = seObj.getStatus()
if not status["OK"]:
return status
tapeSE = status["Value"]["TapeSE"]
diskSE = status["Value"]["DiskSE"]
# If requested to check only Tape SEs and the file is at a diskSE, we guess it is Online...
filesToCheck = []
for lfn in lfnsInSEList:
# If the file had already been found accessible at an SE, only check that this one is on disk
diskIsOK = checkOnlyTapeSEs or (lfn in onlineLFNs)
if diskIsOK and diskSE:
onlineLFNs.setdefault(lfn, []).append(se)
elif not diskIsOK or (tapeSE and (lfn not in onlineLFNs)):
filesToCheck.append(lfn)
if not filesToCheck:
continue
# We have to use a new SE object because it caches the proxy!
with UserProxy(
proxyUserName=proxyUserName, proxyUserGroup=proxyUserGroup, executionLock=executionLock
) as proxyResult:
if proxyResult["OK"]:
fileMetadata = StorageElement(se, vo=vo).getFileMetadata(filesToCheck)
else:
fileMetadata = proxyResult
if not fileMetadata["OK"]:
failed[se] = dict.fromkeys(filesToCheck, fileMetadata["Message"])
else:
if fileMetadata["Value"]["Failed"]:
failed[se] = fileMetadata["Value"]["Failed"]
# is there at least one replica online?
for lfn, mDict in fileMetadata["Value"]["Successful"].items():
# SRM returns Cached, but others may only return Accessible
if mDict.get("Cached", mDict["Accessible"]):
onlineLFNs.setdefault(lfn, []).append(se)
elif tapeSE:
# A file can be staged only at Tape SE
offlineLFNs.setdefault(lfn, []).append(se)
else:
# File not available at a diskSE... we shall retry later
pass
# Doesn't matter if some files are Offline if they are also online
for lfn in set(offlineLFNs) & set(onlineLFNs):
offlineLFNs.pop(lfn)
# If the file was found staged, ignore possible errors, but print out errors
for se, failedLfns in list(failed.items()):
logger.error("Errors when getting files metadata", "at %s" % se)
for lfn, reason in list(failedLfns.items()):
if lfn in onlineLFNs:
logger.warn(reason, "for %s, but there is an online replica" % lfn)
failed[se].pop(lfn)
else:
logger.error(reason, "for %s, no online replicas" % lfn)
if cmpError(reason, errno.ENOENT):
absentLFNs.setdefault(lfn, []).append(se)
failed[se].pop(lfn)
if not failed[se]:
failed.pop(se)
# Find the files that do not exist at SE
if failed:
logger.error(
"Error getting metadata", "for %d files" % len(set(lfn for lfnList in failed.values() for lfn in lfnList))
)
    # Format the error for absent files
    for lfn in absentLFNs:
        seList = absentLFNs[lfn]
        # FIXME: it is not possible to return here an S_ERROR(), return the message only
        absentLFNs[lfn] = S_ERROR(errno.ENOENT, "File not at %s" % ",".join(sorted(seList)))["Message"]
return S_OK()
@createClient("StorageManagement/StorageManager")
class StorageManagerClient(Client):
"""This is the client to the StorageManager service, so even if it is not seen, it exposes all its RPC calls"""
def __init__(self, **kwargs):
super(StorageManagerClient, self).__init__(**kwargs)
self.setServer("StorageManagement/StorageManager")
|
DIRACGrid/DIRAC
|
src/DIRAC/StorageManagementSystem/Client/StorageManagerClient.py
|
Python
|
gpl-3.0
| 9,832
|
[
"DIRAC"
] |
1fb4e6738cf50306ef2703fac0a756503e24e9a8c7cbf453df6f4fba3045714a
|
try:
import cPickle as pickle
except ImportError:
import pickle
from stagecraft.tools import get_credentials_or_die
import requests
import django
django.setup()
from collections import defaultdict
from django.db import connection
from pprint import pprint
from stagecraft.apps.organisation.models import Node, NodeType
from stagecraft.apps.dashboards.models import Dashboard
from stagecraft.tools.spreadsheets import SpreadsheetMunger
# These may not be 100% accurate however the derived
# typeOf will be overwritten with more certain information
# based on iterating through all tx rows in build_up_node_dict.
# We use this to get the full org graph with types even when orgs are
# not associated with a transaction in txex. This is the best guess mapping.
govuk_to_pp_type = {
"Advisory non-departmental public body": 'agency',
"Tribunal non-departmental public body": 'agency',
"Court": 'agency',
"Sub-organisation": 'agency',
"Executive agency": 'agency',
"Devolved administration": 'agency',
"Ministerial department": 'department',
"Non-ministerial department": 'agency',
"Executive office": 'agency',
"Civil Service": 'agency',
"Other": 'agency',
"Executive non-departmental public body": 'agency',
"Independent monitoring body": 'agency',
"Public corporation": 'agency',
"Ad-hoc advisory group": 'agency'
}
def get_govuk_organisations():
"""
Fetch organisations from the GOV.UK API. This is the canonical source.
"""
try:
with open('govuk_orgs.pickle', 'rb') as pickled:
results = pickle.load(pickled)
except IOError:
def get_page(page):
response = requests.get('https://www.gov.uk/api/organisations?page={}'.format(page))
return response.json()
first_page = get_page(1)
results = first_page['results']
for page_num in range(2, first_page['pages'] + 1):
page = get_page(page_num)
results = results + page['results']
# Remove any organisations that have closed.
results = [org for org in results if org['details']['closed_at'] is None]
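        # Cache the results locally so subsequent runs don't have to hit the GOV.UK API.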
with open('govuk_orgs.pickle', 'wb') as pickled:
pickle.dump(results, pickled, pickle.HIGHEST_PROTOCOL)
return results
def load_data(client_email, private_key):
spreadsheets = SpreadsheetMunger({
'names_transaction_name': 11,
'names_transaction_slug': 12,
'names_service_name': 9,
'names_service_slug': 10,
'names_tx_id': 19,
'names_other_notes': 17,
'names_description': 8
})
records = spreadsheets.load(client_email, private_key)
govuk_response = get_govuk_organisations()
return records, govuk_response
def make_node(id, title, slug, abbr, type):
if abbr is not None:
abbr = abbr.encode('utf-8').strip()
if abbr == '':
abbr = None
return (
id,
title.encode('utf-8').strip(),
slug,
abbr,
type,
)
def govuk_graph(entries):
nodes = set()
edges = set()
for entry in entries:
node_slug = entry['details']['slug']
node_id = 'govuk-{}'.format(node_slug).lower()
nodes.add(make_node(
node_id,
entry['title'],
node_slug,
entry['details']['abbreviation'],
govuk_to_pp_type[entry['format']],
))
for child in entry['child_organisations']:
child_id = 'govuk-{}'.format(child['id'].split('/')[-1]).lower()
edges.add((
node_id,
child_id,
))
return nodes, edges
def index_nodes(nodes, field_index):
indexed = {}
for node in nodes:
value = node[field_index]
if value in indexed:
print('Overwriting node in index. [node[{}]: {}]'.format(
field_index, value))
if value is not None and value != '':
indexed[value.lower()] = node
return indexed
def govuk_node_for_record(record, by_title, by_abbr):
parent_org = record['agency'] if 'agency' in record else record['department']
abbr = parent_org['abbr'].lower()
title = parent_org['name'].lower()
node = by_abbr.get(abbr, by_title.get(title, None))
if node is None:
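        # Manual fix-ups for spreadsheet abbreviations that don't match GOV.UK titles
        # or abbreviations, mostly organisations that have since been renamed or merged.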
if abbr == 'inss':
node = by_title['the insolvency service']
elif abbr == 'nmo':
node = by_title['national measurement and regulation office']
elif abbr == 'tsol':
node = by_title['government legal department']
elif abbr == 'research councils':
node = by_abbr['beis']
elif abbr == 'visit england':
node = by_title['visitengland']
elif abbr == 'arts council':
node = by_title['arts council england']
elif abbr.startswith('planning portal / '):
node = by_abbr['mhclg']
elif abbr == 'ha':
node = by_title['highways england']
elif abbr == 'english heritage':
node = by_title['historic england']
elif abbr == 'hmlr':
node = by_title['hm land registry']
elif abbr == "dh":
node = by_abbr['dhsc']
elif abbr == "dclg":
node = by_abbr['mhclg']
elif abbr == "national college for teaching and leadership":
node = by_abbr['dfe']
elif abbr == "glaa":
node = by_abbr['home office']
return node
def transactions_graph(records, by_title, by_abbr):
nodes = set()
edges = set()
node_to_transactions = defaultdict(list)
for record in records:
parent_node = govuk_node_for_record(record, by_title, by_abbr)
if 'service' not in record:
print("'{}' doesn't have a service attached".format(record['name']))
else:
service = record['service']
service_slug = service['slug']
service_id = 'service-{}'.format(service_slug).lower()
nodes.add(make_node(
service_id,
service['name'],
service_slug,
None,
'service',
))
edges.add((parent_node[0], service_id))
if 'transaction' in record:
transaction = record['transaction']
transaction_id = 'transaction-{}-{}'.format(service_slug, transaction['slug']).lower()
nodes.add(make_node(
transaction_id,
transaction['name'],
transaction['slug'],
None,
'transaction',
))
edges.add((service_id, transaction_id))
node_to_transactions[transaction_id].append(record['tx_id'])
else:
node_to_transactions[service_id].append(record['tx_id'])
return nodes, edges, node_to_transactions
def load_organisations(client_email, private_key):
records, govuk_response = load_data(client_email, private_key)
govuk_nodes, govuk_edges = govuk_graph(govuk_response)
by_title = index_nodes(govuk_nodes, 1)
by_abbr = index_nodes(govuk_nodes, 3)
tx_nodes, tx_edges, node_to_transactions = transactions_graph(records, by_title, by_abbr)
nodes = govuk_nodes | tx_nodes
edges = govuk_edges | tx_edges
print('Duplicated nodes')
print(govuk_nodes & tx_nodes)
print('Duplicated edges')
print(govuk_edges & tx_edges)
print('Nodes with multiple tx dashboards')
pprint([(node_id, transactions) for node_id,
transactions in node_to_transactions.items()
if len(transactions) > 1], width=1)
return nodes, edges, node_to_transactions
def clear_organisation_relations():
past_relations = {}
for dashboard in Dashboard.objects.all():
if dashboard.organisation is not None:
past_relations[dashboard.id] = {
'id': dashboard.organisation.id,
'name': dashboard.organisation.name,
'slug': dashboard.organisation.slug,
'abbr': dashboard.organisation.abbreviation,
}
dashboard.organisation = None
dashboard.transaction_cache = None
dashboard.service_cache = None
dashboard.agency_cache = None
dashboard.department_cache = None
dashboard.save()
Node.objects.all().delete()
return past_relations
def node_types():
return {
'department': NodeType.objects.get_or_create(name='department')[0],
'agency': NodeType.objects.get_or_create(name='agency')[0],
'service': NodeType.objects.get_or_create(name='service')[0],
'transaction': NodeType.objects.get_or_create(name='transaction')[0],
}
def create_edge(parent_id, child_id):
cursor = connection.cursor()
cursor.execute(
'INSERT INTO organisation_node_parents(from_node_id, to_node_id) VALUES(%s,%s);', # noqa
[child_id, parent_id]
)
def create_nodes(nodes, edges, type_to_NodeType):
nodes_to_db = {}
for node in nodes:
slug = node[2]
abbr = node[3]
if len(slug) > 150:
print('slug too long "{}"'.format(slug))
slug = slug[:150]
if abbr and len(abbr) > 50:
print('abbreviation too long "{}"'.format(abbr))
abbr = abbr[:50]
db_node = Node(
name=node[1].decode(
'utf-8').encode('latin1', 'ignore').decode('latin1'),
slug=slug.decode(
'utf-8').encode('latin1', 'ignore').decode('latin1'),
abbreviation=abbr,
typeOf=type_to_NodeType[node[4]],
)
db_node.save()
nodes_to_db[node[0]] = db_node
for edge in edges:
parent_node = nodes_to_db.get(edge[0], None)
child_node = nodes_to_db.get(edge[1], None)
if parent_node and child_node:
if parent_node.id == child_node.id:
print("Skipping self-referencing edge: {}".format(
parent_node.slug))
else:
create_edge(parent_node.id, child_node.id)
return nodes_to_db
def link_transactions(nodes_to_transactions, nodes_to_db):
dashboards_linked = set()
for node, transactions in nodes_to_transactions.items():
db_node = nodes_to_db[node]
for transaction in transactions:
try:
dashboard = Dashboard.objects.get(slug=transaction)
dashboard.organisation = db_node
dashboard.save()
dashboards_linked.add(dashboard.id)
except Dashboard.DoesNotExist:
dashboards = list(Dashboard.objects.by_tx_id(transaction))
if len(dashboards) == 0:
print('Ahh no dashboards for {}'.format(transaction))
else:
for dashboard in dashboards:
dashboard.organisation = db_node
dashboard.save()
dashboards_linked.add(dashboard.id)
return dashboards_linked
def link_remaining(past_relations, dashboards_linked, nodes_to_db, by_abbr):
for id, node in past_relations.items():
try:
dashboard = Dashboard.objects.get(id=id)
        except Dashboard.DoesNotExist:
            print('Lost a dashboard: {}'.format(id))
            continue
if id not in dashboards_linked:
if 'abbr' not in node or node['abbr'] is None:
print('could not find an org for {}'.format(dashboard.slug))
print(node)
else:
new_node = by_abbr.get(node['abbr'].lower(), None)
if new_node:
db_node = nodes_to_db[new_node[0]]
dashboard.organisation = db_node
dashboard.save()
else:
print('could not find an org for {}'.format(dashboard.slug))
print(node)
if __name__ == '__main__':
client_email, private_key = get_credentials_or_die()
print('Loading organisations')
nodes, edges, nodes_to_transactions = load_organisations(client_email, private_key)
print('Clearing organisations')
past_relations = clear_organisation_relations()
print('Creating nodes')
nodes_to_db = create_nodes(nodes, edges, node_types())
print('Linking transactions')
dashboards_linked = link_transactions(nodes_to_transactions, nodes_to_db)
print('Linking outstanding dashboards')
link_remaining(past_relations, dashboards_linked, nodes_to_db, index_nodes(nodes, 3))
|
alphagov/stagecraft
|
stagecraft/tools/import_organisations.py
|
Python
|
mit
| 12,682
|
[
"VisIt"
] |
a02edb39995595ae93e5a25f9f29dfe26a18488b48e7375940e90fcc55b0f6ab
|
from shinymud.lib.world import World
from shinymud.data.config import *
from shinymud.commands.emotes import *
from shinymud.commands.attacks import *
from shinymud.commands import *
from shinymud.lib.battle import Battle
import re
# ************************ GENERIC COMMANDS ************************
command_list = CommandRegister()
battle_commands = CommandRegister()
class Quit(BaseCommand):
help = (
"""Quit (Command)
The quit command logs you out of the game, saving your character in the
process.
\nALIASES:
exit
"""
)
def execute(self):
self.pc.quit_flag = True
command_list.register(Quit, ['quit', 'exit'])
command_help.register(Quit.help, ['quit', 'exit'])
class WorldEcho(BaseCommand):
"""Echoes a message to everyone in the world.
args:
args = message to be sent to every player in the world.
"""
required_permissions = ADMIN
help = (
"""WorldEcho (Command)
WorldEcho echoes a message to all players currently in the world.
\nRequired Permissions: ADMIN
"""
)
def execute(self):
self.world.tell_players(self.args)
command_list.register(WorldEcho, ['wecho', 'worldecho'])
command_help.register(WorldEcho.help, ['wecho', 'world echo', 'worldecho'])
class RoomEcho(BaseCommand):
"""Echoes a message to everyone in a specific room.
args - message to be sent to the room
"""
required_permissions = DM | ADMIN
help = (
"""<title>Room Echo (Command)</title>
The RoomEcho command (or, recho) sends a "disembodied" message to everyone at your
current room location.
\nREQUIRED PERMISSIONS: DM or ADMIN
\nUSAGE:
recho <message>
"""
)
def execute(self):
if not self.pc.location:
self.pc.update_output('Your message echoes faintly off into the void.')
return
if not self.args:
self.pc.update_output('Usage: recho <message>. See "help recho" for details.')
return
self.pc.location.tell_room(self.args)
command_list.register(RoomEcho, ['recho', 'room echo', 'roomecho'])
command_help.register(RoomEcho.help, ['recho', 'room echo', 'roomecho'])
class Chat(BaseCommand):
"""Sends a message to every player on the chat channel."""
help = (
"""Chat (command)
The Chat command will let you send a message to everyone in the world whose
chat channels are on. See "help channel" for help on turning you chat
channel off.
"""
)
def execute(self):
if not self.args:
self.pc.update_output("What do you want to chat?")
return
if not self.pc.channels['chat']:
self.pc.channels['chat'] = True
self.pc.update_output('Your chat channel has been turned on.\n')
exclude = [player.name for player in self.world.player_list.values() if not player.channels['chat']]
if 'drunk' in self.pc.effects:
self.args = self.pc.effects['drunk'].filter_speech(self.args)
message = '%s chats, "%s"' % (self.pc.fancy_name(), self.args)
self.world.tell_players(message, exclude, chat_color)
command_list.register(Chat, ['chat', 'c'])
command_help.register(Chat.help, ['chat'])
class Channel(BaseCommand):
"""Toggles communication channels on and off."""
help = (
"""Channel (command)
The channel command toggles your communication channels on or off.
\nUSAGE:
To see which channels you have on/off, just call channel with no options.
To turn a channel on:
channel <channel-name> on
To turn a channel off:
channel <channel-name> off
\nExamples:
If you no longer want to receive chat messages, you can turn your chat channel
off by doing the following:
channel chat off
\nCHANNELS:
Current channels that can be turned on and off via Channel:
chat
\nNOTE: If you use a channel that has been turned off (such as trying to send
a chat message after you've turned off your chat channel), it will
automatically be turned back on.
"""
)
def execute(self):
if not self.args:
chnls = 'Channels:'
for key, value in self.pc.channels.items():
if value:
chnls += '\n ' + key + ': ' + 'on'
else:
chnls += '\n ' + key + ': ' + 'off'
self.pc.update_output(chnls)
return
        toggle = {'on': True, 'off': False}
        args = self.args.split()
        if len(args) < 2:
            self.pc.update_output('Usage: channel <channel-name> <on/off>. See "help channel".\n')
            return
        channel = args[0].lower()
        choice = args[1].lower()
if channel in self.pc.channels.keys():
if choice in toggle.keys():
self.pc.channels[channel] = toggle[choice]
self.pc.update_output('The %s channel has been turned %s.\n' % (channel, choice))
else:
self.pc.update_output('You can only turn the %s channel on or off.\n' % channel)
else:
self.pc.update_output('Which channel do you want to change?\n')
command_list.register(Channel, ['channel'])
command_help.register(Channel.help, ['channel'])
class Build(BaseCommand):
"""Activate or deactivate build mode."""
required_permissions = BUILDER
help = (
"""Build (Command)
The build command allows builders (players with the BUILDER permission) to
access BuildMode, which allows them to construct areas, rooms, items, etc.
\nRequired Permissions: BUILDER
\nUSAGE:
To enter BuildMode:
build [<area_name>]
To exit BuildMode:
build exit
To enter BuildMode and edit your current location:
build here
\nFor a list of BuildCommands, see "help build commands".
"""
)
def execute(self):
if not self.args:
# Player wants to enter BuildMode
self.enter_build_mode()
elif self.args == 'exit':
if self.pc.get_mode() == 'BuildMode':
self.pc.set_mode('normal')
self.pc.update_output('Exiting BuildMode.')
else:
self.pc.update_output('You\'re not in BuildMode right now.')
elif self.args == 'here':
# Builder wants to start building at her current location
if self.pc.location:
self.edit(self.pc.location.area, self.pc.location)
else:
self.pc.update_output('You\'re in the void; there\'s nothing to build.')
else:
area = self.world.get_area(self.args)
if not area:
self.pc.update_output('Area "%s" doesn\'t exist.' % self.args)
self.pc.update_output('See "help buildmode" for help with this command.')
else:
self.edit(area)
def enter_build_mode(self):
"""The player should enter BuildMode."""
if self.pc.get_mode() == 'BuildMode':
self.pc.update_output('To exit BuildMode, type "build exit".')
else:
self.pc.set_mode('build')
self.pc.update_output('Entering BuildMode.')
def edit(self, area, room=None):
"""Initialize the player's edit_area (and possible edit_object.). This
is super hackish, as I'm reproducing code from the BuildCommand Edit.
I just didn't want to create an import cycle for the sake of accessing
one command and was too lazy to change things around to work better.
Should probably clean this up in the future.
"""
if self.pc.get_mode() != 'BuildMode':
self.enter_build_mode()
if (self.pc.name in area.builders) or (self.pc.permissions & GOD):
self.pc.mode.edit_area = area
self.pc.mode.edit_object = None
if room:
self.pc.mode.edit_object = room
self.pc.update_output('Now editing room %s in area "%s".' %
(room.id, area.name))
else:
self.pc.update_output('Now editing area "%s".' % area.name)
else:
self.pc.update_output('You can\'t edit someone else\'s area.')
command_list.register(Build, ['build'])
command_help.register(Build.help, ['build', 'build mode', 'buildmode'])
class Look(BaseCommand):
"""Look at a room, item, npc, or PC."""
help = (
"""<title>Look (Command)</title>
The Look command allows a player to examine a room, item, or npc in detail.
\nUSAGE:
To look at your current location:
look
To look at something in your current location or inventory:
look [at] <npc/item-keyword> [in <room/inventory>]
\nEXAMPLES:
Most of the time it's enough just to use "look <npc/item>", which will look for
the item (or npc) in your current location first, then in your inventory if an
item can't be found in your surroundings:
look at dagger
look at bernie the shopkeeper
If there happens to be a dagger in your current room and a dagger in your
inventory, you might want to be more specific as to which place you look in:
look at dagger in inventory
look at dagger in room
"""
)
def execute(self):
message = 'You don\'t see that here.\n'
if not self.args:
# if the player didn't specify anything to look at, just show them the
# room they're in.
message = self.look_at_room()
else:
exp = r'(at[ ]+)?((?P<thing1>(\w|[ ])+)([ ]in[ ](?P<place>(room)|(inventory)|)))|((at[ ]+)?(?P<thing2>(\w|[ ])+))'
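            # Matches "[at] <thing> in <room|inventory>" (thing1 + place), or a bare
            # "[at] <thing>" (thing2) when no place is given.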
match = re.match(exp, self.args, re.I)
if match:
thing1, thing2, place = match.group('thing1', 'thing2', 'place')
thing = thing1 or thing2
thing = thing.strip()
# just in case someone thought they needed to be uber specific
# to look at their room...
if thing == 'room':
message = self.look_at_room()
else:
if place:
obj_desc = getattr(self, 'look_in_' + place)(thing)
else:
obj_desc = self.look_in_room(thing) or self.look_in_inventory(thing)
if obj_desc:
message = obj_desc
self.pc.update_output(message)
def look_at_room(self):
if self.pc.location:
return self.pc.look_at_room()
else:
return 'You see a dark void.'
def look_in_room(self, keyword):
if self.pc.location:
obj = self.pc.location.check_for_keyword(keyword)
if obj:
if self.alias == 'read':
return obj.description
message = "You look at %s:\n%s" % (obj.name, obj.description)
# This is a bit hackish -- npcs don't have a has_type attr, so we're making sure
# we have an item by process of elimination. If we have an item, and
# that item has a container type, we should display its contents.
if hasattr(obj, 'has_type') and obj.has_type('container'):
message += '\n' + obj.item_types.get('container').display_inventory()
return message
return None
def look_in_inventory(self, keyword):
item = self.pc.check_inv_for_keyword(keyword)
if item:
message = "You look at %s:\n%s" % (item.name, item.description)
if item.has_type('container'):
message += '\n' + item.item_types.get('container').display_inventory()
return message
return None
command_list.register(Look, ['look', 'read'])
command_help.register(Look.help, ['look', 'read'])
class Goto(BaseCommand):
"""Go to a location."""
required_permissions = BUILDER | DM | ADMIN
help = (
"""Goto (Command)
The goto command will transport your character to a specific location.
\nUSAGE:
To go to the same room as a specific player:
goto <player-name>
To go to a room in the same area you're in:
goto [room] <room-id>
To go to a room in a different area:
goto [room] <room-id> [in area] <area-name>
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Where did you want to go to?')
return
exp = r'((room)?([ ]?(?P<room_id>\d+))(([ ]+in)?([ ]+area)?([ ]+(?P<area>\w+)))?)|(?P<name>\w+)'
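        # Matches "[room] <room-id> [in] [area] <area-name>" (room_id, optional area),
        # or a bare player name (name).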
match = re.match(exp, self.args.strip())
message = 'Type "help goto" for help with this command.'
if not match:
self.pc.update_output(message)
return
name, area_name, room_id = match.group('name', 'area', 'room_id')
# They're trying to go to the same room as another player
if name:
# go to the same room that player is in
per = self.world.get_player(name)
if per:
if per.location:
self.pc.go(per.location, self.pc.goto_appear,
self.pc.goto_disappear)
else:
self.pc.update_output('You can\'t reach %s.\n' % per.fancy_name())
else:
self.pc.update_output('That person doesn\'t exist.\n')
# They're trying to go to a specific room
elif room_id:
# See if they specified an area -- if they did, go there
if area_name:
area = self.world.get_area(area_name)
if not area:
self.pc.update_output('Area "%s" doesn\'t exist.' % area_name)
return
else:
if not self.pc.location:
self.pc.update_output(message)
return
area = self.pc.location.area
room = area.get_room(room_id)
if room:
self.pc.go(room, self.pc.goto_appear,
self.pc.goto_disappear)
else:
self.pc.update_output('Room "%s" doesn\'t exist in area %s.' % (room_id, area.name))
else:
self.pc.update_output(message)
command_list.register(Goto, ['goto'])
command_help.register(Goto.help, ['goto', 'go to'])
class Go(BaseCommand):
"""Go to the next room in the direction given."""
help = (
"""<title>Go (Command)</title>
The Go command and its aliases allows a player to travel in a given direction.
\nUSAGE:
go <direction>
Or just give the direction you want to go:
<direction>
\nDirections and their shorthands:
north (n)
west (w)
east (e)
south (s)
up (u)
down (d)
"""
)
def execute(self):
dir_map = {'n': 'north', 's': 'south', 'e': 'east', 'w': 'west',
'u': 'up', 'd': 'down'
}
if self.alias == 'go':
if not self.args:
self.pc.update_output('Go in which direction?')
return
if self.args in dir_map:
direction = dir_map[self.args]
else:
direction = self.args
else:
if self.alias in dir_map:
direction = dir_map[self.alias]
else:
direction = self.alias
if self.pc.location:
go_exit = self.pc.location.exits.get(direction)
if go_exit:
if go_exit._closed:
self.pc.update_output('The door is closed.\n')
else:
if go_exit.to_room:
if go_exit.linked_exit:
arrives = {'up': 'above', 'down': 'below', 'north': 'the north',
'south': 'the south', 'east': 'the east', 'west': 'the west'}
tell_new = '%s arrives from %s.' % (self.pc.fancy_name(),
arrives.get(go_exit.linked_exit))
else:
tell_new = '%s suddenly appears in the room.' % self.pc.fancy_name()
tell_old = '%s leaves %s.' % (self.pc.fancy_name(),
go_exit.direction)
self.pc.go(go_exit.to_room, tell_new, tell_old)
else:
# SOMETHING WENT BADLY WRONG IF WE GOT HERE!!!
# somehow the room that this exit pointed to got deleted without informing
# this exit.
self.world.log.critical('EXIT FAIL: Exit %s from room %s in area %s failed to resolve.' % (go_exit.direction, go_exit.room.id, go_exit.room.area.name))
# Delete this exit in the database and the room - we don't want it
# popping up again
go_exit.destruct()
self.pc.location.exits[go_exit.direction] = None
# Tell the player to ignore the man behind the curtain
self.pc.location.tell_room('A disturbance was detected in the Matrix: Entity "%s exit" should not exist.\nThe anomaly has been repaired.\n' % self.args)
else:
self.pc.update_output('You can\'t go that way.\n')
else:
self.pc.update_output('You exist in a void; there is nowhere to go.\n')
command_list.register(Go, ['go', 'move', 'north', 'n', 'south', 's', 'east', 'e',
'west', 'w', 'up', 'down', 'u', 'd'])
command_help.register(Go.help, ['go', 'move', 'north', 'east', 'south', 'west', 'up',
'down', 'u', 'd', 'n', 'e', 's', 'w'])
class Say(BaseCommand):
"""Echo a message from the player to the room that player is in."""
help = (
"""Say (Command)
The Say command sends a message to everyone in the same room (and only the
people in the same room). Players that are asleep will not hear things that
are said while they are sleeping.
\nUSAGE:
say <message>
"""
)
def execute(self):
if self.args:
if self.pc.location:
if 'drunk' in self.pc.effects:
self.args = self.pc.effects['drunk'].filter_speech(self.args)
message = '%s says, "%s"' % (self.pc.fancy_name(), self.args)
message = say_color + message + clear_fcolor
self.pc.location.tell_room(message, teller=self.pc)
else:
self.pc.update_output('Your words are sucked into the void.')
else:
self.pc.update_output('Say what?')
command_list.register(Say, ['say'])
command_help.register(Say.help, ['say'])
class Load(BaseCommand):
required_permissions = ADMIN | BUILDER | DM
help = (
"""Load (Command)
Load a specific item or npc by id. Npcs will be loaded into your room,
and items will be loaded into your inventory. If you do not specify what area
the item should be loaded from, the item will be loaded from the area you are
currently located in, or the area you are currently editing (if you're editing
one).
\nRequired Permissions: ADMIN, BUILDER, DM
\nUSAGE:
To load an item:
load item <item-id> [from area <area-name>]
To load an npc:
load npc <npc-id> [from area <area-name>]
"""
)
def execute(self):
if not self.args:
self.pc.update_output('What do you want to load?\n')
else:
help_message = 'Type "help load" for help on this command.\n'
exp = r'(?P<obj_type>(item)|(npc))([ ]+(?P<obj_id>\d+))(([ ]+from)?([ ]+area)?([ ]+(?P<area_name>\w+)))?'
match = re.match(exp, self.args, re.I)
if match:
obj_type, obj_id, area_name = match.group('obj_type', 'obj_id', 'area_name')
if not obj_type or not obj_id:
self.pc.update_output(help_message)
else:
if not area_name and self.pc.location:
getattr(self, 'load_' + obj_type)(obj_id, self.pc.location.area)
elif not area_name and self.pc.mode.edit_area:
getattr(self, 'load_' + obj_type)(obj_id, self.pc.mode.edit_area)
elif area_name and self.world.area_exists(area_name):
getattr(self, 'load_' + obj_type)(obj_id, self.world.get_area(area_name))
else:
self.pc.update_output('You need to specify an area to load from.\n')
else:
self.pc.update_output(help_message)
def load_npc(self, npc_id, npc_area):
"""Load an npc into the same room as the player."""
if not self.pc.location:
self.pc.update_output('But you\'re in a void!')
return
prototype = npc_area.get_npc(npc_id)
if prototype:
npc = prototype.load()
npc.location = self.pc.location
self.pc.location.add_char(npc)
self.pc.update_output('You summon %s into the world.\n' % npc.name)
if self.alias == 'spawn':
self.pc.location.tell_room('%s summons %s.' % (self.pc.fancy_name(), npc.name),
[self.pc.name], self.pc)
else:
self.pc.update_output('That npc doesn\'t exist.')
def load_item(self, item_id, item_area):
"""Load an item into the player's inventory."""
prototype = item_area.get_item(item_id)
if prototype:
item = prototype.load()
self.pc.item_add(item)
self.pc.update_output('You summon %s into the world.\n' % item.name)
if self.pc.location and (self.alias == 'spawn'):
self.pc.location.tell_room('%s summons %s into the world.' % (self.pc.fancy_name(), item.name),
[self.pc.name], self.pc)
else:
self.pc.update_output('That item doesn\'t exist.\n')
command_list.register(Load, ['load', 'spawn'])
command_help.register(Load.help, ['load', 'spawn'])
class Inventory(BaseCommand):
"""Show the player their inventory."""
help = (
"""<title>Inventory (command)</title>
    The Inventory command shows a player a list of the items in their inventory.
\nALIASES: i, inv
\nUSAGE:
inventory
"""
)
def execute(self):
i = 'You have %s %s.\n' % (str(self.pc.currency), CURRENCY)
if not self.pc.inventory:
i += 'Your inventory is empty.'
else:
i += 'Your inventory consists of:\n'
for item in self.pc.inventory:
if item not in self.pc.isequipped:
i += item.name + '\n'
self.pc.update_output(i)
command_list.register(Inventory, ['i', 'inventory', 'inv'])
command_help.register(Inventory.help, ['i', 'inv', 'inventory'])
class Give(BaseCommand):
"""Give an item to another player or npc."""
help = (
"""Give (Command)
Give an object to another player or an npc.
\nUSAGE:
give <item-keyword> to <npc/player-name>
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Try: "give <item> to <person>", or type "help give".')
return
exp = r'(?P<amount>\d+)?([ ]+)?(?P<thing>.*?)([ ]+)(to[ ]+)?(?P<givee>\w+)'
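        # Matches "[<amount>] <thing> [to] <person>", e.g. "sword to bob".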
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Try: "give <item> to <person>", or type "help give".')
elif not self.pc.location:
self.pc.update_output('You are alone in the void; there\'s no one to give anything to.\n')
else:
amount, thing, person = match.group('amount', 'thing', 'givee')
#check that the person we're giving to exists
givee = self.pc.location.get_player(person)
if not givee:
givee = self.pc.location.get_npc_by_kw(person)
if not givee:
self.pc.update_output('%s isn\'t here.' % person.capitalize())
return
#check for giving an item
thing_name = 'something'
item = self.pc.check_inv_for_keyword(thing)
if item:
self.pc.item_remove(item)
givee.item_add(item)
thing_name = item.name
#check for giving currency
elif not item and thing == CURRENCY:
if not amount:
amount = 1
else:
amount = int(amount)
if self.pc.currency < amount:
self.pc.update_output('You don\'t have that much ' + CURRENCY + '.')
return
else:
self.pc.currency -= amount
givee.currency += amount
thing_name = str(amount) + ' ' + CURRENCY
else:
self.pc.update_output('You don\'t have %s.' % thing)
return
#We've completed the giving process, now tell everyone about it.
self.pc.update_output('You give %s to %s.' % (thing_name, givee.fancy_name()))
givee.update_output('%s gives you %s.' % (self.pc.fancy_name(), thing_name))
self.pc.location.tell_room('%s gives %s to %s.' % (self.pc.fancy_name(),
thing_name,
givee.fancy_name()),
[self.pc.name, givee.name])
if item and givee.is_npc():
givee.notify('given_item', {'giver': self.pc,
'item': item})
def give_currency(self, amount, givee):
if self.pc.currency < amount:
self.pc.update_output('You don\'t have that much ' + CURRENCY + '.')
else:
self.pc.currency -= amount
givee.currency += amount
curr = 'some ' + CURRENCY
command_list.register(Give, ['give'])
command_help.register(Give.help, ['give'])
class Drop(BaseCommand):
"""Drop an item from the player's inventory into the player's current room."""
help = (
"""<title>Drop (Command)</title>
The Drop command allows you to drop an item from your inventory onto the ground.
\nUSAGE:
drop <item-name>
"""
)
def execute(self):
if not self.args:
self.pc.update_output('What do you want to drop?\n')
else:
item = self.pc.check_inv_for_keyword(self.args)
if item:
self.pc.item_remove(item)
self.pc.update_output('You drop %s.\n' % item.name)
if self.pc.location:
self.pc.location.item_add(item)
self.pc.location.tell_room('%s drops %s.\n' % (self.pc.fancy_name(),
item.name), [self.pc.name])
else:
self.pc.update_output('%s disappears into the void.\n' % item.name)
item.destruct()
else:
self.pc.update_output('You don\'t have that.\n')
command_list.register(Drop, ['drop'])
command_help.register(Drop.help, ['drop'])
class Get(BaseCommand):
"""Get an item and store it in the player's inventory."""
help = (
"""Get (Command)
The Get command is used to transfer an item from the room into your inventory,
or from a containing item into your inventory.
\nUSAGE:
To get an item you see in a room:
get <item-keyword>
To get an item from a container (the container can exist in the room, or
can be in your inventory):
get <item-keyword> from <container-item-keyword>
\nEXAMPLES:
Let's say you see a loot bag sitting in your room. You could take it by typing:
get loot bag
Let's say you were to look at the loot bag (using the Look command) and see
that it contained a golden ring. You could transfer that ring from the loot
bag into your inventory by typing:
get ring from loot bag
You could have also gotten the ring out of the loot bag, using the same method
just mentioned, even if you hadn't gotten the loot bag from the room first
(i.e, you don't have to get the loot bag before you can take the ring from
it).
\nNOTE: Containers must be open before you can see anything inside them, or
take anything out of them. For help with opening containers, see "help open".
"""
)
def execute(self):
if not self.args:
self.pc.update_output('What do you want to get?\n')
return
exp = r'(up[ ]+)?((?P<target_kw>(\w|[ ])+)([ ]+from)([ ]+(?P<source_kw>(\w|[ ])+)))|((up[ ]+)?(?P<item_kw>(\w|[ ])+))'
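        # Matches "[up] <target> from <source>" (get from a container), or "[up] <item>"
        # (get from the room); the optional "up" supports the "pick up" phrasing.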
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Type "help get" for help with this command.')
return
target_kw, source_kw, item_kw = match.group('target_kw', 'source_kw',
'item_kw')
if source_kw:
message = self.get_item_from_container(source_kw, target_kw)
else:
message = self.get_item_from_room(item_kw)
self.pc.update_output(message)
def get_item_from_container(self, source_kw, target_kw):
c_item = self.pc.location.get_item_by_kw(source_kw) or \
self.pc.check_inv_for_keyword(source_kw)
if not c_item:
            return 'There\'s no "%s" container here.' % source_kw
if not c_item.has_type('container'):
return 'That\'s not a container.'
container = c_item.item_types.get('container')
if container.closed:
return '%s is closed.' % c_item.name.capitalize()
item = container.get_item_by_kw(target_kw)
if not item:
return '%s doesn\'t exist.' % target_kw.capitalize()
if item.carryable or (self.pc.permissions & GOD):
container.item_remove(item)
self.pc.item_add(item)
if self.pc.location:
room_tell = '%s gets %s from %s.' % (self.pc.fancy_name(),
item.name, c_item.name)
self.pc.location.tell_room(room_tell, [self.pc.name])
return 'You get %s.' % item.name
else:
return 'You can\'t take that.'
def get_item_from_room(self, item_kw):
if not self.pc.location:
return 'Only cold blackness exists in the void. ' +\
'It\'s not the sort of thing you can take.'
item = self.pc.location.get_item_by_kw(item_kw)
if not item:
return '%s doesn\'t exist.' % item_kw
if item.carryable or (self.pc.permissions & GOD):
self.pc.location.item_remove(item)
self.pc.item_add(item)
room_tell = '%s gets %s.' % (self.pc.fancy_name(), item.name)
self.pc.location.tell_room(room_tell, [self.pc.name])
return 'You get %s.' % item.name
else:
return 'You can\'t take that.'
command_list.register(Get, ['get', 'take', 'pick'])
command_help.register(Get.help, ['get', 'take', 'pick'])
class Put(BaseCommand):
"""Put an object inside a container."""
help = (
"""<title>Put (Command)</title>
\nThe put command allows you to put an item inside a container. If you are
just looking to get rid of an inventory item or leave an item in a room, try
the Drop command.
\nUSAGE:
To put an item inside a container:
put <item-name> in <container-name>
\nThe preposition (the word "in") is required, but can also be replaced with
"inside" or "on". To get an item out of a container, see "help get".
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Put what where?')
return
exp = r'(?P<target_kw>(\w|[ ])+)([ ]+(?P<prep>(in)|(inside)|(on)))(?P<container>(\w|[ ])+)'
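# e.g. "put gold ring in old chest" parses as target_kw='gold ring',
# prep='in', container=' old chest'; the keyword lookup is assumed to
# tolerate (or strip) that stray leading space downstream.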
match = re.match(exp, self.args.lower().strip())
if not match:
self.pc.update_output('Type "help put" for help with this command.')
return
target_kw, preposition, cont_kw = match.group('target_kw',
'prep',
'container')
target = self.pc.check_inv_for_keyword(target_kw)
if not target:
self.pc.update_output('You don\'t have "%s".' % target_kw)
return
container = self.pc.check_inv_for_keyword(cont_kw)
# Make sure the container object exists
if not container:
if self.pc.location:
container = self.pc.location.get_item_by_kw(cont_kw)
if not container:
self.pc.update_output('%s isn\'t here.' % cont_kw)
return
# Make sure it's a container
if not container.has_type('container'):
self.pc.update_output('%s is not a container.' %
container.name.capitalize())
return
c_type = container.item_types['container']
if c_type.closed:
self.pc.update_output('It\'s closed.')
return
if c_type.item_add(target):
self.pc.update_output('You put %s %s %s.' % (target.name,
preposition,
container.name))
if self.pc.location:
tr = '%s puts %s %s %s.' % (self.pc.fancy_name(),
target.name, preposition,
container.name)
self.pc.location.tell_room(tr, [self.pc.name])
else:
self.pc.update_output('%s won\'t fit %s %s.' % (target.name.capitalize(),
preposition,
container.name))
command_list.register(Put, ['put'])
command_help.register(Put.help, ['put'])
class Equip(BaseCommand):
"""Equip an item from the player's inventory."""
help = (
"""<title>Equip (Command)</title>
The equip command allows you to wear or hold an item that can be worn or held.
Weapons, armor, clothing, and jewelry, for example, can all be equipped. You have
a number of places where you may wear/hold equipped items, such as on your head
(for hats) or on your feet (for shoes or boots).
\n<b>USAGE</b>:
To just see a list of currently equipped items, type
<b>equip</b>
To equip an item in your inventory, type
<b>equip <item></b>
\nFor removing items, see <b>help unequip</b>.
""")
def execute(self):
message = ''
if not self.args:
#Note: slot types are not output in any particular order.
message = 'Equipped items:'
for i, j in self.pc.equipped.iteritems():
message += '\n' + i + ': '
if j:
message += j.name
else:
message += 'None.'
message += '\n'
else:
item = self.pc.check_inv_for_keyword(self.args)
if not item:
message = 'You don\'t have it.\n'
else:
equip_type = item.item_types.get('equippable')
if not equip_type:
message = 'You can\'t equip that!\n'
elif equip_type.equip_slot not in EQUIP_SLOTS:
message = 'How do you equip that?'
else:
if self.pc.equipped.get(equip_type.equip_slot): #if slot not empty
# remove the item currently occupying the slot, not the new one
self.pc.isequipped.remove(self.pc.equipped[equip_type.equip_slot])
self.pc.equipped[equip_type.equip_slot] = item
self.pc.isequipped += [item]
equip_type.on_equip()
message = EQUIP_SLOTS[equip_type.equip_slot].replace('#item', item.name) + '\n'
self.pc.update_output(message)
command_list.register(Equip, ['equip', 'wear', 'wield'])
command_help.register(Equip.help, ['equip', 'wear', 'wield'])
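# EQUIP_SLOTS maps each slot name to a feedback template containing the
# '#item' token (see the .replace('#item', item.name) call above). A
# hypothetical entry might read:
#   EQUIP_SLOTS['head'] = 'You pull #item down over your ears.'
# (illustrative only -- the real slot names and templates live elsewhere)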
class Unequip(BaseCommand):
"""Unequip items."""
help = (
"""<title>Unequip (Command)</title>
Removes an equipped item and places it back in your inventory.
<b>USAGE:</b>
To remove an item that is currently equipped, try
<b>unequip <item></b>
To equip items, or to see a list of currently equipped items,
see <b>help equip</b>
"""
)
def execute(self):
if not self.args:
self.pc.update_output('What do you want to unequip?\n')
return
item = self.pc.check_inv_for_keyword(self.args)
if not item:
self.pc.update_output('You don\'t have that!\n')
return
equip_type = item.item_types.get('equippable')
if not equip_type:
message = 'That item is not equippable.\n'
elif not self.pc.equipped.get(equip_type.equip_slot):
message = 'You aren\'t using anything in that slot.\n'
else:
self.pc.equipped[equip_type.equip_slot] = ''
self.pc.isequipped.remove(item)
equip_type.on_unequip()
message = 'You remove ' + item.name + '.\n'
self.pc.update_output(message)
command_list.register(Unequip, ['unequip'])
command_help.register(Unequip.help, ['unequip'])
class Who(BaseCommand):
"""Return a list of names comprised of players who are currently playing the game."""
help = (
"""Who (Command)
The Who command returns a list of all the players currently in the game.
\nUSAGE:
who
"""
)
def execute(self):
persons = [per for per in self.world.player_list.values() if isinstance(per.name, basestring)]
message = 'Currently Online'.center(50, '-') + '\n'
for per in persons:
if per.permissions > PLAYER:
# We want to list the permissions of the people who have perms
# higher than player so other players know where they can go
# for help
perm_list = get_permission_names(per.permissions)
# Everyone's a player -- don't bother listing that one
if 'Player' in perm_list:
perm_list.remove('Player')
# God trumps everything else...
if 'God' in perm_list:
perms = 'God'
else:
perms = ', '.join(perm_list)
message += '%s (%s) - %s\n' % (per.fancy_name(), perms, per.title)
else:
message += '%s - %s\n' % (per.fancy_name(), per.title)
message += '-'.center(50, '-')
self.pc.update_output(message)
command_list.register(Who, ['who'])
command_help.register(Who.help, ['who'])
class Enter(BaseCommand):
"""Enter a portal."""
help = (
"""<title>Enter (Command)</title>
The Enter Command allows a character to enter a portal object.
\nUSAGE:
enter <portal-name>
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Enter what?\n')
return
fail = 'You don\'t see "%s" here.' % self.args
if self.pc.location:
# Check the room for the portal object first
obj = self.pc.location.get_item_by_kw(self.args)
if obj:
if 'portal' in obj.item_types:
self.go_portal(obj.item_types['portal'])
else:
self.pc.update_output('%s is not a portal.' % obj.name.capitalize())
else:
# If the portal isn't in the room, check their inventory
obj = self.pc.check_inv_for_keyword(self.args)
if obj:
if 'portal' in obj.item_types:
# If the portal is in their inventory, make them drop it first
# (a portal can't go through itself)
Drop(self.pc, self.args, 'drop').execute()
self.go_portal(obj.item_types['portal'])
else:
self.pc.update_output('%s is not a portal.' % obj.name.capitalize())
else:
# We've struck out on all counts -- the player doesn't have a portal
self.pc.update_output(fail)
def go_portal(self, portal):
"""Go through a portal."""
if portal.location:
if self.pc.location:
self.pc.location.tell_room(self.personalize(portal.leave_message, self.pc),
[self.pc.name])
self.pc.update_output(self.personalize(portal.entrance_message, self.pc))
self.pc.go(portal.location)
self.pc.location.tell_room(self.personalize(portal.emerge_message, self.pc),
[self.pc.name])
else:
self.pc.update_output('Nothing happened. It must be a dud.')
command_list.register(Enter, ['enter'])
command_help.register(Enter.help, ['enter'])
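# A portal carries three builder-defined messages: leave_message (heard in
# the room being left), entrance_message (shown to the traveller), and
# emerge_message (heard at the destination); personalize() presumably
# swaps tokens such as #actor for the traveller's name, as in Award below.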
class Purge(BaseCommand):
"""Purge all of the items and npc's in the room."""
required_permissions = BUILDER | DM | ADMIN
help = (
"""Purge (Command)
Purge will destroy all items and npcs in your room or in your inventory. Purge
cannot, however, destroy players or prototype items/npcs (see "help
prototypes" if this is confusing).
USAGE:
Calling purge without any options will purge the room by default.
To purge your inventory:
purge inventory/i
"""
)
def execute(self):
if not self.args:
# If they specified nothing, just purge the room
if self.pc.location:
self.pc.location.purge_room()
self.pc.update_output('The room has been purged.\n')
else:
self.pc.update_output('You\'re in a void, there\'s nothing to purge.\n')
elif self.args in ['i', 'inventory']:
# Purge their inventory!
# Keep destroying the head of the list until the inventory is empty
while self.pc.inventory:
item = self.pc.inventory[0]
self.pc.item_remove(item)
item.destruct()
self.pc.update_output('Your inventory has been purged.\n')
else:
# Purge a specific npc or item based on keyword
self.pc.update_output('Someone didn\'t endow me with the functionality to purge that for you.\n')
command_list.register(Purge, ['purge'])
command_help.register(Purge.help, ['purge'])
class Areas(BaseCommand):
help = (
"""Areas (Command)
The Areas command gives a list of all the areas in the game along with a
suggested level range.
"""
)
def execute(self):
"""Give a list of areas in the world, as well as their level ranges."""
the_areas = ['%s (level range: %s) ' % (area.title, area.level_range) \
for area in self.world.areas.values()]
message = ' Areas '.center(50, '-') + '\n'
if not the_areas:
message += 'Sorry, God has taken a day off. There are no areas yet.'
else:
message += '\n'.join(the_areas)
message += '\n' + ('-' * 50)
self.pc.update_output(message)
command_list.register(Areas, ['areas'])
command_help.register(Areas.help, ['areas'])
class Emote(BaseCommand):
"""Emote to another player or ones self. (slap them, cry hysterically, etc.)"""
help = (
"""Emote (Command)
\nThe emote command allows your character express actions in the third person.
There are also many pre-defined emotes. For a list of actions you can type to
initiate a pre-defined emote, see "help emote list".
\nUSAGE:
To express a custom emote:
emote <emote-text>
To use a predefined emote:
<emote-action> [<target-player-name>]
\nEXAMPLES:
If Bob wished to express his severe doubt at Steven's battle plans with
actions instead of words, he could type:
emote facepalms at Steven's epic ineptitude.
Anyone in the same room would then see the following:
Bob facepalms at Steven's epic ineptitude.
"""
)
elist = "The following is the list of emote actions: \n" +\
'\n'.join(EMOTES.keys())
aliases = ['emote']
aliases.extend(EMOTES.keys())
def execute(self):
if not self.pc.location:
self.pc.update_output('You try, but the action gets sucked into the void. The void apologizes.')
elif self.alias == 'emote':
self.pc.location.tell_room('%s %s' % (self.pc.fancy_name(),
self.args),
teller=self.pc)
else:
emote_list = EMOTES[self.alias]
# The person didn't specify a target -- they want to do the emote
# action by themself
if not self.args:
self.single_person_emote(emote_list)
# If this emote doesn't have an option for a double emote,
# just ignore the args and do a single-person emote
elif self.args and not emote_list[1]:
self.single_person_emote(emote_list)
else:
victim = self.pc.location.get_player(self.args.lower()) or\
self.pc.location.get_npc_by_kw(self.args.lower()) or\
self.world.get_player(self.args.lower())
if not victim:
self.pc.update_output('%s isn\'t here.' %
self.args.capitalize())
elif victim == self.pc:
self.single_person_emote(emote_list)
else:
self.double_person_emote(emote_list, victim)
def single_person_emote(self, emote_list):
"""A player wishes to emote an action alone."""
actor_m = self.personalize(emote_list[0][0], self.pc)
room_m = self.personalize(emote_list[0][1], self.pc)
self.pc.update_output(actor_m)
self.pc.location.tell_room(room_m, [self.pc.name])
def double_person_emote(self, emote_list, victim):
"""A player wishes to emote an action on a target."""
# We got this far, we know the victim exists in the world
actor = self.personalize(emote_list[1][0], self.pc, victim)
victimm = self.personalize(emote_list[1][1], self.pc, victim)
if victim.location == self.pc.location:
room_m = self.personalize(emote_list[1][2], self.pc, victim)
self.pc.update_output(actor)
victim.update_output(victimm)
self.pc.location.tell_room(room_m, [self.pc.name, victim.name])
else:
self.pc.update_output('From far away, ' + actor)
victim.update_output('From far away, ' + victimm)
if victim.is_npc():
victim.notify('emoted', {'emote': self.alias, 'emoter': self.pc})
command_list.register(Emote, Emote.aliases)
command_help.register(Emote.help, ['emote', 'emotes'])
command_help.register(Emote.elist, ['emote list'])
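# Judging by how Emote indexes it, EMOTES maps each action alias to a pair
# of message tuples: EMOTES[alias][0] == (actor_msg, room_msg) for the solo
# form, and EMOTES[alias][1] == (actor_msg, victim_msg, room_msg) for the
# targeted form (or a falsy placeholder when no targeted variant exists).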
class Bestow(BaseCommand):
"""Bestow a new class of permissions on a PC."""
required_permissions = GOD
help = (
"""<title>Bestow (Command)</title>
Bestow allows you to extend a player's permissions by giving them another
permission group.
\nRequired Permissions: GOD
\nUSAGE:
To bestow a privilege upon a player:
bestow <permission-group> [upon] <player-name>
To bestow a privilege upon an npc:
    bestow <permission-group> [upon] npc <npc-name>
\nPermission Groups:
builder
dm
admin
god
\n<b>BEWARE!!!!!
Bestowing privileges beyond DM upon npcs can be very dangerous. Since npcs can
be controlled by scripts, it is possible for other builders to run commands
above their authority level through this npc if they have access to this npc's
build area (they are on the area's BuildersList). You might want to make sure
you're the only one with builder privileges to the area of the npc you're giving
escalated privileges to.</b>
\nTo revoke permissions, see "help revoke". For more information on
permissions and permission groups, see "help permissions".
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Bestow what authority upon whom?\n')
return
exp = r'(?P<permission>(god)|(dm)|(builder)|(admin))[ ]?(to)?(on)?(upon)?([ ]+(?P<npc>npc))?([ ]+(?P<player>\w+))'
match = re.match(exp, self.args.lower(), re.I)
if not match:
self.pc.update_output('Type "help bestow" for help on this command.')
return
perm, npc, player = match.group('permission', 'npc', 'player')
if npc:
error = 'Npc doesn\'t exist.'
if not self.pc.location:
self.pc.update_output(error)
return
player = self.pc.location.get_npc_by_kw(player)
self.pc.update_output('WARNING: giving npcs wider permissions can be dangerous. See "help bestow".')
else:
player = self.world.get_player(player)
permission = globals().get(perm.upper())
if not player:
self.pc.update_output('That player isn\'t on right now.')
return
if not permission:
self.pc.update_output('Valid permission types are: god, dm, builder, and admin.')
return
if player.permissions & permission:
self.pc.update_output('%s already has that authority.' % player.fancy_name())
return
player.permissions = player.permissions | permission
if not player.is_npc():
player.save()
self.world.play_log.info('%s bestowed the authority of %s upon %s.' % (
self.pc.fancy_name(), perm.upper(), player.fancy_name()))
self.pc.update_output('%s now has the privilege of being %s.' % (player.fancy_name(), perm.upper()))
player.update_output('%s has bestowed the authority of %s upon you!' % (self.pc.fancy_name(), perm.upper()))
self.world.tell_players('%s has bestowed the authority of %s upon %s!' %
(self.pc.fancy_name(), perm.upper(), player.fancy_name()),
[self.pc.name, player.name])
command_list.register(Bestow, ['bestow'])
command_help.register(Bestow.help, ['bestow'])
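# Permission groups are bit flags packed into one integer, so granting is
# a bitwise OR, checking is a bitwise AND, and clearing a bit known to be
# set can be a XOR (as Revoke does below). Illustrative sketch with
# hypothetical flag values:
#   PLAYER, DM, BUILDER, ADMIN, GOD = 1, 2, 4, 8, 16  # assumed values
#   perms = PLAYER | DM     # grant:  perms == 3
#   bool(perms & DM)        # check:  True
#   perms = perms ^ DM      # revoke: perms == 1 (only safe if bit was set)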
class Revoke(BaseCommand):
"""Revoke permission privilges for a PC."""
required_permissions = GOD
help = (
"""<title>Revoke (Command)</title>
The revoke command allows you to revoke the privileges of other players.
\nRequired Permissions: GOD
\nUSAGE:
revoke <permission-group> [from/for] <player-name>
\nPermission Groups:
builder
dm
admin
god
\nTo bestow permissions, see "help bestow". For more information on
permissions and permission groups, see "help permissions".
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Revoke whose authority over what?\n')
return
exp = r'(?P<permission>(god)|(dm)|(builder)|(admin))[ ]?(from)?(on)?(for)?([ ]+(?P<player>\w+))'
match = re.match(exp, self.args.lower(), re.I)
if not match:
self.pc.update_output('Type "help revoke" for help on this command.')
return
perm, player = match.group('permission', 'player')
if self.pc.location:
npc = self.pc.location.get_npc_by_kw(player)
player = self.world.get_player(player) or npc
permission = globals().get(perm.upper())
if not player:
self.pc.update_output('That player isn\'t on right now.')
return
if not permission:
self.pc.update_output('Valid permission types are: god, dm, builder, and admin.')
return
if not (player.permissions & permission):
self.pc.update_output('%s doesn\'t have that authority anyway.' % player.fancy_name())
return
player.permissions = player.permissions ^ permission
self.world.play_log.info('%s revoked %s\'s %s authority.' % (
self.pc.fancy_name(), player.fancy_name(), perm.upper()))
self.pc.update_output('%s has had the privilege of %s revoked.' % (player.fancy_name(), perm))
player.update_output('%s has revoked your %s privileges.' % (self.pc.fancy_name(), perm))
if not player.is_npc():
player.save()
if player.get_mode() == 'BuildMode':
player.set_mode('normal')
player.update_output('You have been kicked from BuildMode.')
command_list.register(Revoke, ['revoke'])
command_help.register(Revoke.help, ['revoke'])
class Reset(BaseCommand):
"""The command for resetting a room or area."""
required_permissions = DM | BUILDER | ADMIN
help = ("Reset (command)\n"
"""The reset command is used to force a room to respawn all of the items
and npcs on its spawn list. If you call reset on an entire area, all of the
rooms in that area will be told to reset.\n
USAGE:
To reset the room you are in, just call reset without any options. If you need
to reset a room without being inside it (or you want to reset a whole area),
use one of the following:
To reset a room:
reset [room] <room-id> [in area] <area-name>
To reset an area:
reset [area] <area-name>\n
Permissions:
This command requires DM, BUILDER, or ADMIN permissions.\n
Adding Resets to Rooms
For help on adding items and npcs to a room's spawn list,
see "help spawns".""")
def execute(self):
if not self.args:
# Reset the room the player is in
if self.pc.location:
self.pc.location.reset()
self.pc.update_output('Room %s has been reset.\n' % self.pc.location.id)
else:
self.pc.update_output('That\'s a pretty useless thing to do in the void.\n')
else:
exp = r'(room[ ]+(?P<room_id>\d+)([ ]+in)?([ ]+area)?([ ]+(?P<room_area>\w+))?)|(area[ ]+(?P<area>\w+))'
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Type "help reset" to get help with this command.\n')
return
room_id, room_area, area = match.group('room_id', 'room_area', 'area')
if area:
# reset an entire area
reset_area = self.world.get_area(area)
if reset_area:
reset_area.reset()
self.pc.update_output('Area %s has been reset.\n' % reset_area.name)
return
else:
self.pc.update_output('That area doesn\'t exist.\n')
return
# Reset a single room
if room_area:
area = self.world.get_area(room_area)
if not area:
self.pc.update_output('That area doesn\'t exist.\n')
return
else:
if self.pc.location:
area = self.pc.location.area
else:
self.pc.update_output('Type "help resets" to get help with this command.\n')
return
room = area.get_room(room_id)
if not room:
self.pc.update_output('Room %s doesn\'t exist in area %s.\n' % (room_id,
area.name))
return
room.reset()
self.pc.update_output('Room %s in area %s has been reset.\n' % (room.id, area.name))
command_list.register(Reset, ['reset'])
command_help.register(Reset.help, ['reset', 'resets'])
class Help(BaseCommand):
help = ("Try 'help <command-name>' for help with a command.\n"
"For example, 'help go' will give details about the go command."
)
def execute(self):
# Look the argument up in the command_help registry; if it matches a
# registered alias, format and send that help string to the player,
# otherwise apologize.
if not self.args:
self.pc.update_output(self.help)
return
help = command_help[self.args]
if help:
# This should probably be replaced with a better color parsing
# function when we decide to have better use of colors
help = help.replace('<b>', BOLD)
help = help.replace('<blink>', BLINK)
help = help.replace('<title>', help_title).replace('</title>',
CLEAR + '\n')
help = re.sub(r'</\w+>', CLEAR, help)
self.pc.update_output(help)
else:
self.pc.update_output("Sorry, I can't help you with that.\n")
command_list.register(Help, ['help', 'explain', 'describe'])
command_help.register(Help.help, ['help', 'explain', 'describe'])
class Clear(BaseCommand):
"""Clear the player's screen and give them a new prompt."""
help = (
"""Clear (command)
Clears your screen of text and gives you a new prompt.
"""
)
def execute(self):
# First send the ANSI command to clear the entire screen, then
# send the ANSI command to move the cursor to the "home" position
# (the upper left position in the terminal)
self.pc.update_output('\x1b[2J' + '\x1b[H')
command_list.register(Clear, ['clear'])
command_help.register(Clear.help, ['clear'])
class Set(BaseCommand):
"""Set a (settable) player attribute."""
help = (
"""Set (Command)
The Set command allows you to set details and options about your character.
\nUSAGE:
set <option> <argument>
Options you can set:
email - your e-mail address
title - the title of your character
description - your character's description
"""
)
def execute(self):
if not self.args:
self.pc.update_output('What do you want to set?\n')
else:
match = re.match(r'\s*(\w+)([ ](.+))?$', self.args, re.I)
if not match:
message = 'Type "help set" for help with this command.\n'
else:
func, _, arg = match.groups()
message = 'You can\'t set that.\n'
if hasattr(self.pc, 'set_' + func):
message = (getattr(self.pc, 'set_' + func)(arg))
self.pc.update_output(message)
command_list.register(Set, ['set', 'cset'])
command_help.register(Set.help, ['set'])
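# Set dispatches dynamically: "set title Hero of Ages" resolves to
# getattr(pc, 'set_title')('Hero of Ages'), so adding a new settable
# option is presumably just a matter of giving the player class a
# set_<option> method that returns a feedback string.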
class ToggleOpen(BaseCommand):
"""Open/Close doors and containers."""
help = (
"""Open, Close (Command)
The open and close commands can be used to open/close doors or containers.
\nUSAGE:
To open a door:
open <door-direction> [door]
To open a container:
open <container-keyword>
To close a door or container, use the same syntax as above, but replace "open"
with "close."
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Open what?')
return
exp = r'(?P<dir>(north)|(south)|(east)|(west)|(up)|(down))|(?P<kw>(\w|[ ])+)'
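# 'open north' matches the dir group first; anything else ('open chest')
# falls through to kw, so a direction always wins over a container that
# happens to share its name.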
match = re.match(exp, self.args.lower(), re.I)
if not match:
self.pc.update_output('Type "help open" for help with this command.')
return
direction, kw = match.group('dir', 'kw')
if direction:
message = self.toggle_door(direction, self.alias)
elif kw:
message = self.toggle_container(kw, self.alias)
self.pc.update_output(message)
def toggle_door(self, direction, toggle):
if not self.pc.location:
return 'There aren\'t any doors in the void.'
exit = self.pc.location.exits.get(direction)
if not exit:
return 'There isn\'t a door there.'
if toggle == 'open' or toggle == 'unlock':
return exit.open_me(self.pc)
else:
return exit.close_me(self.pc)
def toggle_container(self, kw, toggle):
obj = self.pc.check_inv_for_keyword(kw)
# if nothing in inventory, check the room (guarding against the void)
if not obj:
if not self.pc.location:
return 'You don\'t see that here.'
obj = self.pc.location.get_item_by_kw(kw)
if not obj:
return 'You don\'t see that here.'
if not obj.has_type('container'):
return 'That\'s not a container.'
container = obj.item_types.get('container')
if toggle == 'close':
if not container.openable:
return '%s defies your attempts to close it.' % obj.name.capitalize()
if container.closed:
return 'It\'s already closed.'
container.closed = True
return 'You close %s.' % obj.name
else:
if not container.openable:
return '%s defies your attempts to open it.' % obj.name.capitalize()
if not container.closed:
return 'It\'s already open.'
container.closed = False
return 'You open %s.' % obj.name
command_list.register(ToggleOpen, ['open', 'close', 'unlock'])
command_help.register(ToggleOpen.help, ['open', 'close', 'unlock'])
class Lock(BaseCommand):
help = (
"""<title>Lock (Command)</title>
Locks a door; only those with a key may unlock it.
<b>USAGE:</b>
to lock (and close) a door, try
<b>lock <direction></b>""")
def execute(self):
if not self.args:
self.pc.update_output('Lock what?')
return
if not self.pc.location:
self.pc.update_output('There aren\'t any doors in the void.')
return
dir_map = {'n':'north', 's':'south', 'e':'east', 'w':'west', 'u':'up', 'd':'down'}
direction = dir_map.get(self.args[0].lower())
if not direction:
self.pc.update_output('Try "help lock" for help with this command')
return
if self.pc.location.exits.get(direction):
message = self.pc.location.exits[direction].lock_me(self.pc)
self.pc.update_output(message)
return
self.pc.update_output("There is nothing to lock in that direction")
command_list.register(Lock, ['lock'])
command_help.register(Lock.help, ['lock'])
class Version(BaseCommand):
"""Display the credits and the version of ShinyMUD currently running."""
help = (
"""** ShinyMUD, version %s **
\nDeveloped by (and copyright):
Jess Coulter (Surrey)
Patrick Murphy (Murph)
Nickolaus Saint (Nick)""" % VERSION
)
def execute(self):
self.pc.update_output(self.help)
command_list.register(Version, ['version', 'credit', 'credits'])
command_help.register(Version.help, ['version', 'credit', 'credits'])
class Sit(BaseCommand):
"""Change the player's position to sitting."""
help = (
"""<title>Sit (Command)</title>
The Sit command changes your position to sitting.
\nUSAGE:
To sit on the floor:
sit
To sit on furniture:
sit [on/in] <furniture-name>
"""
)
def execute(self):
if self.pc.position[0].find(self.alias) != -1:
self.pc.update_output('You are already sitting.')
return
if not self.args:
if self.pc.position[0] == 'sleeping':
self.pc.update_output('You wake and sit up.')
if self.pc.location:
self.pc.location.tell_room('%s wakes and sits up.' % self.pc.fancy_name(), [self.pc.name], self.pc)
self.pc.change_position('sitting', self.pc.position[1])
else:
self.pc.update_output('You sit down.')
if self.pc.location:
self.pc.location.tell_room('%s sits down.' % self.pc.fancy_name(), [self.pc.name], self.pc)
self.pc.change_position('sitting')
else:
if not self.pc.location:
self.pc.update_output('The void is bereft of anything to sit on.')
return
exp = r'((on)|(in))?([ ]?)?(?P<furn>(\w|[ ])+)'
furn_kw = re.match(exp, self.args.lower().strip()).group('furn')
furn = self.pc.location.get_item_by_kw(furn_kw)
if not furn:
self.pc.update_output('You don\'t see that here.')
return
f_obj = furn.item_types.get('furniture')
if not f_obj:
self.pc.update_output('That\'s not a type of furniture.')
return
if not f_obj.player_add(self.pc):
self.pc.update_output('It\'s full right now.')
return
else:
if self.pc.position[1]:
self.pc.position[1].item_types['furniture'].player_remove(self.pc)
# (player_add above already seated us; no second call is needed)
self.pc.position = ('sitting', furn)
self.pc.update_output('You sit down on %s.' % furn.name)
self.pc.location.tell_room('%s sits down on %s.' % (self.pc.fancy_name(), furn.name),
[self.pc.name], self.pc)
command_list.register(Sit, ['sit'])
command_help.register(Sit.help, ['sit'])
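# pc.position reads as a (posture, furniture) pair throughout Sit, Sleep
# and Stand: position[0] is the posture string ('standing', 'sitting',
# 'sleeping') and position[1] is the furniture item occupied (or a falsy
# value when on the floor), which is why Sleep passes self.pc.position[1]
# through change_position to keep the sleeper on their chair.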
class Stand(BaseCommand):
"""Change the player's position to standing."""
help = (
"""<title>Stand (Command)</title>
The Stand command changes your position from sitting or sleeping to standing.
"""
)
def execute(self):
if self.pc.position[0].find(self.alias) != -1:
self.pc.update_output('You are already standing.')
return
if self.pc.position[0] == 'sleeping':
self.pc.update_output('You wake and stand up.')
if self.pc.location:
self.pc.location.tell_room('%s wakes and stands up.' % self.pc.fancy_name(), [self.pc.name], self.pc)
else:
self.pc.update_output('You stand up.')
if self.pc.location:
self.pc.location.tell_room('%s stands up.' % self.pc.fancy_name(), [self.pc.name], self.pc)
self.pc.change_position('standing')
command_list.register(Stand, ['stand'])
command_help.register(Stand.help, ['stand'])
class Sleep(BaseCommand):
"""Change the player's position to sleeping."""
help = (
"""<title>Sleep (Command)</title>
The Sleep command changes your character's position to sleeping. NOTE:
sleeping characters are oblivious to things happening around them (in the same
room) and can be more vulnerable to attack. To wake from sleeping, use the
Wake, Sit, or Stand commands.
\nUSAGE:
To sleep on the floor:
sleep
To sleep on a piece of furniture:
sleep [on/in] <furniture-name>
"""
)
def execute(self):
if self.pc.position[0].find(self.alias) != -1:
self.pc.update_output('You are already sleeping.')
return
if not self.args:
self.pc.update_output('You go to sleep.')
if self.pc.location:
self.pc.location.tell_room('%s goes to sleep.' % self.pc.fancy_name(), [self.pc.name], self.pc)
# If they were previously sitting on furniture before they went to
# sleep, we might as well maintain their position on that
# furniture when they go to sleep
self.pc.change_position('sleeping', self.pc.position[1])
else:
if not self.pc.location:
self.pc.update_output('The void is bereft of anything to sleep on.')
return
exp = r'((on)|(in))?([ ]?)?(?P<furn>(\w|[ ])+)'
furn_kw = re.match(exp, self.args.lower().strip()).group('furn')
furn = self.pc.location.get_item_by_kw(furn_kw)
if not furn:
self.pc.update_output('You don\'t see that here.')
return
f_obj = furn.item_types.get('furniture')
if not f_obj:
self.pc.update_output('That\'s not a type of furniture.')
return
if not f_obj.player_add(self.pc):
self.pc.update_output('It\'s full right now.')
return
else:
self.pc.change_position('sleeping', furn)
self.pc.update_output('You go to sleep on %s.' % furn.name)
self.pc.location.tell_room('%s goes to sleep on %s.' % (self.pc.fancy_name(), furn.name),
[self.pc.name], self.pc)
command_list.register(Sleep, ['sleep'])
command_help.register(Sleep.help, ['sleep'])
class Wake(BaseCommand):
"""Change a player's status from sleeping to awake (and standing)."""
help = (
"""<title>Wake (Command)</title>
Wake can be used to wake yourself or another character from sleeping.
\nUSAGE:
To wake yourself:
wake
To wake someone else:
wake <player-name>
"""
)
def execute(self):
if not self.args:
# Wake up yourself
if self.pc.position[0] != 'sleeping':
self.pc.update_output('You are already awake.')
return
self.pc.update_output('You wake and stand up.')
if self.pc.location:
self.pc.location.tell_room('%s wakes and stands up.' % self.pc.fancy_name(), [self.pc.name], self.pc)
self.pc.change_position('standing')
else:
# Wake up someone else!
if not self.pc.location:
self.pc.update_output('You are alone in the void.')
return
sleeper = self.pc.location.get_player(self.args.lower().strip())
if not sleeper:
self.pc.update_output('That person isn\'t here.')
return
if sleeper.position[0] != 'sleeping':
self.pc.update_output('%s isn\'t asleep.' % sleeper.fancy_name())
return
sleeper.change_position('standing')
self.pc.update_output('You wake up %s.' % sleeper.fancy_name())
sleeper.update_output('%s wakes you up.' % self.pc.fancy_name())
troom = '%s wakes up %s.' % (self.pc.fancy_name(),
sleeper.fancy_name())
self.pc.location.tell_room(troom, [self.pc.name, sleeper.name],
self.pc)
command_list.register(Wake, ['wake', 'awake'])
command_help.register(Wake.help, ['wake'])
class Award(BaseCommand):
"""Award an item to a player."""
required_permissions = DM | ADMIN
help = (
"""<title>Award (Command)</title>
\nThe Award command allows a DM or an Admin to award an item to a player. Note
that you must have the item you wish to award in your inventory for the Award
command to work, and you must also be in the same room as the person you wish
to award the item to.
\nRequired Permissions: ADMIN, DM
\nUSAGE:
To award an item to a player:
award <item-keyword> to <player-name> ["<actor-message>":"<room-message>"]
\nThe actor-message is the message you want the player to see upon receipt of
your item. The room-message is the message you want everyone else in the same
room to hear upon the player's receipt of the item. Neither message is
required, and if they are not given then the item will be awarded to the
player silently.
\nEXAMPLES:
Say for example that you would like to award the player Jameson a medal to
celebrate the quest he just completed. First you would make sure the medal was
in your inventory (easily done by using the Load command). Then you would type
the following (which would normally be on one line -- in this case it is
linewrapped for readability):
award medal to jameson "You receive a medal for your fine work.":
"#actor receives a medal for his fine work."
\nJameson would have the medal added to his inventory and receive the message
"You receive a medal for your fine work." Anyone else in the room would see
the message "Jameson receives a medal for his fine work."
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Award what to whom?')
return
exp = r'(?P<item>(\w+|[ ])+)([ ]+to)([ ]+(?P<player>\w+))([ ]+(?P<actor>\"(.*?)\")(:(?P<room>\"(.*?)\"))?)?'
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Type "help award" for help with this command.')
return
item_kw, player_kw, actor, room = match.group('item', 'player', 'actor', 'room')
player = self.pc.location.get_player(player_kw.lower())
if not player:
self.pc.update_output('Whom do you want to award %s to?' % item_kw)
return
item = self.pc.check_inv_for_keyword(item_kw)
if not item:
self.pc.update_output('You don\'t have any %s.' % item_kw)
return
self.pc.item_remove(item)
player.item_add(item)
self.pc.update_output('%s has been awarded %s.' % (player.fancy_name(),
item.name))
if actor:
message = self.personalize(actor.strip('\"'), self.pc)
player.update_output(message)
if room:
message = self.personalize(room.strip('\"'), self.pc, player)
self.pc.location.tell_room(message, [player.name], self.pc)
command_list.register(Award, ['award'])
command_help.register(Award.help, ['award'])
class Consume(BaseCommand):
"""Consume a food or drink item."""
help = (
"""<title>Eat, Drink, Use (Command)</title>
\nThe commands Eat, Drink, and Use can all be used interchangeably to consume
an edible food or drink item. Be careful what you eat or drink though;
consuming some items may cause undesirable effects, such as poisoning or
drunkenness.
\nUSAGE:
To consume an edible item:
eat <item-keyword>
"""
)
def execute(self):
if not self.args:
self.pc.update_output("%s what?" % self.alias.capitalize())
return
food = self.pc.check_inv_for_keyword(self.args.lower().strip())
if not food:
self.pc.update_output('You don\'t have any %s.' % self.args)
return
food_obj = food.item_types.get('food')
if not food_obj:
# Gods have a more robust digestive system -- they can afford to
# eat objects that aren't edible to mere mortals
if self.pc.permissions & GOD:
self.pc.item_remove(food)
food.destruct()
self.pc.update_output('You consume %s.' % food.name)
if self.pc.location:
self.pc.location.tell_room('%s consumes %s.' %\
(self.pc.fancy_name(),
food.name),
[self.pc.name], self.pc)
return
else:
self.pc.update_output('That\'s not edible!')
return
if 'drunk' in self.pc.effects:
self.pc.update_output('You\'re too drunk to manage it.')
return
# Remove the food object
self.world.log.debug(food_obj)
self.pc.item_remove(food)
food.destruct()
# Replace it with another object, if applicable
if food_obj.replace_obj:
self.pc.item_add(food_obj.replace_obj.load())
# Tell the player and the room an "eat" message
u_tell = self.personalize(food_obj.get_actor_message(), self.pc)
self.pc.update_output(u_tell)
if self.pc.location:
r_tell = self.personalize(food_obj.get_room_message(), self.pc)
self.pc.location.tell_room(r_tell, [self.pc.name], self.pc)
# Add this food's effects to the player
self.world.log.debug('Adding effects.')
self.pc.effects_add(food_obj.load_effects())
command_list.register(Consume, ['eat', 'drink', 'use'])
command_help.register(Consume.help, ['eat', 'drink', 'use'])
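# food_obj.replace_obj lets a consumable leave something behind: eating a
# (hypothetical) bowl of soup could load an empty-bowl prototype back into
# the inventory -- whatever replacement the builder configured.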
class Attack(BaseCommand):
help = (
"""<title>Attack, Kill (Command)</title>
The Attack command causes you to attack another character.
If you are already in a battle, this will change your target to
the character you specify. If you are not yet in a battle, this
will start a fight between you and the character you specify. You
must be in the same room as your target to fight them, and can
only attack other players if they have PVP enabled.
\n<b>USAGE:</b>
To start a fight with a character (or if you are fighting multiple
monsters/characters and want to focus your attacks on a different
character)
attack <character_name>
"""
)
def execute(self):
# find the target (there's nobody to attack in the void):
if not self.args or not self.pc.location:
self.pc.update_output("Attack whom?")
return
target = self.pc.location.get_player(self.args)
if not target:
target = self.pc.location.get_npc_by_kw(self.args)
if not target:
self.pc.update_output("Attack whom?")
return
# set the players default target.
self.pc.battle_target = target
if not self.pc.battle:
self.world.log.debug("Beginning battle between %s and %s" %(self.pc.fancy_name(), target.fancy_name()))
# Start the battle if it doesn't exist yet.
self.pc.enter_battle()
b = Battle()
b.teamA.append(self.pc)
self.pc.battle = b
b.teamB.append(target)
target.battle = b
target.enter_battle()
self.world.battle_add(b)
target.battle_target = self.pc
self.pc.free_attack()
self.pc.update_output("")
#command_list.register(Attack, ['attack', 'kill'])
#command_help.register(Attack.help, ['attack', 'kill'])
class Run(BaseCommand):
help = (
"""<title>Run, Flee, Escape (Command)</title>
Use Run like the Go command to escape from a battle.
\n<b>USAGE:</b>
If you are in a hurry, you can run away from a battle in a random
direction using
<b>run</b>
If you want to escape in a specific direction, try
<b>run <direction></b>
\nFor more info on directions, see <b>help go</b>.
"""
)
def execute(self):
if self.pc.battle:
dir_map = {'n': 'north', 's': 'south', 'e': 'east', 'w': 'west',
'u': 'up', 'd': 'down'}
direction = dir_map.get(self.args.lower()[0]) if self.args else None
room = self.pc.location.exits.get(direction) if direction else None
if not room:
# No usable direction was given -- just grab the first open exit
open_exits = [e for e in self.pc.location.exits.values() if e]
if not open_exits:
self.pc.update_output('There\'s nowhere to run!')
return
room = open_exits[0]
action = Action_list['run'](self.pc, (room.to_room.area.name, room.to_room.id), self.pc.battle)
self.pc.next_action = action
#battle_commands.register(Run, ['run', 'flee', 'escape', 'go'])
#command_help.register(Run.help, ['run', 'flee', 'escape'])
class Me(BaseCommand):
"""Get a score card of your player details."""
help = (
"""<title>Me (Command)</title>
The Me command returns a score-card of your player details.
\nNOTE:
You can set your password via the "password" command. See "help password" for
details.
\nSome of your details are editable using the set command:
<b>title - (set title <title-text>)</b> A title (or status) that's displayed
next to your name in a Who listing.
<b>email - (set email <e-mail-address>)</b> Your email address. It's never displayed
publicly, and its only real purpose will be to help you to reset your password
if you forget it.
<b>description - (set description, starts TextEditMode)</b> The description that's
seen when people Look at you.
\nIf you have authority to use the Goto command, you can also set the following:
<b>goto appear message - (set goto_appear <appear-message>)</b> The message heard in
a room when you use the Goto command to enter it
<b>goto disappear message - (set goto_disappear <disappear-message>)</b> The message heard
in a room when you use the Goto command to leave it
"""
)
def execute(self):
# We ought to be using the player's terminal width, but for now I'm just
# doing a boring static screen width
width = 72
empty_line = '|' + (' ' * (width - 2)) + '|\n'
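# The score card is a fixed-width ASCII box: center(width - 2, '-') draws
# the '---- Section ----' rails and ljust(width - 1) pads each row so the
# closing '|' lands in the same column on every line.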
effects = ', '.join([str(e) for e in self.pc.effects.values()])
if not effects:
effects = 'You feel normal.'
me = '|' + (' %s ' % self.pc.fancy_name()).center(width -2 , '-') + '|\n'
me += ('| title: ' + self.pc.title).ljust(width - 1) + '|\n'
me += ('| money: %s %s' % (self.pc.currency, CURRENCY)).ljust(width - 1) + '|\n'
me += ('| position: ' + self.pc.position[0]).ljust(width - 1) + '|\n'
me += ('| effects: ' + effects).ljust(width - 1) + '|\n'
if self.pc.permissions > PLAYER:
me += '|' + (' Permission Details ').center(width - 2, '-') + '|\n'
me += ('| permissions: ' +\
', '.join(get_permission_names(self.pc.permissions))).ljust(width - 1) + '|\n'
me += ('| goto "appear": ' + self.pc.goto_appear).ljust(width - 1) + '|\n'
me += ('| goto "disappear": ' + self.pc.goto_disappear).ljust(width - 1) + '|\n'
me += empty_line
me += '|' + (' Private Details ').center(width - 2, '-') + '|\n'
me += ('| email: ' + str(self.pc.email)).ljust(width - 1) + '|\n'
me += '|' + (' Player Stats ').center(width - 2, '-') + '|\n'
me += ('| hit: ' + str(self.pc.hit.calculate())).ljust(width-1) + '|\n'
me += ('| evade: ' + str(self.pc.evade.calculate())).ljust(width-1) + '|\n'
if self.pc.absorb:
me += '| absorb: '.ljust(width-1) + '|\n'
for key, val in self.pc.absorb.calculate().items():
me += ('| %s: %s' % (key, val)).ljust(width-1) + '|\n'
else:
me += '| absorb: None'.ljust(width-1) + '|\n'
if self.pc.damage:
me += '| damage: '.ljust(width-1) + '|\n'
for t, mn, mx in self.pc.damage.display():
me += ('| %s: %s-%s' % (t, mn, mx)).ljust(width-1) + '|\n'
else:
me += '| damage: None'.ljust(width-1) + '|\n'
me += '|' + ('-' * (width - 2)) + '|'
self.pc.update_output(me)
command_list.register(Me, ['me', 'status', 'stats'])
command_help.register(Me.help, ['me', 'status'])
class Tell(BaseCommand):
"""Tell another character a private message."""
help = (
"""<title>Tell (Command)</title>
The Tell command sends a private message to a specific character (can be a pc or
npc). However, you can only Tell an npc something if that npc is in the same
room as you are. You can Tell a player character something regardless of where
they are in the world, provided they are online.
\nUSAGE:
tell <person-name> <message>
\nEXAMPLE:
tell brian Your princess is in another castle!
"""
)
def execute(self):
#tell target_name message
syntax_error = 'Try "tell <person> <message>", or see "help tell" for help.'
if not self.args:
self.pc.update_output(syntax_error)
return
exp = r'(?P<target>.*?)[ ](?P<message>.*)'
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output(syntax_error)
return
target_name, message = match.group('target', 'message')
# first, check to see if there is a character in the same room by that keyword
# if we were only to check the world's list of people, we wouldn't be able to tell npcs
# anything! However, let's be prudent and only try to tell npcs in the same room...
if self.pc.location:
r = self.pc.location
# prioritize players over npcs, if there are two characters in the same room with the
# same name. Players only have one keyword in their name, while npcs may have many
target = r.get_player(target_name) or r.get_npc_by_kw(target_name)
if target:
self.tell_msg(target, message)
return
# There was nobody in that room who matched the name (or the player is in the void)
pc = self.world.get_player(target_name)
if not pc:
self.pc.update_output('"%s" doesn\'t exist. You\'ll have to tell someone else.' % target_name)
else:
self.tell_msg(pc, message)
def tell_msg(self, target_char, message):
"""Tell a target_character a message.
target_char - the character object to be "telled"
message - the message to be passed along.
"""
if target_char.is_npc():
# TODO: dispatch a 'tell' notification to this npc (the 'tell'
# npc event hasn't been written as of this comment)
pass
self.pc.update_output('You tell %s, "%s"' % (target_char.fancy_name(), message))
target_char.update_output('%s tells you, "%s"' % (self.pc.fancy_name(), message))
command_list.register(Tell, ['tell', 'ask'])
command_help.register(Tell.help, ['tell', 'ask'])
class ChangePassword(BaseCommand):
"""Allow the player to change their password in-game."""
help = (
"""Password (Command)
The Password command lets you change your password.
\nUSAGE:
passwd
"""
)
def execute(self):
if self.pc.mode:
self.pc.last_mode = self.pc.mode
self.pc.set_mode('passwd')
command_list.register(ChangePassword, ['password', 'passwd'])
command_help.register(ChangePassword.help, ['change password', 'passwd', 'password', 'set password'])
class Commands(BaseCommand):
"""Spit out a list of basic commands."""
help = (
"""<title>Commands (Basic Commands)</title>
Commands are how players interact with the game; in fact, you've just used the
Help command in order to call up this help page (fancy!). A command is a word
(usually a verb) that represents an action you want to do, and is sometimes
followed by one or more words that clarify how that action is being done, or who
that action is being done to.
\n<b>USAGE:</b>
The help pages for commands all come with a USAGE section that explain the
syntax of a command. For example, the USAGE section for the Tell command looks
like this:
<b>tell <person-name> <message></b>
The angle brackets (<>) mean that the word inside them is a place-holder. In the
Tell example above, I would want to replace <person-name> with the name of the
person I want to send a message to, and <message> with the message I want to
send. Some commands have optional words, like the USAGE for the Look command:
<b>look [[at] <person-name>]</b>
Anything enclosed in square brackets ([]) is optional. In the example above,
both the words "at" and "<person-name>" are optional. I could type any of the
following and the Look command would work:
look (this would make me look at the room by default)
look brian (this would make me look at the character Brian)
look at brian (this would also make me look at the character Brian)
"""
)
def execute(self):
# TODO: This should be cleaned up so it's not just a horrible long list.
# also, we should probably throw emotes out of this list since they don't
# add real actions
l = 'Type "help <command-name>" for help on that particular command.\n' +\
'Type "help commands" for help on commands in general.\n' +\
('-' * 50) + '\nBasic Commands (some commands may be aliases of another):\n'
coms = []
for alias, cmd in command_list.commands.items():
if cmd.required_permissions & PLAYER:
coms.append(alias)
l += '\n'.join(coms)
self.pc.update_output(l)
command_list.register(Commands, ['command', 'commands'])
command_help.register(Commands.help, ['command', 'commands'])
class Buy(BaseCommand):
help = (
"""<title>Buy (Command)</title>
The Buy command allows you to purchase items from merchants (provided you have
enough money to do so). Money in %s comes in the form of %s,
and you can check how much you have by using the Inventory or Me commands.
\nUSAGE:
To get a list of items a merchant is selling:
buy [list]
To buy an item from a merchant:
buy <item-keyword> [from <merchant-name>]
""" % (GAME_NAME, CURRENCY)
)
def execute(self):
if not self.pc.location:
self.pc.update_output('There aren\'t any merchants in the void.')
return
# No arguments (or the "list" keyword): display the merchant's stock
if (not self.args) or (self.args == 'list'):
try:
merchant = self.verify_merchant(None)
except SaleFail as e:
self.pc.update_output(str(e))
else:
self.pc.update_output(merchant.ai_packs['merchant'].player_sale_list())
return
# transaction handling of buy
exp = r'(?P<item>.+?)([ ]+from[ ]+(?P<merchant>\w+))?$'
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Try "help buy" for help on buying items.')
return
item_name, merchant_name = match.group('item', 'merchant')
try:
merchant = self.verify_merchant(merchant_name)
item, price = self.verify_item(item_name, merchant)
except SaleFail as e:
self.pc.update_output(str(e))
else:
self.pc.currency -= price
self.pc.save()
self.pc.item_add(item)
self.pc.update_output('You buy %s from %s for %s %s.' % (item.name,
merchant.name, str(price), CURRENCY))
self.pc.location.tell_room('%s buys %s from %s.' % (self.pc.fancy_name(),
item.name, merchant.name),
[self.pc.name])
def verify_item(self, item_name, merchant):
item_n_price = merchant.ai_packs['merchant'].get_item(item_name)
if not item_n_price:
raise SaleFail('%s isn\'t selling any %s.' % (merchant.name, item_name))
if self.pc.currency < item_n_price[1]:
self.world.log.debug('pc: %s,%s item: %s,%s' % (str(self.pc.currency),
str(type(self.pc.currency)),
str(item_n_price[1]),
str(type(item_n_price[1]))))
raise SaleFail('%s costs %s %s. You don\'t have enough %s.' %\
(item_n_price[0].name.capitalize(),
str(item_n_price[1]), CURRENCY, CURRENCY))
# Make sure we create a new game item from the merchant's prototype
item = item_n_price[0].load()
return [item, item_n_price[1]]
def verify_merchant(self, merchant_name):
"""Return a merchant if there is a valid one, """
# If the player specified a merchant, try to grab that one
if merchant_name:
merchant = self.pc.location.get_npc_by_kw(merchant_name)
if not merchant:
raise SaleFail('%s isn\'t here.' % merchant_name)
elif not merchant.has_ai('merchant'):
raise SaleFail('%s isn\'t a merchant.' % (merchant.name))
else:
return merchant
# if they didn't list a merchant, grab the first npc merchant you
# can find in the room
else:
merchant = None
for npc in self.pc.location.npcs:
if npc.has_ai('merchant'):
merchant = npc
break
if not merchant:
raise SaleFail('There aren\'t any merchants here to buy from.')
else:
return merchant
command_list.register(Buy, ['buy'])
command_help.register(Buy.help, ['buy'])
class Sell(BaseCommand):
help = (
"""<title>Sell (Command)</title>
The Sell command allows a player to sell an item to a merchant. Keep in mind
that not all merchants buy all types of items!
\nUSAGE:
To sell an item:
sell <item-keyword> [to <merchant-name>]
If you would like to see how much your item is worth to a merchant
before you sell it:
show <item-keyword> [to <merchant-name>]
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Sell what? Try "help sell" for help on sale transactions.')
return
if not self.pc.location:
self.pc.update_output('There aren\'t any merchants in the void.')
return
#<item> [to <merchant>]
exp = r'(?P<item>.+?)([ ]+to[ ]+(?P<merchant>\w+))?$'
match = re.match(exp, self.args, re.I)
if not match:
self.pc.update_output('Try "help sell" for help on sale transactions.')
return
item_name, merchant_name = match.group('item', 'merchant')
try:
item = self.verify_item(item_name)
merchant = self.verify_merchant(merchant_name)
except SaleFail as e:
self.pc.update_output(str(e))
return
else:
if not merchant.ai_packs['merchant'].will_buy(item):
merchant.perform('tell %s %s' % (self.pc.name,
merchant.ai_packs['merchant'].tell_buy_types()))
return
# calculate sale price
sale_price = int((item.base_value * merchant.ai_packs['merchant'].markup) + 0.5) or 1
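# The + 0.5 before int() rounds to the nearest whole coin instead of
# truncating, and the trailing "or 1" enforces a minimum price of 1:
# base_value 10 at markup 0.25 -> int(3.0) == 3, while base_value 1 at
# markup 0.1 -> int(0.6) == 0, which the "or 1" bumps up to 1.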
if self.alias in ['show', 'appraise']:
merchant.perform('tell %s I\'ll give you %s %s for %s.' % (self.pc.fancy_name(),
str(sale_price), CURRENCY, item.name))
return
self.pc.item_remove(item)
# Just destroy the item for now -- maybe later we'll have merchants
# resell player-sold items
item.destruct()
self.pc.currency += sale_price
self.pc.save()
self.pc.update_output('You sell %s for %s %s to %s.' % (item.name,
str(sale_price),
CURRENCY,
merchant.name))
self.pc.location.tell_room('%s sells %s to %s.' % (self.pc.fancy_name(),
item.name,
merchant.name),
[self.pc.name])
def verify_item(self, item_name):
item = self.pc.check_inv_for_keyword(item_name)
if not item:
raise SaleFail('You don\'t have "%s".' % item_name)
return item
def verify_merchant(self, merchant_name):
"""Return a merchant if there is a valid one, """
# If the player specified a merchant, try to grab that one
if merchant_name:
merchant = self.pc.location.get_npc_by_kw(merchant_name)
if not merchant:
raise SaleFail('%s isn\'t here.' % merchant_name)
elif not merchant.has_ai('merchant'):
raise SaleFail('%s isn\'t a merchant.' % (merchant.name))
else:
return merchant
# if they didn't list a merchant, grab the first npc merchant you
# can find in the room
else:
merchant = None
for npc in self.pc.location.npcs:
if npc.has_ai('merchant'):
merchant = npc
break
if not merchant:
raise SaleFail('There aren\'t any merchants here to sell to.')
else:
return merchant
command_list.register(Sell, ['sell', 'show', 'appraise'])
command_help.register(Sell.help, ['sell', 'show', 'appraise'])
class Log(BaseCommand):
required_permissions = BUILDER | DM | ADMIN
help = (
"""<title>Log (BuildCommand)</title>
Npcs receive the same feedback for performing actions (commands) as a player
character, but since they can't read, their feedback gets accumulated in an
action log rather than being output to a screen. The Log command allows you to
read the action log (and memory) of an npc to help you debug scripting errors.
\nUSAGE:
log <npc-name>
You must be in the same room as an npc to log it.
"""
)
def execute(self):
if not self.args:
self.pc.update_output('Type "help log" for help with this command.')
return
if not self.pc.location:
self.pc.update_output('There aren\'t any npcs in the void to log.')
return
npc = self.pc.location.get_npc_by_kw(self.args.lower().strip())
if not npc:
self.pc.update_output('That npc doesn\'t exist.')
return
string = (' Log for %s ' % npc.name).center(50, '-') + '\n'
perms = ', '.join(get_permission_names(npc.permissions))
if npc.remember:
string += 'REMEMBERS: '
string += ', '.join(npc.remember) + '\n'
else:
string += 'REMEMBERS: This npc doesn\'t remember any players.\n'
string += 'PERMISSIONS: ' + perms + '\n'
string += 'ACTION LOG:\n'
if npc.actionq:
string += '\n'.join([a.strip('\n').strip('\r') for a in npc.actionq])
else:
string += 'Action log is empty.'
string += '\n' + ('-' * 50)
self.pc.update_output(string)
command_list.register(Log, ['log'])
command_help.register(Log.help, ['log'])
# **************** Command Specific Exceptions *******************
class SaleFail(Exception):
"""Throw this exception if there's an error in the player's sale
process.
"""
pass
# **************** Extra Command-related Help pages *******************
command_help.register(("<title>TextEditMode (Mode)</title>"
"""TextEditMode is a special mode for editing large amounts of text, such as
room or character descriptions. TextEditMode lets you enter text
(line-by-line), until you are finished, letting you replace, delete, and
insert lines as you go. Help for TextEditMode commands can be accessed by
typing @help while in TextEditMode.
"""
), ['TextEditMode', 'text edit mode', 'textedit', 'text edit'])
command_help.register(("<title>Newbie Help</title>"
"""Welcome! You've successfully invoked our help system on the "newbie" topic.
Anytime you need help with something, just type "help" and then the name of the
topic you want help with. Try it on the topics below to expand your knowledge of
how to play the game:
\nSee "<b>help commands</b>" for a tutorial on what to type and how to interact
with the game.
\nSee "<b>help go</b>" for a tutorial on how to move around and explore
the game.
\nSee "<b>help talking</b>" for a tutorial on how to communicate with other
players in the game.
\nSee "<b>help mud</b>" to get a better idea of what kind of game a MUD is.
\nHappy MUDding!
"""
), ['newbie', 'newb'])
command_help.register(("<title>Communication</title>"
"""There are lots of ways to get your message across, depending on who you want
to hear it.
<b>Chat</b>
The Chat command will broadcast your message to every player in the game (unless
they have their chat channel turned off). It's meant solely for discussion
between players, and topics may not be relevant to the game. For even more help
with the chat command, type "help chat".
Tutorial - try typing:
chat I'm testing out the chat command!
\n<b>Say</b>
The Say command will only broadcast your message to people in the same room as
you. Both players and npcs (non-player-characters) can hear a message you send with
the Say command. Type "help say" for help with this command.
Tutorial - try typing:
say I'm testing out the say command!
\n<b>Tell</b>
The tell command will send a private message to a specific person. It only works
if the player you're trying to Tell is online, or the npc you're trying to tell
is in the same room as you.
Try telling someone near you hello! (just replace <person> below with the name
of the person you want to tell):
tell <person> Hello!
"""
), ['talking', 'talk', 'communication'])
| shinymud/ShinyMUD | src/shinymud/commands/commands.py | Python | mit | 101,475 | ["Brian"] | d40875cefd1e41a2ed41d748384a8d2b51adaa06942f5b8a26962c90fa61c183 |
import sys,logging,json,signal,hashlib,time
import xml.dom.minidom, xml.sax.saxutils
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
import urllib
import splunk.entity as entity
import ssl
def signal_handler(signal, frame):
try:
httpd.server_close()
except NameError:
logging.error("Error closing the Meraki HTTP Server, httpd variable was not initialized" )
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
#set up logging
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
#with zero args, the StreamHandler logs to stderr
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
SCHEME = """<scheme>
<title>Cisco Meraki</title>
<description>Cisco Meraki</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>Name of this Meraki input</title>
<description>Name of this Meraki input</description>
</arg>
<arg name="activation_key">
<title>Activation Key</title>
<description>Visit http://www.baboonbones.com/#activation to obtain a non-expiring key</description>
<required_on_edit>true</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="http_port">
<title>HTTP(s) Web Server Port</title>
<description>Port on which to listen for incoming HTTP requests</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_bind_address">
<title>HTTP Web Server Bind Address</title>
<description>Host address to bind to for this HTTP server</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="api_version">
<title>CMX API Version</title>
<description>CMX API Version</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="ssl_enabled">
<title>Use HTTPs?</title>
<description>Whether or not to use HTTPs</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="ssl_certfile">
<title>SSL Certificate File path</title>
<description>Full path on your server to the SSL Certificate File</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="ssl_keyfile">
<title>SSL Key File path</title>
<description>Full path on your server to the SSL Key File</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def get_credentials(session_key):
myapp = 'meraki_ta'
try:
# list all credentials
entities = entity.getEntities(['admin', 'passwords'], namespace=myapp,
owner='nobody', sessionKey=session_key)
except Exception, e:
raise Exception("Could not get Meraki %s credentials from splunk. Error: %s"
% (myapp, str(e)))
# return first set of credentials
for i, c in entities.items():
return c['username'], c['clear_password']
raise Exception("No Meraki secret or validator have been found, have you setup the App yet ?")
def do_validate():
config = get_validation_config()
#TODO
#if error , print_validation_error & sys.exit(2)
def do_run():
global meraki_validator
global meraki_secret
global api_version
global httpd
config = get_input_config()
activation_key = config.get("activation_key").strip()
app_name = "Cisco Meraki Presence Modular Input"
if len(activation_key) > 32:
activation_hash = activation_key[:32]
activation_ts = activation_key[32:][::-1]
current_ts = time.time()
m = hashlib.md5()
m.update((app_name + activation_ts))
if not m.hexdigest().upper() == activation_hash.upper():
logging.error("FATAL Trial Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." % app_name)
sys.exit(2)
if ((current_ts - long(activation_ts)) > 604800):
logging.error("FATAL Trial Activation key for App '%s' has now expired. Please visit http://www.baboonbones.com/#activation to purchase a non expiring key." % app_name)
sys.exit(2)
else:
m = hashlib.md5()
m.update((app_name))
if not m.hexdigest().upper() == activation_key.upper():
logging.error("FATAL Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." % app_name)
sys.exit(2)
http_bind_address=config.get("http_bind_address",'')
api_version=config.get("api_version","1.0")
session_key = config.get("session_key")
meraki_validator, meraki_secret = get_credentials(session_key)
ssl_enabled = int(config.get("ssl_enabled",0))
ssl_certfile = config.get("ssl_certfile","")
ssl_keyfile = config.get("ssl_keyfile","")
http_port=config.get("http_port",443 if ssl_enabled else 80)
try :
server_address = (http_bind_address, int(http_port))
httpd = HTTPServer(server_address, MerakiHandler)
if (ssl_enabled) :
httpd.socket = ssl.wrap_socket(httpd.socket,keyfile=ssl_keyfile, certfile=ssl_certfile, server_side=True)
httpd.serve_forever()
except: # catch *all* exceptions
e = sys.exc_info()[1]
logging.error("Error running the Meraki HTTP Server: %s" % str(e))
finally:
try:
httpd.server_close()
except NameError:
logging.error("Error closing the Meraki HTTP Server, httpd variable was not initialized" )
class MerakiHandler(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
if self.path == '/events' :
try:
self._set_headers()
self.wfile.write(meraki_validator)
except: # catch *all* exceptions
e = sys.exc_info()[1]
logging.error("Exception handling GET request for /events")
else :
logging.error("GET path %s is not recognised" % self.path )
def do_POST(self):
if self.path == '/events' :
try:
content_len = int(self.headers.getheader('content-length'))
post_body = self.rfile.read(content_len)
post_body_decoded = urllib.unquote(post_body).decode("utf8")
if api_version == '2.0':
content = json.loads(post_body_decoded)
request_secret = content["secret"]
if request_secret == meraki_secret :
for observation in content["data"]["observations"]:
observation["apMac"] = content["data"]["apMac"]
print_xml_stream(json.dumps(observation))
sys.stdout.flush()
else :
logging.error("Request Secret %s does not match" % request_secret )
elif api_version == '1.0':
post_params = dict((k.strip(), v.strip()) for k,v in (item.split('=') for item in post_body_decoded.split('&')))
content = json.loads(post_params["data"])
request_secret = content["secret"]
if request_secret == meraki_secret :
for probing_event in content["probing"]:
print_xml_stream(json.dumps(probing_event))
sys.stdout.flush()
else :
logging.error("Request Secret %s does not match" % request_secret )
except: # catch *all* exceptions
e = sys.exc_info()[1]
logging.error("Exception handling POST request for /events.Body Content : %s" % post_body)
else :
logging.error("POST path %s is not recognised" % self.path )
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
print "<error><message>%s</message></error>" % xml.sax.saxutils.escape(s)
# prints XML stream
def print_xml_stream(s):
print "<stream><event unbroken=\"1\"><data>%s</data><done/></event></stream>" % encodeXMLText(s)
def encodeXMLText(text):
text = text.replace("&", "&")
text = text.replace("\"", """)
text = text.replace("'", "'")
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("\n", "")
return text
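# Quick sanity check of the escaping above (illustrative): the ampersand is
# replaced first so the entities produced by the later substitutions are not
# themselves re-escaped.
# >>> encodeXMLText('<tag attr="a & b">')
# '&lt;tag attr=&quot;a &amp; b&quot;&gt;'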
def usage():
print "usage: %s [--scheme|--validate-arguments]"
logging.error("Incorrect Program Usage")
sys.exit(2)
def do_scheme():
print SCHEME
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
config = {}
try:
# read everything from stdin
config_str = sys.stdin.read()
# parse the config XML
doc = xml.dom.minidom.parseString(config_str)
root = doc.documentElement
session_key_node = root.getElementsByTagName("session_key")[0]
if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
data = session_key_node.firstChild.data
config["session_key"] = data
conf_node = root.getElementsByTagName("configuration")[0]
if conf_node:
logging.debug("XML: found configuration")
stanza = conf_node.getElementsByTagName("stanza")[0]
if stanza:
stanza_name = stanza.getAttribute("name")
if stanza_name:
logging.debug("XML: found stanza " + stanza_name)
config["name"] = stanza_name
params = stanza.getElementsByTagName("param")
for param in params:
param_name = param.getAttribute("name")
logging.debug("XML: found param '%s'" % param_name)
if param_name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
data = param.firstChild.data
config[param_name] = data
logging.debug("XML: '%s' -> '%s'" % (param_name, data))
checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
if checkpnt_node and checkpnt_node.firstChild and \
checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
config["checkpoint_dir"] = checkpnt_node.firstChild.data
if not config:
raise Exception, "Invalid configuration received from Splunk."
except Exception, e:
raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
return config
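# For reference, a minimal sketch of the XML splunkd streams on stdin and
# that get_input_config() parses above (element names match the code; the
# stanza name and param values are hypothetical):
#
# <input>
#   <session_key>abc123</session_key>
#   <configuration>
#     <stanza name="meraki://my_input">
#       <param name="http_port">8008</param>
#       <param name="api_version">2.0</param>
#     </stanza>
#   </configuration>
#   <checkpoint_dir>/opt/splunk/var/lib/splunk/modinputs</checkpoint_dir>
# </input>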
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
val_data = {}
# read everything from stdin
val_str = sys.stdin.read()
# parse the validation XML
doc = xml.dom.minidom.parseString(val_str)
root = doc.documentElement
logging.debug("XML: found items")
item_node = root.getElementsByTagName("item")[0]
if item_node:
logging.debug("XML: found item")
name = item_node.getAttribute("name")
val_data["stanza"] = name
params_node = item_node.getElementsByTagName("param")
for param in params_node:
name = param.getAttribute("name")
logging.debug("Found param %s" % name)
if name and param.firstChild and \
param.firstChild.nodeType == param.firstChild.TEXT_NODE:
val_data[name] = param.firstChild.data
return val_data
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == "--scheme":
do_scheme()
elif sys.argv[1] == "--validate-arguments":
do_validate()
else:
usage()
else:
do_run()
sys.exit(0)
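# Invocation sketch: splunkd normally runs this script with no arguments to
# start streaming; the flags handled above can also be exercised directly:
#   python meraki.py --scheme              # print the introspection scheme XML
#   python meraki.py --validate-arguments  # validate a config read from stdin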
|
damiendallimore/SplunkModularInputsPythonFramework
|
implementations/meraki/bin/meraki.py
|
Python
|
apache-2.0
| 13,395
|
[
"VisIt"
] |
1b69329e04095f7153d24d37abf8c418c88fac9a3133c2ad2e49207788dacfe7
|
import code
from code import softspace
import os
import sys
import wx
import new
def sanitise_text(text):
"""When we process text before saving or executing, we sanitise it
by changing all CR/LF pairs into LF, and then nuking all remaining CRs.
This consistency also ensures that the files we save have the correct
line-endings depending on the operating system we are running on.
    It also turns out that things break when, after an indented block
    at the very end of the code, there is no empty line. For
example (thanks to Emiel v. IJsseldijk for reproducing!):
def hello():
print "hello" # and this is the last line of the text
Will not completely define method hello.
To remedy this, we add an empty line at the very end if there's
not one already.
"""
text = text.replace('\r\n', '\n')
text = text.replace('\r', '')
lines = text.split('\n')
if lines and len(lines[-1]) != 0:
return text + '\n'
else:
return text
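# Illustrative behaviour (doctest-style; the input string is hypothetical):
# >>> sanitise_text('def hello():\r\n    print "hello"')
# 'def hello():\n    print "hello"\n'
# CRLF pairs become LF, stray CRs are dropped, and a trailing newline is
# guaranteed so a trailing indented block still compiles.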
def runcode(self, code):
"""Execute a code object.
    Our extra-special version of the runcode method. We use this when we
want py_shell_mixin._run_source() to generate real exceptions, and
not just output to stdout, for example when CodeRunner is executed
as part of a network. This runcode() is explicitly called by our local
runsource() method.
"""
try:
exec code in self.locals
except SystemExit:
raise
except:
raise
#self.showtraceback()
else:
if softspace(sys.stdout, 0):
print
def runsource(self, source, filename="<input>", symbol="single",
runcode=runcode):
"""Compile and run some source in the interpreter.
    Our extra-special version of the runsource method. We use this when we
want py_shell_mixin._run_source() to generate real exceptions, and
not just output to stdout, for example when CodeRunner is executed
as part of a network. This method calls our special runcode() method
as well.
Arguments are as for compile_command(), but pass in interp instance as
first parameter!
"""
try:
        # this could raise OverflowError, SyntaxError, ValueError
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
raise
#return False
if code is None:
# Case 2
return True
# Case 3
runcode(self, code)
return False
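# A rough sketch of how the three cases above surface to a caller (interp is
# assumed to be a code.InteractiveInterpreter-like instance):
#     try:
#         more = runsource(interp, source, '<input>', 'exec')
#     except (OverflowError, SyntaxError, ValueError):
#         pass  # case 1: the source did not compile
#     # more == True  -> case 2: source incomplete, keep buffering lines
#     # more == False -> case 3: source compiled and ran via runcode()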
class PythonShellMixin:
def __init__(self, shell_window, module_manager):
# init close handlers
self.close_handlers = []
self.shell_window = shell_window
self.module_manager = module_manager
self._last_fileselector_dir = ''
def close(self, exception_printer):
for ch in self.close_handlers:
try:
ch()
except Exception, e:
exception_printer(
'Exception during PythonShellMixin close_handlers: %s' %
(str(e),))
del self.shell_window
def _open_python_file(self, parent_window):
filename = wx.FileSelector(
'Select file to open into current edit',
self._last_fileselector_dir, "", "py",
"Python files (*.py)|*.py|All files (*.*)|*.*",
wx.OPEN, parent_window)
if filename:
# save directory for future use
self._last_fileselector_dir = \
os.path.dirname(filename)
f = open(filename, 'r')
t = f.read()
t = sanitise_text(t)
f.close()
return filename, t
else:
return (None, None)
def _save_python_file(self, filename, text):
text = sanitise_text(text)
f = open(filename, 'w')
f.write(text)
f.close()
def _saveas_python_file(self, text, parent_window):
filename = wx.FileSelector(
'Select filename to save current edit to',
self._last_fileselector_dir, "", "py",
"Python files (*.py)|*.py|All files (*.*)|*.*",
wx.SAVE, parent_window)
if filename:
# save directory for future use
self._last_fileselector_dir = \
os.path.dirname(filename)
# if the user has NOT specified any fileextension, we
# add .py. (on Win this gets added by the
# FileSelector automatically, on Linux it doesn't)
if os.path.splitext(filename)[1] == '':
filename = '%s.py' % (filename,)
self._save_python_file(filename, text)
return filename
return None
def _run_source(self, text, raise_exceptions=False):
"""Compile and run the source given in text in the shell interpreter's
local namespace.
The idiot pyshell goes through the whole shell.push -> interp.push
-> interp.runsource -> InteractiveInterpreter.runsource hardcoding the
'mode' parameter (effectively) to 'single', thus breaking multi-line
definitions and whatnot.
Here we do some deep magic (ha ha) to externally override the interp
runsource. Python does completely rule.
We do even deeper magic when raise_exceptions is True: we then
raise real exceptions when errors occur instead of just outputting to
        stderr.
"""
text = sanitise_text(text)
interp = self.shell_window.interp
if raise_exceptions:
# run our local runsource, don't do any stdout/stderr redirection,
# this is happening as part of a network.
more = runsource(interp, text, '<input>', 'exec')
else:
# our 'traditional' version for normal in-shell introspection and
# execution. Exceptions are turned into nice stdout/stderr
# messages.
stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
sys.stdin, sys.stdout, sys.stderr = \
interp.stdin, interp.stdout, interp.stderr
# look: calling class method with interp instance as first
# parameter comes down to the same as interp calling runsource()
# as its parent method.
more = code.InteractiveInterpreter.runsource(
interp, text, '<input>', 'exec')
# make sure the user can type again
self.shell_window.prompt()
sys.stdin = stdin
sys.stdout = stdout
sys.stderr = stderr
return more
def output_text(self, text):
self.shell_window.write(text + '\n')
self.shell_window.prompt()
def support_vtk(self, interp):
if hasattr(self, 'vtk_renderwindows'):
return
import module_kits
if 'vtk_kit' not in module_kits.module_kit_list:
self.output_text('No VTK support.')
return
from module_kits import vtk_kit
vtk = vtk_kit.vtk
def get_render_info(instance_name):
instance = self.module_manager.get_instance(instance_name)
if instance is None:
return None
class RenderInfo:
pass
render_info = RenderInfo()
render_info.renderer = instance.get_3d_renderer()
render_info.render_window = instance.get_3d_render_window()
render_info.interactor = instance.\
get_3d_render_window_interactor()
return render_info
new_dict = {'vtk' : vtk,
'vtk_get_render_info' : get_render_info}
interp.locals.update(new_dict)
self.__dict__.update(new_dict)
self.output_text('VTK support loaded.')
def support_matplotlib(self, interp):
if hasattr(self, 'mpl_figure_handles'):
return
import module_kits
if 'matplotlib_kit' not in module_kits.module_kit_list:
self.output_text('No matplotlib support.')
return
from module_kits import matplotlib_kit
pylab = matplotlib_kit.pylab
# setup shutdown logic ########################################
self.mpl_figure_handles = []
def mpl_close_handler():
for fh in self.mpl_figure_handles:
pylab.close(fh)
self.close_handlers.append(mpl_close_handler)
# hook our mpl_new_figure method ##############################
# mpl_new_figure hook so that all created figures are registered
# and will be closed when the module is closed
def mpl_new_figure(*args, **kwargs):
handle = pylab.figure(*args, **kwargs)
self.mpl_figure_handles.append(handle)
return handle
def mpl_close_figure(handle):
"""Close matplotlib figure.
"""
pylab.close(handle)
if handle in self.mpl_figure_handles:
idx = self.mpl_figure_handles.index(handle)
del self.mpl_figure_handles[idx]
# replace our hook's documentation with the 'real' documentation
mpl_new_figure.__doc__ = pylab.figure.__doc__
# stuff the required symbols into the module's namespace ######
new_dict = {'matplotlib' : matplotlib_kit.matplotlib,
'pylab' : matplotlib_kit.pylab,
'mpl_new_figure' : mpl_new_figure,
'mpl_close_figure' : mpl_close_figure}
interp.locals.update(new_dict)
self.__dict__.update(new_dict)
self.output_text('matplotlib support loaded.')
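    # A minimal sketch of using the matplotlib hooks installed above (the
    # names are those injected into interp.locals; the plot is hypothetical):
    #     fig = mpl_new_figure()       # registered in self.mpl_figure_handles
    #     pylab.plot([0, 1], [0, 1])
    #     mpl_close_figure(fig)        # closed and dropped from the registry
    # Any figures still registered are closed by mpl_close_handler() when the
    # module shuts down.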
|
nagyistoce/devide
|
module_kits/wx_kit/python_shell_mixin.py
|
Python
|
bsd-3-clause
| 9,948
|
[
"VTK"
] |
2c240761562db8b6d1119fd5dd143193d41c2b3fe4122d324d1fa8e461614eb0
|
# Copyright (C) 2012 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Convenience functions to genenerate gravitational wave templates and
waveforms.
"""
import lal, lalsimulation, numpy, copy
from pycbc.types import TimeSeries, FrequencySeries, zeros, Array
from pycbc.types import real_same_precision_as, complex_same_precision_as
import pycbc.scheme as _scheme
import inspect
from pycbc.fft import fft
from pycbc import pnutils
from pycbc.waveform import utils as wfutils
from pycbc.waveform import parameters
from pycbc.filter import interpolate_complex_frequency, resample_to_delta_t
import pycbc
from spa_tmplt import spa_tmplt, spa_tmplt_norm, spa_tmplt_end, \
spa_tmplt_precondition, spa_amplitude_factor, \
spa_length_in_time
class NoWaveformError(Exception):
"""This should be raised if generating a waveform would just result in all
zeros being returned, e.g., if a requested `f_final` is <= `f_lower`.
"""
pass
# If this is set to True, waveform generation codes will try to regenerate
# waveforms with known failure conditions to try to avoid the failure. For
# example SEOBNRv3 waveforms would be regenerated with double the sample rate.
# If this is set to False waveform failures will always raise exceptions
fail_tolerant_waveform_generation = True
default_args = (parameters.fd_waveform_params.default_dict() + \
parameters.td_waveform_params).default_dict()
default_sgburst_args = {'eccentricity':0, 'polarization':0}
td_required_args = parameters.td_waveform_params.nodefaults.aslist
fd_required_args = parameters.fd_waveform_params.nodefaults.aslist
sgburst_required_args = ['q','frequency','hrss']
# td, fd, filter waveforms generated on the CPU
_lalsim_td_approximants = {}
_lalsim_fd_approximants = {}
_lalsim_enum = {}
_lalsim_sgburst_approximants = {}
def _check_lal_pars(p):
""" Create a laldict object from the dictionary of waveform parameters
Parameters
----------
p: dictionary
        The dictionary of lalsimulation parameters
Returns
-------
laldict: LalDict
The lal type dictionary to pass to the lalsimulation waveform functions.
"""
lal_pars = lal.CreateDict()
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
if p['phase_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNPhaseOrder(lal_pars,int(p['phase_order']))
if p['amplitude_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNAmplitudeOrder(lal_pars,int(p['amplitude_order']))
if p['spin_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNSpinOrder(lal_pars,int(p['spin_order']))
if p['tidal_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNTidalOrder(lal_pars, p['tidal_order'])
if p['eccentricity_order']!=-1:
lalsimulation.SimInspiralWaveformParamsInsertPNEccentricityOrder(lal_pars, p['eccentricity_order'])
if p['lambda1']:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda1(lal_pars, p['lambda1'])
if p['lambda2']:
lalsimulation.SimInspiralWaveformParamsInsertTidalLambda2(lal_pars, p['lambda2'])
if p['lambda_octu1'] != parameters.lambda_octu1.default:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda1(lal_pars, p['lambda_octu1'])
if p['lambda_octu2'] != parameters.lambda_octu2.default:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarLambda2(lal_pars, p['lambda_octu2'])
if p['quadfmode1'] != parameters.quadfmode1.default:
lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode1(lal_pars, p['quadfmode1'])
if p['quadfmode2'] != parameters.quadfmode2.default:
        lalsimulation.SimInspiralWaveformParamsInsertTidalQuadrupolarFMode2(lal_pars, p['quadfmode2'])
if p['octufmode1'] != parameters.octufmode1.default:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode1(lal_pars, p['octufmode1'])
if p['octufmode2'] != parameters.octufmode2.default:
lalsimulation.SimInspiralWaveformParamsInsertTidalOctupolarFMode2(lal_pars, p['octufmode2'])
if p['dquad_mon1']:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon1(lal_pars, p['dquad_mon1'])
if p['dquad_mon2']:
lalsimulation.SimInspiralWaveformParamsInsertdQuadMon2(lal_pars, p['dquad_mon2'])
if p['numrel_data']:
lalsimulation.SimInspiralWaveformParamsInsertNumRelData(lal_pars, str(p['numrel_data']))
if p['modes_choice']:
lalsimulation.SimInspiralWaveformParamsInsertModesChoice(lal_pars, p['modes_choice'])
if p['frame_axis']:
lalsimulation.SimInspiralWaveformParamsInsertFrameAxis(lal_pars, p['frame_axis'])
if p['side_bands']:
lalsimulation.SimInspiralWaveformParamsInsertSideband(lal_pars, p['side_bands'])
if p['mode_array']:
ma = lalsimulation.SimInspiralCreateModeArray()
for l,m in p['mode_array']:
lalsimulation.SimInspiralModeArrayActivateMode(ma, l, m)
lalsimulation.SimInspiralWaveformParamsInsertModeArray(lal_pars, ma)
return lal_pars
def _lalsim_td_waveform(**p):
    # module-level fail_tolerant_waveform_generation (see above) controls the
    # retry logic in the except clause below
lal_pars = _check_lal_pars(p)
#nonGRparams can be straightforwardly added if needed, however they have to
# be invoked one by one
try:
hp1, hc1 = lalsimulation.SimInspiralChooseTDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
float(p['delta_t']), float(p['f_lower']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
except RuntimeError:
if not fail_tolerant_waveform_generation:
raise
# For some cases failure modes can occur. Here we add waveform-specific
# instructions to try to work with waveforms that are known to fail.
if p['approximant'] == 'SEOBNRv3':
# In this case we'll try doubling the sample time and trying again
# Don't want to get stuck in a loop though!
if 'delta_t_orig' not in p:
p['delta_t_orig'] = p['delta_t']
p['delta_t'] = p['delta_t'] / 2.
if p['delta_t_orig'] / p['delta_t'] > 9:
raise
hp, hc = _lalsim_td_waveform(**p)
p['delta_t'] = p['delta_t_orig']
hp = resample_to_delta_t(hp, hp.delta_t*2)
hc = resample_to_delta_t(hc, hc.delta_t*2)
return hp, hc
raise
#lal.DestroyDict(lal_pars)
hp = TimeSeries(hp1.data.data[:], delta_t=hp1.deltaT, epoch=hp1.epoch)
hc = TimeSeries(hc1.data.data[:], delta_t=hc1.deltaT, epoch=hc1.epoch)
return hp, hc
def _spintaylor_aligned_prec_swapper(**p):
"""
    SpinTaylorF2 is a single-spin model and also struggles with anti-aligned
    spin waveforms. This construct chooses between the aligned two-spin
    TaylorF2 model and the precessing single-spin SpinTaylorF2 model. If
    aligned spins are given, use TaylorF2; if non-aligned spins are given,
    use SpinTaylorF2. In the case of non-aligned double-spin systems the code
    will fail at the waveform-generator level.
"""
orig_approximant = p['approximant']
if p['spin2x'] == 0 and p['spin2y'] == 0 and p['spin1x'] == 0 and \
p['spin1y'] == 0:
p['approximant'] = 'TaylorF2'
else:
p['approximant'] = 'SpinTaylorF2'
hp, hc = _lalsim_fd_waveform(**p)
p['approximant'] = orig_approximant
return hp, hc
def _lalsim_fd_waveform(**p):
lal_pars = _check_lal_pars(p)
hp1, hc1 = lalsimulation.SimInspiralChooseFDWaveform(
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']), float(p['coa_phase']),
float(p['long_asc_nodes']), float(p['eccentricity']), float(p['mean_per_ano']),
p['delta_f'], float(p['f_lower']), float(p['f_final']), float(p['f_ref']),
lal_pars,
_lalsim_enum[p['approximant']])
hp = FrequencySeries(hp1.data.data[:], delta_f=hp1.deltaF,
epoch=hp1.epoch)
hc = FrequencySeries(hc1.data.data[:], delta_f=hc1.deltaF,
epoch=hc1.epoch)
#lal.DestroyDict(lal_pars)
return hp, hc
def _lalsim_sgburst_waveform(**p):
hp, hc = lalsimulation.SimBurstSineGaussian(float(p['q']),
float(p['frequency']),
float(p['hrss']),
float(p['eccentricity']),
float(p['polarization']),
float(p['delta_t']))
hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch)
hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch)
return hp, hc
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedTDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_td_approximants[approx_name] = _lalsim_td_waveform
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_fd_approximants[approx_name] = _lalsim_fd_waveform
# sine-Gaussian burst
for approx_enum in xrange(0, lalsimulation.NumApproximants):
if lalsimulation.SimInspiralImplementedFDApproximants(approx_enum):
approx_name = lalsimulation.GetStringFromApproximant(approx_enum)
_lalsim_enum[approx_name] = approx_enum
_lalsim_sgburst_approximants[approx_name] = _lalsim_sgburst_waveform
cpu_sgburst = _lalsim_sgburst_approximants
cpu_td = dict(_lalsim_td_approximants.items())
cpu_fd = _lalsim_fd_approximants
# Waveforms written in CUDA
_cuda_td_approximants = {}
_cuda_fd_approximants = {}
if pycbc.HAVE_CUDA:
from pycbc.waveform.pycbc_phenomC_tmplt import imrphenomc_tmplt
from pycbc.waveform.SpinTaylorF2 import spintaylorf2 as cuda_spintaylorf2
_cuda_fd_approximants["IMRPhenomC"] = imrphenomc_tmplt
_cuda_fd_approximants["SpinTaylorF2"] = cuda_spintaylorf2
cuda_td = dict(_lalsim_td_approximants.items() + _cuda_td_approximants.items())
cuda_fd = dict(_lalsim_fd_approximants.items() + _cuda_fd_approximants.items())
# List the various available approximants ####################################
def print_td_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_td_approximants.keys():
print(" " + approx)
print("CUDA Approximants")
for approx in _cuda_td_approximants.keys():
print(" " + approx)
def print_fd_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_fd_approximants.keys():
print(" " + approx)
print("CUDA Approximants")
for approx in _cuda_fd_approximants.keys():
print(" " + approx)
def print_sgburst_approximants():
print("LalSimulation Approximants")
for approx in _lalsim_sgburst_approximants.keys():
print(" " + approx)
def td_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain approximants for
the given processing scheme.
"""
return td_wav[type(scheme)].keys()
def fd_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available fourier domain approximants for
the given processing scheme.
"""
return fd_wav[type(scheme)].keys()
def sgburst_approximants(scheme=_scheme.mgr.state):
"""Return a list containing the available time domain sgbursts for
the given processing scheme.
"""
return sgburst_wav[type(scheme)].keys()
def filter_approximants(scheme=_scheme.mgr.state):
"""Return a list of fourier domain approximants including those
written specifically as templates.
"""
return filter_wav[type(scheme)].keys()
# Input parameter handling ###################################################
def get_obj_attrs(obj):
""" Return a dictionary built from the attributes of the given object.
"""
pr = {}
if obj is not None:
if isinstance(obj, numpy.core.records.record):
for name in obj.dtype.names:
pr[name] = getattr(obj, name)
elif hasattr(obj, '__dict__'):
pr = obj.__dict__
elif hasattr(obj, '__slots__'):
for slot in obj.__slots__:
if hasattr(obj, slot):
pr[slot] = getattr(obj, slot)
elif isinstance(obj, dict):
pr = obj.copy()
else:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
return pr
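# Illustrative behaviour of get_obj_attrs() (the row object is hypothetical):
# >>> class _Row(object):
# ...     pass
# >>> row = _Row()
# >>> row.mass1, row.mass2 = 1.4, 1.35
# >>> sorted(get_obj_attrs(row).items())
# [('mass1', 1.4), ('mass2', 1.35)]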
def props(obj, **kwargs):
""" Return a dictionary built from the combination of defaults, kwargs,
and the attributes of the given object.
"""
pr = get_obj_attrs(obj)
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_args.copy()
input_params.update(pr)
input_params.update(kwargs)
return input_params
# Input parameter handling for bursts ########################################
def props_sgburst(obj, **kwargs):
pr = {}
if obj is not None:
for name in dir(obj):
try:
value = getattr(obj, name)
if not name.startswith('__') and not inspect.ismethod(value):
pr[name] = value
except:
continue
# Get the parameters to generate the waveform
# Note that keyword arguments override values in the template object
input_params = default_sgburst_args.copy()
input_params.update(pr)
input_params.update(kwargs)
return input_params
# Waveform generation ########################################################
def get_fd_waveform_sequence(template=None, **kwds):
"""Return values of the waveform evaluated at the sequence of frequency
points.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: Array
The plus phase of the waveform in frequency domain evaluated at the
frequency points.
hcrosstilde: Array
The cross phase of the waveform in frequency domain evaluated at the
frequency points.
"""
kwds['delta_f'] = -1
kwds['f_lower'] = -1
p = props(template, **kwds)
lal_pars = _check_lal_pars(p)
flags = lalsimulation.SimInspiralCreateWaveformFlags()
lalsimulation.SimInspiralSetSpinOrder(flags, p['spin_order'])
lalsimulation.SimInspiralSetTidalOrder(flags, p['tidal_order'])
hp, hc = lalsimulation.SimInspiralChooseFDWaveformSequence(float(p['coa_phase']),
float(pnutils.solar_mass_to_kg(p['mass1'])),
float(pnutils.solar_mass_to_kg(p['mass2'])),
float(p['spin1x']), float(p['spin1y']), float(p['spin1z']),
float(p['spin2x']), float(p['spin2y']), float(p['spin2z']),
float(p['f_ref']),
pnutils.megaparsecs_to_meters(float(p['distance'])),
float(p['inclination']),
lal_pars,
_lalsim_enum[p['approximant']],
p['sample_points'].lal())
return Array(hp.data.data), Array(hc.data.data)
get_fd_waveform_sequence.__doc__ = get_fd_waveform_sequence.__doc__.format(
params=parameters.fd_waveform_sequence_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def get_td_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props(template,**kwargs)
wav_gen = td_wav[type(_scheme.mgr.state)]
if 'approximant' not in input_params or input_params['approximant'] is None:
raise ValueError("Please provide an approximant name")
elif input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
for arg in td_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg) )
return wav_gen[input_params['approximant']](**input_params)
get_td_waveform.__doc__ = get_td_waveform.__doc__.format(
params=parameters.td_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
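# A typical call (sketch; the parameter values are arbitrary but within the
# ranges this approximant supports):
# >>> hp, hc = get_td_waveform(approximant='SEOBNRv4', mass1=10., mass2=10.,
# ...                          delta_t=1.0/4096, f_lower=30.)
# hp and hc are TimeSeries holding the plus and cross polarizations.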
def get_fd_waveform(template=None, **kwargs):
"""Return a frequency domain gravitational waveform.
Parameters
----------
template: object
An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
{params}
Returns
-------
hplustilde: FrequencySeries
The plus phase of the waveform in frequency domain.
hcrosstilde: FrequencySeries
The cross phase of the waveform in frequency domain.
"""
input_params = props(template,**kwargs)
wav_gen = fd_wav[type(_scheme.mgr.state)]
if 'approximant' not in input_params:
raise ValueError("Please provide an approximant name")
elif input_params['approximant'] not in wav_gen:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
for arg in fd_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg) )
try:
ffunc = input_params.pop('f_final_func')
if ffunc != '':
# convert the frequency function to a value
input_params['f_final'] = pnutils.named_frequency_cutoffs[ffunc](
input_params)
        # if f_final is at or below f_lower + delta_f, raise a NoWaveformError
if 'f_final' in input_params and (
input_params['f_lower']+input_params['delta_f']
>= input_params['f_final']):
raise NoWaveformError("cannot generate waveform: f_lower >= f_final")
except KeyError:
pass
return wav_gen[input_params['approximant']](**input_params)
get_fd_waveform.__doc__ = get_fd_waveform.__doc__.format(
params=parameters.fd_waveform_params.docstr(prefix=" ",
include_label=False).lstrip(' '))
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
**params):
""" Return a fourier domain waveform approximant, using interpolation
"""
def rulog2(val):
return 2.0 ** numpy.ceil(numpy.log2(float(val)))
orig_approx = params['approximant']
params['approximant'] = params['approximant'].replace('_INTERP', '')
df = params['delta_f']
if 'duration' not in params:
duration = get_waveform_filter_length_in_time(**params)
elif params['duration'] > 0:
duration = params['duration']
else:
err_msg = "Waveform duration must be greater than 0."
raise ValueError(err_msg)
#FIXME We should try to get this length directly somehow
# I think this number should be conservative
ringdown_padding = 0.5
df_min = 1.0 / rulog2(duration + ringdown_padding)
# FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop
# off the inspiral when using ringdown_padding - 0.5.
# Also, if ringdown_padding is set to a very small
# value we can see cases where the ringdown is chopped.
if df_min > 0.5:
df_min = 0.5
params['delta_f'] = df_min
hp, hc = get_fd_waveform(**params)
hp = hp.astype(dtype)
if return_hc:
hc = hc.astype(dtype)
else:
hc = None
f_end = get_waveform_end_frequency(**params)
if f_end is None:
f_end = (len(hp) - 1) * hp.delta_f
if 'f_final' in params and params['f_final'] > 0:
f_end_params = params['f_final']
if f_end is not None:
f_end = min(f_end_params, f_end)
n_min = int(rulog2(f_end / df_min)) + 1
if n_min < len(hp):
hp = hp[:n_min]
if hc is not None:
hc = hc[:n_min]
offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f)
hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left')
if hc is not None:
hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
side='left')
params['approximant'] = orig_approx
return hp, hc
def get_sgburst_waveform(template=None, **kwargs):
"""Return the plus and cross polarizations of a time domain
sine-Gaussian burst waveform.
Parameters
----------
template: object
        An object that has attached properties. This can be used to substitute
for keyword arguments. A common example would be a row in an xml table.
approximant : string
A string that indicates the chosen approximant. See `td_approximants`
for available options.
q : float
The quality factor of a sine-Gaussian burst
frequency : float
The centre-frequency of a sine-Gaussian burst
delta_t : float
The time step used to generate the waveform
hrss : float
The strain rss
amplitude: float
The strain amplitude
Returns
-------
hplus: TimeSeries
The plus polarization of the waveform.
hcross: TimeSeries
The cross polarization of the waveform.
"""
input_params = props_sgburst(template,**kwargs)
for arg in sgburst_required_args:
if arg not in input_params:
raise ValueError("Please provide " + str(arg))
return _lalsim_sgburst_waveform(**input_params)
# Waveform filter routines ###################################################
# Organize Filter Generators
_inspiral_fd_filters = {}
_cuda_fd_filters = {}
_cuda_fd_filters['SPAtmplt'] = spa_tmplt
_inspiral_fd_filters['SPAtmplt'] = spa_tmplt
filter_wav = _scheme.ChooseBySchemeDict()
filter_wav.update( {_scheme.CPUScheme:_inspiral_fd_filters,
_scheme.CUDAScheme:_cuda_fd_filters,
} )
# Organize functions for function conditioning/precalculated values
_filter_norms = {}
_filter_ends = {}
_filter_preconditions = {}
_template_amplitude_norms = {}
_filter_time_lengths = {}
def seobnrv2_final_frequency(**kwds):
return pnutils.get_final_freq("SEOBNRv2", kwds['mass1'], kwds['mass2'],
kwds['spin1z'], kwds['spin2z'])
def get_imr_length(approx, **kwds):
"""Call through to pnutils to obtain IMR waveform durations
"""
m1 = float(kwds['mass1'])
m2 = float(kwds['mass2'])
s1z = float(kwds['spin1z'])
s2z = float(kwds['spin2z'])
f_low = float(kwds['f_lower'])
# 10% margin of error is incorporated in the pnutils function
return pnutils.get_imr_duration(m1, m2, s1z, s2z, f_low, approximant=approx)
def seobnrv2_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv2* waveform duration.
"""
return get_imr_length("SEOBNRv2", **kwds)
def seobnrv4_length_in_time(**kwds):
"""Stub for holding the calculation of SEOBNRv4* waveform duration.
"""
return get_imr_length("SEOBNRv4", **kwds)
def imrphenomd_length_in_time(**kwds):
"""Stub for holding the calculation of IMRPhenomD waveform duration.
"""
return get_imr_length("IMRPhenomD", **kwds)
_filter_norms["SPAtmplt"] = spa_tmplt_norm
_filter_preconditions["SPAtmplt"] = spa_tmplt_precondition
_filter_ends["SPAtmplt"] = spa_tmplt_end
_filter_ends["TaylorF2"] = spa_tmplt_end
#_filter_ends["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_final_frequency
#_filter_ends["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_final_frequency
# PhenomD returns higher frequencies than this, so commenting this out for now
#_filter_ends["IMRPhenomC"] = seobnrv2_final_frequency
#_filter_ends["IMRPhenomD"] = seobnrv2_final_frequency
_template_amplitude_norms["SPAtmplt"] = spa_amplitude_factor
_filter_time_lengths["SPAtmplt"] = spa_length_in_time
_filter_time_lengths["TaylorF2"] = spa_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv1_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_EffectiveSpin"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["EOBNRv2HM_ROM"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv2_ROM_DoubleSpin_HI"] = seobnrv2_length_in_time
_filter_time_lengths["SEOBNRv4_ROM"] = seobnrv4_length_in_time
_filter_time_lengths["IMRPhenomC"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomD"] = imrphenomd_length_in_time
_filter_time_lengths["IMRPhenomPv2"] = imrphenomd_length_in_time
_filter_time_lengths["SpinTaylorF2"] = spa_length_in_time
_filter_time_lengths["TaylorF2NL"] = spa_length_in_time
# Also add generators for switching between approximants
apx_name = "SpinTaylorF2_SWAPPER"
cpu_fd[apx_name] = _spintaylor_aligned_prec_swapper
_filter_time_lengths[apx_name] = _filter_time_lengths["SpinTaylorF2"]
from . nltides import nonlinear_tidal_spa
cpu_fd["TaylorF2NL"] = nonlinear_tidal_spa
# We can do interpolation for waveforms that have a time length
for apx in copy.copy(_filter_time_lengths):
if apx in cpu_fd:
apx_int = apx + '_INTERP'
cpu_fd[apx_int] = get_interpolated_fd_waveform
_filter_time_lengths[apx_int] = _filter_time_lengths[apx]
td_wav = _scheme.ChooseBySchemeDict()
fd_wav = _scheme.ChooseBySchemeDict()
td_wav.update({_scheme.CPUScheme:cpu_td,_scheme.CUDAScheme:cuda_td})
fd_wav.update({_scheme.CPUScheme:cpu_fd,_scheme.CUDAScheme:cuda_fd})
sgburst_wav = {_scheme.CPUScheme:cpu_sgburst}
def get_waveform_filter(out, template=None, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant
"""
n = len(out)
input_params = props(template, **kwargs)
if input_params['approximant'] in filter_approximants(_scheme.mgr.state):
wav_gen = filter_wav[type(_scheme.mgr.state)]
htilde = wav_gen[input_params['approximant']](out=out, **input_params)
htilde.resize(n)
htilde.chirp_length = get_waveform_filter_length_in_time(**input_params)
htilde.length_in_time = htilde.chirp_length
return htilde
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
duration = get_waveform_filter_length_in_time(**input_params)
hp, _ = wav_gen[input_params['approximant']](duration=duration,
return_hc=False, **input_params)
hp.resize(n)
out[0:len(hp)] = hp[:]
hp = FrequencySeries(out, delta_f=hp.delta_f, copy=False)
hp.length_in_time = hp.chirp_length = duration
return hp
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, _ = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if ('taper' in input_params.keys() and \
input_params['taper'] is not None):
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
return td_waveform_to_fd_waveform(hp, out=out)
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
""" Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
    If given, the length of the returned FrequencySeries is determined
    from the length of out. If out is not given, the length can be provided
    explicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added.
"""
# Figure out lengths and set out if needed
if out is None:
if length is None:
N = pnutils.nearest_larger_binary_number(len(waveform) + \
buffer_length)
n = int(N//2) + 1
else:
n = length
N = (n-1)*2
out = zeros(n, dtype=complex_same_precision_as(waveform))
else:
n = len(out)
N = (n-1)*2
delta_f = 1. / (N * waveform.delta_t)
# total duration of the waveform
tmplt_length = len(waveform) * waveform.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( waveform.start_time ) # conversion from LIGOTimeGPS
waveform.resize(N)
k_zero = int(waveform.start_time / waveform.delta_t)
waveform.roll(k_zero)
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
fft(waveform.astype(real_same_precision_as(htilde)), htilde)
htilde.length_in_time = tmplt_length
htilde.chirp_length = tChirp
return htilde
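# Worked arithmetic for the length convention above: an output FrequencySeries
# of n samples implies N = (n-1)*2 time samples, so for n = 2049 the time
# domain buffer holds N = 4096 samples and delta_f = 1/(4096*delta_t). The
# input waveform must therefore fit inside N samples, leaving room for the
# zero padding the docstring requires.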
def get_two_pol_waveform_filter(outplus, outcross, template, **kwargs):
"""Return a frequency domain waveform filter for the specified approximant.
Unlike get_waveform_filter this function returns both h_plus and h_cross
components of the waveform, which are needed for searches where h_plus
and h_cross are not related by a simple phase shift.
"""
n = len(outplus)
# If we don't have an inclination column alpha3 might be used
if not hasattr(template, 'inclination')\
and not kwargs.has_key('inclination'):
if hasattr(template, 'alpha3'):
kwargs['inclination'] = template.alpha3
input_params = props(template, **kwargs)
if input_params['approximant'] in fd_approximants(_scheme.mgr.state):
wav_gen = fd_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
hp.resize(n)
hc.resize(n)
outplus[0:len(hp)] = hp[:]
hp = FrequencySeries(outplus, delta_f=hp.delta_f, copy=False)
outcross[0:len(hc)] = hc[:]
hc = FrequencySeries(outcross, delta_f=hc.delta_f, copy=False)
hp.chirp_length = get_waveform_filter_length_in_time(**input_params)
hp.length_in_time = hp.chirp_length
hc.chirp_length = hp.chirp_length
hc.length_in_time = hp.length_in_time
return hp, hc
elif input_params['approximant'] in td_approximants(_scheme.mgr.state):
# N: number of time samples required
N = (n-1)*2
delta_f = 1.0 / (N * input_params['delta_t'])
wav_gen = td_wav[type(_scheme.mgr.state)]
hp, hc = wav_gen[input_params['approximant']](**input_params)
# taper the time series hp if required
if ('taper' in input_params.keys() and \
input_params['taper'] is not None):
hp = wfutils.taper_timeseries(hp, input_params['taper'],
return_lal=False)
hc = wfutils.taper_timeseries(hc, input_params['taper'],
return_lal=False)
# total duration of the waveform
tmplt_length = len(hp) * hp.delta_t
# for IMR templates the zero of time is at max amplitude (merger)
# thus the start time is minus the duration of the template from
# lower frequency cutoff to merger, i.e. minus the 'chirp time'
tChirp = - float( hp.start_time ) # conversion from LIGOTimeGPS
hp.resize(N)
hc.resize(N)
k_zero = int(hp.start_time / hp.delta_t)
hp.roll(k_zero)
hc.roll(k_zero)
hp_tilde = FrequencySeries(outplus, delta_f=delta_f, copy=False)
hc_tilde = FrequencySeries(outcross, delta_f=delta_f, copy=False)
fft(hp.astype(real_same_precision_as(hp_tilde)), hp_tilde)
fft(hc.astype(real_same_precision_as(hc_tilde)), hc_tilde)
hp_tilde.length_in_time = tmplt_length
hp_tilde.chirp_length = tChirp
hc_tilde.length_in_time = tmplt_length
hc_tilde.chirp_length = tChirp
return hp_tilde, hc_tilde
else:
raise ValueError("Approximant %s not available" %
(input_params['approximant']))
def waveform_norm_exists(approximant):
if approximant in _filter_norms:
return True
else:
return False
def get_template_amplitude_norm(template=None, **kwargs):
""" Return additional constant template normalization. This only affects
the effective distance calculation. Returns None for all templates with a
physically meaningful amplitude.
"""
input_params = props(template,**kwargs)
approximant = kwargs['approximant']
if approximant in _template_amplitude_norms:
return _template_amplitude_norms[approximant](**input_params)
else:
return None
def get_waveform_filter_precondition(approximant, length, delta_f):
"""Return the data preconditioning factor for this approximant.
"""
if approximant in _filter_preconditions:
return _filter_preconditions[approximant](length, delta_f)
else:
return None
def get_waveform_filter_norm(approximant, psd, length, delta_f, f_lower):
""" Return the normalization vector for the approximant
"""
if approximant in _filter_norms:
return _filter_norms[approximant](psd, length, delta_f, f_lower)
else:
return None
def get_waveform_end_frequency(template=None, **kwargs):
"""Return the stop frequency of a template
"""
input_params = props(template,**kwargs)
approximant = kwargs['approximant']
if approximant in _filter_ends:
return _filter_ends[approximant](**input_params)
else:
return None
def get_waveform_filter_length_in_time(approximant, template=None, **kwargs):
"""For filter templates, return the length in time of the template.
"""
kwargs = props(template, **kwargs)
if approximant in _filter_time_lengths:
return _filter_time_lengths[approximant](**kwargs)
else:
return None
__all__ = ["get_td_waveform", "get_fd_waveform", "get_fd_waveform_sequence",
"print_td_approximants", "print_fd_approximants",
"td_approximants", "fd_approximants",
"get_waveform_filter", "filter_approximants",
"get_waveform_filter_norm", "get_waveform_end_frequency",
"waveform_norm_exists", "get_template_amplitude_norm",
"get_waveform_filter_length_in_time", "get_sgburst_waveform",
"print_sgburst_approximants", "sgburst_approximants",
"td_waveform_to_fd_waveform", "get_two_pol_waveform_filter",
"NoWaveformError"]
|
hagabbar/pycbc_copy
|
pycbc/waveform/waveform.py
|
Python
|
gpl-3.0
| 38,346
|
[
"Gaussian"
] |
c90478cccc14fbb928e15f0ba137e8c04f9792d79b6b7dcc31149007b519653e
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RXmapbridge(RPackage):
"""Export plotting files to the xmapBridge for visualisation in X:Map
xmapBridge can plot graphs in the X:Map genome browser. This package
exports plotting files in a suitable format."""
homepage = "https://bioconductor.org/packages/xmapbridge"
git = "https://git.bioconductor.org/packages/xmapbridge.git"
version('1.48.0', commit='1cefe6b56c6dcb1f18028b3b7d6a67d490bc9730')
version('1.42.0', commit='d79c80dfc1a0ed3fd6d3e7a7c3a4aff778537ca9')
version('1.40.0', commit='00a2993863f28711e237bc937fa0ba2d05f81684')
version('1.38.0', commit='08138f00385fa0c669ff4cc33d7eac3d29cd615d')
version('1.36.0', commit='e44f648c9da9eaa130849a738d90dc11685050e2')
version('1.34.0', commit='f162e1f72ead5f5a1aede69032d5771a6572d965')
depends_on('r@2.0:', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-xmapbridge/package.py
|
Python
|
lgpl-2.1
| 1,076
|
[
"Bioconductor"
] |
05fffb971f7dc26472b6135df8cc9359febe99e4f933f03d2400a1d20ffecd48
|
#Needs to:
# Manage the notebook/worksheet object.
# Communicate to the javascript in the WebView object.
# Open and save .sws files.
# Delete the internal notebook object on disk when done.
import os
try:
# simplejson is faster, so try to import it first
import simplejson as json
except ImportError:
import json
from PySide.QtCore import (QObject, SIGNAL, Slot, Signal)
from sagenb.notebook.notebook import Notebook
from sagenb.notebook.misc import encode_response
from sagenb.misc.misc import (unicode_str, walltime)
from guru.globals import GURU_PORT, GURU_USERNAME, guru_notebook
from guru.ServerConfigurations import ServerConfigurations
import guru.SageProcessManager as SageProcessManager
worksheet_commands = {}
dirty_commands = [
"eval",
"introspect"
]
class WorksheetController(QObject):
#Class variables
notebook = guru_notebook
worksheet_count = 0 #Reference counting.
def __init__(self, webViewController):
super(WorksheetController, self).__init__()
WorksheetController.worksheet_count += 1
self.thingy = 0
self.webview_controller = webViewController
#Set up the Python-javascript bridge.
self.webFrame = self.webview_controller.webView().page().mainFrame()
self.connect(self.webFrame, SIGNAL("javaScriptWindowObjectCleared()"), self.addJavascriptBridge)
self.request_values = None
self.isDirty = False
#Sanity check.
if guru_notebook is None:
raise RuntimeError
self.notebook_username = GURU_USERNAME
self._worksheet = None
self.init_updates()
self.server_configuration = None
#When we use a Notebook Server, we want to be able to access resources on the server
#from the webview. In order to do so, we need to set a session cookie. We set this
#cookie on loadStarted()
self.connect(self.webFrame, SIGNAL('loadFinished(bool)'), self.setSessionCookie)
@staticmethod
def withNewWorksheet(webViewController, server=None):
# server is a Sage server configuration that will determine the Sage process this
# worksheet will use.
if not server:
server = ServerConfigurations.getDefault()
wsc = WorksheetController(webViewController)
wsc.server_configuration = server
ws = guru_notebook.create_new_worksheet('Untitled', wsc.notebook_username)
SageProcessManager.setWorksheetProcessServer(ws, server)
wsc.setWorksheet(ws)
return wsc
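    # A sketch of driving the factory above (the webViewController instance is
    # assumed to come from the surrounding application):
    #     wsc = WorksheetController.withNewWorksheet(my_webview_controller)
    #     ...  # user edits the worksheet
    #     wsc.cleanup()  # stop the Sage process and delete the scratch copy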
@staticmethod
def withWorksheetFile(webViewController, filename, server=None):
if not server:
server = ServerConfigurations.getDefault()
wsc = WorksheetController(webViewController)
wsc.server_configuration = server
ws = wsc.notebook.import_worksheet(filename, wsc.notebook_username)
SageProcessManager.setWorksheetProcessServer(ws, server)
wsc.setWorksheet(ws)
return wsc
def useServerConfiguration(self, server_config):
self.server_configuration = server_config
new_worksheet = SageProcessManager.setWorksheetProcessServer(self._worksheet, server_config)
#If we are switching TO a Notebook Server, we want to be able to access resources on the
#server from the browser page.
self.setSessionCookie()
#There are some instances when we have to swap out worksheets, i.e., when switching FROM a
#notebook server.
if self._worksheet is not new_worksheet:
self._worksheet._notebook.delete_worksheet(self._worksheet.filename())
self._worksheet = new_worksheet
#Open the worksheet in the webView
self.webFrame.setUrl(self.worksheetUrl())
def setWorksheet(self, worksheet):
# This "opens" the worksheet in the webview as if it were a new file.
#Check that the worksheet we were given has the notebook setup correctly.
if (not hasattr(worksheet, "_notebook")) or (worksheet._notebook is None):
worksheet._notebook = guru_notebook
self._worksheet = worksheet
#Handle the dirty status of the worksheet.
self.isDirty = False
#Open the worksheet in the webView
self.webFrame.setUrl(self.worksheetUrl())
def addJavascriptBridge(self):
#This method is called whenever new content is loaded into the webFrame.
#Each time this happens, we need to reconnect the Python-javascript bridge.
self.webFrame.addToJavaScriptWindowObject("Guru", self)
def setSessionCookie(self):
cookie_string = SageProcessManager.getSessionCookie(self.server_configuration)
if cookie_string:
javascript = "document.cookie='%s';" % cookie_string
print javascript
self.webFrame.evaluateJavaScript(javascript)
@Slot(str)
def asyncRequest(self, token):
#This is the counterpart to sagenb.async_request() in sagenb.js.
#The original sagenb.async_request() made an ajax request. We can
#significantly improve UI performance by calling this python method
#directly and bypassing the Flask server.
# Sometimes the worksheet is deleted before the webview is GCed.
if self._worksheet is None:
return
        #The fall-through branch below (letting the Sage Notebook Server handle
        #the request) is currently disabled; every command is handled here.
        if True:
#Handle the command ourselves.
javascript = "Guru.requests['%s']['url'];" % token
url = self.webFrame.evaluateJavaScript(javascript)
javascript = "encode_response(Guru.requests['%s']['postvars']);" % token
postvars = self.webFrame.evaluateJavaScript(javascript)
if postvars:
                self.request_values = json.loads(postvars)
#The url encodes the command. They look like:
# url = "/home/admin/0/worksheet_properties"
print "URL: %s" % url
command = url.split("/")[-1]
# Check and see if the operation will make the worksheet dirty. If so, emit a "dirty" signal.
if command in dirty_commands:
self.isDirty = True
self.emit(SIGNAL("dirty(bool)"), True)
if self.server_configuration["type"] == "local":
result = worksheet_commands[command](self, self._worksheet)
elif self.server_configuration["type"] == "notebook server":
try:
result = SageProcessManager.remoteCommand(self._worksheet, (command, self.request_values))
except Exception as e:
#This signal may be emitted over and over, so the owner of this WorksheetController
#needs to take care of handling only the first error signal (or whatever).
#self.emit(SIGNAL("remoteCommandError(str)"), "Error executing remote command:\n%s"%e.message)
return
elif self.server_configuration["type"] == "cell server":
pass
self.sendResultToPage(result, token)
else:
#Let the Sage Notebook Server handle the request as usual.
javascript = "sagenb.guru_async_request_fall_through('%s');" % token
self.webFrame.evaluateJavaScript(javascript)
def sendResultToPage(self, result, token):
#Because we encode result in a javascript string literal, we need
#to format the string as follows.
result_string = repr(result)
if result_string.startswith("u'"):
result_string = result_string[2:-1]
else:
result_string = result_string[1:-1]
#Now give the result back to the page.
self.webview_controller.putAjaxConsole("result: " + result + "\n")
javascript = "Guru.requests['%s']['callback']('success', '%s');" % (token, result_string)
self.webFrame.evaluateJavaScript(javascript)
javascript = "delete Guru.requests['%s'];" % token
self.webFrame.evaluateJavaScript(javascript)
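    # For example (illustrative): repr(u'done') is "u'done'", which the
    # branches above strip down to the bare characters done before they are
    # embedded in the javascript string literal.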
@Slot(str)
def putAjaxConsole(self, text):
self.webview_controller.putAjaxConsole(text + "\n")
def cleanup(self):
#This method is called when this WorksheetController instance is about
#to be eligible for garbage collection.
WorksheetController.worksheet_count -= 1
if self._worksheet is not None:
SageProcessManager.stopSageProcess(self._worksheet, create_new=False)
#Now remove the worksheet.
self._worksheet._notebook.delete_worksheet(self._worksheet.filename())
self._worksheet = None
def saveWorksheet(self, file_name):
#Write out the worksheet to filename, overwriting if necessary.
if os.path.exists(file_name):
os.remove(file_name) #This may be unnecessary.
if self.server_configuration["type"] == "local":
self.worksheet_download(self._worksheet, file_name)
elif self.server_configuration["type"] == "notebook server":
SageProcessManager.saveRemoteWorksheet(self._worksheet, file_name)
elif self.server_configuration["type"] == "cell server":
pass
#The worksheet is no longer dirty.
self.isDirty = False
self.emit(SIGNAL("dirty(bool)"), False)
def worksheetUrl(self):
if self._worksheet is None:
return ''
#There is probably a better way to do this.
url_vars = {'port' : GURU_PORT, 'name': self._worksheet.filename()}
url = "http://localhost:%(port)s/home/%(name)s/" % url_vars
return url
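    # For example (illustrative): with GURU_PORT == 8000 and a worksheet
    # filename of 'admin/0', worksheetUrl() returns
    # 'http://localhost:8000/home/admin/0/'.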
def getTitle(self):
return self._worksheet.name()
########### FILE MENU WORKSHEET COMMANDS ###########
def evaluateAll(self):
javascript = "sagenb.worksheetapp.worksheet.evaluate_all()"
self.webFrame.evaluateJavaScript(javascript)
def interrupt(self):
javascript = "sagenb.worksheetapp.worksheet.interrupt()"
self.webFrame.evaluateJavaScript(javascript)
def hideAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.hide_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def showAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.show_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def deleteAllOutput(self):
javascript = "sagenb.worksheetapp.worksheet.delete_all_output()"
self.webFrame.evaluateJavaScript(javascript)
def restartWorksheet(self):
javascript = "sagenb.worksheetapp.worksheet.restart_sage()"
self.webFrame.evaluateJavaScript(javascript)
def typesetOutput(self, enabled):
#set_pretty_print takes a lowercase string.
if enabled:
self._worksheet.set_pretty_print('true')
else:
self._worksheet.set_pretty_print('false')
########### FLASK SERVER WORKSHEET COMMANDS ###########
def worksheet_command(target):
#This decorator registers the command as a command that the worksheet controller
#knows how to handle.
def decorator(f):
#Register the worksheet command.
worksheet_commands[target] = f
#We will need to take care of commands with multiple arguments.
def wrapper(*args, **kwds):
return f(*args, **kwds)
return wrapper
return decorator
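    # A minimal sketch of how registration and dispatch fit together (command
    # name hypothetical):
    #
    #   @worksheet_command('ping')
    #   def worksheet_ping(self, worksheet):
    #       return 'pong'
    #
    # asyncRequest() then routes a URL ending in '/ping' through
    # worksheet_commands['ping'](self, self._worksheet).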
def get_cell_id(self):
"""
Returns the cell ID from the request.
        We cast the incoming cell ID to an integer if possible;
        otherwise, we treat it as a string.
"""
try:
return int(self.request_values['id'])
except ValueError:
return self.request_values['id']
@worksheet_command('rename')
def worksheet_rename(self, worksheet):
worksheet.set_name(self.request_values['name'])
return 'done'
@worksheet_command('alive')
def worksheet_alive(self, worksheet):
return str(worksheet.state_number())
@worksheet_command('system/<system>')
def worksheet_system(self, worksheet, system):
worksheet.set_system(system)
return 'success'
@worksheet_command('pretty_print/<enable>')
def worksheet_pretty_print(self, worksheet, enable):
worksheet.set_pretty_print(enable)
return 'success'
@worksheet_command('conf')
def worksheet_conf(self, worksheet):
return str(worksheet.conf())
########################################################
# Save a worksheet
########################################################
@worksheet_command('save')
def worksheet_save(self, worksheet):
"""
Save the contents of a worksheet after editing it in plain-text
edit mode.
"""
        if 'button_save' in self.request_values:
E = self.request_values['textfield']
worksheet.edit_save(E)
worksheet.record_edit(self.notebook_username)
return redirect(url_for_worksheet(worksheet))
@worksheet_command('save_snapshot')
def worksheet_save_snapshot(self, worksheet):
"""Save a snapshot of a worksheet."""
worksheet.save_snapshot(self.notebook_username)
return 'saved'
@worksheet_command('save_and_quit')
def worksheet_save_and_quit(self, worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(self.notebook_username)
worksheet.quit()
return 'saved'
#XXX: Redundant due to the above?
@worksheet_command('save_and_close')
def worksheet_save_and_close(self, worksheet):
"""Save a snapshot of a worksheet then quit it. """
worksheet.save_snapshot(self.notebook_username)
worksheet.quit()
return 'saved'
@worksheet_command('discard_and_quit')
def worksheet_discard_and_quit(self, worksheet):
"""Quit the worksheet, discarding any changes."""
worksheet.revert_to_last_saved_state()
worksheet.quit()
return 'saved' #XXX: Should this really be saved?
@worksheet_command('revert_to_last_saved_state')
def worksheet_revert_to_last_saved_state(self, worksheet):
worksheet.revert_to_last_saved_state()
return 'reverted'
########################################################
# Worksheet properties
########################################################
@worksheet_command('worksheet_properties')
def worksheet_properties(self, worksheet):
"""
Send worksheet properties as a JSON object
"""
r = worksheet.basic()
if worksheet.has_published_version():
hostname = request.headers.get('host', self.notebook.interface + ':' + str(self.notebook.port))
r['published_url'] = 'http%s://%s/home/%s' % ('' if not self.notebook.secure else 's',
hostname,
worksheet.published_version().filename())
return encode_response(r)
########################################################
# Used in refreshing the cell list
########################################################
@worksheet_command('cell_properties')
def worksheet_cell_properties(self, worksheet):
"""
Return the cell with the given id as a JSON object
"""
id = self.get_cell_id()
return encode_response(worksheet.get_cell_with_id(id).basic())
@worksheet_command('cell_list')
def worksheet_cell_list(self, worksheet):
"""
Return a list of cells in JSON format.
"""
r = {}
r['state_number'] = worksheet.state_number()
r['cell_list'] = [c.basic() for c in worksheet.cell_list()]
return encode_response(r)
########################################################
# Set output type of a cell
########################################################
@worksheet_command('set_cell_output_type')
def worksheet_set_cell_output_type(self, worksheet):
"""
Set the output type of the cell.
        This sets the output type of the cell, e.g., to allow wrapping
        of output that is very long.
"""
id = self.get_cell_id()
type = self.request_values['type']
worksheet.get_cell_with_id(id).set_cell_output_type(type)
return ''
########################################################
#Cell creation
########################################################
@worksheet_command('new_cell_before')
def worksheet_new_cell_before(self, worksheet):
"""Add a new cell before a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=False)
return encode_response(r)
@worksheet_command('new_text_cell_before')
def worksheet_new_text_cell_before(self, worksheet):
"""Add a new text cell before a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_text_cell_before(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
@worksheet_command('new_cell_after')
def worksheet_new_cell_after(self, worksheet):
"""Add a new cell after a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(div_wrap=True)
return encode_response(r)
@worksheet_command('new_text_cell_after')
def worksheet_new_text_cell_after(self, worksheet):
"""Add a new text cell after a given cell."""
r = {}
r['id'] = id = self.get_cell_id()
input = unicode_str(self.request_values.get('input', ''))
cell = worksheet.new_text_cell_after(id, input=input)
worksheet.increase_state_number()
r['new_id'] = cell.id()
#r['new_html'] = cell.html(editing=True)
# XXX: Does editing correspond to TinyMCE? If so, we should try
# to centralize that code.
return encode_response(r)
########################################################
# Cell deletion
########################################################
@worksheet_command('delete_cell')
def worksheet_delete_cell(self, worksheet):
"""
Deletes a worksheet cell, unless there's only one compute cell
left. This allows functions which evaluate relative to existing
cells, e.g., inserting a new cell, to continue to work.
"""
r = {}
r['id'] = id = self.get_cell_id()
if len(worksheet.compute_cell_id_list()) <= 1:
r['command'] = 'ignore'
else:
prev_id = worksheet.delete_cell_with_id(id)
r['command'] = 'delete'
            r['prev_id'] = prev_id  #Avoid deleting the cell a second time.
r['cell_id_list'] = worksheet.cell_id_list()
return encode_response(r)
@worksheet_command('delete_cell_output')
def worksheet_delete_cell_output(self, worksheet):
"""Delete's a cell's output."""
r = {}
r['id'] = id = self.get_cell_id()
worksheet.get_cell_with_id(id).delete_output()
r['command'] = 'delete_output'
return encode_response(r)
########################################################
# Evaluation and cell update
########################################################
@worksheet_command('eval')
def worksheet_eval(self, worksheet):
"""
Evaluate a worksheet cell.
If the request is not authorized (the requester did not enter the
correct password for the given worksheet), then the request to
evaluate or introspect the cell is ignored.
        If the cell ends with one or two question marks (not on a comment
        line), this is interpreted as a request for introspection: one
        question mark requests the documentation of the function, and two
        request the documentation together with the source code.
"""
r = {}
r['id'] = id = self.get_cell_id()
cell = worksheet.get_cell_with_id(id)
public = worksheet.tags().get('_pub_', [False])[0] #this is set in pub_worksheet
if public and not cell.is_interactive_cell():
r['command'] = 'error'
r['message'] = 'Cannot evaluate non-interactive public cell with ID %r.' % id
return encode_response(r)
worksheet.increase_state_number()
if public:
# Make public input cells read-only.
input_text = cell.input_text()
else:
input_text = unicode_str(self.request_values.get('input', '')).replace('\r\n', '\n') #DOS
# Handle an updated / recomputed interact. TODO: JSON encode
# the update data.
if 'interact' in self.request_values:
r['interact'] = 1
input_text = INTERACT_UPDATE_PREFIX
variable = self.request_values.get('variable', '')
if variable!='':
adapt_number = int(self.request_values.get('adapt_number', -1))
value = self.request_values.get('value', '')
input_text += "\n_interact_.update('%s', '%s', %s, _interact_.standard_b64decode('%s'), globals())" % (id, variable, adapt_number, value)
if int(self.request_values.get('recompute', 0)):
input_text += "\n_interact_.recompute('%s')" % id
cell.set_input_text(input_text)
if int(self.request_values.get('save_only', '0')):
self.notebook_updates()
return encode_response(r)
elif int(self.request_values.get('text_only', '0')):
self.notebook_updates()
r['cell_html'] = cell.html()
return encode_response(r)
cell.evaluate(username=self.notebook_username)
new_cell = int(self.request_values.get('newcell', 0)) #whether to insert a new cell or not
if new_cell:
new_cell = worksheet.new_cell_after(id)
r['command'] = 'insert_cell'
r['new_cell_id'] = new_cell.id()
r['new_cell_html'] = new_cell.html(div_wrap=False)
else:
r['next_id'] = cell.next_compute_id()
self.notebook_updates()
return encode_response(r)
@worksheet_command('cell_update')
def worksheet_cell_update(self, worksheet):
import time
r = {}
r['id'] = id = self.get_cell_id()
# update the computation one "step".
worksheet.check_comp()
# now get latest status on our cell
r['status'], cell = worksheet.check_cell(id)
if r['status'] == 'd':
r['new_input'] = cell.changed_input_text()
r['output_html'] = cell.output_html()
# Update the log.
t = time.strftime('%Y-%m-%d at %H:%M',
time.localtime(time.time()))
H = "Worksheet '%s' (%s)\n" % (worksheet.name(), t)
H += cell.edit_text(ncols=self.notebook.HISTORY_NCOLS, prompts=False,
max_out=self.notebook.HISTORY_MAX_OUTPUT)
self.notebook.add_to_user_history(H, self.notebook_username)
else:
r['new_input'] = ''
r['output_html'] = ''
r['interrupted'] = cell.interrupted()
if 'Unhandled SIGSEGV' in cell.output_text(raw=True).split('\n'):
r['interrupted'] = 'restart'
print 'Segmentation fault detected in output!'
r['output'] = cell.output_text(html=True)
r['output_wrapped'] = cell.output_text(self.notebook.conf()['word_wrap_cols'])
r['introspect_output'] = cell.introspect_output()
# Compute 'em, if we got 'em.
worksheet.start_next_comp()
return encode_response(r)
########################################################
# Cell introspection
########################################################
@worksheet_command('introspect')
def worksheet_introspect(self, worksheet):
"""
Cell introspection. This is called when the user presses the tab
key in the browser in order to introspect.
"""
r = {}
r['id'] = id = self.get_cell_id()
if worksheet.tags().get('_pub_', [False])[0]: #tags set in pub_worksheet
r['command'] = 'error'
r['message'] = 'Cannot evaluate public cell introspection.'
return encode_response(r)
before_cursor = self.request_values.get('before_cursor', '')
after_cursor = self.request_values.get('after_cursor', '')
cell = worksheet.get_cell_with_id(id)
cell.evaluate(introspect=[before_cursor, after_cursor])
r['command'] = 'introspect'
return encode_response(r)
########################################################
# Edit the entire worksheet
########################################################
@worksheet_command('edit')
def worksheet_edit(self, worksheet):
"""
Return a window that allows the user to edit the text of the
worksheet with the given filename.
"""
return render_template(os.path.join("html", "worksheet_edit.html"),
worksheet = worksheet,
username = self.notebook_username)
########################################################
# Plain text log view of worksheet
########################################################
@worksheet_command('text')
def worksheet_text(self, worksheet):
"""
        Return a window that displays a plain-text log view of the
        worksheet with the given filename.
"""
from cgi import escape
plain_text = worksheet.plain_text(prompts=True, banner=False)
plain_text = escape(plain_text).strip()
return render_template(os.path.join("html", "worksheet_text.html"),
username = self.notebook_username,
plain_text = plain_text)
########################################################
# Copy a worksheet
########################################################
@worksheet_command('copy')
def worksheet_copy(self, worksheet):
copy = self.notebook.copy_worksheet(worksheet, self.notebook_username)
if 'no_load' in self.request_values:
return ''
else:
return redirect(url_for_worksheet(copy))
########################################################
# Get a copy of a published worksheet and start editing it
########################################################
@worksheet_command('edit_published_page')
def worksheet_edit_published_page(self, worksheet):
## if user_type(self.username) == 'guest':
## return current_app.message('You must <a href="/">login first</a> in order to edit this worksheet.')
ws = worksheet.worksheet_that_was_published()
if ws.owner() == self.notebook_username:
W = ws
else:
W = self.notebook.copy_worksheet(worksheet, self.notebook_username)
W.set_name(worksheet.name())
return redirect(url_for_worksheet(W))
########################################################
# Collaborate with others
########################################################
@worksheet_command('invite_collab')
def worksheet_invite_collab(self, worksheet):
owner = worksheet.owner()
id_number = worksheet.id_number()
old_collaborators = set(worksheet.collaborators())
        collaborators = set(u.strip() for u in self.request_values.get('collaborators', '').split(',') if u.strip() != owner)
if len(collaborators-old_collaborators)>500:
# to prevent abuse, you can't add more than 500 collaborators at a time
return current_app.message(_("Error: can't add more than 500 collaborators at a time"), cont=url_for_worksheet(worksheet))
worksheet.set_collaborators(collaborators)
user_manager = self.notebook.user_manager()
# add worksheet to new collaborators
for u in collaborators-old_collaborators:
try:
user_manager.user(u).viewable_worksheets().add((owner, id_number))
except KeyError:
# user doesn't exist
pass
# remove worksheet from ex-collaborators
for u in old_collaborators-collaborators:
try:
user_manager.user(u).viewable_worksheets().discard((owner, id_number))
except KeyError:
# user doesn't exist
pass
return ''
########################################################
# Revisions
########################################################
# TODO take out or implement
@worksheet_command('revisions')
def worksheet_revisions(self, worksheet):
"""
Show a list of revisions of this worksheet.
"""
if 'action' not in self.request_values:
if 'rev' in self.request_values:
return self.notebook.html_specific_revision(self.notebook_username, worksheet,
self.request_values['rev'])
else:
return self.notebook.html_worksheet_revision_list(self.notebook_username, worksheet)
else:
rev = self.request_values['rev']
action = self.request_values['action']
if action == 'revert':
import bz2
worksheet.save_snapshot(self.notebook_username)
#XXX: Requires access to filesystem
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
worksheet.delete_cells_directory()
worksheet.edit_save(txt)
return redirect(url_for_worksheet(worksheet))
elif action == 'publish':
import bz2
W = self.notebook.publish_worksheet(worksheet, self.notebook_username)
txt = bz2.decompress(open(worksheet.get_snapshot_text_filename(rev)).read())
W.delete_cells_directory()
W.edit_save(txt)
return redirect(url_for_worksheet(W))
else:
return current_app.message(_('Error'))
########################################################
# Cell directories
########################################################
@worksheet_command('cells/<path:filename>')
def worksheet_cells(self, worksheet, filename):
#XXX: This requires that the worker filesystem be accessible from
#the server.
from flask.helpers import send_from_directory
return send_from_directory(worksheet.cells_directory(), filename)
##############################################
# Data
##############################################
@worksheet_command('data/<path:filename>')
    def worksheet_data_folder(self, worksheet, filename):
dir = os.path.abspath(worksheet.data_directory())
if not os.path.exists(dir):
return make_response(_('No data file'), 404)
else:
from flask.helpers import send_from_directory
return send_from_directory(worksheet.data_directory(), filename)
@worksheet_command('delete_datafile')
def worksheet_delete_datafile(self, worksheet):
dir = os.path.abspath(worksheet.data_directory())
filename = self.request_values['name']
path = os.path.join(dir, filename)
os.unlink(path)
return ''
@worksheet_command('edit_datafile/<path:filename>')
def worksheet_edit_datafile(self, worksheet, filename):
ext = os.path.splitext(filename)[1].lower()
file_is_image, file_is_text = False, False
text_file_content = ""
path = "/home/%s/data/%s" % (worksheet.filename(), filename)
if ext in ['.png', '.jpg', '.gif']:
file_is_image = True
if ext in ['.txt', '.tex', '.sage', '.spyx', '.py', '.f', '.f90', '.c']:
file_is_text = True
text_file_content = open(os.path.join(worksheet.data_directory(), filename)).read()
return render_template(os.path.join("html", "datafile_edit.html"),
worksheet = worksheet,
username = self.notebook_username,
filename_ = filename,
file_is_image = file_is_image,
file_is_text = file_is_text,
text_file_content = text_file_content,
path = path)
@worksheet_command('save_datafile')
def worksheet_save_datafile(self, worksheet):
filename = self.request_values['filename']
if 'button_save' in self.request_values:
text_field = self.request_values['textfield']
dest = os.path.join(worksheet.data_directory(), filename) #XXX: Requires access to filesystem
if os.path.exists(dest):
os.unlink(dest)
open(dest, 'w').write(text_field)
print 'saving datafile, redirect'
return redirect(url_for_worksheet(worksheet))
# @worksheet_command('link_datafile')
# def worksheet_link_datafile(self, worksheet):
# target_worksheet_filename = self.request_values['target']
# data_filename = self.request_values['filename']
# src = os.path.abspath(os.path.join(
# worksheet.data_directory(), data_filename))
# target_ws = self.notebook.get_worksheet_with_filename(target_worksheet_filename)
# target = os.path.abspath(os.path.join(
# target_ws.data_directory(), data_filename))
# if target_ws.owner() != self.notebook_username and not target_ws.is_collaborator(self.notebook_username):
# return current_app.message(_("illegal link attempt!"), worksheet_datafile.url_for(worksheet, name=data_filename))
# if os.path.exists(target):
# return current_app.message(_("The data filename already exists in other worksheet\nDelete the file in the other worksheet before creating a link."), worksheet_datafile.url_for(worksheet, name=data_filename))
# os.link(src,target)
# return redirect(worksheet_datafile.url_for(worksheet, name=data_filename))
# #return redirect(url_for_worksheet(target_ws) + '/datafile?name=%s'%data_filename) #XXX: Can we not hardcode this?
@worksheet_command('upload_datafile')
def worksheet_upload_datafile(self, worksheet):
from werkzeug.utils import secure_filename
file = request.files['file']
name = self.request_values.get('name', '').strip() or file.filename
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
file.save(dest)
return ''
@worksheet_command('datafile_from_url')
def worksheet_datafile_from_url(self, worksheet):
from werkzeug.utils import secure_filename
name = self.request_values.get('name', '').strip()
url = self.request_values.get('url', '').strip()
if url and not name:
name = url.split('/')[-1]
name = secure_filename(name)
import urllib2
from urlparse import urlparse
# we normalize the url by parsing it first
parsedurl = urlparse(url)
if not parsedurl[0] in ('http','https','ftp'):
return _('URL must start with http, https, or ftp.')
download = urllib2.urlopen(parsedurl.geturl())
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
import re
matches = re.match("file://(?:localhost)?(/.+)", url)
if matches:
f = file(dest, 'wb')
f.write(open(matches.group(1)).read())
f.close()
return ''
        #Write in binary mode; the downloaded content may not be text.
        with open(dest, 'wb') as f:
f.write(download.read())
return ''
@worksheet_command('new_datafile')
def worksheet_new_datafile(self, worksheet):
from werkzeug.utils import secure_filename
name = self.request_values.get('new', '').strip()
name = secure_filename(name)
#XXX: disk access
dest = os.path.join(worksheet.data_directory(), name)
if os.path.exists(dest):
if not os.path.isfile(dest):
return _('Suspicious filename encountered uploading file.')
os.unlink(dest)
open(dest, 'w').close()
return ''
################################
#Publishing
################################
@worksheet_command('publish')
def worksheet_publish(self, worksheet):
"""
This provides a frontend to the management of worksheet
        publication. This management functionality includes initiating
        publication, re-publication, automatic publication when a worksheet
        is saved, and ending publication.
"""
if 'publish_on' in self.request_values:
self.notebook.publish_worksheet(worksheet, self.notebook_username)
if 'publish_off' in self.request_values and worksheet.has_published_version():
self.notebook.delete_worksheet(worksheet.published_version().filename())
if 'auto_on' in self.request_values:
worksheet.set_auto_publish(True)
if 'auto_off' in self.request_values:
worksheet.set_auto_publish(False)
if 'is_auto' in self.request_values:
return str(worksheet.is_auto_publish())
if 'republish' in self.request_values:
self.notebook.publish_worksheet(worksheet, self.notebook_username)
return ''
############################################
# Ratings
############################################
# @worksheet_command('rating_info')
# def worksheet_rating_info(worksheet):
# return worksheet.html_ratings_info()
# @worksheet_command('rate')
# def worksheet_rate(worksheet):
# ## if user_type(self.username) == "guest":
# ## return HTMLResponse(stream = message(
# ## 'You must <a href="/">login first</a> in order to rate this worksheet.', ret))
# rating = int(self.request_values['rating'])
# if rating < 0 or rating >= 5:
# return current_app.messge("Gees -- You can't fool the rating system that easily!",
# url_for_worksheet(worksheet))
# comment = self.request_values['comment']
# worksheet.rate(rating, comment, self.notebook_username)
# s = """
# Thank you for rating the worksheet <b><i>%s</i></b>!
# You can <a href="rating_info">see all ratings of this worksheet.</a>
# """%(worksheet.name())
# #XXX: Hardcoded url
# return current_app.message(s.strip(), '/pub/', title=u'Rating Accepted')
########################################################
# Downloading, moving around, renaming, etc.
########################################################
    @worksheet_command('download/<path:filename>')
def worksheet_download(self, worksheet, filename):
try:
#XXX: Accessing the hard disk.
self.notebook.export_worksheet(worksheet.filename(), filename)
except KeyError:
print 'No such worksheet.'
@worksheet_command('restart_sage')
def worksheet_restart_sage(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.restart_sage()
return 'done'
@worksheet_command('quit_sage')
def worksheet_quit_sage(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.quit()
return 'done'
@worksheet_command('interrupt')
def worksheet_interrupt(self, worksheet):
#XXX: TODO -- this must not block long (!)
worksheet.sage().interrupt()
return 'failed' if worksheet.sage().is_computing() else 'success'
@worksheet_command('hide_all')
def worksheet_hide_all(self, worksheet):
worksheet.hide_all()
return 'success'
@worksheet_command('show_all')
def worksheet_show_all(self, worksheet):
worksheet.show_all()
return 'success'
@worksheet_command('delete_all_output')
def worksheet_delete_all_output(self, worksheet):
try:
worksheet.delete_all_output(self.notebook_username)
except ValueError:
return 'fail'
else:
return 'success'
@worksheet_command('print')
def worksheet_print(self, worksheet):
#XXX: We might want to separate the printing template from the
#regular html template.
return self.notebook.html(worksheet.filename(), do_print=True)
#######################################################
# Jmol Popup
#######################################################
#@ws.route('/home/<username>/<id>/jmol_popup.html', methods=['GET'])
#@login_required
def jmol_popup(username, id):
return render_template(os.path.join('html', 'jmol_popup.html'))
############################
# Notebook autosave.
############################
    # Save if the notebook has changed and at least save_interval seconds
    # have elapsed since the last save.
def init_updates(self):
self.save_interval = self.notebook.conf()['save_interval']
self.idle_interval = self.notebook.conf()['idle_check_interval']
self.last_save_time = walltime()
self.last_idle_time = walltime()
def notebook_save_check(self):
t = walltime()
if t > self.last_save_time + self.save_interval:
with global_lock:
# if someone got the lock before we did, they might have saved,
# so we check against the last_save_time again
# we don't put the global_lock around the outer loop since we don't need
# it unless we are actually thinking about saving.
if t > self.last_save_time + self.save_interval:
self.notebook.save()
self.last_save_time = t
def notebook_idle_check(self):
t = walltime()
        if t > self.last_idle_time + self.idle_interval:
            self.notebook.update_worksheet_processes()
            self.notebook.quit_idle_worksheet_processes()
            self.last_idle_time = t
def notebook_updates(self):
self.notebook_save_check()
#Guru does not quit idle worksheet processes.
#self.notebook_idle_check()
|
rljacobson/Guru
|
guru/WorksheetController.py
|
Python
|
mit
| 43,950
|
[
"Jmol"
] |
7b9599b80f89e17fbd1a2dae5ccd7f3b9828078f8892f30b38fe311417a8bef7
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""transforms.py -- This module contains parameter transformations that may be
useful to transform from parameters that are easier to _sample_ in to the
parameters required for building SED models.
They can be used as ``"depends_on"`` entries in parameter specifications.
"""
import numpy as np
from ..sources.constants import cosmo
__all__ = ["stellar_logzsol", "delogify_mass",
"tburst_from_fage", "tage_from_tuniv", "zred_to_agebins",
"dustratio_to_dust1",
"logsfr_ratios_to_masses", "logsfr_ratios_to_sfrs",
"logsfr_ratios_to_masses_flex", "logsfr_ratios_to_agebins",
"zfrac_to_masses", "zfrac_to_sfrac", "zfrac_to_sfr", "masses_to_zfrac",
"sfratio_to_sfr", "sfratio_to_mass"]
# --------------------------------------
# --- Basic Convenience Transforms ---
# --------------------------------------
def stellar_logzsol(logzsol=0.0, **extras):
"""Simple function that takes an argument list and returns the value of the
`logzsol` argument (i.e. the stellar metallicity)
:param logzsol:
        FSPS stellar metallicity parameter.
:returns logzsol:
The same.
"""
return logzsol
def delogify_mass(logmass=0.0, **extras):
"""Simple function that takes an argument list including a `logmass`
parameter and returns the corresponding linear mass.
:param logmass:
The log10(mass)
:returns mass:
The mass in linear units
"""
return 10**logmass
def total_mass(mass=0.0, **extras):
"""Simple function that takes an argument list uncluding a `mass`
parameter and returns the corresponding total mass.
:param mass:
length-N vector of masses in bins
:returns total mass:
Total mass in linear units
"""
return mass.sum()
# --------------------------------------
# Fancier transforms
# --------------------------------------
def tburst_from_fage(tage=0.0, fage_burst=0.0, **extras):
"""This function transfroms from a fractional age of a burst to an absolute
age. With this transformation one can sample in ``fage_burst`` without
worry about the case ``tburst`` > ``tage``.
:param tage:
The age of the host galaxy (Gyr)
:param fage_burst:
The fraction of the host age at which the burst occurred
:returns tburst:
The age of the host when the burst occurred (i.e. the FSPS ``tburst``
parameter)
"""
return tage * fage_burst
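# A minimal sketch (hypothetical values): a 10 Gyr old host with
# fage_burst=0.5 yields tburst = 5.0 Gyr.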
def tage_from_tuniv(zred=0.0, tage_tuniv=1.0, **extras):
"""This function calculates a galaxy age from the age of the univers at
``zred`` and the age given as a fraction of the age of the universe. This
allows for both ``zred`` and ``tage`` parameters without ``tage`` exceeding
the age of the universe.
:param zred:
Cosmological redshift.
:param tage_tuniv:
The ratio of ``tage`` to the age of the universe at ``zred``.
:returns tage:
The stellar population age, in Gyr
"""
tuniv = cosmo.age(zred).value
tage = tage_tuniv * tuniv
return tage
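# Illustrative sketch (approximate number, assuming a concordance cosmology):
# at zred=1 the universe is roughly 5.9 Gyr old, so tage_tuniv=0.5 gives
# tage of roughly 2.9 Gyr.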
def zred_to_agebins(zred=0.0, agebins=[], **extras):
"""Set the nonparameteric SFH age bins depending on the age of the universe
at ``zred``. The first bin is not altered and the last bin is always 15% of
the upper edge of the oldest bin, but the intervening bins are evenly
spaced in log(age).
:param zred:
Cosmological redshift. This sets the age of the universe.
:param agebins:
The SFH bin edges in log10(years). ndarray of shape ``(nbin, 2)``.
:returns agebins:
The new SFH bin edges.
"""
tuniv = cosmo.age(zred).value * 1e9
tbinmax = tuniv * 0.85
ncomp = len(agebins)
agelims = list(agebins[0]) + np.linspace(agebins[1][1], np.log10(tbinmax), ncomp-2).tolist() + [np.log10(tuniv)]
return np.array([agelims[:-1], agelims[1:]]).T
def dustratio_to_dust1(dust2=0.0, dust_ratio=0.0, **extras):
"""Set the value of dust1 from the value of dust2 and dust_ratio
:param dust2:
The diffuse dust V-band optical depth (the FSPS ``dust2`` parameter.)
:param dust_ratio:
The ratio of the extra optical depth towards young stars to the diffuse
optical depth affecting all stars.
:returns dust1:
The extra optical depth towards young stars (the FSPS ``dust1``
parameter.)
"""
return dust2 * dust_ratio
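# A minimal sketch (hypothetical values): dust2=0.5 with dust_ratio=2.0 gives
# dust1 = 1.0, i.e. young stars see an extra optical depth of twice the
# diffuse value.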
# --------------------------------------
# --- Transforms for the continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses(logmass=None, logsfr_ratios=None, agebins=None,
**extras):
"""This converts from an array of log_10(SFR_j / SFR_{j+1}) and a value of
log10(\Sum_i M_i) to values of M_i. j=0 is the most recent bin in lookback
time.
"""
nbins = agebins.shape[0]
sratios = 10**np.clip(logsfr_ratios, -100, 100) # numerical issues...
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
coeffs = np.array([ (1. / np.prod(sratios[:i])) * (np.prod(dt[1: i+1]) / np.prod(dt[: i]))
for i in range(nbins)])
m1 = (10**logmass) / coeffs.sum()
return m1 * coeffs
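# A worked sketch (hypothetical numbers): for two bins spanning 10**8 yr and
# 9*10**8 yr with logsfr_ratios=[0.0] (equal SFRs) and logmass=1.0 (10 Msun
# total), the coefficients come out to [1, 9], so the masses are [1, 9] Msun,
# i.e. mass proportional to bin width, as expected for a constant SFR.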
def logsfr_ratios_to_sfrs(logmass=None, logsfr_ratios=None, agebins=None, **extras):
"""Convenience function
"""
masses = logsfr_ratios_to_masses(logmass=logmass, logsfr_ratios=logsfr_ratios,
agebins=agebins)
dt = (10**agebins[:, 1] - 10**agebins[:, 0])
return masses / dt
# --------------------------------------
# --- Transforms for the flexible agebin continuity non-parametric SFHs used in (Leja et al. 2018) ---
# --------------------------------------
def logsfr_ratios_to_masses_flex(logmass=None, logsfr_ratios=None,
logsfr_ratio_young=None, logsfr_ratio_old=None,
**extras):
logsfr_ratio_young = np.clip(logsfr_ratio_young, -100, 100)
logsfr_ratio_old = np.clip(logsfr_ratio_old, -100, 100)
abins = logsfr_ratios_to_agebins(logsfr_ratios=logsfr_ratios, **extras)
nbins = abins.shape[0] - 2
syoung, sold = 10**logsfr_ratio_young, 10**logsfr_ratio_old
dtyoung, dt1 = (10**abins[:2, 1] - 10**abins[:2, 0])
dtn, dtold = (10**abins[-2:, 1] - 10**abins[-2:, 0])
mbin = (10**logmass) / (syoung*dtyoung/dt1 + sold*dtold/dtn + nbins)
myoung = syoung * mbin * dtyoung / dt1
mold = sold * mbin * dtold/dtn
n_masses = np.full(nbins, mbin)
return np.array(myoung.tolist() + n_masses.tolist() + mold.tolist())
def logsfr_ratios_to_agebins(logsfr_ratios=None, agebins=None, **extras):
"""This transforms from SFR ratios to agebins by assuming a constant amount
of mass forms in each bin agebins = np.array([NBINS,2])
use equation:
delta(t1) = tuniv / (1 + SUM(n=1 to n=nbins-1) PROD(j=1 to j=n) Sn)
where Sn = SFR(n) / SFR(n+1) and delta(t1) is width of youngest bin
"""
# numerical stability
logsfr_ratios = np.clip(logsfr_ratios, -100, 100)
# calculate delta(t) for oldest, youngest bins (fixed)
lower_time = (10**agebins[0, 1] - 10**agebins[0, 0])
upper_time = (10**agebins[-1, 1] - 10**agebins[-1, 0])
tflex = (10**agebins[-1,-1] - upper_time - lower_time)
# figure out other bin sizes
n_ratio = logsfr_ratios.shape[0]
sfr_ratios = 10**logsfr_ratios
dt1 = tflex / (1 + np.sum([np.prod(sfr_ratios[:(i+1)]) for i in range(n_ratio)]))
# translate into agelims vector (time bin edges)
agelims = [1, lower_time, dt1+lower_time]
for i in range(n_ratio):
agelims += [dt1*np.prod(sfr_ratios[:(i+1)]) + agelims[-1]]
#agelims += [tuniv[0]]
agelims += [10**agebins[-1, 1]]
agebins = np.log10([agelims[:-1], agelims[1:]]).T
return agebins
# --------------------------------------
# --- Transforms for Dirichlet non-parametric SFH used in (Leja et al. 2017) ---
# --------------------------------------
def zfrac_to_sfrac(z_fraction=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions. The transformation is such that sfr fractions are drawn from a
Dirichlet prior. See Betancourt et al. 2010 and Leja et al. 2017
:param z_fraction:
        latent variables drawn from a specific set of Beta distributions (see
        Betancourt 2010).
:returns sfrac:
The star formation fractions (See Leja et al. 2017 for definition).
"""
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
return sfr_fraction
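# A worked sketch (hypothetical values): z_fraction=[0.5, 0.5, 0.5] yields
# sfr fractions [0.5, 0.25, 0.125, 0.125] -- each z variable breaks off half
# of the remaining "stick", and the fractions sum to one.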
def zfrac_to_masses(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to sfr
fractions and then to bin mass fractions. The transformation is such that
sfr fractions are drawn from a Dirichlet prior. See Betancourt et al. 2010
and Leja et al. 2017
:param total_mass:
The total mass formed over all bins in the SFH.
:param z_fraction:
        latent variables drawn from a specific set of Beta distributions (see
        Betancourt 2010).
:returns masses:
The stellar mass formed in each age bin.
"""
# sfr fractions
sfr_fraction = np.zeros(len(z_fraction) + 1)
sfr_fraction[0] = 1.0 - z_fraction[0]
for i in range(1, len(z_fraction)):
sfr_fraction[i] = np.prod(z_fraction[:i]) * (1.0 - z_fraction[i])
sfr_fraction[-1] = 1 - np.sum(sfr_fraction[:-1])
# convert to mass fractions
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
mass_fraction = sfr_fraction * np.array(time_per_bin)
mass_fraction /= mass_fraction.sum()
masses = total_mass * mass_fraction
return masses
# -- version of above for arrays of fractions --
#zf = np.atleast_2d(z_fraction)
#shape = list(zf.shape)
#shape[-1] += 1
#sfr_fraction = np.zeros(shape)
#sfr_fraction[..., 0] = 1.0 - z_fraction[..., 0]
#for i in range(1, shape[-1]-1):
# sfr_fraction[..., i] = (np.prod(z_fraction[..., :i], axis=-1) *
# (1.0 - z_fraction[...,i]))
#sfr_fraction[..., -1] = 1 - np.sum(sfr_fraction[..., :-1], axis=-1)
#sfr_fraction = np.squeeze(sfr_fraction)
#
# convert to mass fractions
#time_per_bin = np.diff(10**agebins, axis=-1)[:,0]
#sfr_fraction *= np.array(time_per_bin)
#mtot = np.atleast_1d(sfr_fraction.sum(axis=-1))
#mass_fraction = sfr_fraction / mtot[:, None]
#
#masses = np.atleast_2d(total_mass) * mass_fraction.T
#return masses.T
def zfrac_to_sfr(total_mass=None, z_fraction=None, agebins=None, **extras):
"""This transforms from independent dimensionless `z` variables to SFRs.
:returns sfrs:
The SFR in each age bin (msun/yr).
"""
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
masses = zfrac_to_masses(total_mass, z_fraction, agebins)
return masses / time_per_bin
def masses_to_zfrac(mass=None, agebins=None, **extras):
"""The inverse of :py:func:`zfrac_to_masses`, for setting mock parameters
based on mock bin masses.
:returns total_mass:
The total mass
:returns zfrac:
The dimensionless `z` variables used for sfr fraction parameterization.
"""
total_mass = mass.sum()
time_per_bin = np.diff(10**agebins, axis=-1)[:, 0]
sfr_fraction = mass / time_per_bin
sfr_fraction /= sfr_fraction.sum()
z_fraction = np.zeros(len(sfr_fraction) - 1)
z_fraction[0] = 1 - sfr_fraction[0]
for i in range(1, len(z_fraction)):
z_fraction[i] = 1.0 - sfr_fraction[i] / np.prod(z_fraction[:i])
return total_mass, z_fraction
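# A round-trip sketch (hypothetical values): with agebins of [[0, 8], [8, 9]]
# and z_fraction=[0.5], zfrac_to_masses(total_mass=10.0, ...) gives masses of
# [1, 9] Msun (equal SFR in both bins); feeding those masses back through
# masses_to_zfrac recovers (10.0, [0.5]).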
# --------------------------------------
# --- Transforms for SFR ratio based nonparametric SFH ---
# --------------------------------------
def sfratio_to_sfr(sfr_ratio=None, sfr0=None, **extras):
    raise NotImplementedError
def sfratio_to_mass(sfr_ratio=None, sfr0=None, agebins=None, **extras):
    raise NotImplementedError
|
bd-j/bsfh
|
prospect/models/transforms.py
|
Python
|
gpl-2.0
| 12,300
|
[
"Galaxy"
] |
5d9a20f78c2c22599078b41eadf1ed579bb4a282c585ba31a2acfd43a04b3e55
|
#! /usr/bin/env python
import os
import sys
import logging
import logging.config
import sketch
import util
from encoder import Encoder
from jskparser.jskparser import parse
import rewrite, decode
pwd = os.path.dirname(__file__)
root_dir = os.path.join(pwd, "..")
res_dir = os.path.join(root_dir, "result")
log_lvls = {'0':logging.NOTSET, '10':logging.DEBUG, '20':logging.INFO, '30':logging.WARNING,
'40':logging.ERROR, '50':logging.CRITICAL}
def translate(**kwargs):
## logging configuration
log_lvl = log_lvls.get(kwargs.get('log_lvl', '10'))
logging.config.fileConfig(os.path.join(pwd, "logging.conf"))
logging.getLogger().setLevel(log_lvl)
prg = kwargs.get('prg', None)
out_dir = kwargs.get('out_dir', res_dir)
sk = kwargs.get('sketch', True)
fs = kwargs.get('fs', False)
cgen = kwargs.get('custom_gen', False)
cntr = kwargs.get('cntr', False)
skv = kwargs.get('skv', 0)
lib = kwargs.get('lib', True)
inline = kwargs.get('inline', None)
unroll = kwargs.get('unroll', None)
inbits = kwargs.get('inbits', None)
cbits = kwargs.get('cbits', None)
parallel = kwargs.get('parallel', None)
jgen = kwargs.get('jgen', False)
cgen = True if jgen else cgen
codegen_jar = os.path.join(root_dir, "codegen", "lib", "codegen.jar")
logging.info('parsing {}'.format(prg))
prg_ast = parse(prg,lib=lib)
if jgen:
logging.info('rewriting {}'.format(prg))
rewrite.visit(prg_ast)
util.add_object(prg_ast)
encoder = Encoder(prg_ast, out_dir, fs)
logging.info('encoding to Sketch')
encoder.to_sk()
# Sketch options
opts = kwargs.get('opts', [])
# Sketch inlining and unrolling
if inline: opts.extend(['--bnd-inline-amnt', inline])
if unroll: opts.extend(['--bnd-unroll-amnt', unroll])
if inbits: opts.extend(['--bnd-inbits', inbits])
if cbits: opts.extend(['--bnd-cbits', cbits])
if parallel: opts.append("--slv-parallel")
# print counter examples
if cntr: opts.extend(['-V3', '--debug-cex'])
if skv != 0: opts.extend(['-V{}'.format(skv)])
# place to keep sketch's temporary files
opts.extend(["--fe-tempdir", out_dir])
opts.append("--fe-keep-tmp")
# custom codegen
if cgen: opts.extend(["--fe-custom-codegen", codegen_jar])
# run Sketch
if not os.path.exists(os.path.join(out_dir, "output")):
os.makedirs(os.path.join(out_dir, "output"))
output_path = os.path.join(out_dir, "output", "{}.txt".format(encoder.demo_name))
if sk:
if os.path.exists(output_path): os.remove(output_path)
sketch.set_default_option(opts)
logging.info('sk_dir: {}, output_path: {}'.format(encoder.sk_dir, output_path))
_, r = sketch.run(encoder.sk_dir, output_path)
if jgen:
java_output_dir = os.path.join(out_dir, "java")
# Due to Encoder changing the AST, we have to re-parse here
# TODO: Find another way to preserve the AST
prg_ast = parse(prg,lib=lib)
rewrite.visit(prg_ast)
decode.to_java(java_output_dir, prg_ast, output_path)
# if sketch fails, halt the process here
if not r: return 1
elif not prg:
jskparser.error("need to pass in some file")
return 0
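# A minimal usage sketch (path and options hypothetical):
#
#   translate(prg=['demo/Hello.java'], out_dir='/tmp/out', sketch=False)
#
# parses and encodes the program to Sketch but stops before invoking the
# Sketch solver.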
def main(prg, log_lvl='20'):
return translate(prg=prg, log_lvl=log_lvl)
if __name__ == "__main__":
# print 'booleanValue:'
# descriptors = util.get_mtd_types('java/lang/Byte', 'parseByte', 2)
# print
# descriptors = util.get_mtd_types('java/lang/Byte', 'compare', 2)
# exit()
    if len(sys.argv) < 2:
sys.exit("incorrect number of arguments")
from optparse import OptionParser
jskparser = OptionParser(usage="%prog [options]* [-t tmp_path]* (api_path)")
jskparser.add_option("-t", "--template",
action="append", dest="tmpl", default=[],
help="template folder")
jskparser.add_option("-l", "--log_lvl",
action="store", dest="log_lvl", default='10',
help="level of logging")
jskparser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print intermediate messages verbosely")
jskparser.add_option("-m", "--model",
action="store_true", dest="model", default=False,
help="use models of Java libraries")
jskparser.add_option("-o", "--out_dir",
dest="out_dir", default=None,
help="output directory for sketches, templates and codegen")
jskparser.add_option("-f", "--file-system",
action="store_true", dest="fs", default=False,
help="model filesytem with HashMap")
jskparser.add_option("--no-sketch",
action="store_false", dest="sketch", default=True,
help="proceed the whole process without running Sketch")
jskparser.add_option("-c", "--custom-codegen",
action="store_true", dest="custom_gen", default=False,
help="use custom code generator")
jskparser.add_option("--cntr",
action="store_true", dest="cntr", default=False,
help="print out counter examples")
jskparser.add_option("--skv",
action="store", dest="skv", default=0,
help="set verbosity level for Sketch")
jskparser.add_option("--no-lib",
action="store_false", dest="lib", default=True,
help="compile without linking default Java libraries")
jskparser.add_option("--inline",
action="store", dest="inline", default=None,
help="change inlining amount for Sketch")
jskparser.add_option("--unroll",
action="store", dest="unroll", default=None,
help="change unrolling amount for Sketch")
jskparser.add_option("--inbits",
action="store", dest="inbits", default=None,
help="change input integer bits for Sketch")
jskparser.add_option("--cbits",
action="store", dest="cbits", default=None,
help="change control integer bits for Sketch")
jskparser.add_option("--sk_opts",
action="append", dest="opts", default=[],
help="extra sketch options")
jskparser.add_option("--parallel",
action="store_true", dest="parallel", default=None,
help="run Sketch in parallel mode")
jskparser.add_option("--java_codegen",
action="store_true", dest="jgen", default=False,
help="Enable rewrite-based Java code generation")
(OPT, argv) = jskparser.parse_args()
OPT.prg = argv
sys.exit(translate(**vars(OPT)))
|
plum-umd/java-sketch
|
java_sk/main.py
|
Python
|
mit
| 7,160
|
[
"VisIt"
] |
4e2a6a31c5dbb9d176b476d4ac5328dd9bb69f44be15fa89ac99b47fd0a6853a
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
import numpy as np
# make simulation deterministic
np.random.seed(42)
benchmark, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@BENCHMARKS_DIR@/lb.py", cmd_arguments=["--particles_per_core", "80"],
measurement_steps=200, n_iterations=2, min_skin=0.688, max_skin=0.688)
@skipIfMissingFeatures
class Sample(ut.TestCase):
system = benchmark.system
if __name__ == "__main__":
ut.main()
|
pkreissl/espresso
|
testsuite/scripts/benchmarks/test_lb.py
|
Python
|
gpl-3.0
| 1,176
|
[
"ESPResSo"
] |
5390add4296c569696f8a4817df960898b1c0560be0189d42cad450f7fc4cb54
|