blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbdfd2a738fa239c003b365edd88175d2bffc480 | 6c651e1d1eecba344590ed473c2c3208e1995ee7 | /Training/python/python.py | bf52c499941fb42abf6be3dad0cc6bf000e28dde | [] | no_license | rbjagadeesh/Materials | 0355c54c0c4c7f95d7e2dc404022bf0b20f92911 | 5c10e452b7f2cd13a0c4f55ec268a0ff88fdf11f | refs/heads/master | 2021-01-19T06:56:38.011935 | 2017-04-06T12:45:13 | 2017-04-06T12:45:13 | 87,511,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | x = 10
# Examples of basic Python value types.
y = True  # bool literal
z = "abcd"  # str
a = [1,2,3,4,5,6,7,8,9]  # list (mutable sequence)
b = (1,2,3,4,56,7,8,9)  # tuple (immutable sequence)
| [
"jagadeeshrb1@gmail.com"
] | jagadeeshrb1@gmail.com |
70f33bb40b94725d71df75b5591e7a9b56325cca | f9ed608c620093b9f6b5058bcedf7ae610c09c8d | /329-Longest_Increasing_Path_in_a_Matrix.py | 2076972e979562372e23e07d8d2bfd9f51a966ba | [] | no_license | chanyoonzhu/leetcode-python | 9b88d7f2749e1ae3ed597759b1bf9f7fa4912c35 | 085d868ba0458fc8e6b5549aa00fa151c335fa7f | refs/heads/master | 2022-05-24T11:20:35.927915 | 2022-04-16T06:02:33 | 2022-04-16T06:02:33 | 166,224,197 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | """
- dfs with memoization
- note: path is strictly increasing -> no loop -> DAG -> can use dfs
- O(mn), O(mn)
"""
class Solution:
    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        """Return the length of the longest strictly increasing 4-directional path.

        DFS from every cell with memoization (the path is strictly increasing,
        so the move graph is a DAG and plain DFS terminates).  O(m*n) time and
        space.

        FIX: the original decorated the bound method ``dfs`` with
        ``@lru_cache(None)``.  That cache is process-global and keys on
        ``(self, r, c)``, so it keeps every Solution instance (and its matrix)
        alive forever and never shrinks.  A per-call memo dict gives the same
        asymptotics without the leak.  Also guards the empty grid, which the
        original crashed on (``matrix[0]`` IndexError).
        """
        if not matrix or not matrix[0]:
            return 0  # empty grid: no path
        self.matrix = matrix
        self.dir = [(1, 0), (-1, 0), (0, 1), (0, -1)]
        self.m = len(matrix)
        self.n = len(matrix[0])
        self.res = 0
        self.memo = {}  # (r, c) -> longest increasing path starting at (r, c)
        for r in range(self.m):
            for c in range(self.n):
                self.dfs(r, c)
        return self.res

    def dfs(self, r, c):
        """Longest strictly increasing path starting at cell (r, c), memoized."""
        if (r, c) in self.memo:
            return self.memo[(r, c)]
        connected = 0
        for i, j in self.dir:
            nr, nc = r + i, c + j
            if 0 <= nr < self.m and 0 <= nc < self.n and self.matrix[nr][nc] > self.matrix[r][c]:
                connected = max(connected, self.dfs(nr, nc))
        connected += 1  # count the cell itself
        self.memo[(r, c)] = connected
        self.res = max(self.res, connected)
        return connected
"""
- topological sorting
- O(mn), O(mn)
"""
class Solution:
    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        """Longest strictly increasing path via reverse topological 'peeling'.

        Cells with no strictly larger neighbour are the path endpoints; peel
        them off layer by layer (BFS), decrementing the out-degree of smaller
        neighbours.  The number of layers is the answer.  O(m*n) time/space.
        """
        rows, cols = len(matrix), len(matrix[0])
        DIRS = ((1, 0), (-1, 0), (0, 1), (0, -1))
        # Out-degree toward strictly larger neighbours.
        outdeg = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                for dr, dc in DIRS:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] > matrix[r][c]:
                        outdeg[r][c] += 1
        # Start from all 'peak' cells (out-degree zero).
        frontier = deque((r, c) for r in range(rows) for c in range(cols)
                         if outdeg[r][c] == 0)
        layers = 0
        while frontier:
            nxt = deque()
            for r, c in frontier:
                for dr, dc in DIRS:
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] < matrix[r][c]:
                        outdeg[nr][nc] -= 1
                        if outdeg[nr][nc] == 0:
                            nxt.append((nr, nc))
            frontier = nxt
            layers += 1
        return layers
"zhuchanyoon@gmail.com"
] | zhuchanyoon@gmail.com |
c2ac2a498c6d0f1b98e65c591ae6d6739f4a5ed0 | ec2b311861e61890639e0be31515af7480d27f8f | /Django-Python-Full-Stack-Web-Devloper-master/Python_Level_Two/Part1_Scope.py | b265008226e51f0a83b85fb6b181131dc0a85780 | [] | no_license | schwarzm3283/django_udemy | f501446009c0477190e08444acc8f8d220fa4ca6 | 485cd1e383ca4e2381eef7a47660c32c54abffd8 | refs/heads/master | 2021-01-21T10:55:35.647831 | 2017-06-20T00:59:13 | 2017-06-20T00:59:13 | 91,714,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,982 | py | ###############################
# Nested Statements and Scope #
###############################
# Now that we have gone over on writing our own functions, its important to
# understand how Python deals with the variable names you assign. When you create
# a variable name in Python the name is stored in a *name-space*. Variable names
# also have a "scope", the scope determines the visibility of that variable name
# to other parts of your code.
#
# Lets start with a quick thought experiment, imagine the following code:
x = 25
def printer():
    # This ``x`` is local to printer() and shadows the module-level ``x``.
    x = 50
    return x
print(x)            # -> 25: the module-level x
print(printer())    # -> 50: printer()'s local x
# What do you imagine the output of printer() is? 25 or 50? What is the output
# of print x? 25 or 50? Or what about this:
print(x)            # -> still 25: calling printer() never touched global x
print(printer())    # -> 50
print(x)            # -> 25
# Interesting! But how does Python know which "x" you're referring to in your
# code? This is where the idea of scope comes in. Python has a set of rules it
# follows to decide what variables (such as x in this case) you are referencing
# in your code. Lets break down the rules:
# This idea of scope in your code is very important to understand in order to
# properly assign and call variable names.
#
# In simple terms, the idea of scope can be described by 3 general rules:
#
# 1. Name assignments will create or change local names by default.
# 2. Name references search (at most) four scopes, these are:
# * local
# * enclosing functions
# * global
# * built-in
# 3. Names declared in global and nonlocal statements map assigned names to
# enclosing module and function scopes.
#
#
# The statement in #2 above can be defined by the LEGB rule.
#
# **LEGB Rule.**
#
# L: Local — Names assigned in any way within a function (def or lambda)),
# and not declared global in that function.
#
# E: Enclosing function locals — Name in the local scope of any and all
# enclosing functions (def or lambda), from inner to outer.
#
# G: Global (module) — Names assigned at the top-level of a module file, or
# declared global in a def within the file.
#
# B: Built-in (Python) — Names preassigned in the built-in names module :
# open,range,SyntaxError,...
###############################
### Quick examples of LEGB ####
###############################
# Local
# x is local here:
f = lambda x:x**2
# Enclosing function locals
#
# This occurs when we have a function inside a function (nested functions)
#
name = 'This is a global name'
def greet():
    # Enclosing function
    name = 'Sammy'
    def hello():
        # ``name`` resolves to the enclosing greet() local, not the global.
        print('Hello '+name)
    hello()
greet()
# Note how Sammy was used, because the hello() function was enclosed inside of
# the greet function!
# Global
#
# FIX: was the Python-2-only statement ``print name``, a SyntaxError under
# Python 3 and inconsistent with the print() calls used everywhere else here.
print(name)
# Built-in
# These are the built-in function names in Python (don't overwrite these!)
# You will know if you've typed one based on its color!
len
# Local Variables
# When you declare variables inside a function definition, they are not related
# in any way to other variables with the same names used outside the function -
# i.e. variable names are local to the function. This is called the scope of the
# variable. All variables have the scope of the block they are declared in
# starting from the point of definition of the name.
#
# Example:
x = 50
def func(x):
    # The parameter ``x`` is a *local* name that shadows the global ``x``.
    print('x is', x)
    x = 2  # rebinds only the local name; the global stays 50
    print('Changed local x to', x)
func(x)
print('x is still', x)
# The first time that we print the value of the name x with the first line in
# the function’s body, Python uses the value of the parameter declared in the
# main block, above the function definition.
#
# Next, we assign the value 2 to x. The name x is local to our function. So,
# when we change the value of x in the function, the x defined in the main block
# remains unaffected.
#
# With the last print statement, we display the value of x as defined in the main
# block, thereby confirming that it is actually unaffected by the local
# assignment within the previously called function.
################################
# The Global Statement
################################
# If you want to assign a value to a name defined at the top level of the program
# (i.e. not inside any kind of scope such as functions or classes), then you have
# to tell Python that the name is not local, but it is global. We do this using
# the global statement. It is impossible to assign a value to a variable defined
# outside a function without the global statement.
#
# You can use the values of such variables defined outside the function
# (assuming there is no variable with the same name within the function).
# However, this is not encouraged and should be avoided since it becomes unclear
# to the reader of the program as to where that variable’s definition is. Using
# the global statement makes it amply clear that the variable is defined
# in an outermost block.
#
# Example:
x = 50
def func():
    # ``global`` makes assignments inside this function target the
    # module-level ``x`` instead of creating a new local.
    global x
    print('This function is now using the global x!')
    print('Because of global x is: ', x)
    x = 2
    print('Ran func(), changed global x to', x)
print('Before calling func(), x is: ', x)
func()
print('Value of x (outside of func()) is: ', x)
# The global statement is used to declare that x is a global variable - hence,
# when we assign a value to x inside the function, that change is reflected
# when we use the value of x in the main block.
#
# You can specify more than one global variable using the same global statement
# e.g. global x, y, z.
###############################
# Conclusion
###############################
# You should now have a good understanding of Scope (you may have already
# intuitively felt right about Scope which is great!) One last mention is that
# you can use the globals() and locals() functions to check what are your current
# local and global variables.
#
# Another thing to keep in mind is that everything in Python is an object! I can
# assign variables to functions just like I can with numbers! We will go over
# this again in the decorator section of the course!
| [
"schwarz.mike@gmail.com"
] | schwarz.mike@gmail.com |
fde39ca57166f92eb10125b0a16994d58681046d | 8c732265fd0a8245136d57cda91d8c1c6c727305 | /word_count_milestone_2.py | 3380267e6ca580c1dc1065c92eaca21a642c90b3 | [] | no_license | daruby59/word_count | 0cf4adbef6bd190e105516077fe2e37c470cdafb | a36b7129049aed2e09ea396b9902f2dfb18a7e2a | refs/heads/master | 2021-01-01T04:43:14.967701 | 2020-01-11T15:54:45 | 2020-01-11T15:54:45 | 97,233,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,347 | py | from __future__ import division
import sys
import random
import re
xfile = str(sys.argv[1])  # base name of the input; reads <xfile>.txt, writes <xfile>_*.html
limit = str(sys.argv[2])  # minimum word count for a word to appear in the cloud
count = 0
tagcount = 0
#
# color references...
#
# Palette the word cloud picks from at random (indices 0-14).
color_array = ["red","maroon","magenta","orange","#6094DB","green","teal","olive","purple","blue","navy","#0088dd","#6755E3","#B6BA18","black"]
#
def mean(x):
    """Arithmetic mean of the values in *x* (assumes a non-empty sequence)."""
    # True division is guaranteed by the module-wide
    # ``from __future__ import division`` even under Python 2.
    total = sum(x)
    return total / len(x)
#
def syllable_count(xx1, count):
    """Return a list with the syllable count of the first *count* words of *xx1*."""
    return [sylco(xx1[i]) for i in range(count)]
#
# function from: m.emre aydin (2013) eayd.in/p=232
#
def sylco(word) :
    """Estimate the number of syllables in the English word *word*.

    Heuristic from m. emre aydin (2013), eayd.in: count vowel groups, then
    apply a list of add/subtract corrections for common English spelling
    patterns.  Assumes *word* contains only letters (no punctuation);
    words of three letters or fewer always count as one syllable.
    """
    word = word.lower()
    # exception_add are words that need extra syllables
    # exception_del are words that need less syllables
    exception_add = ['serious','crucial']
    exception_del = ['fortunately','unfortunately']
    co_one = ['cool','coach','coat','coal','count','coin','coarse','coup','coif','cook','coign','coiffe','coof','court']
    co_two = ['coapt','coed','coinci']
    pre_one = ['preach']
    syls = 0 #added syllable number
    disc = 0 #discarded syllable number
    #
    # 1) if letters < 3 : return 1
    #
    if len(word) <= 3 :
        syls = 1
        return syls
    #
    # 2) if doesn't end with "ted" or "tes" or "ses" or "ied" or "ies", discard "es" and "ed" at the end.
    # if it has only 1 vowel or 1 set of consecutive vowels, discard. (like "speed", "fled" etc.)
    #
    if word[-2:] == "es" or word[-2:] == "ed" :
        doubleAndtripple_1 = len(re.findall(r'[eaoui][eaoui]',word))
        if doubleAndtripple_1 > 1 or len(re.findall(r'[eaoui][^eaoui]',word)) > 1 :
            if word[-3:] == "ted" or word[-3:] == "tes" or word[-3:] == "ses" or word[-3:] == "ied" or word[-3:] == "ies" :
                pass
            else :
                disc+=1
    #
    # 3) discard trailing "e", except where ending is "le"
    #
    le_except = ['whole','mobile','pole','male','female','hale','pale','tale','sale','aisle','whale','while']
    if word[-1:] == "e" :
        if word[-2:] == "le" and word not in le_except :
            pass
        else :
            disc+=1
    #
    # 4) check if consecutive vowels exists, triplets or pairs, count them as one.
    #
    doubleAndtripple = len(re.findall(r'[eaoui][eaoui]',word))
    tripple = len(re.findall(r'[eaoui][eaoui][eaoui]',word))
    disc+=doubleAndtripple + tripple
    #
    # 5) count remaining vowels in word.
    #
    numVowels = len(re.findall(r'[eaoui]',word))
    #
    # 6) add one if starts with "mc"
    #
    if word[:2] == "mc" :
        syls+=1
    #
    # 7) add one if ends with "y" but is not surrouned by vowel
    #
    if word[-1:] == "y" and word[-2] not in "aeoui" :
        syls +=1
    #
    # 8) add one if "y" is surrounded by non-vowels and is not in the last word.
    #
    for i,j in enumerate(word) :
        if j == "y" :
            if (i != 0) and (i != len(word)-1) :
                if word[i-1] not in "aeoui" and word[i+1] not in "aeoui" :
                    syls+=1
    #
    # 9) if starts with "tri-" or "bi-" and is followed by a vowel, add one.
    #
    if word[:3] == "tri" and word[3] in "aeoui" :
        syls+=1
    if word[:2] == "bi" and word[2] in "aeoui" :
        syls+=1
    #
    # 10) if ends with "-ian", should be counted as two syllables, except for "-tian" and "-cian"
    #
    if word[-3:] == "ian" :
        # and (word[-4:] != "cian" or word[-4:] != "tian") :
        if word[-4:] == "cian" or word[-4:] == "tian" :
            pass
        else :
            syls+=1
    #
    # 11) if starts with "co-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
    #
    if word[:2] == "co" and word[2] in 'eaoui' :
        if word[:4] in co_two or word[:5] in co_two or word[:6] in co_two :
            syls+=1
        elif word[:4] in co_one or word[:5] in co_one or word[:6] in co_one :
            pass
        else :
            syls+=1
    #
    # 12) if starts with "pre-" and is followed by a vowel, check if exists in the double syllable dictionary, if not, check if in single dictionary and act accordingly.
    #
    if word[:3] == "pre" and word[3] in 'eaoui' :
        if word[:6] in pre_one :
            pass
        else :
            syls+=1
    #
    # 13) check for "-n't" and cross match with dictionary to add syllable.
    #
    negative = ["doesn't", "isn't", "shouldn't", "couldn't","wouldn't"]
    if word[-3:] == "n't" :
        if word in negative :
            syls+=1
        else :
            pass
    #
    # 14) Handling the exceptional words.
    #
    if word in exception_del :
        disc+=1
    if word in exception_add :
        syls+=1
    #
    # calculate the output
    #
    return (numVowels - disc + syls)
#
def avg(syllable):
    """Arithmetic mean of *syllable* (assumes a non-empty sequence)."""
    return sum(syllable) / len(syllable)
#
def publishResults(sentence_count, xx1, xx2):
    """Write the two HTML reports for the analysed text.

    ``<xfile>_cloud.html`` gets the readability statistics plus a coloured
    word cloud; ``<xfile>_table.html`` gets a per-word frequency table.

    :param sentence_count: number of sentences detected in the input text
    :param xx1: every word of the text (lower-cased and sorted *in place*)
    :param xx2: word list with stop words removed (also mutated in place)

    NOTE(review): Python 2 only -- relies on the ``print`` statement and the
    removed ``<>`` inequality operator.
    """
    f = open(xfile+"_cloud.html","w+")
    f.write("<html>\n")
    f.write("<body>")
    #
    h = open(xfile+"_table.html","w+")
    h.write("<html>\n")
    h.write("<body>\n")
    j1 = 1
    k1 = 0
    j2 = 1
    k2 = 0
    #
    count = len(xx1)
    word_length = sum(len(word) for word in xx1)/count
    #
    for i in range(count):
        xx1[i] = str.lower(xx1[i])
    #
    xx1.sort()
    #
    syllable = syllable_count(xx1,count)
    average_syllable = avg(syllable)
    print "*", len(syllable)
    #
    # new code here -- get rid of plurals
    #
    total_word_instance = ["" for x_i in range(count)]
    total_word_count = [0 for x_i in range(count)]
    total_word_syllable = [0 for x_i in range(count)]
    # A word followed by its own plural ("cat", "cats") counts as one word.
    for i in range(count-1):
        if (xx1[i] == xx1[i+1][:-1]):
            xx1[i+1] = xx1[i]
    #
    # Run-length count the sorted words into instance/syllable/count arrays.
    for i in range(count-1):
        if ((xx1[i] <> xx1[i+1])):
            total_word_instance[k1] = xx1[i]
            total_word_syllable[k1] = syllable[i]
            total_word_count[k1] = j1
            j1 = 1
            k1 = k1+1
        if (xx1[i] == xx1[i+1]):
            j1=j1+1
    #
    # Same de-pluralise + run-length pass for the stop-word-free list.
    net_count = len(xx2)
    for i in range(net_count):
        xx2[i] = str.lower(xx2[i])
    word_instance = ["" for x_i in range(net_count)]
    word_count = [0 for x_i in range(net_count)]
    xx2.sort()
    #
    for i in range(net_count-1):
        if (xx2[i] == xx2[i+1][:-1]):
            xx2[i+1] = xx2[i]
    #
    for i in range(net_count-1):
        if ((xx2[i] <> xx2[i+1])):
            word_instance[k2] = xx2[i]
            word_count[k2] = j2
            j2 = 1
            k2 = k2+1
        if (xx2[i] == xx2[i+1]):
            j2=j2+1
    #
    ratio_unique = (k1/count)*100.0
    sentence_avg = count/sentence_count
    #
    # FKRA = (0.39 x ASL) + (11.8 x ASW) - 15.59
    #
    # FKRE = 206.835 - 1.015x ASL - 84.6xASW
    #
    fkra = (0.39*sentence_avg) + (11.8*average_syllable) - 15.59
    fkre = 206.835 - (1.015*sentence_avg) - (84.6*average_syllable)
    fk_formula = (sentence_avg + average_syllable)*0.4
    sentence_hundred = 100.0/sentence_avg
    syllables_hundred = average_syllable*100.0
    #
    # Left-hand cell of the cloud page: readability summary.
    f.write("<table border=\"1\" cellspacing=\"0\" cellpadding = \"1\" width=\"1200px\"><tr>")
    f.write("<td width = \"40%\" valign=\"top\">")
    f.write("<span style = \"text-align: center; font-size: 16px; color: navy;\">Text Analysis (Reading level)</span><br/>")
    f.write("<p>Input file: "+xfile+".doc</p>")
    f.write("<ul>")
    f.write("<li>Sentence count: "+str(sentence_count)+"")
    f.write("<li>Average words per sentence: "+str(round(sentence_avg,1))+"</li>")
    f.write("<li>Total Word count: "+str(count)+"</li>")
    f.write("<li>Net Word count (less stop words): "+str(net_count)+"</li>")
    f.write("<li>Unique words: "+str(k1)+" -- % of Total: "+str(round(ratio_unique,2))+"% </li>")
    f.write("<li>Average word length: "+str(round(word_length,1))+" characters</li>")
    f.write("<li>Average number of syllables per word: "+str(round(average_syllable,2))+"</li>")
    f.write("</ul>")
    f.write("<ul>")
    f.write("<li>FKRA (Flesch-Kincaid) Reading Level - Grade: "+str(round(fk_formula,1))+"</li>")
    f.write("<li>FKRE (Flesch-Kincaid) Reading Level - (alt): "+str(round(fkra,1))+"</li>")
    f.write("<li>Sentences per 100 words: "+str(round(sentence_hundred,1))+"")
    f.write("<li>Syllables per 100 words: "+str(round(syllables_hundred,1))+"</li>")
    f.write("</ul></td>")
    f.write("<td width=\"60%\">")
    #
    # separate reference table
    #
    h.write("<table cellspacing = \"0\" cellpadding = \"1\" border=\"1\" width = \"600\">")
    h.write("<tr><td bgcolor=\"#dedede\" width=\"300\" align=\"center\"><b>Word Instance</b></td>")
    h.write("<td bgcolor=\"#dedede\" width=\"100\" align=\"center\"><b>Syllables</b></td>")
    h.write("<td bgcolor=\"#dedede\" width = \"100\" align=\"center\"><b>Count</b> ( > "+limit+")</td>")
    h.write("<td bgcolor=\"#dedede\" width = \"100\" align=\"center\"><b>Frequency</b></td></tr>")
    freq = mean(word_count)
    print "Mean: ",freq
    nn = 0
    #
    fsize = 10
    for i in range(k1):
        relative_count = (total_word_count[i]/k1)*100.0
        h.write("<tr><td width=\"300\">"+total_word_instance[i]+"</td>")
        h.write("<td width=\"100\" align=\"center\">"+str(total_word_syllable[i])+"</td>")
        h.write("<td width = \"100\" align=\"right\">"+str(total_word_count[i])+"</td>")
        h.write("<td width = \"100\" align=\"right\">"+str(round(relative_count,2))+"% </td></tr>")
    #
    # word cloud
    #
    for i in range(k2):
        if (word_instance[i] > "aa" and word_count[i] > float(limit)) :
            # print word_instance[i], word_count[i]
            # new code
            duo = 0
            # Font size grows with log2 of the count: 14px, 18px, 22px, ...
            for duo in range(16) :
                if (word_count[i] >= (2**duo) and word_count[i] < (2**(duo+1))) :
                    fsize = 14 + duo*4
            #
            ccx = int(random.random()*14)
            color_choice = color_array[ccx]
            nn = nn+1
            f.write("<span style = \"font-size: "+str(fsize)+"px; color: "+color_choice+";\">"+word_instance[i]+"</span> ")
    #
    f.write("</td></tr></table>")
    f.write("</body>")
    f.write("</html>")
    f.close()
    #
    h.write("</table>")
    h.write("</body>")
    h.write("</html>")
    h.close()
#
def main():
    """Read <xfile>.txt, normalise the text and publish the HTML reports.

    Splits the raw text into sentences, strips punctuation, builds the full
    word list and a second list with common stop words removed, then hands
    everything to publishResults().
    """
    # FIX: use a context manager so the file handle is closed even on error
    # (the original left ``g`` open for the life of the process).
    with open(xfile + ".txt", 'r') as g:
        xx = g.read()
    #
    # sentence count [xx0]
    #
    xx = xx.replace("? ",". ")
    xx = xx.replace("! ",". ")
    xx0 = xx.split(". ")
    sentence_count = len(xx0)
    #
    # eliminate punctuation
    #
    xx = xx.replace(".\n",". ")
    xx = xx.replace(".","")
    xx = xx.replace(",","")
    xx = xx.replace("\'","")
    xx = xx.replace("\"","")
    xx = xx.replace("\/"," ")
    xx = xx.replace("\n","")
    xx = xx.replace("(","")
    xx = xx.replace(")","")
    xx = xx.replace("?","")
    xx = xx.replace("!","")
    xx = xx.replace(":","")
    xx = xx.replace(";","")
    xx = xx.replace("  "," ")
    #
    # all words [xx1]
    #
    xx1 = xx.split(" ")
    #
    # less stop words [xx2]
    #
    null_words = ["A "," a "," all ","All "," also ","Also "," am "," an ","An "," and ","And "," are ","Are "," as ","As "," at ","At ",
    " be "," but ","But "," by ","By "," can "," do "," for "," from "," had "," has "," have "," how ","How "," I ","I "," if ","If ",
    " in ","In "," is ","Is "," it ","It "," its "," if ","If "," my ","My "," no "," not "," on ","On "," or ","Or "," of "," our ","Our ", " so ","So ",
    " that "," the ","The "," these ","These "," there "," they ","They "," this ","This "," their "," to "," too "," us ","You "," you ",
    "Your "," your "," was ","Was "," what ","What "," we ","We "," when ","When "," where ","Where "," which ","Which ",
    " why ","Why "," who ","Who "," will "," with ","With "," would "," yet "]
    #
    for i in range(len(null_words)):
        xx = xx.replace(null_words[i]," ")
        xx = xx.replace("  "," ")
    #
    xx2 = xx.split(" ")
    publishResults(sentence_count, xx1, xx2)
#
if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
c22170c208dfa332195dc6b992e8089d3a9b37ee | 7930befaed2a94711484411bf9c36d35d039e099 | /train.py | 61fd5c6f1df957b4dc336db4f0516ef9e264cf3b | [] | no_license | iamsile/tf-ran-cell | 4ed077a703c04f1b29f7f89b69df7630a96fbea0 | 389d549314928e5c9cb7658b3371f76d25a44ab2 | refs/heads/master | 2021-06-18T04:13:36.307125 | 2017-06-30T07:25:40 | 2017-06-30T07:25:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,274 | py | import os
import numpy as np
import tensorflow as tf
import dataplumbing as dp
from tensorflow.contrib import rnn
from tensorflow.contrib.rnn import GRUCell, BasicLSTMCell, LayerNormBasicLSTMCell
from tensorflow.contrib.rnn.python.ops import core_rnn
from tensorflow.contrib.layers import xavier_initializer as glorot
from ran_cell import RANCell
from ran_cell_v2 import RANCellv2
flags = tf.app.flags
# Command-line flag choosing which recurrent cell implementation to train.
flags.DEFINE_string("rnn_type", "RAN", "rnn type [RAN, RANv2, RAN_LNv2, RAN_LN, LSTM, GRU]")
FLAGS = flags.FLAGS
def main(_):
  """Train the cell selected by FLAGS.rnn_type on the dataplumbing task.

  Builds a TF1 static graph (one recurrent layer + sigmoid read-out on the
  final state), trains with Adam for ``num_iterations`` mini-batches, logs
  summaries to ``log/<rnn_type>``, evaluates on a test batch every 100
  iterations and checkpoints the model to ``bin/train.ckpt``.
  NOTE(review): uses deprecated TF1 / tf.contrib APIs throughout.
  """
  np.random.seed(1)
  tf.set_random_seed(1)
  # Problem dimensions come from the dataplumbing module.
  num_features = dp.train.num_features
  max_steps = dp.train.max_length
  num_cells = 250
  num_classes = dp.train.num_classes
  initialization_factor = 1.0
  num_iterations = 500
  batch_size = 100
  learning_rate = 0.001
  current_step = 0
  # Glorot-style uniform range based on fan-in + fan-out.
  initializer = tf.random_uniform_initializer(minval=-np.sqrt(6.0 * 1.0 / (num_cells + num_classes)),
                                              maxval=np.sqrt(6.0 * 1.0 / (num_cells + num_classes)))
  with tf.variable_scope("train", initializer=initializer):
    s = tf.Variable(tf.random_normal([num_cells], stddev=np.sqrt(initialization_factor))) # Determines initial state
    x = tf.placeholder(tf.float32, [batch_size, max_steps, num_features]) # Features
    y = tf.placeholder(tf.float32, [batch_size]) # Labels
    l = tf.placeholder(tf.int32, [batch_size])
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # Select the recurrent cell implementation from the command-line flag.
    if FLAGS.rnn_type == "RAN":
      cell = RANCell(num_cells)
    elif FLAGS.rnn_type == "RANv2":
      cell = RANCellv2(num_cells)
    elif FLAGS.rnn_type == "LSTM":
      cell = BasicLSTMCell(num_cells)
    elif FLAGS.rnn_type == "LSTM_LN":
      cell = LayerNormBasicLSTMCell(num_cells)
    elif FLAGS.rnn_type == "GRU":
      cell = GRUCell(num_cells)
    elif FLAGS.rnn_type == "RAN_LN":
      cell = RANCell(num_cells, normalize=True)
    elif FLAGS.rnn_type == "RAN_LNv2":
      cell = RANCellv2(num_cells, normalize=True)
    states = cell.zero_state(batch_size, tf.float32)
    outputs, states = tf.nn.dynamic_rnn(cell, x, l, states)
    # Read-out layer, Glorot-uniform initialised.
    W_o = tf.Variable(tf.random_uniform([num_cells, num_classes],
                                        minval=-np.sqrt(6.0*initialization_factor / (num_cells + num_classes)),
                                        maxval=np.sqrt(6.0*initialization_factor / (num_cells + num_classes))))
    b_o = tf.Variable(tf.zeros([num_classes]))
    # LSTM-style cells return a state tuple; read out from its hidden part ``h``.
    if FLAGS.rnn_type == "LSTM" or FLAGS.rnn_type == "LSTM_LN" \
        or FLAGS.rnn_type == "RANv2" or FLAGS.rnn_type == "RAN_LNv2":
      ly = tf.matmul(states.h, W_o) + b_o
    else:
      ly = tf.matmul(states, W_o) + b_o
    ly_flat = tf.reshape(ly, [batch_size])
    py = tf.nn.sigmoid(ly_flat)
    ##########################################################################################
    # Optimizer/Analyzer
    ##########################################################################################
    # Cost function and optimizer
    #
    cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=ly_flat, labels=y)) # Cross-entropy cost function
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost, global_step=global_step)
    # Evaluate performance
    #
    correct = tf.equal(tf.round(py), tf.round(y))
    accuracy = 100.0 * tf.reduce_mean(tf.cast(correct, tf.float32))
    tf.summary.scalar('cost', cost)
    tf.summary.scalar('accuracy', accuracy)
    ##########################################################################################
    # Train
    ##########################################################################################
    # Operation to initialize session
    #
    initializer = tf.global_variables_initializer()
    summaries = tf.summary.merge_all()
    # Open session
    #
    with tf.Session() as session:
      # Summary writer
      #
      summary_writer = tf.summary.FileWriter('log/' + FLAGS.rnn_type, session.graph)
      # Initialize variables
      #
      session.run(initializer)
      # Each training session represents one batch
      #
      for iteration in range(num_iterations):
        # Grab a batch of training data
        #
        xs, ls, ys = dp.train.batch(batch_size)
        feed = {x: xs, l: ls, y: ys}
        # Update parameters
        out = session.run((cost, accuracy, optimizer, summaries, global_step), feed_dict=feed)
        print('Iteration:', iteration, 'Dataset:', 'train', 'Cost:', out[0]/np.log(2.0), 'Accuracy:', out[1])
        summary_writer.add_summary(out[3], current_step)
        # Periodically run model on test data
        if iteration%100 == 0:
          # Grab a batch of test data
          #
          xs, ls, ys = dp.test.batch(batch_size)
          feed = {x: xs, l: ls, y: ys}
          # Run model
          #
          summary_writer.flush()
          out = session.run((cost, accuracy), feed_dict=feed)
          test_cost = out[0] / np.log(2.0)
          test_accuracy = out[1]
          print('Iteration:', iteration, 'Dataset:', 'test', 'Cost:', test_cost, 'Accuracy:', test_accuracy)
        current_step = tf.train.global_step(session, global_step)
      summary_writer.close()
      # Save the trained model
      os.makedirs('bin', exist_ok=True)
      saver = tf.train.Saver()
      saver.save(session, 'bin/train.ckpt')
if __name__ == "__main__":
  tf.app.run()
| [
"indiejoseph@gmail.com"
] | indiejoseph@gmail.com |
541a2bb132c30e724fa65dfdccfd3b3de2e89856 | 7f651a7dfa7cd101ddf9dd133ff78bfe996eeb3f | /main.py | 910916d224daa088ba293871bad373666348f2d1 | [
"MIT"
] | permissive | TrendingTechnology/PyPi-Bot | 33071b0e789509dfc267ec25a3e11417d60c1395 | bc2ee98981af4bc9f415a1f968bf872380d017f0 | refs/heads/main | 2023-06-30T08:50:57.641601 | 2021-08-02T13:47:28 | 2021-08-02T13:47:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | # Author: Fayas (https://github.com/FayasNoushad) (@FayasNoushad)
import os
import requests
from requests.utils import requote_uri
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
# Third-party PyPi search API; the package name is appended to this URL.
API = "https://api.abirhasan.wtf/pypi?query="
START_TEXT = """
Hello {},
I am a pypi package search telegram bot.
- Send a pypi package name.
- I will send the information of package.
Made by @FayasNoushad
"""
# Update-channel button appended to every reply keyboard.
BUTTONS = [InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/FayasNoushad')]
# Pyrogram client; credentials are read from the environment.
Bot = Client(
    "PyPi-Bot",
    bot_token = os.environ["BOT_TOKEN"],
    api_id = int(os.environ["API_ID"]),
    api_hash = os.environ["API_HASH"]
)
@Bot.on_message(filters.private & filters.command(["start", "help", "about"]))
async def start(bot, update):
    """Reply to /start, /help and /about with the greeting text."""
    greeting = START_TEXT.format(update.from_user.mention)
    markup = InlineKeyboardMarkup([BUTTONS])
    await update.reply_text(
        text=greeting,
        disable_web_page_preview=True,
        reply_markup=markup,
        quote=True
    )
@Bot.on_message(filters.text)
async def pypi_info(bot, update):
    """Reply to a text message with the PyPi information of the named package."""
    try:
        # Private chat: the whole message is the package name.
        # Group chat: expect "<command> <package>" and take the second token.
        query = update.text if update.chat.type == "private" else update.text.split()[1]
        text = pypi_text(query)
        reply_markup = InlineKeyboardMarkup([pypi_buttons(query), BUTTONS])
        await update.reply_text(
            text=text,
            disable_web_page_preview=True,
            reply_markup=reply_markup,
            quote=True
        )
    except Exception:
        # Best-effort handler: silently ignore messages that are not valid
        # package names and API failures.
        # FIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt.
        pass
def pypi(query):
    """Hit the PyPi search API for *query* and return the decoded JSON dict."""
    response = requests.get(requote_uri(API + query))
    return response.json()
def pypi_text(query):
    """Build the Markdown information text for *query* from the API response."""
    info = pypi(query)
    lines = ["--**Information**--\n"]
    lines.append(f"\n**Package Name:** `{info['PackageName']}`")
    lines.append(f"\n**Title:** `{info['Title']}`")
    lines.append(f"\n**About:** `{info['About']}`")
    lines.append(f"\n**Latest Release Date:** `{info['LatestReleaseDate']}`")
    lines.append(f"\n**PiP Command:** `{info['PipCommand']}`")
    return "".join(lines)
def pypi_buttons(query):
    """Link buttons pointing at the package's PyPi page and home page."""
    info = pypi(query)
    return [
        InlineKeyboardButton(text="PyPi", url=info['PyPi']),
        InlineKeyboardButton(text="Home Page", url=info['HomePage']),
    ]
Bot.run()
| [
"noreply@github.com"
] | noreply@github.com |
9973c7bbcdadc89c6e2f809cd6749834574c852a | a01099ad3b57e3ba58dcaa982d42192d0c7bc465 | /start.py | 9d1e0516e2a13ab9270e4e694eabf5f7b2660e7f | [] | no_license | pradumna123/Decision-tree-for-authorship-prediction | 54f89c3d52273bb9edc98a9c7f0d15bbe4b5df09 | d3bfc6c356b1c4304bec6bb02f143f11393a8b02 | refs/heads/master | 2020-07-25T02:06:16.826534 | 2019-09-14T15:48:47 | 2019-09-14T15:48:47 | 208,125,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,593 | py | from main import data_processing as dp
import timeit as t
import pickle
class Node:
    """
    One node of the decision-tree classifier.

    Children live in ``left_node``/``right_node``; ``data_obj`` wraps the
    slice of training data this node sees, and ``column_no``/``split_value``
    record the split chosen for it.
    """
    __slots__ = ["left_node", "right_node", "data_obj", "data_for_left", "data_for_right", 'current_depth',
                 'expr_entropy', "root_status", 'dict_current_level', "node_status", 'sum', "do_not_count", "sr_no",
                 "class_entropy", 'column_no', 'split_value', 'data_for_this_level', 'previous_data_obj', 'exp_depth']
    def __init__(self, old_obj, cureent_depth, node_status=None, rootstatus=False, sr_no=None):
        """Create a tree node.

        :param old_obj: data_processing object of the parent node (for the
            root, the pre-built root data object)
        :param cureent_depth: depth of this node in the tree
            (parameter name typo kept for keyword-argument compatibility)
        :param node_status: 1 for a left child, 0 for a right child,
            None for the root
        :param rootstatus: True only for the root node
        :param sr_no: serial number used when serialising the tree
        """
        self.previous_data_obj = old_obj  # reference of previous data object
        self.root_status = rootstatus  # this will help us to create a root node.
        self.current_depth = cureent_depth  # the current depth of tree.
        # self.exp_depth = exp_depth # expected depth of tree 0 based so always +1
        # self.expr_entropy = expr_entropy # expected entropy cutoff given by user
        # self.root_status = rootstatus
        self.data_for_left = []  # data partition handed to the left child
        self.do_not_count = None  # None until functions_to_invoke() decides
        self.data_for_right = []  # data partition handed to the right child
        self.sr_no = sr_no
        self.left_node = None  # left node
        self.right_node = None  # right node
        self.node_status = node_status  # 1 for left and 0 for right
        self.class_entropy = 0
        self.sum = 0
        self.column_no = 0  # feature column chosen for the split
        self.split_value = 0  # threshold value for that column
        # print("node at level ::: ", self.current_depth)
        # self.functions_to_invoke()
def get_lef_right(self):
string = ""
string += str(self.sr_no) + " "
if self.left_node != None:
if self.left_node.do_not_count != None:
string += str(self.left_node.sr_no) + " "
else:
string += "None" + " "
if self.right_node != None:
if self.right_node.do_not_count != None:
string += str(self.right_node.sr_no) + " "
else:
string += "None" + " "
string += str(self.column_no) + " "
string += str(self.split_value) + " "
string += str(self.data_obj.get_majority_class()) + " " + str(self.root_status)
string += "\n"
return string
def __str__(self):
string = ""
string = str(self.sr_no)
string += " node is at current depth is :" + str(
self.current_depth) + " node id is: " + str(self.current_depth) + str(
self.node_status) + ' column no is :' + str(
self.column_no) + " split_value is " + str(self.split_value) + '***' + str(self.data_obj.l_dict) + "\n"
return string
def functions_to_invoke(self):
    """Populate this node from its data object and pick the best split.

    Returns True when the node holds a usable split, False when the
    node must be discarded (no split found, empty partition, or the
    partition contains a single class).  On success do_not_count is
    set to False, marking the node as valid.
    """
    # special case for the root node -> its job is to parse the file.
    if self.root_status == True:
        self.do_not_count = False
        self.data_obj = self.previous_data_obj  # the root owns the initial data object
        self.data_obj.worker()  # run the data object's preprocessing pipeline
        self.column_no, self.split_value = self.data_obj.get_row_with_highest_entropy()
        if self.column_no == None:
            # no usable split exists; un-mark the node
            self.do_not_count = None
            return False
        self.dict_current_level = self.data_obj.return_adict_()  # dictionary of already-used values
        self.data_for_left, self.data_for_right = self.data_obj.get_split_data_set()  # cache the partitions for the children
        self.class_entropy = self.data_obj.total_entropy
        return True
    else:
        # normal (non-root) node: take the parent's partition
        left, right = self.previous_data_obj.get_split_data_set()
        if self.node_status == 1:
            data = left  # this node is a left child
            if len(data) < 1:
                print("!!!!!!!!!!", len(data))
                return False
        else:
            data = right  # this node is a right child
            if len(data) < 1:  # somehow no data.
                print("!!!!!!", len(data))
                return False
        # build a fresh data object for this node's slice of the data
        self.data_obj = dp(None, self.previous_data_obj.return_modified_dicr(), self.previous_data_obj,
                           )
        self.data_obj.size_of_data()
        self.data_obj.set_data(data)
        status = self.data_obj.worker()
        if status == False:
            # worker() returns False when the dataset has only 1 class left
            return False
        self.dict_current_level = self.data_obj.return_modified_dicr()
        self.column_no, self.split_value = self.data_obj.get_row_with_highest_entropy()
        self.data_for_left, self.data_for_right = self.data_obj.get_split_data_set()
        # NOTE(review): dict_current_level is immediately overwritten below —
        # the return_modified_dicr() result above appears unused; confirm.
        self.dict_current_level = self.data_obj.return_adict_()
        print(self.current_depth, self.data_obj, len(data),
              self.data_obj.l_dict)
        self.class_entropy = self.data_obj.total_entropy  # remember the entropy of this split
        self.do_not_count = False
        return True
class solver:
    """Builds a binary decision tree of Node objects.

    Responsibilities visible in this class: construct the root from a
    file (via dp), grow the tree recursively (Create_tree/ct), renumber
    nodes (printstart), serialize the tree into final_string
    (printstart555), and classify an attribute vector (test/_test1).
    """
    __slots__ = ["dict_of_used_values", "root", 'root_of_tree', 'entropy_set', "serialnum", 'sum', "final_string",
                 "depth_set"]

    def __init__(self, expected_depth=9, expected_entropy=None, filename1=None):
        """Parse *filename1* into a root data object and create the root node."""
        dp_for_root = dp(filename1, None, None, root=True)
        self.serialnum = 0
        # print(dp_for_root.filename)
        self.root = Node(dp_for_root, 0, rootstatus=True, sr_no=self.serialnum)
        # self.serialnum += 1
        self.root.functions_to_invoke()
        # self.root = dp(filename1, None, None, root=True)  # this will be root .
        # self.root.data_obj.worker()
        # self.dict_of_used_values = self.root.data_obj.return_adict_()
        # self.root.data_obj.g_dict = self.dict_of_used_values
        # self.root.data_obj.copy_g_dict()
        self.root_of_tree = self.root
        self.depth_set = expected_depth  # maximum tree depth (checked in ct)
        self.entropy_set = expected_entropy
        self.sum = 0  # running node counter used while renumbering
        self.final_string = ''  # serialized tree, filled by printstart555

    # send depth+1
    def Create_tree(self):
        """Grow the whole tree below the root."""
        self.ct(self.root, 1)

    def ct(self, node, depth):
        """Recursively create left then right children of *node*.

        serialnum is tentatively incremented before each child and
        rolled back when functions_to_invoke() rejects the child.
        """
        if depth == self.depth_set:
            return  # depth limit reached
        self.serialnum += 1
        temp = Node(node.data_obj, depth,
                    node_status=1, sr_no=self.serialnum)
        status = temp.functions_to_invoke()
        if status == True:
            node.left_node = temp
            self.ct(node.left_node, depth + 1)
        else:
            self.serialnum -= 1
            return
        self.serialnum += 1
        temp = Node(node.data_obj, depth, node_status=0, sr_no=self.serialnum)
        status = temp.functions_to_invoke()
        if status == True:
            node.right_node = temp
            self.ct(node.right_node, depth + 1)
        else:
            self.serialnum -= 1
            return
        return

    def printTree(self):
        """Serialize the root line then walk the tree with printtreea."""
        self.final_string += self.root.get_lef_right()
        self.printtreea(self.root)

    def printstart(self):
        """Renumber all valid nodes in post-order (fills node.sr_no)."""
        # self.root.sr_no = 0
        self.printtree23(self.root)

    def printtree23(self, node):
        """
        give sr_no to nodes (post-order: left, right, then this node)
        """
        if node != None and node.do_not_count != None:
            self.printtree23(node.left_node)
            self.printtree23(node.right_node)
            node.sr_no = self.sum
            self.sum += 1

    def printstart555(self):
        """Serialize every valid node (in-order) into final_string."""
        # self.final_string += self.root.get_lef_right()
        self.printtree235555(self.root)

    def printtree235555(self, node):
        # in-order walk: left subtree, this node's line, right subtree
        if node != None and node.do_not_count != None:
            self.printtree235555(node.left_node)
            # node.sr_no = self.sum
            self.final_string += node.get_lef_right()
            self.printtree235555(node.right_node)

    def printtreea(self, node):
        """Older renumbering walk; bails out on the first missing child.

        NOTE(review): the second guard tests node.left_node.do_not_count
        before recursing into the *right* child — looks like a copy/paste
        slip; printtree23 is the walk actually used by the driver.
        """
        # if node != None and node.do_not_count != None:
        if node.left_node != None:
            if node.left_node.do_not_count != None:
                self.printtreea(node.left_node)
            else:
                return
        else:
            return
        node.sr_no = self.sum
        # self.final_string += node.get_lef_right()
        self.sum += 1
        # print(node, "!!!!", self.sum)
        if node.right_node != None:
            if node.left_node.do_not_count != None:
                self.printtreea(node.right_node)
            else:
                return
        else:
            return
        # print(node)

    def printTree1(self):
        # self.final_string += self.root.get_lef_right()
        self.printtreea(self.root)

    def printtreea1(self, node):
        """Unused serialization variant; same left-child guard slip as printtreea."""
        if node.left_node != None:
            if node.left_node.do_not_count != None:
                self.printtreea(node.left_node)
            else:
                return
        # node.sr_no = self.sum
        self.final_string += node.get_lef_right()
        # self.sum += 1
        # print(node, "!!!!", self.sum)
        if node.right_node != None:
            if node.left_node.do_not_count != None:
                self.printtreea(node.right_node)
            else:
                return
        else:
            return
        # print(node)

    def test(self, attr):
        """Classify the attribute vector *attr* by walking the tree."""
        return self._test1(self.root, attr)

    def _test1(self, root, attr):
        """Descend left when attr[column] <= split value, else right.

        Returns the majority class of the deepest valid node reached.
        """
        # print(root)
        col_no = root.column_no
        split_val = root.split_value
        if col_no == None:
            print("here")
            return
        if attr[col_no] <= split_val:
            if root.left_node != None:
                if root.left_node.do_not_count != None:
                    # valid child exists: keep descending
                    return self._test1(root.left_node, attr)
                else:
                    # leaf reached: answer with this node's majority class
                    return root.data_obj.get_majority_class()
        else:
            if root.right_node != None:
                if root.right_node.do_not_count != None:
                    return self._test1(root.right_node, attr)
                else:
                    return root.data_obj.get_majority_class()
            # print(root.data_obj.get_majority_class())
"""
testing data
"""
# test = [22.727272727272727, 11, 115, 18, 3, 24, 89, 4.88, 1, 0, 2]
# test=[27.77777777777778 ,9 ,133, 15, 1, 14, 73, 4.59, 5, 0, 3]
# test = [22.727272727272727, 11, 122, 8, 4, 15, 78, 4.62, 4, 0, 3]
# test = [20.833333333333332, 12, 130, 19, 5, 18, 66, 4.61, 4, 0, 2]
test = [25.0, 10, 104, 16, 0, 27, 93, 5.15, 3, 2, 2]
attribute = test[:len(test) - 1]
val = [-1]
filename = input("enter a filename to generate tree from: ")
depth = input("enter the depth of tree")
# filename = "data_extracted.txt"
a = solver(filename1=filename)
"this methods creates the tree"
a.Create_tree()
a.depth_set = depth
print("##################################################################")
a.printstart()
a.printstart555()
print("num of nodes", a.sum)
print(a.test(attribute))
# pickle_out = open("treeclassifier.pickle", "wb")
# pickle.dump(a, pickle_out)
# pickle_out.close()
"""
writing the tree to a file
"""
text_file = open("alpha.txt", "w")
text_file.write(a.final_string)
text_file.close()
# print("saved object")
# # print(a.root.data_obj)
| [
"ps6275@gmail.com"
] | ps6275@gmail.com |
e452abec56c616eb8b46b78e240cb845aecc2319 | 6b0d0aec9704d70663fe0edc2a6624a689cc081e | /src/app/pre/wav.py | db52981330702787a0ef82c48d08f68a0e589f2b | [
"BSD-3-Clause"
] | permissive | stefantaubert/tacotron2 | 086d81b38b2c49655df6b0d8d63c633e7531399a | 8475f014391c5066cfe0b92b6c74568639be5e79 | refs/heads/master | 2023-03-29T21:07:02.266973 | 2020-11-25T09:57:40 | 2020-11-25T09:57:40 | 267,858,113 | 5 | 0 | BSD-3-Clause | 2020-05-29T12:56:56 | 2020-05-29T12:56:55 | null | UTF-8 | Python | false | false | 3,818 | py | import os
from functools import partial
from typing import Any
from src.app.pre.ds import get_ds_dir, load_ds_csv
from src.core.common.utils import get_subdir
from src.core.pre.wav import (WavData, WavDataList, normalize, preprocess,
remove_silence, stereo_to_mono, upsample)
_wav_data_csv = "data.csv"
def _get_wav_root_dir(ds_dir: str, create: bool = False):
    """Return (optionally creating) the "wav" folder below *ds_dir*."""
    wav_root = get_subdir(ds_dir, "wav", create)
    return wav_root
def get_wav_dir(ds_dir: str, wav_name: str, create: bool = False):
    """Return (optionally creating) the folder of one named wav version."""
    wav_root = _get_wav_root_dir(ds_dir, create)
    return get_subdir(wav_root, wav_name, create)
def load_wav_csv(wav_dir: str) -> WavDataList:
    """Read the wav metadata csv stored inside *wav_dir*."""
    return WavDataList.load(WavData, os.path.join(wav_dir, _wav_data_csv))
def save_wav_csv(wav_dir: str, wav_data: WavDataList):
    """Write the wav metadata csv into *wav_dir*."""
    wav_data.save(os.path.join(wav_dir, _wav_data_csv))
def preprocess_wavs(base_dir: str, ds_name: str, wav_name: str):
    """Create the initial wav entries for a dataset.

    Skipped (with a message) when the target wav folder already exists.
    """
    print("Preprocessing wavs...")
    ds_dir = get_ds_dir(base_dir, ds_name)
    wav_dir = get_wav_dir(ds_dir, wav_name)
    if os.path.isdir(wav_dir):
        print("Already exists.")
        return
    wav_data = preprocess(load_ds_csv(ds_dir))
    os.makedirs(wav_dir)
    save_wav_csv(wav_dir, wav_data)
def _wav_op(base_dir: str, ds_name: str, origin_wav_name: str, destination_wav_name: str, op: Any):
    """Apply *op* to an existing wav set and save the result as a new set.

    *op* receives (source WavDataList, destination dir) and returns the
    transformed WavDataList.  Skipped when the destination exists.
    """
    ds_dir = get_ds_dir(base_dir, ds_name)
    dest_wav_dir = get_wav_dir(ds_dir, destination_wav_name)
    if os.path.isdir(dest_wav_dir):
        print("Already exists.")
        return
    orig_wav_dir = get_wav_dir(ds_dir, origin_wav_name)
    assert os.path.isdir(orig_wav_dir)
    source_data = load_wav_csv(orig_wav_dir)
    os.makedirs(dest_wav_dir)
    result = op(source_data, dest_wav_dir)
    save_wav_csv(dest_wav_dir, result)
def wavs_normalize(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str):
    """Normalize an existing wav set into a new wav set."""
    print("Normalizing wavs...")
    _wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, partial(normalize))
def wavs_upsample(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str, rate: int):
    """Resample an existing wav set to *rate* Hz as a new wav set."""
    print("Resampling wavs...")
    resample_op = partial(upsample, new_rate=rate)
    _wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, resample_op)
def wavs_stereo_to_mono(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str):
    """Down-mix a stereo wav set to mono as a new wav set."""
    print("Converting wavs from stereo to mono...")
    _wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, partial(stereo_to_mono))
def wavs_remove_silence(base_dir: str, ds_name: str, orig_wav_name: str, dest_wav_name: str, chunk_size: int, threshold_start: float, threshold_end: float, buffer_start_ms: float, buffer_end_ms: float):
    """Trim leading/trailing silence from a wav set into a new wav set."""
    print("Removing silence in wavs...")
    trim_op = partial(
        remove_silence,
        chunk_size=chunk_size,
        threshold_start=threshold_start,
        threshold_end=threshold_end,
        buffer_start_ms=buffer_start_ms,
        buffer_end_ms=buffer_end_ms,
    )
    _wav_op(base_dir, ds_name, orig_wav_name, dest_wav_name, trim_op)
if __name__ == "__main__":
preprocess_wavs(
base_dir="/datasets/models/taco2pt_v5",
ds_name="ljs",
wav_name="22050kHz",
)
preprocess_wavs(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
wav_name="16000kHz",
)
wavs_normalize(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz",
dest_wav_name="16000kHz_normalized",
)
wavs_remove_silence(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz_normalized",
dest_wav_name="16000kHz_normalized_nosil",
threshold_start=-20,
threshold_end=-30,
chunk_size=5,
buffer_start_ms=100,
buffer_end_ms=150
)
wavs_upsample(
base_dir="/datasets/models/taco2pt_v5",
ds_name="thchs",
orig_wav_name="16000kHz_normalized_nosil",
dest_wav_name="22050kHz_normalized_nosil",
rate=22050,
)
| [
"stefan.taubert@posteo.de"
] | stefan.taubert@posteo.de |
88f57018677f39300839046947eab8d9e791995c | 43ae95628a9d5f384ebaee9fd682592649b4c445 | /service_SG.py | 3989270f58e38d742a578e6051f60eb7cf806804 | [] | no_license | ottriho/CNCottriho | 62486296574fe2d93b35ef3499770d2daf52acb7 | 19abd576f97b29d87e0d6158a082a4de3d82b986 | refs/heads/master | 2020-04-28T22:46:16.148845 | 2019-03-14T13:44:18 | 2019-03-14T13:44:18 | 175,629,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | from flask import Flask, request
import sqlite3
import json
app = Flask(__name__)
@app.route('/blog', methods=['POST', 'GET'])
def sum():
    """Log the search term and return matching SG rows as JSON.

    Query parameter ``c`` is a substring filter on the Program column;
    an empty ``c`` matches every row.  The previous implementation
    concatenated ``c`` straight into the SQL text, which allowed SQL
    injection (and crashed on quotes); both statements now use ``?``
    parameter binding.
    """
    c = request.args.get("c", '')
    conn = sqlite3.connect('andmestik.db')
    cur = conn.cursor()
    if c:
        # Audit trail: record what was searched and when (+120 min offset).
        cur.execute(
            "insert into LOG(LOG_DATE,LOG_TIME,LOG_TXT) "
            "values (date('now'), time(time('now'), '+120 minutes'), ?)",
            (c,))
    cur.execute(
        """select Program,
                  StartTime,
                  EndTime,
                  Duraction,
                  Sheets,
                  AverageTimePerSheet,
                  SheetUtilizationRate,
                  RealTime,
                  EstTime,
                  ToDate,
                  Difference,
                  Comment,
                  Btype,
                  Bname,
                  Standard,
                  Mtype,
                  Mthicknes,
                  SheetMass,
                  SheetCode,
                  Operaator,
                  FeedbackDate,
                  CustSegment,
                  CustLine
           FROM SG WHERE Program LIKE ?""",
        ('%' + c + '%',))
    res = cur.fetchall()
    cur.close()
    conn.commit()
    conn.close()
    return json.dumps(res)
app.run(debug=True, port=5000) | [
"noreply@github.com"
] | noreply@github.com |
81723606fe2b68e5e78173541ef43ddbca608263 | 0eab71dd5cf5fa5aa29c5ccc595bee1e19caa9c3 | /app/routes.py | a17f43930959321f77661517d2a0322ec7d24de2 | [] | no_license | aljanaki/whosliverightnow | 13af76e681828a6c385a37628caaf680be8147ea | 952fb2211b290d893f60eb09a5698a57e2ba77cb | refs/heads/master | 2021-05-17T12:48:53.929259 | 2020-03-30T11:58:13 | 2020-03-30T11:58:13 | 250,722,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | from app import app
from datetime import date
from flask import render_template
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
| [
"aljanaki@gmail.com"
] | aljanaki@gmail.com |
79d180a70cb6a5c44e3877544316527d2b40a258 | 674013162755a57e258156832d7fdc645ab55c0d | /No0739-dailyTemperatures2.py | e82bafb62ad60c48f8f53787116b62972db01c3c | [] | no_license | chenxy3791/leetcode | cecd6afb0a85e5563ba2d5ae8eb2562491f663e0 | 1007197ff0feda35001c0aaf13382af6869869b2 | refs/heads/master | 2023-06-25T13:54:12.471419 | 2023-06-12T05:02:03 | 2023-06-12T05:02:03 | 229,204,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | """
每日温度
根据每日 气温 列表,请重新生成一个列表,对应位置的输出是需要再等待多久温度才会升高超过该日的天数。如果之后都不会升高,请在该位置用 0 来代替。
例如,给定一个列表 temperatures = [73, 74, 75, 71, 69, 72, 76, 73],你的输出应该是 [1, 1, 4, 2, 1, 1, 0, 0]。
提示:气温 列表长度的范围是 [1, 30000]。每个气温的值的均为华氏度,都是在 [30, 100] 范围内的整数。
"""
import math
import time
import numpy as np
from collections import deque
class Solution:
    # def dailyTemperatures(self, T: List[int]) -> List[int]:
    def dailyTemperatures(self, T):
        """Return, for each day, how many days until a warmer temperature.

        Monotonic-stack solution: the stack holds *indices* of days whose
        warmer day has not been seen yet, with non-increasing temperatures
        from bottom to top.  Each index is pushed and popped at most once,
        so the algorithm runs in O(n) time with O(n) extra space.

        Fixes over the original: indices still on the stack at the end
        already hold the default 0 ("never gets warmer"), so the final
        stack-draining loop that rewrote zeros was dead code and is gone;
        the stack now stores plain indices instead of (index, value)
        tuples, and a deque is unnecessary.
        """
        if len(T) == 0:
            return None  # kept for backward compatibility with callers
        answer = [0] * len(T)
        pending = []  # indices whose "next warmer day" is unresolved
        for day, temp in enumerate(T):
            # Resolve every colder day now that a warmer one appeared.
            while pending and T[pending[-1]] < temp:
                prev = pending.pop()
                answer[prev] = day - prev
            pending.append(day)
        return answer
return rslt
if __name__ == '__main__':
    # Smoke test with the example from the problem statement;
    # also reports wall-clock runtime of the call.
    sln = Solution()
    print('\ntestcase1 ...')
    temperatures = [73, 74, 75, 71, 69, 72, 76, 73]
    tStart= time.time()
    print(sln.dailyTemperatures(temperatures))
    tStop = time.time()
    print('tStart={0}, tStop={1}, tElapsed={2}(sec)'.format(tStart, tStop, tStop-tStart))
| [
"chenxy@BWAVE.LOCAL"
] | chenxy@BWAVE.LOCAL |
15ed8211c8d43131be4eeaa704dbd1400bbea598 | 59c55725576bbf0e2f6617507ba2f1db639abb3f | /stock_analytic_account/model/analytic_account.py | ea5556d076df6616c951843e5c8ca6abdca7a083 | [] | no_license | bmya/eficent-odoo-addons | e3426ebaf1f59e52726253fc1dd36a09d9363059 | 5d8ddfa384ab4417f42bda103b71d926848035f6 | refs/heads/7.0 | 2021-01-21T16:48:55.312452 | 2015-11-04T14:11:19 | 2015-11-04T14:11:19 | 45,649,141 | 1 | 3 | null | 2015-11-06T00:35:17 | 2015-11-06T00:35:17 | null | UTF-8 | Python | false | false | 1,957 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Eficent (<http://www.eficent.com/>)
# Jordi Ballester Alomar <jordi.ballester@eficent.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv, orm
import openerp.addons.decimal_precision as dp
class account_analytic_account(orm.Model):
    """Analytic account extension: links stock moves and flags reserved stock."""
    _inherit = "account.analytic.account"

    _columns = {
        # one2many back-reference from stock.move.analytic_account_id
        'move_ids': fields.one2many('stock.move', 'analytic_account_id',
                                    'Moves for this analytic account',
                                    readonly=True),
        # flag: stock referencing this account counts as reserved
        'use_reserved_stock': fields.boolean(
            'Use reserved stock',
            help="Stock with reference to this analytic account "
                 "is considered to be reserved.")
    }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate the account without carrying over its stock moves."""
        if context is None:
            context = {}
        if default is None:
            default = {}
        # a copy must start with no linked moves
        default['move_ids'] = []
        res = super(account_analytic_account, self).copy(cr, uid, id, default,
                                                         context)
        return res
| [
"jordi.ballester@eficent.com"
] | jordi.ballester@eficent.com |
0b216fd14819d28ed423883d66d0426efdbf220b | d00e68b2c14b44e7cbf13f7ddca284cdfd21c201 | /tests/v2_validation/cattlevalidationtest/core/test_services_lb_host_routing_balancer.py | 94d5b0c0c6c9e7f67ae8c9ce735a2053a54d1fa2 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | guangbochen/validation-tests | db92aff9b48c705115be828de1dc645143f4c9c8 | 23e7ab95ce76744483a0657f790b42a88a93436d | refs/heads/master | 2021-09-11T14:16:57.668817 | 2018-04-08T19:01:02 | 2018-04-08T19:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,937 | py | from common_fixtures import * # NOQA
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
STRESS_LB_PORT_RULE_COUNT = os.environ.get(
'STRESS_LB_PORT_RULE_COUNT', "20")
def test_lbservice_host_routing_1(client, socat_containers):
    """LB on port 900: services 0/1 serve www.abc1/abc2.com and
    services 2/3 serve www.abc3/abc4.com, each host pinned to one path."""
    port = "900"
    service_scale = 2
    lb_scale = 1
    service_count = 4

    # (serviceId, hostname, path); every rule shares source port,
    # target port 80 and http.
    rule_specs = [
        (0, "www.abc1.com", "/service1.html"),
        (0, "www.abc2.com", "/service2.html"),
        (1, "www.abc1.com", "/service1.html"),
        (1, "www.abc2.com", "/service2.html"),
        (2, "www.abc3.com", "/service1.html"),
        (2, "www.abc4.com", "/service2.html"),
        (3, "www.abc3.com", "/service1.html"),
        (3, "www.abc4.com", "/service2.html"),
    ]
    port_rules = [
        {"hostname": host,
         "path": path,
         "serviceId": svc,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"}
        for svc, host, path in rule_specs
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    for targets, host, path in (
            ([services[0], services[1]], "www.abc1.com", "/service1.html"),
            ([services[0], services[1]], "www.abc2.com", "/service2.html"),
            ([services[2], services[3]], "www.abc3.com", "/service1.html"),
            ([services[2], services[3]], "www.abc4.com", "/service2.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_cross_stack(
        client, socat_containers):
    """Same routing layout as test_lbservice_host_routing_1 (port 901),
    but the target services live in separate stacks (crosslinking)."""
    port = "901"
    service_scale = 2
    lb_scale = 1
    service_count = 4

    rule_specs = [
        (0, "www.abc1.com", "/service1.html"),
        (0, "www.abc2.com", "/service2.html"),
        (1, "www.abc1.com", "/service1.html"),
        (1, "www.abc2.com", "/service2.html"),
        (2, "www.abc3.com", "/service1.html"),
        (2, "www.abc4.com", "/service2.html"),
        (3, "www.abc3.com", "/service1.html"),
        (3, "www.abc4.com", "/service2.html"),
    ]
    port_rules = [
        {"hostname": host,
         "path": path,
         "serviceId": svc,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"}
        for svc, host, path in rule_specs
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count,
        port_rules, crosslinking=True)

    # Activate every target service, then wait until each reports active.
    for svc in services:
        svc.activate()
    for svc in services:
        assert client.wait_success(svc, 120).state == "active"

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    for targets, host, path in (
            ([services[0], services[1]], "www.abc1.com", "/service1.html"),
            ([services[0], services[1]], "www.abc2.com", "/service2.html"),
            ([services[2], services[3]], "www.abc3.com", "/service1.html"),
            ([services[2], services[3]], "www.abc4.com", "/service2.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)

    # Each cross-linked service has its own stack; clean them all up.
    to_delete = [env] + [get_env(client, svc) for svc in services]
    delete_all(client, to_delete)
def test_lbservice_host_routing_2(client, socat_containers):
    """LB on port 902: host+path pairs map to services 0/1, /name.html to
    service 2; unmatched host/path combinations must be rejected."""
    port = "902"
    service_scale = 2
    lb_scale = 1
    service_count = 3

    rule_specs = [
        (0, "www.abc1.com", "/service1.html"),
        (0, "www.abc2.com", "/service2.html"),
        (1, "www.abc1.com", "/service1.html"),
        (1, "www.abc2.com", "/service2.html"),
        (2, "www.abc1.com", "/name.html"),
        (2, "www.abc2.com", "/name.html"),
    ]
    port_rules = [
        {"hostname": host,
         "path": path,
         "serviceId": svc,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"}
        for svc, host, path in rule_specs
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    for targets, host, path in (
            ([services[0], services[1]], "www.abc1.com", "/service1.html"),
            ([services[0], services[1]], "www.abc2.com", "/service2.html"),
            ([services[2]], "www.abc1.com", "/name.html"),
            ([services[2]], "www.abc2.com", "/name.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)

    # host/path combinations with no rule must not be served
    for host, path in (("www.abc1.com", "/service2.html"),
                       ("www.abc2.com", "/service1.html")):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_scale_up(
        client, socat_containers):
    """Routing layout of test_lbservice_host_routing_2 (port 903); then
    every target service is scaled 2 -> 3 and routing is re-validated."""
    port = "903"
    service_scale = 2
    lb_scale = 1
    service_count = 3

    rule_specs = [
        (0, "www.abc1.com", "/service1.html"),
        (0, "www.abc2.com", "/service2.html"),
        (1, "www.abc1.com", "/service1.html"),
        (1, "www.abc2.com", "/service2.html"),
        (2, "www.abc1.com", "/name.html"),
        (2, "www.abc2.com", "/name.html"),
    ]
    port_rules = [
        {"hostname": host,
         "path": path,
         "serviceId": svc,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"}
        for svc, host, path in rule_specs
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    for targets, host, path in (
            ([services[0], services[1]], "www.abc1.com", "/service1.html"),
            ([services[0], services[1]], "www.abc2.com", "/service2.html"),
            ([services[2]], "www.abc1.com", "/name.html"),
            ([services[2]], "www.abc2.com", "/name.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)
    for host, path in (("www.abc1.com", "/service2.html"),
                       ("www.abc2.com", "/service1.html")):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          host, path)

    # Scale every target service up and wait for the new scale to settle.
    final_service_scale = 3
    final_services = []
    for svc in services:
        svc = client.update(svc, scale=final_service_scale, name=svc.name)
        svc = client.wait_success(svc, 120)
        assert svc.state == "active"
        assert svc.scale == final_service_scale
        final_services.append(svc)

    wait_for_lb_service_to_become_active(client,
                                         final_services, lb_service)
    for targets, host, path in (
            ([final_services[0], final_services[1]],
             "www.abc1.com", "/service1.html"),
            ([final_services[0], final_services[1]],
             "www.abc2.com", "/service2.html"),
            ([final_services[2]], "www.abc1.com", "/name.html"),
            ([final_services[2]], "www.abc2.com", "/name.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)
    for host, path in (("www.abc1.com", "/service2.html"),
                       ("www.abc2.com", "/service1.html")):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_scale_down(
        client, socat_containers):
    """Routing layout of test_lbservice_host_routing_2 (port 904); then
    every target service is scaled 3 -> 2 and routing is re-validated."""
    port = "904"
    service_scale = 3
    lb_scale = 1
    service_count = 3

    rule_specs = [
        (0, "www.abc1.com", "/service1.html"),
        (0, "www.abc2.com", "/service2.html"),
        (1, "www.abc1.com", "/service1.html"),
        (1, "www.abc2.com", "/service2.html"),
        (2, "www.abc1.com", "/name.html"),
        (2, "www.abc2.com", "/name.html"),
    ]
    port_rules = [
        {"hostname": host,
         "path": path,
         "serviceId": svc,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"}
        for svc, host, path in rule_specs
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    for targets, host, path in (
            ([services[0], services[1]], "www.abc1.com", "/service1.html"),
            ([services[0], services[1]], "www.abc2.com", "/service2.html"),
            ([services[2]], "www.abc1.com", "/name.html"),
            ([services[2]], "www.abc2.com", "/name.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)
    for host, path in (("www.abc1.com", "/service2.html"),
                       ("www.abc2.com", "/service1.html")):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          host, path)

    # Scale every target service down and wait for the new scale to settle.
    final_service_scale = 2
    final_services = []
    for svc in services:
        svc = client.update(svc, scale=final_service_scale, name=svc.name)
        svc = client.wait_success(svc, 120)
        assert svc.state == "active"
        assert svc.scale == final_service_scale
        final_services.append(svc)

    wait_for_lb_service_to_become_active(client,
                                         final_services, lb_service)
    for targets, host, path in (
            ([final_services[0], final_services[1]],
             "www.abc1.com", "/service1.html"),
            ([final_services[0], final_services[1]],
             "www.abc2.com", "/service2.html"),
            ([final_services[2]], "www.abc1.com", "/name.html"),
            ([final_services[2]], "www.abc2.com", "/name.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)
    for host, path in (("www.abc1.com", "/service2.html"),
                       ("www.abc2.com", "/service1.html")):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_only_path(
        client, socat_containers):
    """LB on port 905 with path-only rules: any hostname (or none) works
    as long as the path matches; unknown paths are rejected."""
    port = "905"
    service_scale = 2
    lb_scale = 1
    service_count = 2

    port_rules = [
        {"path": "/service1.html",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"path": "/service2.html",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    # /service1.html -> service 0 regardless of Host header (even absent),
    # /service2.html -> service 1 likewise.
    for targets, host, path in (
            ([services[0]], "www.abc1.com", "/service1.html"),
            ([services[0]], "www.abc2.com", "/service1.html"),
            ([services[0]], None, "/service1.html"),
            ([services[1]], "www.abc3.com", "/service2.html"),
            ([services[1]], "www.abc2.com", "/service2.html"),
            ([services[0]], None, "/service1.html")):
        validate_lb_service(client, lb_service, port, targets, host, path)

    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc3.com", "/name.html")
    delete_all(client, [env])
def test_lbservice_host_routing_only_host(
        client, socat_containers):
    """Host-only routing: rules carry a hostname but no path, so every path
    under a matching host is served by that host's backend."""
    port = "906"
    service_scale = 2
    lb_scale = 1
    service_count = 2

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         [services[0], services[1]],
                                         lb_service)

    routing_checks = [
        (services[0], "www.abc.com", "/service1.html"),
        (services[0], "www.abc.com", "/service2.html"),
        (services[1], "www.abc1.com", "/name.html"),
    ]
    for backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, port, [backend], host, path)

    # Hosts without a rule are rejected.
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc2.com", "/name.html")
    delete_all(client, [env])
def test_lbservice_host_routing_3(client, socat_containers):
    """Mixed rule set: two host-only rules, one catch-all (no host/path)
    rule and one path-only rule, each steering to a different service."""
    port = "907"
    service_scale = 2
    lb_scale = 1
    service_count = 4

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        # Catch-all rule: neither hostname nor path.
        {"serviceId": 2,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"path": "/service1.html",
         "serviceId": 3,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    routing_checks = [
        (services[0], "www.abc.com", "/service2.html"),
        (services[1], "www.abc1.com", "/name.html"),
        (services[2], "www.abc2.com", "/name.html"),
        (services[3], "www.abc3.com", "/service1.html"),
    ]
    for backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, port, [backend], host, path)
    delete_all(client, [env])
def test_lbservice_edit_host_routing_3(client, socat_containers):
    """Replace an LB's whole rule set via client.update() and verify both
    the initial and the edited routing behaviour."""
    port = "908"
    service_scale = 2
    lb_scale = 1
    service_count = 5

    # Initial rules reference services by index (0-3); services[4] stays
    # out of rotation until the edit below.
    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"serviceId": 2,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"path": "/service1.html",
         "serviceId": 3,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)

    active_services = [services[0], services[1], services[2], services[3]]
    wait_for_lb_service_to_become_active(client,
                                         active_services, lb_service)
    for backend, host, path in [
            (services[0], "www.abc.com", "/service2.html"),
            (services[1], "www.abc1.com", "/name.html"),
            (services[2], "www.abc2.com", "/name.html"),
            (services[3], "www.abc3.com", "/service1.html")]:
        validate_lb_service(client, lb_service, port, [backend], host, path)

    # Edited rules use real service ids; services[1] is dropped and
    # services[4] now answers for both www.abc.com and www.abc1.com.
    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": services[0].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"serviceId": services[2].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"path": "/service2.html",
         "serviceId": services[3].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc.com",
         "serviceId": services[4].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "serviceId": services[4].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    lb_service = client.update(lb_service,
                               lbConfig=create_lb_config(port_rules))

    active_services = [services[0], services[2], services[3], services[4]]
    wait_for_lb_service_to_become_active(client,
                                         active_services, lb_service)
    # www.abc.com now has two matching rules, so both backends may answer.
    for backends, host, path in [
            ([services[0], services[4]], "www.abc.com", "/service1.html"),
            ([services[4]], "www.abc1.com", "/name.html"),
            ([services[2]], "www.abc2.com", "/name.html"),
            ([services[3]], "www.abc3.com", "/service2.html")]:
        validate_lb_service(client, lb_service, port, backends, host, path)
    delete_all(client, [env])
def test_lbservice_edit_host_routing_add_host(
        client, socat_containers):
    """Append a second hostname rule to a live LB and verify the new host
    is served while still-unconfigured hosts stay blocked."""
    port = "909"
    service_scale = 2
    lb_scale = 1
    service_count = 1

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc.com", "/service2.html")
    for blocked_host in ("www.abc2.com", "www.abc3.com"):
        validate_lb_service_for_no_access(client, lb_service, port,
                                          blocked_host, "/name.html")

    # Add www.abc2.com (by real service id) on top of the existing rule.
    port_rules.append({"hostname": "www.abc2.com",
                       "serviceId": services[0].id,
                       "sourcePort": port,
                       "targetPort": "80",
                       "protocol": "http"})
    lb_service = client.update(lb_service,
                               lbConfig=create_lb_config(port_rules))
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc.com", "/service2.html")
    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc2.com", "/name.html")
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc3.com", "/name.html")
    delete_all(client, [env])
def test_lbservice_edit_host_routing_remove_host(
        client, socat_containers):
    """Shrink a live LB's rule set: after the update only www.abc.com is
    routable and the removed host is rejected."""
    port = "910"
    service_scale = 2
    lb_scale = 1
    service_count = 1

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "www.abc2.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client, services,
                                         lb_service)

    for host in ("www.abc.com", "www.abc2.com"):
        validate_lb_service(client, lb_service, port, [services[0]],
                            host, "/service2.html")
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc3.com", "/name.html")

    # Replace the rule set with a single rule, dropping www.abc2.com.
    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": services[0].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    lb_service = client.update(lb_service,
                               lbConfig=create_lb_config(port_rules))
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc.com", "/service2.html")
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc2.com", "/name.html")
    delete_all(client, [env])
def test_lbservice_edit_host_routing_edit_existing_host(
        client, socat_containers):
    """Swap the hostname of an existing rule in place: traffic moves from
    www.abc.com to www.abc2.com after the update."""
    port = "911"
    service_scale = 2
    lb_scale = 1
    service_count = 1

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc.com", "/service2.html")
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc2.com", "/name.html")

    # Same backend, different hostname.
    port_rules = [
        {"hostname": "www.abc2.com",
         "serviceId": services[0].id,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    lb_service = client.update(lb_service,
                               lbConfig=create_lb_config(port_rules))
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port, [services[0]],
                        "www.abc2.com", "/service2.html")
    validate_lb_service_for_no_access(client, lb_service, port,
                                      "www.abc.com", "/name.html")
    delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_1(
        client, socat_containers):
    """Two LB source ports carrying host+path, host-only, path-only and
    catch-all rules; each source port forwards to its own target port."""
    port1 = "1000"
    port2 = "1001"
    port1_target = "80"
    port2_target = "81"
    service_scale = 2
    lb_scale = 1
    service_count = 4

    port_rules = [
        {"hostname": "www.abc1.com",
         "path": "/service1.html",
         "serviceId": 0,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "path": "/service3.html",
         "serviceId": 0,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
        {"hostname": "www.abc2.com",
         "serviceId": 1,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"hostname": "www.abc2.com",
         "serviceId": 1,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
        {"path": "/service1.html",
         "serviceId": 2,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"path": "/service3.html",
         "serviceId": 2,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
        # Catch-all rules on each port.
        {"serviceId": 3,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"serviceId": 3,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
    ]
    env, services, lb_service = \
        create_env_with_multiple_svc_and_lb(
            client, service_scale, lb_scale, [port1, port2], service_count,
            port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    # (source port, expected backend, host, path)
    routing_checks = [
        (port1, services[0], "www.abc1.com", "/service1.html"),
        (port1, services[3], "www.abc1.com", "/service2.html"),
        (port1, services[1], "www.abc2.com", "/service1.html"),
        (port1, services[1], "www.abc2.com", "/service2.html"),
        (port2, services[1], "www.abc2.com", "/service3.html"),
        (port2, services[0], "www.abc1.com", "/service3.html"),
        (port2, services[2], "www.abc4.com", "/service3.html"),
        (port2, services[3], "www.abc3.com", "/service4.html"),
    ]
    for src_port, backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, src_port, [backend],
                            host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_2(
        client, socat_containers):
    """Overlapping path prefixes across two source ports: the most specific
    matching path rule wins."""
    port1 = "1002"
    port2 = "1003"
    port1_target = "80"
    port2_target = "81"
    service_scale = 2
    lb_scale = 1
    service_count = 3

    port_rules = [
        {"path": "/81",
         "serviceId": 0,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        # More specific than "/81" above, so it must take precedence.
        {"path": "/81/service3.html",
         "serviceId": 1,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"path": "/service",
         "serviceId": 2,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"path": "/service",
         "serviceId": 2,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
    ]
    env, services, lb_service = \
        create_env_with_multiple_svc_and_lb(
            client, service_scale, lb_scale, [port1, port2], service_count,
            port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    routing_checks = [
        (port1, services[2], "www.abc1.com", "/service1.html"),
        (port1, services[0], "www.abc1.com", "/81/service4.html"),
        (port1, services[1], "www.abc1.com", "/81/service3.html"),
        (port2, services[2], "www.abc1.com", "/service3.html"),
        (port2, services[2], "www.abc1.com", "/service4.html"),
    ]
    for src_port, backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, src_port, [backend],
                            host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_multiple_port_3(
        client, socat_containers):
    """Two catch-all rules on different source ports, each mapped to its
    own backend service and target port."""
    port1 = "1004"
    port2 = "1005"
    port1_target = "80"
    port2_target = "81"
    service_scale = 2
    lb_scale = 1
    service_count = 2

    port_rules = [
        {"serviceId": 0,
         "sourcePort": port1,
         "targetPort": port1_target,
         "protocol": "http"},
        {"serviceId": 1,
         "sourcePort": port2,
         "targetPort": port2_target,
         "protocol": "http"},
    ]
    env, services, lb_service = \
        create_env_with_multiple_svc_and_lb(
            client, service_scale, lb_scale, [port1, port2], service_count,
            port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    validate_lb_service(client, lb_service, port1, [services[0]],
                        "www.abc1.com", "/service1.html")
    validate_lb_service(client, lb_service, port2, [services[1]],
                        "www.abc1.com", "/service3.html")
    delete_all(client, [env])
def test_lbservice_external_service(client, socat_containers):
    """LB fronting an external (IP-registered) service: traffic must reach
    the externally registered containers."""
    port = "1010"
    lb_scale = 2

    env, lb_service, ext_service, containers = \
        create_env_with_ext_svc_and_lb(client, lb_scale, port)

    # Activate the backend external service first, then the LB.
    ext_service = activate_svc(client, ext_service)
    lb_service = activate_svc(client, lb_service)

    validate_lb_service_for_external_services(client, lb_service, port,
                                              containers)
    delete_all(client, [env])
def test_lbservice_host_routing_tcp_only(client,
                                         socat_containers):
    """TCP protocol rules: hostname/path entries are irrelevant at L4, so
    traffic is balanced across every service in the rule set."""
    port = "1011"
    service_scale = 2
    lb_scale = 1
    service_count = 2

    # hostname/path are set on purpose - they must be ignored for "tcp".
    port_rules = [
        {"hostname": "www.abc1.com",
         "path": "/service1.html",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "tcp"},
        {"hostname": "www.abc2.com",
         "path": "/service2.html",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "tcp"},
        {"hostname": "www.abc2.com",
         "path": "/service2.html",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "tcp"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    # No host/path arguments: plain L4 validation against both backends.
    validate_lb_service(client, lb_service, port,
                        [services[0], services[1]])
    delete_all(client, [env])
def test_lbservice_host_routing_tcp_and_http(client,
                                             socat_containers):
    """Mix TCP and HTTP rules on a single LB.

    port1 carries "tcp" rules (L4, host/path ignored) and port2 carries
    "http" rules for the same two backends.  Only the HTTP routing is
    validated at the moment; the TCP checks are intentionally disabled
    (see note below).
    """
    port1 = "1012"
    port2 = "1013"

    service_scale = 2
    lb_scale = 1
    service_count = 2

    port_rules = [
        {"hostname": "www.abc1.com",
         "path": "/service3.html",
         "serviceId": 0,
         "sourcePort": port1,
         "targetPort": "80",
         "protocol": "tcp"},
        {"hostname": "www.abc1.com",
         "path": "/service3.html",
         "serviceId": 0,
         "sourcePort": port2,
         "targetPort": "81",
         "protocol": "http"},
        {"hostname": "www.abc1.com",
         "path": "/service4.html",
         "serviceId": 1,
         "sourcePort": port1,
         "targetPort": "80",
         "protocol": "tcp"},
        {"hostname": "www.abc1.com",
         "path": "/service4.html",
         "serviceId": 1,
         "sourcePort": port2,
         "targetPort": "81",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port1, port2], service_count,
        port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    # NOTE(review): the TCP round-robin validation on port1 was disabled in
    # the original (left as a no-op string literal after a redundant
    # ``port1 = "1012"`` re-assignment, both removed here).  Re-enable once
    # L4 validation is reliable in this suite:
    # validate_lb_service(client, lb_service, port1,
    #                     [services[0], services[1]])

    validate_lb_service(client, lb_service, port2, [services[0]],
                        "www.abc1.com", "/service3.html")
    validate_lb_service(client, lb_service, port2, [services[1]],
                        "www.abc1.com", "/service4.html")
    validate_lb_service_for_no_access(client, lb_service, port2,
                                      "www.abc2.com",
                                      "/service3.html")
    delete_all(client, [env])
def test_lbservice_host_routing_wildcard(
        client, socat_containers):
    """Wildcard hostnames: leading ("*.domain.com"), trailing ("domain.*")
    and exact ("abc.domain.com") rules; the exact match takes precedence."""
    port = "1014"
    service_scale = 2
    lb_scale = 1
    service_count = 3

    port_rules = [
        {"hostname": "*.domain.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "domain.*",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "abc.domain.com",
         "serviceId": 2,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    routing_checks = [
        # Exact hostname beats the wildcard rules.
        (services[2], "abc.domain.com", "/name.html"),
        # Leading wildcard covers any subdomain depth.
        (services[0], "abc.def.domain.com", "/service1.html"),
        # Trailing wildcard matches any suffix after "domain.".
        (services[1], "domain.abc.def.com", "/service1.html"),
        (services[1], "domain.abc.com", "/name.html"),
    ]
    for backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, port, [backend], host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_wildcard_order(
        client, socat_containers):
    """Rule precedence with wildcards: host+path rules beat host-only
    rules, and exact hostnames beat wildcard ones.

    NOTE(review): this test reuses source port "1014", also used by
    test_lbservice_host_routing_wildcard above - every other test in this
    file uses a unique port, so confirm this duplication is intentional.
    """
    port = "1014"
    service_scale = 2
    lb_scale = 1
    service_count = 5

    port_rules = [
        {"hostname": "*.domain.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "domain.*",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "abc.domain.com",
         "serviceId": 2,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "abc.domain.com",
         "path": "/service1.html",
         "serviceId": 3,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
        {"hostname": "*.domain.com",
         "path": "/service1.html",
         "serviceId": 4,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    routing_checks = [
        # Wildcard host + path beats wildcard host alone.
        (services[4], "abc.def.domain.com", "/service1.html"),
        (services[0], "abc.def.domain.com", "/name.html"),
        (services[1], "domain.abc.com", "/service1.html"),
        (services[1], "domain.def.com", "/service1.html"),
        # Exact host without path vs. exact host + path.
        (services[2], "abc.domain.com", "/name.html"),
        (services[3], "abc.domain.com", "/service1.html"),
    ]
    for backend, host, path in routing_checks:
        validate_lb_service(client, lb_service, port, [backend], host, path)
    delete_all(client, [env])
def test_lbservice_host_routing_priority_override_1(
        client, socat_containers):
    """Explicit rule priority overrides specificity: the broad "*.com" rule
    at priority 1 wins over the exact "abc.domain.com" rule at priority 2."""
    port = "1015"
    service_scale = 2
    lb_scale = 1
    service_count = 2

    port_rules = [
        {"hostname": "*.com",
         "path": "/service1.html",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http",
         "priority": 1},
        {"hostname": "abc.domain.com",
         "path": "/service1.html",
         "serviceId": 1,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http",
         "priority": 2},
    ]
    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, [port], service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)

    # Despite the exact-host rule pointing at services[1], priority 1
    # routes the request to services[0].
    validate_lb_service(client, lb_service, port, [services[0]],
                        "abc.domain.com", "/service1.html")
    delete_all(client, [env])
def test_lb_with_selector_link_target_portrules(client,
                                                socat_containers):
    """LB discovers its targets through a label selector instead of explicit
    service links; per-target port rules (hostname/path) still apply.

    Fix: the LB's own stack (``lb_env``) was previously leaked - cleanup
    only deleted the backend stack.
    """
    port = "20001"

    # Backend stack: two services sharing the label the LB selects on.
    env = create_env(client)
    launch_config_svc = {"imageUuid": LB_HOST_ROUTING_IMAGE_UUID,
                         "labels": {"test1": "value1"}}
    port_rule1 = {
        "targetPort": "80",
        "hostname": "www.abc.com",
        "path": "/name.html"}
    port_rule2 = {
        "targetPort": "80",
        "hostname": "www.abc1.com",
        "path": "/service1.html"}

    # Create the two target services (names must not contain "-").
    random_name = random_str()
    service_name = random_name.replace("-", "")
    service1 = client.create_service(name=service_name,
                                     stackId=env.id,
                                     launchConfig=launch_config_svc,
                                     scale=1,
                                     lbConfig=create_lb_config([port_rule1]))
    service1 = client.wait_success(service1)
    assert service1.state == "inactive"

    random_name = random_str()
    service2_name = random_name.replace("-", "")
    service2 = client.create_service(name=service2_name,
                                     stackId=env.id,
                                     launchConfig=launch_config_svc,
                                     scale=1,
                                     lbConfig=create_lb_config([port_rule2]))
    service2 = client.wait_success(service2)
    assert service2.state == "inactive"

    # LB lives in its own stack and selects targets via "test1=value1".
    launch_config_lb = {"ports": [port],
                        "imageUuid": get_haproxy_image()}
    port_rule1 = {
        "sourcePort": port,
        "selector": "test1=value1"}
    lb_env = create_env(client)
    lb_service = client.create_loadBalancerService(
        name="lb-withselectorlinks",
        stackId=lb_env.id,
        launchConfig=launch_config_lb,
        scale=1,
        lbConfig=create_lb_config([port_rule1]))
    lb_service = client.wait_success(lb_service)
    assert lb_service.state == "inactive"

    service1 = activate_svc(client, service1)
    service2 = activate_svc(client, service2)
    lb_service = activate_svc(client, lb_service)

    wait_for_lb_service_to_become_active(client,
                                         [service1, service2], lb_service)
    validate_lb_service(client,
                        lb_service, port,
                        [service1],
                        "www.abc.com", "/name.html")
    validate_lb_service(client,
                        lb_service, port,
                        [service2],
                        "www.abc1.com", "/service1.html")
    # Clean up both stacks (lb_env was previously leaked).
    delete_all(client, [env, lb_env])
@if_stress
def test_lbservice_edit_add_multiple_port_rules(
        client, socat_containers):
    """Stress test: repeatedly grow a live LB by one hostname rule on one
    new source port per iteration, validating every rule added so far.

    Fixes: use the print() function (Python 3 compatible) and validate the
    newest rule too - the old ``range(0, i)`` always skipped the rule added
    in the current iteration.
    """
    port = "90"
    count = int(STRESS_LB_PORT_RULE_COUNT)

    service_scale = 2
    lb_scale = 1
    service_count = 1
    ports = [port]

    port_rules = [
        {"hostname": "www.abc.com",
         "serviceId": 0,
         "sourcePort": port,
         "targetPort": "80",
         "protocol": "http"},
    ]
    launch_config_lb = {"imageUuid": get_haproxy_image()}

    env, services, lb_service = create_env_with_multiple_svc_and_lb(
        client, service_scale, lb_scale, ports, service_count, port_rules)
    wait_for_lb_service_to_become_active(client,
                                         services, lb_service)
    validate_lb_service(client,
                        lb_service, port, [services[0]],
                        "www.abc.com", "/service2.html")

    for i in range(0, count):
        # Add one more hostname rule on a brand-new source port.
        port_rules.append({"hostname": "www.abc.com" + str(i),
                           "serviceId": services[0].id,
                           "sourcePort": port + str(i),
                           "targetPort": "80",
                           "protocol": "http"})
        ports.append(port + str(i))
        launch_config_lb["ports"] = ports
        lb_service = client.update(lb_service,
                                   launchConfig=launch_config_lb,
                                   lbConfig=create_lb_config(port_rules))
        wait_for_lb_service_to_become_active(client,
                                             services, lb_service)
        # Original rule must keep working after every edit.
        validate_lb_service(client,
                            lb_service, port, [services[0]],
                            "www.abc.com", "/service2.html")
        # Validate every rule added so far, including this iteration's.
        for j in range(0, i + 1):
            print("Validation after adding " + str(i + 1) + " ports")
            validate_lb_service(client,
                                lb_service, port + str(j), [services[0]],
                                "www.abc.com" + str(j), "/name.html")
    delete_all(client, [env])
| [
"sangeetha@rancher.com"
] | sangeetha@rancher.com |
d255e8072a01057e097ccaa3a705564e60199c9e | 91fe8f479fa921fa84111d19222a5c6aa6eff030 | /basis/progr-py/Gui/ShellGui/packdlg_redirect.py | e74111a94ff6ede688ace45c422255376555b419 | [] | no_license | romanticair/python | 2055c9cdaa46894c9788d5797643283786ed46dd | 6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141 | refs/heads/master | 2022-11-03T17:17:17.608786 | 2019-07-05T07:07:29 | 2019-07-05T07:07:29 | 195,356,190 | 0 | 1 | null | 2022-10-14T20:51:14 | 2019-07-05T07:00:33 | Python | UTF-8 | Python | false | false | 496 | py | # 将命令行脚本包装到图形界面重定向工具中,输出显示到弹出式窗口中
from tkinter import *
from packdlg import runPackDialog
from Gui.Tools.guiStreams import redirectedGuiFunc
def runPackDialog_Wrapped(): # 在mytools.py中运行的回调函数
redirectedGuiFunc(runPackDialog) # 对整个回调处理程序进行包装
if __name__ == '__main__':
root = Tk()
Button(root, text='pop', command=runPackDialog_Wrapped).pack(fill=X)
root.mainloop()
| [
"1024519570@qq.com"
] | 1024519570@qq.com |
a828a1d10bfc5c4b5cd149e658aca32e30558efa | 7f80554c5013ba7bc66a3ec98f804156d977c277 | /src/readux/urls.py | d95051dab91bc770f6c85b38826d7a40f9f870b8 | [] | no_license | akrahdan/LearnAI | fa89c133dbe3b0c06bfdce720ea6dcb429d1dc57 | fbea836a7fc78c8ab92b313c2afa4bdeef59c362 | refs/heads/main | 2023-07-24T15:07:15.692045 | 2021-08-20T16:39:44 | 2021-08-20T16:39:44 | 376,688,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,083 | py | """readux URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.conf import settings
from allauth.account.views import confirm_email
from django.conf.urls.static import static
from courses.views import CourseDetailSlugView
from projects.views import ProjectDetailView
from .views import home_page, CourseLeadView, pricing
from files.views import DownloadView, UploadPolicyView, UploadView, UploadCoursePolicy, DownloadCourseView
urlpatterns = [
path('admin/', admin.site.urls),
path('', home_page, name='home'),
path('api/lead/', CourseLeadView.as_view(), name='course_signup'),
path('api/pricing/', pricing, name='pricing'),
path('api/dashboard/', include(('dashboard.urls', 'dashboard'), namespace="dashboard")),
path('api/courses/', include('courses.urls')),
path('api/course/<slug:slug>/', CourseDetailSlugView.as_view()),
#path('auths/', include(('accounts.urls', 'auths'), 'auths')),
path('accounts/', include('allauth.urls')),
path('api/accounts/', include('accounts.urls')),
path('api/billing/', include(('billing.urls', 'billing'), 'billing')),
path('api/instructor/', include(('instructors.urls'))),
path('api/students/', include(('students.urls', 'students'), namespace='students')),
path('api/upload/', UploadView.as_view()),
path('api/upload/policy/', UploadPolicyView.as_view()),
path('api/files/<int:id>/download/', DownloadView.as_view()),
path('api/orders/', include('orders.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('api/auth/', include('auths.urls')),
path('api/analytics/', include('analytics.urls')),
path('api/projects/', include('projects.urls')),
path('api/projects/', include('projects.urls')),
path('api/project/<slug:slug>/', ProjectDetailView.as_view()),
path('api/categories/', include('categories.urls')),
path('api/project_categories/', include('project_categories.urls')),
re_path(r"^rest-auth/registration/account-confirm-email/(?P<key>[\s\d\w().+-_',:&]+)/$", confirm_email,
name="account_confirm_email"),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('api/cart/', include('carts.urls')),
]
# if settings.DEBUG:
# urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"akrahdan@gmail.com"
] | akrahdan@gmail.com |
7bff1cf2cf3d6197fe73bcee6755c0bf8ceff5cf | 2a68b03c923119cc747c4ffcc244477be35134bb | /Alog/class4/exercises/knightII.py | 5367616c345efb372dc4c02cc4833b0a6ae69052 | [] | no_license | QitaoXu/Lintcode | 0bce9ae15fdd4af1cac376c0bea4465ae5ea6747 | fe411a0590ada6a1a6ae1166c86c585416ac8cda | refs/heads/master | 2020-04-24T20:53:27.258876 | 2019-09-24T23:54:59 | 2019-09-24T23:54:59 | 172,259,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,708 | py | from collections import deque
DIRECTIONS = [
(1, 2), (-1, 2),
(2, 1), (-2, 1)
]
class Solution:
"""
@param grid: a chessboard included 0 and 1
@return: the shortest path
"""
def shortestPath2(self, grid):
# write your code here
# BFS
queue = deque()
distance = {(0, 0) : 0}
queue.append( (0, 0) )
while len(queue):
size = len(queue)
for _ in range(size):
x, y = queue.popleft()
if (x, y) == ( len(grid) -1, len(grid[0]) - 1 ):
return distance[(x,y)]
for dx, dy in DIRECTIONS:
next_x, next_y = x + dx, y + dy
if not self.is_valid(next_x, next_y, grid):
continue
elif (next_x, next_y) in distance.keys():
continue
# elif (next_x, next_y) == ( len(grid) -1, len(grid[0]) - 1 ):
# return distance[(x, y)] + 1
else:
distance[(next_x, next_y)] = distance[(x, y)] + 1
queue.append((next_x, next_y))
return -1
def is_valid(self, x, y, grid):
row, col = len(grid), len(grid[0])
if x < 0 or x >= row or y < 0 or y >= col:
return False
if grid[x][y] == 1:
return False
return True
| [
"xuqitao@QX-MBP.local"
] | xuqitao@QX-MBP.local |
fbea320702fa3c11e50173b54ddcad4517345559 | 65e9bc161df0553b272de5b2d91fe7ad16df7c31 | /medipipeline/utility.py | 2a712dbee0d7b7c3223ae6d1b2ed4ddb960b2063 | [] | no_license | EhsanArabnezhad/Django-Dashboard | 6369d4b85798c9c3d8c35d22a9d258b9d32fe96b | 8ce00ffe0598c71391c169300cc54190b2477786 | refs/heads/master | 2022-07-31T13:11:28.461970 | 2020-05-21T07:58:28 | 2020-05-21T07:58:28 | 265,787,754 | 0 | 0 | null | 2020-05-21T07:53:31 | 2020-05-21T07:49:27 | JavaScript | UTF-8 | Python | false | false | 779 | py | import pandas as pd
import psutil
import os
# measure memory usage
def memory_usage():
process = psutil.Process(os.getpid())
mem_bytes = process.memory_info().rss
return( float(mem_bytes)/1048576 )
# Substitute to pandas.apply, which has a memory leak.
# https://ys-l.github.io/posts/2015/08/28/how-not-to-use-pandas-apply/
def apply_optimized_output_series(dataFrame, function):
rawSeries = []
for _, row in dataFrame.iterrows():
processedRow = function(row)
rawSeries.append(processedRow)
return pd.Series(rawSeries)
if __name__ == '__main__':
def someFunc(x):
return x
df = pd.DataFrame({
'a': [1,2,3,4,5,6,7],
'b': [7,6,5,4,3,2,1]
})
print (apply_optimized_output_series(df, someFunc)) | [
"ehsan5359_ar@yahoo.com"
] | ehsan5359_ar@yahoo.com |
1a7c13a0cf606ed6a8e9e5f4c348e5582d7ccda9 | 074d3e411b87a12f93872ef70a98ff270d5068d8 | /Python_Gui/test.py | b2b086d631051cdeebf6cac775268069684e0b52 | [] | no_license | zidaliu/APIC_CISCO | f0bb3a88baee82025f5ed9f09c90e6f1a8b07ac1 | bd92206665719409b4ffce2577460f545880b0f9 | refs/heads/master | 2021-08-19T19:50:05.542437 | 2017-11-27T09:02:23 | 2017-11-27T09:02:23 | 110,214,166 | 1 | 0 | null | 2017-11-16T12:01:19 | 2017-11-10T07:01:31 | Python | UTF-8 | Python | false | false | 1,009 | py | #!/usr/bin/env python
# _*_ coding:utf-8 _*_
from Tkinter import *
root = Tk()
# Callback bound to the login button.
def reg():
    """Check the entered username/password and show the outcome in l_msg.

    On failure the entry widgets are cleared so the user can retry.
    """
    username = e_user.get()
    password = e_pwd.get()
    if username == '111' and password == '222':
        l_msg['text'] = '登陆成功'
    else:
        l_msg['text'] = '用户名或密码错误'
        e_user.delete(0, len(username))
        e_pwd.delete(0, len(password))
# Row 1: username label and entry field
l_user = Label(root, text='用户名:')
l_user.grid(row=0, sticky=W)
e_user = Entry(root)
e_user.grid(row=0, column=1, sticky=E)
# Row 2: password label and entry field (input masked with '*')
l_pwd = Label(root, text='密码:')
l_pwd.grid(row=1, sticky=W)
e_pwd = Entry(root)
e_pwd['show'] = '*'
e_pwd.grid(row=1, column=1, sticky=E)
# Row 3: login button; `command` binds the reg() callback
b_login = Button(root, text='登陆', command=reg)
b_login.grid(row=2, column=1, sticky=E)
# Status label reporting whether the login succeeded
l_msg = Label(root, text='')
l_msg.grid(row=3)
root.mainloop() | [
"liuzida@liuzidadeMacBook-Pro.local"
] | liuzida@liuzidadeMacBook-Pro.local |
cdba2f90e4c387023b541c5623d1da6b8a4184ff | e2189dc1dbbde547d334e6d66ce0c8ca8d1c929b | /apps/pspnet/tools/fix_unprocessed_init_image.py | fb13512d6587859517e1ebcc4be7d6d0e8501dc4 | [
"MIT"
] | permissive | c0710204/python-socketio-cluster | d3503b79e5472acaa19b89f4dfae778faccd9500 | 768cf2eb40a9b75b912a8b8cd86b14c52f3fc9d7 | refs/heads/master | 2021-04-27T03:46:20.464320 | 2018-12-17T23:06:39 | 2018-12-18T19:11:04 | 124,025,324 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import pymysql
# One-shot maintenance script: reset every task stuck in the "init"
# state back to "wait" so the pipeline will pick it up again.
# NOTE(review): database credentials are hard-coded here; consider
# moving them to config/environment variables.
db=pymysql.connect(host="127.0.0.1",port=33061, user="guxi",passwd="dHtFkI6g",db="gsv_file_list")
sql="update tasks set status=\"wait\" where status=\"init\""
with db.cursor() as cur:
    cur.execute(sql)
db.commit()
| [
"c0710204@gmail.com"
] | c0710204@gmail.com |
b7759d6a6dcb81a63298d8ff7c3583729f1d19eb | 7facdc4644fbe4209b5acdad9f2503bfcfb0d534 | /ensure/_types.py | d0d2db7350653d2171e927886cffa6eccef0f7f8 | [
"Apache-2.0"
] | permissive | KeyWeeUsr/ensure | 2a19d2101418f334bb188d299f5368f96aaf7916 | 47becf82672906d2fcfd4e8e5b0542e43845b3ed | refs/heads/master | 2023-06-01T04:11:19.154208 | 2018-11-06T01:39:11 | 2018-11-06T01:39:11 | 165,532,375 | 0 | 0 | Apache-2.0 | 2019-01-13T17:14:11 | 2019-01-13T17:14:10 | null | UTF-8 | Python | false | false | 1,050 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from six import add_metaclass
# True when running under Python 2 (six-style compatibility shim); in
# that case rebind `str` to unicode so the metaclasses below treat text
# uniformly across versions.
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
if USING_PYTHON2:
    str = unicode # noqa
class NumericStringType(type):
    """Metaclass whose instance check accepts values of `_type` that can
    be parsed by `_cast` (by default: str parseable as float)."""
    _type = str
    _cast = float

    def __instancecheck__(self, other):
        if not isinstance(other, self._type):
            return False
        try:
            self._cast(other)
        except (TypeError, ValueError):
            return False
        return True
class NumericByteStringType(NumericStringType):
    """Instance check: bytes parseable as a float."""
    _type = bytes
class IntegerStringType(NumericStringType):
    """Instance check: str parseable as an int."""
    _cast = int
class IntegerByteStringType(IntegerStringType):
    """Instance check: bytes parseable as an int."""
    _type = bytes
# Concrete marker classes: e.g. isinstance(x, NumericString) is True for
# any str that parses as a number — the metaclass drives the check.
@add_metaclass(NumericStringType)
class NumericString(str):
    pass
@add_metaclass(NumericByteStringType)
class NumericByteString(bytes):
    pass
@add_metaclass(IntegerStringType)
class IntegerString(str):
    pass
@add_metaclass(IntegerByteStringType)
class IntegerByteString(bytes):
    pass
| [
"kislyuk@gmail.com"
] | kislyuk@gmail.com |
78e5cde7fe4b290dc0854b446bc10a45a3a71864 | bb0a28f1c7140fc75241085af4d772bf690c92c5 | /books/deep-learning-from-scratch-2/ch06/train_better_rnnlm.py | adc2c710c5231c2fbcfa605a9890582c48f4536c | [
"MIT"
] | permissive | oonisim/python-programs | dcddad2df3d3451169e79d053624072706091741 | b592c9bf004d9f2ca6b014eae0e9623e5567bcff | refs/heads/master | 2023-08-23T19:09:19.425013 | 2023-06-23T03:38:29 | 2023-06-23T03:38:29 | 224,111,443 | 2 | 2 | null | 2023-02-16T07:37:54 | 2019-11-26T05:40:37 | Jupyter Notebook | UTF-8 | Python | false | false | 1,689 | py | # coding: utf-8
import sys
sys.path.append('..')
from src.common import config
# To run on the GPU, uncomment the line below (requires cupy)
# ==============================================
# config.GPU = True
# ==============================================
from src.common import SGD
from src.common import RnnlmTrainer
from src.common import eval_perplexity, to_gpu
from dataset import ptb
from better_rnnlm import BetterRnnlm
# Hyperparameter settings
batch_size = 20
wordvec_size = 650
hidden_size = 650
time_size = 35
lr = 20.0
max_epoch = 40
max_grad = 0.25
dropout = 0.5
# Load the training data
corpus, word_to_id, id_to_word = ptb.load_data('train')
corpus_val, _, _ = ptb.load_data('val')
corpus_test, _, _ = ptb.load_data('test')
if config.GPU:
    corpus = to_gpu(corpus)
    corpus_val = to_gpu(corpus_val)
    corpus_test = to_gpu(corpus_test)
vocab_size = len(word_to_id)
xs = corpus[:-1]
ts = corpus[1:]
model = BetterRnnlm(vocab_size, wordvec_size, hidden_size, dropout)
optimizer = SGD(lr)
trainer = RnnlmTrainer(model, optimizer)
best_ppl = float('inf')
# Train one epoch at a time; save the parameters whenever validation
# perplexity improves, otherwise quarter the learning rate.
for epoch in range(max_epoch):
    trainer.fit(xs, ts, max_epoch=1, batch_size=batch_size,
                time_size=time_size, max_grad=max_grad)
    model.reset_state()
    ppl = eval_perplexity(model, corpus_val)
    print('valid perplexity: ', ppl)
    if best_ppl > ppl:
        best_ppl = ppl
        model.save_params()
    else:
        lr /= 4.0
        optimizer.lr = lr
    model.reset_state()
    print('-' * 50)
# Evaluation on the test data
model.reset_state()
ppl_test = eval_perplexity(model, corpus_test)
print('test perplexity: ', ppl_test)
| [
"oonisim@gmail.com"
] | oonisim@gmail.com |
766c006b44f7bca3d96dc1ad604ef9851b7c73be | 0a1a95fe0344c27197b677e8f8d1acc05a9813bd | /tests/test_app/test_static.py | 9c4549b3036f5029a3b59f6d3252f224e800aa5a | [
"MIT"
] | permissive | hirokiky/uiro | 5ddaee966395512919016406c5ed18baed5cb68c | 8436976b21ac9b0eac4243768f5ada12479b9e00 | refs/heads/master | 2023-04-27T00:57:13.953417 | 2013-11-09T02:15:57 | 2013-11-09T02:15:57 | 13,859,983 | 0 | 0 | MIT | 2023-04-15T15:13:52 | 2013-10-25T12:30:05 | Python | UTF-8 | Python | false | false | 411 | py | import pytest
from webtest import TestApp
@pytest.fixture
def target():
    # Build a WSGI app serving static_app's packaged assets and wrap it
    # in WebTest's TestApp for request-level assertions.
    from matcha import make_wsgi_app
    from uiro.static import generate_static_matching
    from .pkgs import static_app
    matching = generate_static_matching(static_app)
    return TestApp(make_wsgi_app(matching))
def test_static(target):
    # The static view should serve the packaged file at its URL.
    resp = target.get('/static/static_app/test.txt')
    resp.mustcontain('No more work')
| [
"hirokiky@gmail.com"
] | hirokiky@gmail.com |
eb287c9f403e2617a50b4f92ea1ae509b3f71845 | 56271296eec22a95840b8ca3904ab125bba2365c | /Battleship.py | 602ccfd6c5a3c24544682839531152cb5412d0ba | [] | no_license | Brianw9/Python | fdbc21bf6d6c7d19e09c21d1238b8a008afd3324 | b122010e30f9d94d0e75eb05207b95b322d2da9e | refs/heads/master | 2020-04-19T09:37:04.417227 | 2019-02-26T05:56:05 | 2019-02-26T05:56:05 | 168,116,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,086 | py | from random import randint
# 5x5 ocean grid; "O" marks unexplored water (Python 2 script).
board = []
for x in range(5):
    board.append(["O"] * 5)
def print_board(board):
    # Print the grid one space-separated row per line.
    for row in board:
        print " ".join(row)
print "Let's play Battleship!"
print_board(board)
def random_row(board):
    # Random row index in [0, number of rows - 1].
    lastRow = len(board) - 1
    return randint(0, lastRow)
def random_col(board):
    # Random column index in [0, number of columns - 1].
    lastCol = len(board[0]) - 1
    return randint(0, lastCol)
# Hide the ship at a random cell, then give the player four guesses.
ship_row = random_row(board)
ship_col = random_col(board)
for turn in range(4):
    guess_row = int(raw_input("Guess Row:"))
    guess_col = int(raw_input("Guess Col:"))
    if guess_row == ship_row and guess_col == ship_col:
        print "Congratulations! You sunk my battleship!"
        break
    else:
        # Distinguish out-of-bounds, repeated, and plain misses.
        if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
            print "Oops, that's not even in the ocean."
        elif(board[guess_row][guess_col] == "X"):
            print "You guessed that one already."
        else:
            print "You missed my battleship!"
            board[guess_row][guess_col] = "X"
        print "Turn", turn + 1
        print_board(board)
        if turn >= 3:
            print "Game Over"
| [
"noreply@github.com"
] | noreply@github.com |
afd0fc499a05d344d03d6e8a34a154dec63f373a | 06bdd09453504d23bf262a87513e1f6790f62359 | /crud_sqlite.py | f66e6e9adeb5deae87fa4410d0f41c4462e1135e | [] | no_license | kiransringeri/flask_tutorial | 1efeb386458fd7c2fa6c82c8eb45ceb223f25a90 | 266eb11ab6caf56267f1150ff3834cadf4ddc5f2 | refs/heads/master | 2020-03-26T16:19:18.816422 | 2018-08-17T09:03:51 | 2018-08-17T09:03:51 | 145,093,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | """
CRUD application to manage users - list/add/edit/delete users. Uses an SQLite database
Need to run the below commands in a python terminal/console to initialize the SQLite database
from crud import db
db.create_all()
"""
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
app = Flask(__name__)
# The SQLite database file lives next to this script.
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'flask_tutorial.sqlite')
db = SQLAlchemy(app)
ma = Marshmallow(app)
class User(db.Model):
    """ORM model for a user with a unique username and email."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    def __init__(self, username, email):
        self.username = username
        self.email = email
    """
    def serialize(self):
        return {
            'username': self.username,
            'email': self.email
        }
    """
class UserSchema(ma.Schema):
    """Marshmallow schema controlling which User fields are serialized."""
    class Meta:
        # Fields to expose
        fields = ('id', 'username', 'email')
# Single-object and list serializers reused by the endpoints below.
user_schema = UserSchema()
users_schema = UserSchema(many=True)
# endpoint to create new user
@app.route("/user", methods=["POST"])
def add_user():
    # Expects JSON {"username": ..., "email": ...}; returns the created row.
    username = request.json['username']
    email = request.json['email']
    new_user = User(username, email)
    db.session.add(new_user)
    db.session.commit()
    return user_schema.jsonify(new_user)
# endpoint to show all users
@app.route("/user", methods=["GET"])
def get_user():
    all_users = User.query.all()
    result = users_schema.dump(all_users)
    return jsonify(result.data)
# endpoint to get user detail by id
@app.route("/user/<id>", methods=["GET"])
def user_detail(id):
    user = User.query.get(id)
    return user_schema.jsonify(user)
# endpoint to update user
@app.route("/user/<id>", methods=["PUT"])
def user_update(id):
    # Overwrites both username and email from the JSON body.
    user = User.query.get(id)
    username = request.json['username']
    email = request.json['email']
    user.email = email
    user.username = username
    db.session.commit()
    return user_schema.jsonify(user)
# endpoint to delete user
@app.route("/user/<id>", methods=["DELETE"])
def user_delete(id):
    user = User.query.get(id)
    db.session.delete(user)
    db.session.commit()
    return user_schema.jsonify(user)
if __name__ == '__main__':
    app.run(debug=True) | [
"kiransringeri@gmail.com"
] | kiransringeri@gmail.com |
8a2ba36c13ed0a54537e2379413daddba59b9ec9 | c7c6e6a08d1d8440895480499ce4f76817b475de | /app/models/db/models.py | cede069b4479c8008bece82162d9b8c07f44c927 | [] | no_license | PatchouliTC/tencent_webhook | c606167191ace91f11e67100ddf655575e9abbfe | 3b478c4a282345a2580e414b827861ed11e5a096 | refs/heads/master | 2023-03-14T00:06:56.554572 | 2021-03-03T08:17:53 | 2021-03-03T08:17:53 | 342,829,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,951 | py | from enum import unique
import datetime
from sqlalchemy import *
from sqlalchemy.orm import relationship
from sqlalchemy.types import TIMESTAMP
from app.utils import verifyUtil
from . import Base
class Repository(Base):
    """A watched git repository and its webhook credentials."""
    __tablename__='repositories'
    id=Column(Integer,primary_key=True,index=True)
    identityid=Column(Integer,nullable=False,unique=True)
    name=Column(String(256),default="UnKnownRepo",nullable=False)
    displayname=Column(String(256),default="UndefinedRepo")
    visibility_level=Column(Integer,default=0) #0=private 10=public
    namespace=Column(String(512),index=True)
    url=Column(String)
    accesstoken=Column(String)
    verifytoken=Column(String)
    branches=relationship("Branch",backref="repo",cascade='all,delete-orphan')
    merges=relationship("MergeRecord",backref="repo",cascade='all,delete-orphan')
    watcher=relationship('NoticeReceiver',secondary='repos_recvs')
    submitters=relationship('Submitter',secondary='repos_subs')
    def verify_accesstoken(self,token) -> bool:
        # When no verify token is configured, accept every webhook call.
        if self.verifytoken and len(self.verifytoken)>0:
            return verifyUtil.verify_hook_token(token,self.accesstoken)
        return True
    def __repr__(self):
        return f"Repo:<{self.name}>({'Public' if self.visibility_level > 0 else 'Private'})\n[{self.url}]"
class Branch(Base):
    """A branch belonging to one repository."""
    __tablename__='branches'
    id=Column(Integer,primary_key=True,index=True)
    repo_id=Column(Integer,ForeignKey('repositories.id',ondelete='CASCADE'))
    #repo
    name=Column(String)
    pushes=relationship("PushRecord",backref="branch",cascade='all,delete-orphan')
    merges=relationship("MergeRecord",backref="branch",cascade='all,delete-orphan')
    def __repr__(self):
        return f"Branch:{'[%s]' % self.repo.name if self.repo else ''} - {self.name}"
class Submitter(Base):
    """A person who pushes commits or opens merge requests."""
    __tablename__='submitters'
    id=Column(Integer,primary_key=True,index=True)
    identityid=Column(Integer,nullable=False)#locate remote
    displayname=Column(String(256),default="UnDefinedSubmitter")
    name=Column(String(256),nullable=False,index=True)
    pushes=relationship("PushRecord",backref="submitter",cascade='all,delete-orphan')
    merges=relationship("MergeRecord",backref="submitter",cascade='all,delete-orphan')
    associate_repos=relationship('Repository',secondary='repos_subs')
    def __repr__(self):
        return f"<{self.displayname}>[{self.name}]"
#push info
class PushRecord(Base):
    """A single push event: who pushed which commits to which branch."""
    __tablename__='pushrecords'
    id=Column(Integer,primary_key=True,index=True)
    # the submitter who performed the push
    sub_id=Column(Integer,ForeignKey('submitters.id',ondelete='CASCADE'))
    #submitter
    # the branch the push targeted
    branch_id=Column(Integer,ForeignKey('branches.id',ondelete='CASCADE'))
    #branch
    # hash of this push's head commit
    current_hash=Column(String(64),index=True)
    # hash of the previous head commit
    before_hash=Column(String(64))
    additions=Column(BigInteger,default=0)
    deletions=Column(BigInteger,default=0)
    # BUG FIX: pass the callable (no parentheses) so the timestamp is taken
    # at row-insert time; `datetime.datetime.now()` would be evaluated once
    # at import time and reused for every row.
    push_at=Column(DateTime,nullable=False,default=datetime.datetime.now,index=True)
    commits=relationship("Commit",backref="pushrecord",cascade='all,delete-orphan')
    def __repr__(self):
        return f"[{self.submitter.name}]-P->({self.branch.name})({self.push_at.strftime('%Y-%m-%d %H:%M:%S')})"
#push commit list
class Commit(Base):
    """One commit contained in a push."""
    __tablename__='commits'
    id=Column(Integer,primary_key=True,index=True)
    push_id=Column(Integer,ForeignKey('pushrecords.id',ondelete='CASCADE'))
    #pushrecord
    remoteid=Column(String(64),nullable=False,index=True)
    message=Column(String)
    url=Column(String(256))
    commit_at=Column(DateTime)
class MergeRecord(Base):
    """A merge request plus a snapshot of its source branch details."""
    __tablename__='mergerecords'
    id=Column(Integer,primary_key=True,index=True)
    remoteid=Column(String(64),nullable=False,unique=True)
    url=Column(String(512))
    snap_source_branch_name=Column(String(256))
    snap_source_repo_namespace=Column(String(256))
    snap_sub_name=Column(String(256),index=True)
    sub_id=Column(Integer,ForeignKey('submitters.id',ondelete='CASCADE'))
    #submitter
    target_branch_id=Column(Integer,ForeignKey('branches.id',ondelete='CASCADE'))
    #branch
    happened_repo_id=Column(Integer,ForeignKey('repositories.id',ondelete='CASCADE'))
    #repo
    title=Column(String(256))
    current_merge_state=Column(String(64))
    current_state=Column(String(64))
    create_at=Column(DateTime,index=True)
    update_at=Column(DateTime)
    merges=relationship("MergeLog",backref="merge",cascade='all,delete-orphan')
class MergeLog(Base):
    """State-transition history entry for one merge request."""
    __tablename__='mergelogs'
    id=Column(Integer,primary_key=True,index=True)
    merge_id=Column(Integer,ForeignKey('mergerecords.id',ondelete='CASCADE'))
    #merge
    current_merge_state=Column(String(64))
    current_state=Column(String(64))
    action=Column(String(32),index=True)
    extension_action=Column(String(32))
    record_at=Column(DateTime,index=True)
    # Composite index speeds up "recent events of a given action" queries.
    __table_args__ = (Index('action_at_index', "action", "record_at"), )
class NoticeReceiver(Base):
    """A notification endpoint (e.g. webhook/bot) watching repositories."""
    __tablename__='noticereceivers'
    id=Column(Integer,primary_key=True,index=True)
    name=Column(String(256),default='UndefinedReceiver',nullable=False,index=True)
    url=Column(String(256))
    stats_send_at=Column(String(64))
    token=Column(String(256))
    watchingrepos=relationship('Repository',secondary='repos_recvs')
class RepoRecv(Base):
    """Association table: repositories <-> notice receivers."""
    __tablename__='repos_recvs'
    id=Column(Integer,primary_key=True,index=True)
    repo_id=Column(Integer,ForeignKey('repositories.id',ondelete='CASCADE'))
    recv_id=Column(Integer,ForeignKey('noticereceivers.id',ondelete='CASCADE'))
    activate=Column(Boolean,default=True)
class RepoSub(Base):
    """Association table: repositories <-> submitters."""
    __tablename__='repos_subs'
    id=Column(Integer,primary_key=True,index=True)
    repo_id=Column(Integer,ForeignKey('repositories.id',ondelete='CASCADE'))
    sub_id=Column(Integer,ForeignKey('submitters.id',ondelete='CASCADE'))
| [
"1009609373@qq.com"
] | 1009609373@qq.com |
76c6c29d88c946054eeb89706a4d5b02734af8a6 | 7fcae6ef4351befd3b67105149eb010a11e8aaa3 | /2019/day8/Day8.py | 188f586edc020543678ccf55f0552b42a6e7c5ad | [] | no_license | RobinVercruysse/AdventOfCode | 2f4610c4e26262d0fd0b6c6cefe96f2d6a8f2f34 | a73f62c6c23813af0a303277262eafe4f248eb56 | refs/heads/master | 2022-12-08T21:26:39.046585 | 2022-11-27T22:19:23 | 2022-11-27T22:19:23 | 160,561,734 | 0 | 0 | null | 2020-10-13T11:18:07 | 2018-12-05T18:33:49 | Java | UTF-8 | Python | false | false | 1,777 | py | layers = []
width = 25
height = 6
def print_layer(layer):
    # Render one image layer as `height` rows of `width` digits each.
    for h in range(height):
        row = ''
        for w in range(width):
            # BUG FIX: the row stride is `width`, not `width - 1`; this now
            # matches the indexing used by the final rendering loop below.
            index = (h * width) + w
            row += str(layer[index])
        print(row)
# Read the single input line and slice it into width*height layers,
# tracking (w, h) cursors to detect when a full layer has been consumed.
with open('input') as fp:
    layer_index = 0
    w = 0
    h = 0
    current_layer = []
    for digit in fp.readline():
        current_layer.append(int(digit))
        w += 1
        if w >= width:
            w = 0
            h += 1
            if h >= height:
                h = 0
                layers.append(current_layer)
                current_layer = []
                layer_index += 1
# Part 1: find the layer with the fewest 0 digits and report the
# product of its 1-count and 2-count.
layer_fewest_zeroes = None
fewest_zeroes = -1
fewest_zeroes_ones = 0
fewest_zeroes_twos = 0
for layer in layers:
    zeroes = 0
    ones = 0
    twos = 0
    for digit in layer:
        if digit == 0:
            zeroes += 1
        elif digit == 1:
            ones += 1
        elif digit == 2:
            twos += 1
    if fewest_zeroes == -1 or zeroes < fewest_zeroes:
        fewest_zeroes = zeroes
        layer_fewest_zeroes = layer
        fewest_zeroes_ones = ones
        fewest_zeroes_twos = twos
print(fewest_zeroes)
print_layer(layer_fewest_zeroes)
print(fewest_zeroes_ones)
print(fewest_zeroes_twos)
print(fewest_zeroes_ones * fewest_zeroes_twos)
print('*'*40)
# Part 2: composite the layers front-to-back; per pixel the first
# non-transparent digit wins (0 = black -> space, 1 = white -> block).
for h in range(0, height):
    row = ''
    for w in range(0, width):
        if h == 0:
            index = w
        else:
            index = (h * width) + w
        final_digit = ''
        for layer in layers:
            current_digit = layer[index]
            if current_digit == 0:
                final_digit = ' '
                break
            elif current_digit == 1:
                final_digit = '■'
                break
        row += final_digit
    print(row)
| [
"cm9iaW4@protonmail.com"
] | cm9iaW4@protonmail.com |
4afc9e26c651892b4c66a8e40b134a2277fdb425 | be4759201435054c55ca76d4a973aee8c549e1a6 | /sockets/mn_edge_indices_list_socket.py | 82fca744d5684176868484ad02929b8ee962b360 | [] | no_license | vvFiCKvv/animation-nodes | 75f94549f82702b3ac5f548f009dd2202c694240 | 6988606b8c3601d428fa3fe32c77c7b440eb7c38 | refs/heads/master | 2021-01-17T00:29:13.299665 | 2015-04-25T16:46:20 | 2015-04-25T16:46:20 | 27,539,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | import bpy
from animation_nodes.mn_execution import nodePropertyChanged
from animation_nodes.mn_node_base import *
class mn_EdgeIndicesListSocket(mn_BaseSocket, mn_SocketProperties):
    """Animation Nodes socket carrying a list of edge index pairs."""
    bl_idname = "mn_EdgeIndicesListSocket"
    bl_label = "Edge Indices List Socket"
    dataType = "Edge Indices List"
    allowedInputTypes = ["Edge Indices List"]
    drawColor = (0, 0.55, 0.23, 1)
    def drawInput(self, layout, node, text):
        layout.label(text)
    def getValue(self):
        # Default value: an empty edge list.
        return []
    def setStoreableValue(self, data):
        pass
    def getStoreableValue(self):
        pass
    def getCopyValueFunctionString(self):
        # Copy function source: duplicates each inner index-pair list.
        return "return [edgeIndices[:] for edgeIndices in value]"
| [
"mail@jlucke.com"
] | mail@jlucke.com |
bd70c58aac8b6133299432ebb60e9a77ed4bca33 | 06a88c9651d07c26a7bcf8f50afc0a426af526b6 | /Classifying:Clustering_ForestCoverType_Project/covtype_classifier.py | 5524b548aeec27716d6cda01c1bc1611fa18624c | [] | no_license | psanch/coen140 | 1c1b6fe5bdbfcd81f02a15d5e1cc4e26949bc317 | 11ee72b22f0104ba65c4570e1c39c8552aa890d7 | refs/heads/master | 2020-03-21T18:49:05.852688 | 2018-10-25T23:07:40 | 2018-10-25T23:07:40 | 138,915,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,802 | py | # Pedro Sanchez
# ==================================================
# IMPORTS
# ==================================================
import numpy as np
import math
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# ==================================================
# DEFINE CONSTANTS
# ==================================================
NUM_DATASET_INSTANCES = 581012  # total rows in the full covtype dataset
NUM_FEATURES = 54 + 1  # 54 attributes plus one extra (bias/ones) column
TRAIN_FILE = "covtype_training.txt"
TEST_FILE = "covtype_testing.txt"
# ==================================================
# PARSE DATA FUNCTIONS
# ==================================================
def formatFileIntoNumpy(filename):
    # Parse a comma-separated data file into a feature matrix and labels.
    # Each line: NUM_FEATURES-1 feature values followed by the class label.
    # Returns (x, y): x is (numLines, NUM_FEATURES) with the last column
    # left as ones (bias term), y is (numLines, 1) holding the labels.
    # FIX: use a with-block so the file handle is always closed (the
    # original opened the file and never closed it).
    with open(filename, "r") as rawData:
        lines = rawData.read().split("\n")
    data_list = []
    y_list = []
    for i in range(len(lines)):
        string = lines[i].split(",")
        y_list.append(string[NUM_FEATURES-1:])
        data_list.append(string[:-1])
    x = np.ones((len(lines),NUM_FEATURES))
    y = np.ones((len(lines),1))
    for i in range(len(lines)):
        y[i] = y_list[i]
        for j in range(NUM_FEATURES-1):
            x[i][j] = data_list[i][j]
    return x,y
def getData(train_fname, test_fname):
    # Load the training and testing files; returns
    # (x_train, y_train, x_test, y_test).
    train = formatFileIntoNumpy(train_fname)
    test = formatFileIntoNumpy(test_fname)
    return train[0], train[1], test[0], test[1]
# ==================================================
# HELPER FUNCTIONS
# ==================================================
def getAccuracy(a,b):
    """Return the percentage of positions where a and b agree (compared
    as ints), printing it along the way; returns -1 on length mismatch."""
    if len(a) != len(b):
        print("getAccuracy needs elements of same length!")
        return -1
    total = len(a)
    matches = sum(1 for u, v in zip(a, b) if int(u) == int(v))
    pct = (float(matches) / float(total)) * 100
    print("\tAccuracy %:\n" + "\t" + str(pct))
    return pct
# ==================================================
# EXECUTE
# ==================================================
# Fit LDA and QDA on the training set and report accuracy on both splits.
print("Getting data...")
x_train, y_train, x_test, y_test = getData(TRAIN_FILE, TEST_FILE)
print("Training LDA...")
lda = LinearDiscriminantAnalysis(solver="svd")
model = lda.fit(x_train, y_train.ravel())
print("Predicting LDA over Training Data...")
y_train_pred = model.predict(x_train)
LDA_train_accuracy = getAccuracy(y_train_pred, y_train)
print("Predicting LDA over Testing Data...")
y_test_pred = model.predict(x_test)
LDA_test_accuracy = getAccuracy(y_test_pred, y_test)
print("Training QDA...")
qda = QuadraticDiscriminantAnalysis()
model = qda.fit(x_train, y_train.ravel())
print("Predicting QDA over Training Data...")
y_train_pred = model.predict(x_train)
QDA_train_accuracy = getAccuracy(y_train_pred, y_train)
print("Predicting QDA over Testing Data...")
y_test_pred = model.predict(x_test)
QDA_test_accuracy = getAccuracy(y_test_pred, y_test)
| [
"pedrosanchezm97@gmail.com"
] | pedrosanchezm97@gmail.com |
785d121eb91f18fa2396671c6a87ce682ba81220 | 74f6da0c3f197ab395caafc321575e6374e2f6dc | /bitPredict.py | 2aa60a2b346e044582f7320e48a2fd194e28d750 | [] | no_license | RNNCCL/bitPredict | 1e80f7d5830a596043d791cf340e2a237c3cb476 | 98ebcd86967e5f3eca854081ba789d5664d06e4e | refs/heads/master | 2021-05-30T00:56:19.562494 | 2015-10-01T05:16:22 | 2015-10-01T05:16:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,348 | py | import math
import urllib
import contextlib # for urllib.urlopen
import copy
import os
import tkMessageBox
import tkSimpleDialog
from datetime import datetime
from datetime import date
from Tkinter import *
import time
import webbrowser
from eventBasedAnimationClass import EventBasedAnimationClass
class Matrix(object):
    """Dense rows x cols matrix with determinant/inverse support (Python 2).

    Entries are copied out of the nested list A. self.D caches the last
    computed determinant; self.T caches the last computed transpose.
    """
    def __init__(self, rows, cols, A):
        self.rows = rows
        self.cols = cols
        self.entries = [[0 for j in xrange(cols)] for i in xrange(rows)]
        for i in xrange(rows):
            for j in xrange(cols):
                self.entries[i][j] = A[i][j]
        self.D = None
        self.T = None
    def __mul__(self, other):
        # Dispatch on the right operand: matrix, vector, or scalar.
        if type(other) == Matrix:
            return self.matrixMatrixMultiplication(other)
        elif type(other) == Vector:
            return self.matrixVectorMultiplication(other)
        elif type(other) == int or type(other) == float:
            return self.matrixScalarMultiplication(other)
    def matrixMatrixMultiplication(self, other):
        # Row-by-column dot products; requires self.cols == other.rows.
        # NOTE(review): `multiplied` is allocated as other.cols x self.rows
        # but indexed below as self.rows x other.cols; this is only safe
        # when those two sizes coincide (as in leastSquares) — confirm.
        if other.rows == self.cols:
            multiplied = ([[0 for i in xrange(self.rows)]
                for j in xrange(other.cols)])
            for row in xrange(self.rows):
                vec1 = Vector(self.cols, self.entries[row])
                for col in xrange(other.cols):
                    vec2entries = ([other.entries[r][col]
                        for r in xrange(other.rows)])
                    vec2 = Vector(other.rows, vec2entries)
                    multiplied[row][col] = vec1 * vec2
            return Matrix(self.rows, other.cols, multiplied)
        else:
            raise Exception("Cannot be multiplied")
    def matrixVectorMultiplication(self, other):
        # Matrix * Vector -> Vector of per-row dot products.
        if other.dimension == self.cols:
            multiplied = [0 for i in xrange(self.rows)]
            for row in xrange(self.rows):
                vec1 = Vector(self.cols, self.entries[row])
                vec2 = other
                multiplied[row] = vec1 * vec2
            return Vector(self.rows, multiplied)
        else:
            raise Exception("Cannot be multiplied")
    def matrixScalarMultiplication(self, other):
        # Scale every entry; deepcopy keeps self.entries untouched.
        multiplied = copy.deepcopy(self.entries)
        for row in xrange(self.rows):
            for col in xrange(self.cols):
                multiplied[row][col] *= other
        return Matrix(self.rows, self.cols, multiplied)
    def __rmul__(self, other):
        return self * other
    def __div__(self, other):
        # Element-wise division by an int/float scalar (Python 2 "/").
        newMatrix = Matrix(self.rows, self.cols, self.entries)
        if isinstance(other, int) or isinstance(other, float):
            for row in xrange(self.rows):
                for col in xrange(self.cols):
                    newMatrix.entries[row][col] = (float(
                        newMatrix.entries[row][col])/other)
        return newMatrix
    def determinant(self):
        # Laplace (cofactor) expansion along the first row; caches self.D.
        if self.rows == self.cols:
            n, self.D = self.rows, 0
            if n == 1:
                return self.entries[0][0]
            else:
                for i in xrange(self.cols):
                    self.D += (self.entries[0][i] * self.cofactor(0, i))
                return self.D
    def inverse(self):
        # inverse = adjoint / determinant (classical adjoint method).
        if self.D == 0:
            raise Exception("Inverse doesn't exist")
        else:
            return self.adjoint()/self.determinant()
    def cofactor(self, a, b):
        # Signed minor: delete row a and column b, take the determinant.
        assert (self.rows == self.cols)
        n = self.rows
        residualMatrix = [[0 for j in xrange(n-1)] for i in xrange(n-1)]
        crow = 0
        for i in xrange(n):
            if i != a:
                ccol = 0
                for j in xrange(n):
                    if j != b:
                        residualMatrix[crow][ccol] = self.entries[i][j]
                        ccol += 1
                crow += 1
        return ((-1)**(a+b)) * (Matrix(self.rows - 1, self.cols - 1,
            residualMatrix)).determinant()
    def cofactorMatrix(self):
        assert self.rows == self.cols
        n = self.rows
        cofMatrix = [[0 for j in xrange(n)] for i in xrange(n)]
        for i in xrange(n):
            for j in xrange(n):
                cofMatrix[i][j] = self.cofactor(i, j)
        return Matrix(n, n, cofMatrix)
    def adjoint(self):
        # Adjoint (adjugate) = transpose of the cofactor matrix.
        assert self.rows == self.cols
        n = self.rows
        return self.cofactorMatrix().transpose()
    def transpose(self):
        B = [[0 for col in xrange(self.rows)] for row in xrange(self.cols)]
        for row in xrange(self.rows):
            for col in xrange(self.cols):
                B[col][row] = self.entries[row][col]
        self.T = Matrix(self.cols, self.rows, B)
        return self.T
    def append(self, n):
        # appends a column of n's to the right of matrix
        newMatrix = ([[0 for i in xrange(self.cols + 1)]
            for j in xrange(self.rows)])
        for row in xrange(self.rows):
            for col in xrange(self.cols):
                newMatrix[row][col] = self.entries[row][col]
            newMatrix[row][self.cols] = n
        return Matrix(self.rows, self.cols+1, newMatrix)
class Vector(Matrix):
    """Dimension-d vector; reuses Matrix operators where sensible.

    NOTE(review): __init__ intentionally skips Matrix.__init__, so rows/
    cols/D/T are never set on Vector instances — confirm callers rely
    only on dimension/entries.
    """
    def __init__(self, dimension, b):
        self.dimension = dimension
        self.entries = b
    def __mul__(self, other):
        # Vector * Vector is the dot product; Vector * Matrix defers to
        # Matrix.__mul__ with the vector as the right operand.
        if type(other) == Vector:
            product = 0
            assert self.dimension == other.dimension
            for i in xrange(self.dimension):
                product += self.entries[i] * other.entries[i]
            return product
        elif type(other) == Matrix:
            return other * self
def leastSquares(A, b):
    # matrix A, vector b. returns vector with slope and intercept
    # Solves the normal equations (A^T A) x = A^T b.
    return (A.transpose() * A).inverse() * (A.transpose() * b)
# CITATION: function rgbString taken from Course notes
def rgbString(red, green, blue):
    """Return a Tk-style '#rrggbb' color string for the given components."""
    return "#{:02x}{:02x}{:02x}".format(red, green, blue)
# CITATION: function readWebPage taken from Course notes
def readWebPage(url):
    # reads from url and returns it
    # (HTTPS-only; Python 2 urllib with contextlib.closing for cleanup)
    assert(url.startswith("https://"))
    with contextlib.closing(urllib.urlopen(url)) as fin:
        return fin.read()
def writeFile(filename, contents, mode = "a"):
    # writes contents to filename
    # Lists are written element-by-element; anything else via str().
    fout = open(filename, mode)
    if type(contents) == list:
        for i in xrange(len(contents)):
            fout.write(str(contents[i]))
    else: fout.write(str(contents))
    fout.close()
def makeFileIntoArray(filename):
    """Read filename and return its contents as a list of lines (split on
    newline; a trailing newline therefore yields a final "" entry)."""
    with open(filename, "rt") as fin:
        raw = fin.read()
    return raw.split("\n")
def getSpotRate():
    # returns the spot rate at that moment. returns a STRING
    # Parses the JSON reply by splitting on double quotes; index 3 is the
    # "amount" value in Coinbase's {"amount":"...","currency":"USD"} body.
    url = "https://api.coinbase.com/v1/prices/spot_rate"
    priceAndCurrency = readWebPage(url)
    priceAndCurrency = priceAndCurrency.split("\"")
    priceIndex = 3
    price = priceAndCurrency[priceIndex]
    return price
class Application(EventBasedAnimationClass):
    """Top-level controller: polls Coinbase once per second and delegates
    drawing and input handling to the currently active Page."""
    def __init__(self):
        self.width, self.height = 1200, 600
        self.spotRate, self.timerCount = 0, 0
        self.lastEntry = 0.0
        super(Application, self).__init__(self.width, self.height)
    def change(self, activePage):
        # Page-switch callback handed to every Page instance.
        self.activePage = activePage(self.change)
    def initAnimation(self):
        self.timerDelay = 1000
        self.activePage = HomePage(self.change)
        self.root.bind("<Motion>", lambda event: self.onMouseMotion(event))
    def onTimerFired(self):
        # Runs every second: refresh the spot rate, service page-level
        # requests, and every 120 ticks poll for a fresh historical entry.
        # NOTE(review): `self.activePage == PredictPage` compares an
        # instance against a class, which is always False — confirm
        # whether isinstance() was intended.
        self.timerCount += 1
        self.spotRate = getSpotRate()
        if self.activePage.data:
            self.callCreateDataFile()
        if self.activePage.chartIntermediate:
            self.changeToChartPage()
        if self.timerCount >= 120:
            # two minutes up, look for new data
            self.newEntry = self.activePage.getNewEntry()
            if (self.activePage == PredictPage and self.activePage.frozen and
                self.activePage.promptToBuy and self.newEntry > self.lastEntry):
                self.displayDialog("BUY NOW!")
                # was frozen and it's time to BUY NOW!
            elif (self.activePage == PredictPage and self.activePage.frozen and
                self.activePage.promptToSell and self.newEntry<self.lastEntry):
                self.displayDialog("SELL NOW!")
                # was frozen and it's time to SELL NOW!
            self.lastEntry = self.newEntry
            self.timerCount = 0
        self.redrawAll()
    def callCreateDataFile(self):
        self.redrawAll()
        self.activePage.data = False
        self.activePage.createDataFile()
    def changeToChartPage(self):
        self.redrawAll()
        self.activePage.change(ChartPage)
        self.activePage.chartIntermediate = False
    def displayDialog(self, msg):
        # Modal info popup (tkMessageBox).
        message = msg
        title = "Info box"
        tkMessageBox.showinfo(title, message)
    def onKeyPressed(self, event):
        self.activePage.onKeyPressed(event)
        self.redrawAll()
    def onMousePressed(self, event):
        self.activePage.onMousePressed(event)
        self.redrawAll()
    def onMouseMotion(self, event):
        self.activePage.onMouseMotion(event)
        self.redrawAll()
    def redrawAll(self):
        # All drawing is delegated to the active page.
        self.activePage.draw(self.canvas, self.spotRate)
class Page(object):
    def __init__(self, change):
        # `change` is the Application's page-switch callback; cached data
        # from tempDir/bitcoinHistory2.txt seeds the default 1-month chart.
        # NOTE(review): locals wby2/h appear unused here.
        self.pageWidth, self.pageHeight = 1200, 600
        self.appNameX, self.appNameY = self.pageWidth/4, self.pageHeight/8
        wby2, h = 50, 40
        self.initializeBooleanVariables()
        self.change = change
        self.initializeAllButtonVariables()
        filename = "tempDir" + os.sep + "bitcoinHistory2.txt"
        (self.days1Month, self.prices1Month) = self.getNMonthsData(filename,1)
        self.initializeChartStuff()
        self.want1Month = True
        self.want6Months, self.want3Months, self.want1Year = False, False, False
        self.justStarted = False
        self.chartIntermediate = False
def initializeBooleanVariables(self):
self.predict = False
self.chart = False
self.data = False
    def initializeAllButtonVariables(self):
        # Lay out the bottom toolbar as six 100px-wide buttons spaced
        # 120px apart, positioned relative to the Predict button.
        wby2, h, mgn, space = 50, 40, 80, 120
        self.predictX1 = self.pageWidth/2 - wby2 - mgn
        self.predictY1 = self.pageHeight - h
        self.predictX2 = self.pageWidth/2 + wby2 - mgn
        self.predictY2 = self.pageHeight
        self.chartX1, self.chartX2 = self.predictX1-space, self.predictX2-space
        self.chartY1, self.chartY2 = self.predictY1, self.predictY2
        self.dataX1, self.dataX2 = self.predictX1+space, self.predictX2 + space
        self.dataY1, self.dataY2 = self.predictY1, self.predictY2
        self.homeX1, self.homeX2 = self.predictX1-2*space,self.predictX2-2*space
        self.homeY1, self.homeY2 = self.predictY1, self.predictY2
        self.personalizedX1 = self.predictX1 + 2*space
        self.personalizedX2 = self.predictX2 + 2*space
        self.personalizedY1, self.personalizedY2 = self.predictY1,self.predictY2
        self.helpX1 = self.predictX1 + 3*space
        self.helpX2 = self.predictX2 + 3*space
        self.helpY1, self.helpY2 = self.predictY1, self.predictY2
    def initializeChartStuff(self):
        # Chart geometry plus per-day / per-dollar scaling factors derived
        # from the currently selected date range (defaults to 1 month).
        self.lengthOfXAxisInPixels, self.lengthOfYAxisInPixels = 1000, 300
        self.chartWidth = self.lengthOfXAxisInPixels
        self.chartHeight = self.lengthOfYAxisInPixels
        leftMargin, botMargin = 150, 100
        self.originX = leftMargin
        self.originY = self.pageHeight - botMargin
        self.days, self.prices = self.days1Month, self.prices1Month
        self.xmax, self.ymax = len(self.days), max(self.prices)
        self.horizScalingFactor = float(self.lengthOfXAxisInPixels)/self.xmax
        # pixel per day
        self.vertScalingFactor = float(self.lengthOfYAxisInPixels)/self.ymax
        # pixel per dollar
def createDataFile(self):
    """Rebuild tempDir/bitcoinHistory2.txt with ~1 year of history.

    Pages backwards through Coinbase's historical-price API, writing
    normalized lines to the file, until a timestamp older than one
    year is seen. self.data flags the UI's "loading" state while the
    download runs.
    """
    self.data = True
    url = "https://api.coinbase.com/v1/prices/historical?page="
    path = "tempDir" + os.sep + "bitcoinHistory2.txt"
    # Bug fix: the old code called os.makedirs("tempDir")
    # unconditionally when the file was missing, which raised OSError
    # if the directory already existed. Ensure the directory, then
    # truncate/create the file in one place.
    if not os.path.isdir("tempDir"):
        os.makedirs("tempDir")
    writeFile(path, "", "wt")
    reachedLastYear = False
    pageNo = 1
    while not reachedLastYear:
        urlWithPage = url + str(pageNo)
        urlContents = readWebPage(urlWithPage)
        dateLen = 10
        # each page starts with a "YYYY-MM-DD" timestamp
        reachedLastYear = self.checkLastYear(str(urlContents[0:dateLen]))
        normalizedContents = self.normalize(urlContents)
        writeFile(path, normalizedContents)
        pageNo += 1
    self.data = False
def getNewEntry(self):
    # gets new entry if released by coinbase
    # Polls page 1 of the API; if its newest (first) entry differs from
    # the first line of the local history file, prepend it to the file.
    url = "https://api.coinbase.com/v1/prices/historical?page=1"
    path = "tempDir" + os.sep + "bitcoinHistory2.txt"
    with open(path, "rt") as fin:
        originalContents = fin.read()
    contents = makeFileIntoArray(path)
    urlContents = readWebPage(url)
    urlContents = urlContents.split("\n")
    urlContents = urlContents[0]  # keep only the newest entry
    # normalize() returns a list; element 0 is the normalized line
    possibleNewEntry = self.normalize(urlContents)
    if possibleNewEntry[0].split("\n")[0] != contents[0]:
        newContents = (str(possibleNewEntry[0].split("\n")[0]) +
            "\n" + str(originalContents))
        writeFile(path, newContents, "wt")
def normalize(self, urlContents):
    # normalizes all timestamps to CMU's timezone,
    # i.e. UTC-5
    # reason for having this fn: coinbase randomizes the timezone it
    # displays its data in, every instant.
    # Input: raw page text, one "YYYY-MM-DDThh:mm:ss-zz:zz price"-style
    # line per entry (fixed column offsets below). Returns a list of
    # rewritten lines "YYYY-MM-DDhh:mm<price>\n".
    cmuTimezone = -5 # relative to UTC
    newContents = "" # the contents as we want them
    # NOTE(review): newContents is never used below -- dead local.
    urlContents = urlContents.split("\n")
    hIndex, hhmmLength, tzIndex, dateLen = 11, 5, 21, 10
    for i in xrange(len(urlContents)):
        timestamp = urlContents[i][hIndex : hIndex + hhmmLength]
        # in hh:mm format
        coinbaseTimezone = int(urlContents[i][tzIndex - 2 : tzIndex + 1])
        mIdx, hrsInDay, priceIdx = 3, 24, 26
        hour, minute = int(timestamp[0 : 2]), timestamp[mIdx : mIdx + 2]
        # shift the hour into UTC-5, wrapping around the 24-hour clock
        normalizedHour = (str((hour + cmuTimezone - coinbaseTimezone)
            % hrsInDay))
        newDate = urlContents[i][0 : dateLen]
        if len(normalizedHour) == 1: normalizedHour = "0" + normalizedHour
        if int(normalizedHour) + coinbaseTimezone - cmuTimezone >= 24:
            newDate = self.timezoneTooPositiveDecreaseDate(newDate)
            # timezone was so positive that date changed
        elif int(normalizedHour) + coinbaseTimezone - cmuTimezone < 0:
            newDate = self.timezoneTooNegativeIncreaseDate(newDate)
            # timezone was so negative that date changed
        urlContents[i] = (newDate + normalizedHour
            + ":" + str(minute) + str(urlContents[i][priceIdx:]) + "\n")
    return urlContents
def timezoneTooNegativeIncreaseDate(self, d):
    """Return the ISO string ("YYYY-MM-DD") of the day after *d*.

    Used when timezone normalization pushes a timestamp past midnight
    into the next day. Bug fix: the old hand-rolled month-length table
    hard-coded February at 28 days, so Feb 28 of a leap year jumped
    straight to Mar 1 instead of Feb 29; date arithmetic with
    timedelta handles leap years correctly.
    """
    from datetime import timedelta
    d = date(int(d[:4]), int(d[5:7]), int(d[8:]))
    return str(d + timedelta(days = 1))
def timezoneTooPositiveDecreaseDate(self, d):
    """Return the ISO string ("YYYY-MM-DD") of the day before *d*.

    Used when timezone normalization pulls a timestamp back before
    midnight into the previous day. Bug fix: the old month-length
    table ignored leap years, so Mar 1 of a leap year went to Feb 28
    instead of Feb 29; timedelta arithmetic is correct for all dates.
    """
    from datetime import timedelta
    d = date(int(d[:4]), int(d[5:7]), int(d[8:]))
    return str(d - timedelta(days = 1))
def checkLastYear(self, dateOnPage):
    """Return True if dateOnPage ("YYYY-MM-DD") is more than one year
    before today, i.e. history paging can stop.

    Bug fix: the old version only matched dates whose year was exactly
    today.year - 1, so anything two or more years old returned False
    and paging never terminated on older archives. Comparing date
    objects against a one-year-ago cutoff covers every older date
    while giving the same answers for dates within the last year.
    """
    year, month, day = [int(part) for part in dateOnPage.split("-")]
    today = date.today()
    try:
        cutoff = today.replace(year = today.year - 1)
    except ValueError:
        # today is Feb 29 and last year had no Feb 29
        cutoff = today.replace(year = today.year - 1, day = 28)
    return date(year, month, day) < cutoff
def onMousePressed(self, event):
    # Shared nav-bar click handling: route a click on one of the
    # bottom buttons to the matching page, updating the state flags
    # first. self.change(PageClass) swaps the active page.
    x, y = event.x, event.y
    if (self.predictX1 < x < self.predictX2 and
            self.predictY1 < y < self.predictY2):
        self.predict, self.chart, self.data = True, False, False
        self.change(PredictPage)
    elif (self.chartX1 < x < self.chartX2 and self.chartY1<y<self.chartY2):
        # chartIntermediate makes the next draw() show a loading
        # screen while the chart data is being prepared
        self.chart, self.chartIntermediate = True, True
        self.predict, self.data = False, False
    elif (self.dataX1 < x < self.dataX2 and self.dataY1 < y < self.dataY2):
        # "Refresh Data": the data flag triggers a re-download
        self.data = True
    elif (self.homeX1 < x < self.homeX2 and self.homeY1 < y < self.homeY2):
        self.predict, self.data, self.chart = False, False, False
        self.change(HomePage)
    elif (self.personalizedX1 < x < self.personalizedX2 and
            self.personalizedY1 < y < self.personalizedY2):
        self.predict, self.data, self.chart = False, False, False
        self.change(PersonalizedCharts)
    elif (self.helpX1 < x < self.helpX2 and self.helpY1 < y < self.helpY2):
        self.predict, self.data, self.chart = False, False, False
        self.change(Help)
def draw(self, canvas, spotRate):
    # Clear the canvas and repaint the shared bottom navigation bar.
    # Subclasses call this via super() before drawing their content.
    # spotRate is unused here but part of the shared draw signature.
    canvas.delete(ALL)
    self.makePredictButton(canvas)
    self.makeChartsButton(canvas)
    self.makeLoadDataButton(canvas)
    self.makeHomeButton(canvas)
    self.makePersonalizedChartsButton(canvas)
    self.makeAboutButton(canvas)
# rgbString(30, 104, 255) is the dodger blue color which is the main color
# I use in my app.
def makeChartsButton(self, canvas):
    """Draw the blue "View Charts" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.chartX1, self.chartY1,
                            self.chartX2, self.chartY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.chartX1 + halfWidth, self.chartY1 + height/2,
                       text = "View Charts", fill = "snow")
def makePredictButton(self, canvas):
    """Draw the blue "Predict!" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.predictX1, self.predictY1,
                            self.predictX2, self.predictY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.predictX1 + halfWidth, self.predictY1 + height/2,
                       text = "Predict!", fill = "snow")
def makeLoadDataButton(self, canvas):
    """Draw the blue "Refresh Data" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.dataX1, self.dataY1,
                            self.dataX2, self.dataY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.dataX1 + halfWidth, self.dataY1 + height/2,
                       text = "Refresh Data", fill = "snow")
def makeHomeButton(self, canvas):
    """Draw the blue "Home" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.homeX1, self.homeY1,
                            self.homeX2, self.homeY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.homeX1 + halfWidth, self.homeY1 + height/2,
                       text = "Home", fill = "snow")
def makePersonalizedChartsButton(self, canvas):
    """Draw the blue "Personalize" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.personalizedX1, self.personalizedY1,
                            self.personalizedX2, self.personalizedY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.personalizedX1 + halfWidth,
                       self.personalizedY1 + height/2,
                       text = "Personalize", fill = "snow")
def makeAboutButton(self, canvas):
    """Draw the blue "About" nav-bar button."""
    halfWidth, height = 50, 40
    blue = rgbString(30, 104, 255)
    canvas.create_rectangle(self.helpX1, self.helpY1,
                            self.helpX2, self.helpY2,
                            fill = blue, outline = blue)
    canvas.create_text(self.helpX1 + halfWidth, self.helpY1 + height/2,
                       text = "About", fill = "snow")
def drawLoadingScreen(self, canvas):
    """Cover the whole page with a black backdrop and a centred
    "please wait" notice, shown during downloads/memoization."""
    width, height = self.pageWidth, self.pageHeight
    canvas.create_rectangle(0, 0, width, height, fill = "black")
    canvas.create_text(width/2, height/2,
                       text = "Loading... Please be patient..",
                       font = "Arial 40 bold", fill = "snow")
def drawBanner(self, canvas):
    """Paint the blue banner across the top quarter of the page with
    the app name in it."""
    blue = rgbString(30, 104, 255)  # dodger blue, the app's theme color
    canvas.create_rectangle(0, 0, self.pageWidth, self.pageHeight/4,
                            fill = blue, outline = blue)
    canvas.create_text(self.appNameX, self.appNameY,
                       text = "bitPredict", fill = "snow",
                       font = "Trebuchet 100 bold italic")
def getPriceArray(self, filename):
    """Read the normalized history file and return a list holding, for
    each line, everything past the 15-character "YYYY-MM-DDhh:mm"
    timestamp (i.e. the price substring)."""
    priceIdx = 15
    with open(filename, "rt") as fin:
        lines = fin.read().split("\n")
    return [line[priceIdx:] for line in lines]
def getLastNMaximas(self, N):
    # Scan the price history (file is ordered newest-first) and return
    # the first N local maxima -- entries >= both neighbours.
    # Side effects: caches self.priceArray and self.maximas.
    # NOTE(review): assumes at least N maxima exist before the end of
    # the array; otherwise the index i+1 eventually overruns.
    filename = "tempDir" + os.sep + "bitcoinHistory2.txt"
    self.priceArray = self.getPriceArray(filename)
    self.maximas, noOfMax, i = [], 0, 1
    while noOfMax < N:
        if (float(self.priceArray[i]) >= float(self.priceArray[i - 1]) and
                float(self.priceArray[i]) >= float(self.priceArray[i + 1])):
            self.maximas += [float(self.priceArray[i])]
            noOfMax += 1
        i += 1
    return self.maximas
def getLastNMinimas(self, N):
    # Mirror of getLastNMaximas: return the first N local minima
    # (entries <= both neighbours) from the newest-first history file.
    # Side effects: caches self.priceArray and self.minimas.
    filename = "tempDir" + os.sep + "bitcoinHistory2.txt"
    self.priceArray = self.getPriceArray(filename)
    self.minimas, noOfMin, i = [], 0, 1
    while noOfMin < N:
        if (float(self.priceArray[i]) <= float(self.priceArray[i - 1]) and
                float(self.priceArray[i]) <= float(self.priceArray[i + 1])):
            self.minimas += [float(self.priceArray[i])]
            noOfMin += 1
        i += 1
    return self.minimas
def getResistanceLine(self):
    """Return the resistance level: the average of the ten most recent
    local price maxima. Also caches them on self.maximas."""
    N = 10
    self.maximas = self.getLastNMaximas(N)
    return sum(self.maximas) / float(N)
def getSupportLine(self):
    """Return the support level: the average of the ten most recent
    local price minima. Also caches them on self.minimas."""
    N = 10
    self.minimas = self.getLastNMinimas(N)
    return sum(self.minimas) / float(N)
def getNMonthsData(self, filename, N):
    # sifts through the file and creates two arrays of time coordinate and
    # varying bitcoin price.
    # Returns (days, prices), oldest-first, spanning roughly the last
    # N months and seeded with today's live spot rate.
    days, prices = [date.today()], [float(getSpotRate())]
    with open(filename, "rt") as fin:
        contents = fin.read()
    contents = contents.split("\n")
    current = date.today()
    yrIdx, mIdx, dIdx, hIdx, minIdx, priceIdx = 4, 5, 8, 10, 13, 15
    i = 0
    month = date.today().month
    # NOTE(review): `day` is only bound inside the loop body; the
    # first evaluation of the condition relies on short-circuiting
    # (the month clause is true initially) -- confirm N >= 1 always.
    while (((date.today().month - month) % 12 < N) or
            ((date.today().month - month) % 12 == N and
            day >= date.today().day)):
        year = int(contents[i][0 : yrIdx])
        month = int(contents[i][mIdx : mIdx + 2])
        day = int(contents[i][dIdx : dIdx + 2])
        if (date(year, month, day) != current):
            # keep one point per calendar day (the newest entry seen)
            days = [date(year, month, day)] + days
            prices = [float(contents[i][priceIdx:])] + prices
            current = date(year, month, day)
        i += 1
    return (days, prices)
def getOneYearData(self, filename):
    # sifts through the file and creates two arrays of time coordinate and
    # varying bitcoin price.
    # Returns (days, prices), oldest-first, covering at most the last
    # calendar year and seeded with today's live spot rate.
    days, prices = [date.today()], [float(getSpotRate())]
    with open(filename, "rt") as fin:
        contents = fin.read()
    contents = contents.split("\n")
    current = date.today()
    yrIdx, mIdx, dIdx, hIdx, minIdx, priceIdx = 4, 5, 8, 10, 13, 15
    for i in xrange(len(contents)):
        year = int(contents[i][0 : yrIdx])
        month = int(contents[i][mIdx : mIdx + 2])
        day = int(contents[i][dIdx : dIdx + 2])
        # NOTE(review): date(year-1, Feb, 29) would raise on a leap-day
        # "today" -- confirm this never runs on Feb 29.
        if (current >= date(date.today().year - 1,
                date.today().month, date.today().day)):
            if (date(year, month, day) != current):
                # keep one point per calendar day
                days = [date(year, month, day)] + days
                prices = [float(contents[i][priceIdx:])] + prices
                current = date(year, month, day)
        else:
            # entries are newest-first; everything after this is older
            break
    return (days, prices)
def drawScaledAxes(self, canvas):
    # draws the Axes scaled according to parameters given as input.
    canvas.create_line(self.originX, self.originY,
        self.originX, self.originY - self.chartHeight)
    # draws Y axis
    canvas.create_line(self.originX, self.originY,
        self.originX + self.chartWidth, self.originY)
    # draws X axis
    self.hashXAxis(canvas)
    self.hashYAxis(canvas)
def hashXAxis(self, canvas):
    # Label the x axis with dates, one label every `step` data points;
    # the step depends on which time range is currently selected.
    spacing = 10
    i = 0
    # NOTE(review): if none of the want* flags is True, `step` stays
    # unbound and the while test raises -- the pages appear to keep
    # exactly one flag True at all times; confirm.
    if self.want1Year: step = 30
    elif self.want6Months: step = 20
    elif self.want3Months: step = 15
    elif self.want1Month: step = 3
    while (i <= len(self.days) - step):
        canvas.create_text(self.originX + i * self.horizScalingFactor,
            self.originY + spacing, text = self.display(self.days[i]))
        i += step
    canvas.create_text(self.originX + self.lengthOfXAxisInPixels,
        self.originY + spacing, text = self.display(self.days[-1]))
    # display today's date
def hashYAxis(self, canvas):
    # Label the y axis: the maximum price at the top, then a dollar
    # label every $200 from the origin upward.
    spacing = 30
    canvas.create_text(self.originX - spacing,
        self.originY - self.lengthOfYAxisInPixels,
        text = "$ " + str(self.ymax))
    i = 0
    while i <= self.ymax:
        canvas.create_text(self.originX - spacing,
            self.originY - i * self.vertScalingFactor,
            text = "$ " + str(i))
        i += 200
def plotChart(self, canvas, noOfMonths):
    # Select the memoized (days, prices) series for the requested span
    # (12/6/3/1 months), rescale the axes, and draw the price polyline.
    # On the very first draw after construction, justStarted keeps the
    # series chosen by __init__.
    filename = "tempDir" + os.sep + "bitcoinHistory2.txt"
    # NOTE(review): `filename` is unused in this method.
    if self.justStarted: self.justStarted = False
    elif noOfMonths == 12:
        self.days, self.prices = self.days1Year, self.prices1Year
    elif noOfMonths == 6:
        self.days, self.prices = self.days6Months, self.prices6Months
    elif noOfMonths == 3:
        self.days, self.prices = self.days3Months, self.prices3Months
    elif noOfMonths == 1:
        self.days, self.prices = self.days1Month, self.prices1Month
    self.adjustScale()
    self.drawScaledAxes(canvas)
    oldScreenX = self.originX
    oldScreenY = self.originY - self.vertScalingFactor * self.prices[0]
    for i in xrange(len(self.days)):
        screenX = (self.originX + i*self.horizScalingFactor)
        screenY = self.originY - (self.prices[i]*self.vertScalingFactor)
        canvas.create_line(screenX, screenY, oldScreenX, oldScreenY)
        oldScreenX, oldScreenY = screenX, screenY
def adjustScale(self):
    """Recompute the axis maxima and the pixel-per-unit scale factors
    for the currently selected self.days / self.prices series."""
    self.xmax = len(self.days)
    self.ymax = max(self.prices)
    # pixels per day along x, pixels per dollar along y
    self.horizScalingFactor = self.lengthOfXAxisInPixels / float(self.xmax)
    self.vertScalingFactor = self.lengthOfYAxisInPixels / float(self.ymax)
def display(self, date):
    """Format a datetime.date as e.g. "Jan 5" for axis labels."""
    monthNames = ("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec")
    return "%s %d" % (monthNames[date.month - 1], date.day)
def displaySpotRateInCorner(self, canvas, spotRate):
    """Draw the live spot rate (a string such as "350.12") in the
    top-right corner, anchored north-east.

    Removed the unused local `lineSpace` left over from an earlier
    layout.
    """
    canvas.create_text(self.pageWidth, 0, anchor = NE,
        text = "$ " + spotRate, font = "Helvetica 50 bold")
def inChart(self, x, y):
    """Return True when screen point (x, y) lies strictly inside the
    chart's plotting rectangle."""
    withinX = self.originX < x < self.originX + self.chartWidth
    withinY = self.originY - self.chartHeight < y < self.originY
    return withinX and withinY
def getDaysIndexFromChartX(self, x):
    """Map a screen x coordinate to a (fractional) index into
    self.days, using the horizontal pixels-per-day factor."""
    return (x - self.originX) / self.horizScalingFactor
def getChartYFromPricesIndex(self, index):
    """Map an index into self.prices to its on-screen y coordinate
    (screen y grows downward, so higher prices sit above the origin)."""
    return self.originY - self.prices[index] * self.vertScalingFactor
class HomePage(Page):
    # Landing page: shows the live spot rate in large type plus a
    # shortcut button that opens coinbase.com in the default browser.
    def __init__(self, change):
        super(HomePage, self).__init__(change)
        # centre of the large spot-rate readout
        self.spotRateX, self.spotRateY = self.pageWidth/2, self.pageHeight*5/8
        self.lastRate = 0.0  # last spot rate rendered, as a float
    def draw(self, canvas, spotRate):
        # spotRate arrives as a string, e.g. "350.12"
        if self.chartIntermediate:
            self.drawLoadingScreen(canvas)
        elif self.data:
            self.drawLoadingScreen(canvas)
        else:
            super(HomePage, self).draw(canvas, spotRate)
            # banner across the top quarter of the page
            canvas.create_rectangle(0, 0, self.pageWidth, self.pageHeight/4,
                fill = rgbString(30, 104, 255), outline=rgbString(30, 104, 255))
            canvas.create_text(self.appNameX, self.appNameY,
                text = "bitPredict", fill = "snow",
                font = "Trebuchet 100 bold italic")
            if self.chart:
                # a nav-bar click set the chart flag: switch pages
                self.change(ChartPage)
            else:
                canvas.create_text(self.spotRateX, self.spotRateY,
                    text = "$ "+spotRate, fill = "black",
                    font = "Helvetica 200 bold")
                self.lastRate = float(spotRate)
                self.makeLogInButton(canvas)
    def makeLogInButton(self, canvas):
        # Draw the navy "Log into Coinbase" button and remember its
        # centre (butX, butY) for hit-testing in onMousePressed.
        lineSpace, vertR, horizR = 40, 20, 60
        self.butX, self.butY = self.pageWidth*3/4, self.pageHeight/8
        canvas.create_rectangle(self.butX - horizR, self.butY - vertR,
            self.butX + horizR, self.butY + vertR, fill = rgbString(0, 0, 128))
        canvas.create_text(self.butX, self.butY, text = "Log into Coinbase",
            fill = "snow")
    def onMousePressed(self, event):
        # The log-in button opens the browser; anything else falls
        # through to the shared nav-bar handling in Page.
        x, y = event.x, event.y
        inpFieldVertR, inpFieldHorizR, butVertR, butHorizR = 10, 100, 20, 60
        if (self.butX - butHorizR < x < self.butX + butHorizR and
                self.butY - butVertR < y < self.butY + butVertR):
            browser = webbrowser.get()
            browser.open_new_tab("https://www.coinbase.com")
        else:
            super(HomePage, self).onMousePressed(event)
    def onMouseMotion(self, event):
        # no hover behavior on the home page
        pass
    def onKeyPressed(self, event):
        # no keyboard behavior on the home page
        pass
class PredictPage(Page):
    """Buy/sell/wait advisor page.

    Asks for the user's intention (buy / sell / flexible), then
    classifies the live spot rate against support and resistance
    levels derived from recent local minima/maxima, and plots the past
    day's prices with a least-squares trend line.

    Bug fixes relative to the original:
    - getWaitMessageForOppositeTrendAndIntention read `message` before
      assignment for the flexible intention (UnboundLocalError).
    - getSellPredictionWithFreezeForIncreasingTrend referenced wby2/h,
      which were locals of its caller (NameError).
    - prediction() left wait/buy/sell unset when the spot rate fell in
      the margin band around the resistance line.
    """
    def __init__(self, change):
        super(PredictPage, self).__init__(change)
        self.predict = True
        self.intention, self.intentionRecorded, self.trend = None, False, None
        self.frozen = False
        # geometry of the one-day regression chart
        self.originX = float(self.pageWidth)/4
        self.originY = float(self.pageHeight)*3/4
        self.horizPixelLimit = self.pageWidth/2
        self.vertPixelLimit = self.pageHeight/2
        path = "tempDir" + os.sep + "bitcoinHistory2.txt"
        # xi: seconds before now (non-positive, newest first); yi: prices
        (self.xi, self.yi) = self.getPastOneDayData(path)
        self.ymax, self.ymin = max(self.yi) + 5, min(self.yi) - 5 # in dollars
        self.xmax = -1 * self.xi[-1] # in seconds
        self.horizScalingFactor = float(self.horizPixelLimit)/self.xmax
        self.vertScalingFactor = (float(self.vertPixelLimit)/(self.ymax -
            self.ymin))
        self.initializeFrozenAndPromptVariables()
    def initializeFrozenAndPromptVariables(self):
        # Position of the FREEZE button and the prompt flags it controls.
        wby2, h, spacing = 50, 40, 100
        self.freezeX1 = self.pageWidth/2 - wby2
        self.freezeX2 = self.pageWidth/2 + wby2
        self.freezeY1 = self.pageHeight - spacing - h/2
        self.freezeY2 = self.pageHeight - spacing + h/2
        self.promptToBuy, self.promptToSell = False, False
    def draw(self, canvas, spotRate):
        # Before the intention is recorded, show the banner and prompt;
        # afterwards show the recommendation plus the regression chart.
        if self.chartIntermediate:
            self.drawLoadingScreen(canvas)
        elif self.data:
            self.drawLoadingScreen(canvas)
        else:
            super(PredictPage, self).draw(canvas, spotRate)
            if not self.intentionRecorded:
                self.drawBanner(canvas)
                self.displaySpotRateInSnow(canvas, spotRate)
                self.drawWhenIntentionNotRecorded(canvas)
            else:
                self.displaySpotRateInCorner(canvas, spotRate)
                # wait/buy/sell were set by prediction() in onKeyPressed
                if self.wait:
                    self.drawWaitPrediction(canvas)
                elif self.buy:
                    self.drawBuyPrediction(canvas)
                elif self.sell:
                    self.drawSellPrediction(canvas)
                self.plotLinRegChart(canvas)
                self.showLegend(canvas)
    def showLegend(self, canvas):
        # Legend to the right of the chart, color-keyed to the lines.
        startX = self.originX + self.horizPixelLimit
        canvas.create_text(startX, 200, anchor = W,
            text = "Resistance Line: $ " + str(self.resistanceLine),
            fill = rgbString(0, 100, 0))
        canvas.create_text(startX, 300, anchor = W,
            text = ("Linear regression curve: \ny = "+str(self.slope)+"x + " +
                str(self.intercept)), fill = "blue")
        canvas.create_text(startX, 400, anchor = W,
            text = "Support Line: $ " + str(self.supportLine),
            fill = "red")
    def drawWaitPrediction(self, canvas):
        # Pick the wait message that matches the user's intention and
        # the direction of the recent trend.
        self.trend = self.determineRecentTrend()
        if ((self.intention == "b" or self.intention == "f") and
                self.trend == "decreasing"):
            message = self.getWaitMessageForSimilarTrendAndIntention()
        elif ((self.intention == "b") and self.trend == "increasing"):
            message = self.getWaitMessageForOppositeTrendAndIntention()
        elif (self.intention == "s" and self.trend == "decreasing"):
            message = self.getWaitMessageForOppositeTrendAndIntention()
        elif ((self.intention == "s" or self.intention == "f") and
                self.trend == "increasing"):
            message = self.getWaitMessageForSimilarTrendAndIntention()
        canvas.create_text(self.pageWidth/2, self.pageHeight/8, text = message,
            font = "Helvetica 14 bold")
    def getWaitMessageForSimilarTrendAndIntention(self):
        # decreasing -> buy
        # increasing -> sell
        if self.trend == "decreasing":
            limitLine = str(self.supportLine)
        else:
            limitLine = str(self.resistanceLine)
        if self.intention == "b" or self.intention == "f":
            activity = "buy"
        else:
            activity = "sell"
        behavior = "drop" if self.trend == "decreasing" else "rise"
        message = ("Please wait for a while, the price is in a %s\n" +
            "trend. As the prices %s further upto $%s, \n" +
            "you should %s.") %(self.trend, behavior, limitLine, activity)
        return message
    def getWaitMessageForOppositeTrendAndIntention(self):
        # decreasing -> sell
        # increasing -> buy
        if self.intention == "f":
            # Bug fix: message must start as "" -- it was previously
            # read before assignment (UnboundLocalError) for the
            # flexible intention.
            message = ""
            for intent in "bs":
                message += self.setValuesAndGetMessage(intent)
            return message
        else:
            message = self.setValuesAndGetMessage(self.intention)
            return message
    def setValuesAndGetMessage(self, intent):
        # Build the "not a bad time, but..." message for one intent.
        activity = "buy" if intent == "b" else "sell"
        behavior = "rise" if self.trend == "decreasing" else "drop"
        hilo = "high" if self.trend == "decreasing" else "low"
        if self.trend == "decreasing":
            limitLine = str(self.resistanceLine)
        else:
            limitLine = str(self.supportLine)
        message = ("At the moment, prices are %s. This is not a\n" +
            " bad time to %s, but it may not be a very good time to %s \n" +
            " as we anticipate the price to %s further than the current\n" +
            " price eventually, i.e. at least as %s as $%s \n") %(self.trend,
            activity, activity, behavior, hilo, limitLine)
        return message
    def drawBuyPrediction(self, canvas):
        # The spot rate is near/below the support line: recommend buying,
        # phrased according to intention and trend.
        self.trend = self.determineRecentTrend()
        if ((self.intention == "b" or self.intention == "f")
                and self.trend == "decreasing"):
            message = self.getBuyPredictionWithFreezeForDecreasingTrend(canvas)
        elif ((self.intention == "b" or self.intention == "f")
                and self.trend == "increasing"):
            message = ("Current price is low, but it's rising. BUY NOW!")
        elif (self.intention == "s" and self.trend == "decreasing"):
            message = self.getBuyPredictionForSellIntentionAndDecreasingTrend()
        elif (self.intention == "s" and self.trend == "increasing"):
            message = ("Prices are lower than usual right now, and increasing."
                + "\n This is the time to wait to sell. Although you want to" +
                " sell," + "\nthis is a great time to buy.")
        canvas.create_text(self.pageWidth/2, self.pageHeight/8, text = message,
            font = "Helvetica 14 bold")
    def getBuyPredictionWithFreezeForDecreasingTrend(self, canvas):
        # Offer a FREEZE button so the user can be prompted at the
        # bottom of the dip.
        message = ("This is a good time to buy. But the trend is\n" +
            " decreasing, so prices will fall further. Click FREEZE if\n" +
            " you want to be prompted when to buy." )
        wby2, h = 50, 40
        canvas.create_rectangle(self.freezeX1, self.freezeY1,
            self.freezeX2, self.freezeY2, fill = rgbString(30, 104, 255))
        canvas.create_text(self.freezeX1 + wby2, self.freezeY1 + h/2,
            text = "FREEZE!", fill = "snow")
        self.promptToBuy = True
        return message
    def getBuyPredictionForSellIntentionAndDecreasingTrend(self):
        message = ("This is not a bad time to sell because prices are\n" +
            " decreasing, but we anticipate" + " the price to rise as\n" +
            " high as " + str(self.resistanceLine) + " eventually.\n" +
            " Although you want to sell, this might be a good\n" +
            " time to buy or wait for the prices to fall further.")
        return message
    def drawSellPrediction(self, canvas):
        # The spot rate is near/above the resistance line: recommend
        # selling, phrased according to intention and trend.
        self.trend = self.determineRecentTrend()
        if ((self.intention == "s" or self.intention == "f") and
                self.trend == "decreasing"):
            message = ("Current price is high, but it's dropping. SELL NOW!")
        elif ((self.intention == "s" or self.intention == "f") and
                self.trend == "increasing"):
            message = self.getSellPredictionWithFreezeForIncreasingTrend(canvas)
        elif self.intention == "b" and self.trend == "decreasing":
            message = ("Prices are higher than usual right now, and decreasing."
                + " \nThis is the time to wait to buy. Although you want to" +
                " buy, \nthis is a great time to sell.")
        elif self.intention == "b" and self.trend == "increasing":
            message = self.getSellPredictionForBuyIntentionAndIncreasingTrend()
        canvas.create_text(self.pageWidth/2, self.pageHeight/8, text = message,
            font = "Helvetica 14 bold")
    def getSellPredictionWithFreezeForIncreasingTrend(self, canvas):
        # Bug fix: wby2/h used to be locals of the caller
        # (drawSellPrediction) and were unbound here, raising NameError
        # the first time this branch was drawn.
        wby2, h = 50, 40
        message = ("This is a good time to sell. But the price is \n" +
            "increasing, so prices will rise further. Click FREEZE if\n" +
            " you want to be prompted when to sell.")
        canvas.create_rectangle(self.freezeX1, self.freezeY1,
            self.freezeX2, self.freezeY2, fill = rgbString(30, 104, 255))
        canvas.create_text(self.freezeX1 + wby2, self.freezeY1 + h/2,
            text = "FREEZE!", fill = "snow")
        self.promptToSell = True
        return message
    def getSellPredictionForBuyIntentionAndIncreasingTrend(self):
        message = ("This is not a bad time to buy because prices are\n" +
            " increasing, but we anticipate" + " the price to fall as\n" +
            " low as " + str(self.supportLine) + " eventually.\n" +
            " Although you want to buy, this might be a good\n" +
            " time to sell or wait for the prices to rise further.")
        return message
    def drawWhenIntentionNotRecorded(self, canvas):
        # Prompt the user to pick an intention with the B/S/F keys.
        intentMessage = ("What do you intend to do?\n" +
            "Press B if you intend to buy\n"+"Press S if you intend to sell\n"+
            "Press F if you're flexible.")
        canvas.create_text(self.pageWidth/2, self.pageHeight/2,
            text = intentMessage, font = "Georgia 20 bold")
    def onKeyPressed(self, event):
        # B/S/F records the intention once and triggers the prediction.
        if not self.intentionRecorded:
            if event.char == "b" or event.char == "s" or event.char == "f":
                self.intention = event.char
                self.intentionRecorded = True
                self.prediction()
    def onMousePressed(self, event):
        # The FREEZE button (only meaningful once an intention exists)
        # arms the prompt; everything else goes to the shared nav bar.
        x, y = event.x, event.y
        if (self.intentionRecorded and
                (self.freezeX1 < x < self.freezeX2) and
                (self.freezeY1 < y < self.freezeY2)):
            self.frozen = True
        else:
            super(PredictPage, self).onMousePressed(event)
    def prediction(self):
        # Classify the live spot rate against the support/resistance
        # band to decide which recommendation draw() should render.
        self.resistanceLine = self.getResistanceLine()
        self.supportLine = self.getSupportLine()
        self.spotRate = float(getSpotRate())
        margin = 0.1 # in dollars
        if (self.supportLine + margin <= self.spotRate <=
                self.resistanceLine - margin):
            self.wait, self.buy, self.sell = True, False, False
        elif self.spotRate < self.supportLine + margin:
            self.wait, self.buy, self.sell = False, True, False
        else:
            # Bug fix: this used to be `elif spotRate > resistance +
            # margin`, which left wait/buy/sell unset (crashing draw())
            # whenever the rate sat inside the margin band around the
            # resistance line.
            self.wait, self.buy, self.sell = False, False, True
    def onMouseMotion(self, event):
        # no hover behavior on the predict page
        pass
    def getPastOneDayData(self, filename):
        # sifts through the file and creates two arrays of time coordinate and
        # varying bitcoin price. This is to implement the short term linear rgn
        # Returns (xi, yi): xi are negative "seconds before now" values
        # (newest first, starting at 0 for the live rate), yi prices.
        contents = makeFileIntoArray(filename)
        now, then, idx = datetime.now(), datetime.now(), 0
        diff, xi, yi = now - then, [ ], [ ]
        xi += [0]
        yi += [float(getSpotRate())]
        while diff.days < 1:
            yrIdx, mIdx, dIdx, hIdx, minIdx, priceIdx = 4, 5, 8, 10, 13, 15
            year = int(contents[idx][0:yrIdx])
            month = int(contents[idx][mIdx:mIdx+2])
            day = int(contents[idx][dIdx:dIdx+2])
            hour = int(contents[idx][hIdx:hIdx+2])
            minute = int(contents[idx][minIdx:minIdx+2])
            price = float(contents[idx][priceIdx:])
            then = datetime(year, month, day, hour, minute)
            diff = now - then
            idx += 1
            xi += [-1*diff.seconds]
            yi += [price]
        # do not consider last element because that was beyond a day old
        return (xi[:-1], yi[:-1])
    def linearRegression(self, xi, yi):
        # returns the equation of the line that best approximates all sets of
        # points (xi, yi), where xi = time coordinate, yi = bitcoin price
        matXi = [[0] for i in xrange(len(xi))]
        for i in xrange(len(xi)):
            matXi[i] = [xi[i]]
        X = Matrix(len(xi), 1, matXi)
        X = X.append(1)
        y = Vector(len(yi), yi)
        lstsqVector = leastSquares(X,y)
        # Performs a technique called least squares approximation in linear alg
        return lstsqVector.entries
    def plotLinRegChart(self, canvas):
        # plots a graph of bitcoin variation over the past one day
        # self.xi is going from most recent timestamp to oldest timestamp
        # we want to plot the chart in reverse order.
        # first translate all the entries of xi. translation will be done by
        # adding self.xi[-1]
        xi = self.translate() # translated
        # all entries are now non negative in decreasing order.
        # we can plot as (xi, yi) now.
        oldScreenX = self.originX
        oldScreenY = (self.originY - self.vertScalingFactor *
            (self.yi[-1] - self.ymin))
        # traversing xi in reverse order now
        for index in xrange(-2, -len(xi)-1, -1):
            chartX, chartY = xi[index], self.yi[index] - self.ymin
            screenX = self.originX + chartX * self.horizScalingFactor
            screenY = self.originY - chartY * self.vertScalingFactor
            canvas.create_oval(screenX - 1, screenY - 1, screenX + 1, screenY+1)
            canvas.create_line(screenX, screenY, oldScreenX, oldScreenY)
            oldScreenX, oldScreenY = screenX, screenY
        self.drawLinRegScaledAxes(canvas)
        self.drawLinRegCurve(canvas)
        self.plotResistanceLine(canvas)
        self.plotSupportLine(canvas)
    def drawLinRegScaledAxes(self, canvas):
        # Axes for the one-day chart: "One day back" at the left edge,
        # today's date at the right.
        marginY = 20
        canvas.create_line(self.originX, self.originY,
            self.originX + self.horizPixelLimit, self.originY)
        canvas.create_line(self.originX, self.originY,
            self.originX, self.originY - self.vertPixelLimit)
        canvas.create_text(self.originX, self.originY + marginY,
            text = "One day back")
        canvas.create_text(self.originX - self.xi[-1] * self.horizScalingFactor,
            self.originY + marginY, text = self.format(date.today()))
        self.hashYAxis(canvas)
    def drawLinRegCurve(self, canvas):
        # Overlay the least-squares line between its values at the
        # oldest timestamp (x = originX) and at "now" (x = right edge).
        lineVector = self.linearRegression(self.xi, self.yi)
        slope, intercept = lineVector[0], lineVector[1]
        xi = self.translate()
        oldScreenX = self.originX
        oldScreenY = (self.originY -
            (slope * self.xi[-1] + intercept - self.ymin) *
            self.vertScalingFactor)
        screenX = self.originX + xi[0] * self.horizScalingFactor
        screenY = (self.originY - (intercept - self.ymin) *
            self.vertScalingFactor)
        canvas.create_line(screenX, screenY, oldScreenX, oldScreenY,
            fill = "blue", width = 4)
    def plotResistanceLine(self, canvas):
        # Horizontal dark-green line at the resistance level.
        self.resistanceLine = self.getResistanceLine()
        y = (self.originY - (self.resistanceLine - self.ymin) *
            self.vertScalingFactor)
        canvas.create_line(self.originX, y,
            self.originX + self.horizPixelLimit, y,
            fill = rgbString(0, 100, 0), width = 4)
    def plotSupportLine(self, canvas):
        # Horizontal red line at the support level.
        self.supportLine = self.getSupportLine()
        y = (self.originY - (self.supportLine - self.ymin) *
            self.vertScalingFactor)
        canvas.create_line(self.originX, y,
            self.originX + self.horizPixelLimit, y,
            fill = "red", width = 4)
    def hashYAxis(self, canvas):
        # Override: the one-day chart only labels its min and max.
        margin = 30
        for price in [self.ymin, self.ymax]:
            canvas.create_text(self.originX - margin,
                self.originY - self.vertScalingFactor*(float(price)-self.ymin),
                text = "$ "+str(float(price)))
    def format(self, date):
        # Format a datetime.date as e.g. "Jan 5".
        months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
            "Oct", "Nov", "Dec"]
        month = months[date.month - 1]
        day = date.day
        return str(month) + " " + str(day)
    def translate(self):
        # Shift self.xi (non-positive, newest first) so the oldest
        # sample maps to 0 and all entries are non-negative.
        translatingFactor = -1 * self.xi[-1]
        xi = [0 for i in xrange(len(self.xi))]
        for i in xrange(len(self.xi)):
            xi[i] = self.xi[i] + translatingFactor
        return xi
    def determineRecentTrend(self):
        # returns a string determining increasing or decreasing trend
        # (sign of the least-squares slope over the past day); caches
        # slope/intercept for the legend.
        arr = self.linearRegression(self.xi, self.yi)
        self.slope, self.intercept = arr[0], arr[1]
        if self.slope < 0:
            return "decreasing"
        else:
            return "increasing"
    def displaySpotRateInSnow(self, canvas, spotRate):
        # Top-right spot rate in white, for use over the blue banner.
        canvas.create_text(self.pageWidth, 0, anchor = NE,
            text = "$ " + spotRate, font = "Helvetica 50 bold",
            fill = "snow")
class ChartPage(Page):
def __init__(self, change):
super(ChartPage, self).__init__(change)
self.chart = True
self.want1Year = True
self.want6Months = False
self.want3Months = False
self.want1Month = False
self.tooltip = False
self.tooltipX = None
self.tooltipY = None
self.tooltipText = None
self.justStarted = False
self.initializeAndMemoize()
self.setButtonLocations()
def initializeAndMemoize(self):
self.lengthOfXAxisInPixels, self.lengthOfYAxisInPixels = 1000, 300
self.chartWidth = self.lengthOfXAxisInPixels
self.chartHeight = self.lengthOfYAxisInPixels
leftMargin, botMargin = 150, 150
self.originX = leftMargin
self.originY = self.pageHeight - botMargin
filename = "tempDir" + os.sep + "bitcoinHistory2.txt"
(self.days1Year, self.prices1Year) = self.getOneYearData(filename)
(self.days6Months, self.prices6Months) = self.getNMonthsData(filename,6)
(self.days3Months, self.prices3Months) = self.getNMonthsData(filename,3)
(self.days1Month, self.prices1Month) = self.getNMonthsData(filename,1)
self.days, self.prices = self.days1Year, self.prices1Year
self.xmax, self.ymax = len(self.days), max(self.prices)
self.horizScalingFactor = float(self.lengthOfXAxisInPixels)/self.xmax
# pixel per day
self.vertScalingFactor = float(self.lengthOfYAxisInPixels)/self.ymax
# pixel per dollar
def setButtonLocations(self):
left, top, dist = 100, 50, 200
cx, cy = left, top
self.timeButY = cy # y coordinate common for all time buttons
self.oneYrButX = cx
cx += dist
self.sixMthButX = cx
cx += dist
self.thrMthButX = cx
cx += dist
self.oneMthButX = cx
def onMousePressed(self, event):
x, y = event.x, event.y
wby2, h = 50, 40
if self.timeButY - h/2 < y < self.timeButY + h/2:
if self.oneYrButX - wby2 < x < self.oneYrButX + wby2:
self.mousePressedFor1Year()
elif self.sixMthButX - wby2 < x < self.sixMthButX + wby2:
self.mousePressedFor6Months()
elif self.thrMthButX - wby2 < x < self.thrMthButX + wby2:
self.mousePressedFor3Months()
elif self.oneMthButX - wby2 < x < self.oneMthButX + wby2:
self.mousePressedFor1Month()
else:
super(ChartPage, self).onMousePressed(event)
def mousePressedFor1Year(self):
self.tooltip = False
self.want1Year = True
self.want6Months = False
self.want3Months = False
self.want1Month = False
def mousePressedFor6Months(self):
self.tooltip = False
self.want1Year = False
self.want6Months = True
self.want3Months = False
self.want1Month = False
def mousePressedFor3Months(self):
self.tooltip = False
self.want1Year = False
self.want6Months = False
self.want3Months = True
self.want1Month = False
def mousePressedFor1Month(self):
self.tooltip = False
self.want1Year = False
self.want6Months = False
self.want3Months = False
self.want1Month = True
    def onMouseMotion(self, event):
        """While hovering inside the chart, anchor a date/price tooltip
        to the data point nearest the cursor's x position."""
        x, y, space = event.x, event.y, 30
        if self.inChart(x, y):
            index = int(self.getDaysIndexFromChartX(x))
            chartY = self.getChartYFromPricesIndex(index)
            self.tooltip = True
            self.mouseX, self.mouseY = x, chartY
            # Tooltip body: the point's date and its dollar price.
            self.tooltipText = (self.display(self.days[index]) + "\n$" +
                str(self.prices[index]))
            self.tooltipX, self.tooltipY = x, chartY - space
    def draw(self, canvas, spotRate):
        """Render the chart page: range buttons, the selected chart, tooltip."""
        super(ChartPage, self).draw(canvas, spotRate)
        # Both the "intermediate" and "data" states show the loading screen
        # while background work finishes.
        if self.chartIntermediate: self.drawLoadingScreen(canvas)
        elif self.data: self.drawLoadingScreen(canvas)
        else:
            self.displaySpotRateInCorner(canvas, spotRate)
            self.makeButton(canvas, self.oneYrButX, self.timeButY,
                "Last one year")
            self.makeButton(canvas, self.sixMthButX, self.timeButY,
                "Last 6 months")
            self.makeButton(canvas, self.thrMthButX, self.timeButY,
                "Last 3 months")
            self.makeButton(canvas, self.oneMthButX, self.timeButY,
                "Last 1 month")
            # Plot whichever range flag is set (argument is the month count).
            if self.want1Year: self.plotChart(canvas, 12)
            elif self.want6Months: self.plotChart(canvas, 6)
            elif self.want3Months: self.plotChart(canvas, 3)
            elif self.want1Month: self.plotChart(canvas, 1)
            if self.tooltip:
                self.displayTooltip(canvas)
    def makeButton(self, canvas, cx, cy, stringOfText):
        """Draw a 100x40 blue button centered at (cx, cy) with white label text."""
        wby2, h = 50, 40
        canvas.create_rectangle(cx - wby2, cy - h/2, cx + wby2, cy + h/2,
            fill = rgbString(30, 104, 255),
            outline = rgbString(30, 104, 255))
        canvas.create_text(cx, cy, text = stringOfText, fill = "snow")
    def displayTooltip(self, canvas):
        """Mark the hovered data point and draw its date/price tooltip box."""
        rx, ry = 25, 15  # half-extents of the yellow tooltip rectangle
        canvas.create_oval(self.mouseX-3, self.mouseY-3,
            self.mouseX+3, self.mouseY+3, fill = "blue")
        canvas.create_rectangle(self.tooltipX - rx, self.tooltipY - ry,
            self.tooltipX + rx, self.tooltipY + ry, fill = "yellow")
        canvas.create_text(self.tooltipX, self.tooltipY,
            text = self.tooltipText, font = "Mono 10 bold")
    def onKeyPressed(self, event):
        """This page defines no keyboard shortcuts."""
        pass
class PersonalizedCharts(Page):
    """Page that overlays the user's own buy/sell history, plus support and
    resistance lines, on the 1-month chart.

    Purchase data is read from tempDir/userData.txt (format described by
    displayInstructions).  The user first confirms with an OK button; H
    toggles the instructions screen.
    """
    def __init__(self, change):
        super(PersonalizedCharts, self).__init__(change)
        wby2, h, spacing = 50, 40, 100
        # Bounding box of the OK button, centered near the bottom of the page.
        self.okX1 = self.pageWidth/2 - wby2
        self.okX2 = self.pageWidth/2 + wby2
        self.okY1 = self.pageHeight - spacing - h/2
        self.okY2 = self.pageHeight - spacing + h/2
        self.okPressed = False
        self.showInstructions = False
        self.tooltip = False
        self.tooltipX = None
        self.tooltipY = None
        self.tooltipText = None
    def draw(self, canvas, spotRate):
        """Render the page in one of its states: loading, pre-OK prompt,
        instructions, or the annotated 1-month chart."""
        if self.chartIntermediate:
            self.drawLoadingScreen(canvas)
        elif self.data:
            self.drawLoadingScreen(canvas)
        else:
            super(PersonalizedCharts, self).draw(canvas, spotRate)
            if not self.showInstructions:
                if not self.okPressed:
                    self.drawWhenOKNotPressed(canvas)
                    self.displaySpotRateInCorner(canvas, spotRate)
                else:
                    self.drawBanner(canvas)
                    self.displaySpotRateInCorner(canvas, spotRate)
                    # 1-month chart with the user's trades marked on it.
                    self.plotChart(canvas, 1)
                    self.plotResistanceLine(canvas)
                    self.plotSupportLine(canvas)
                    self.getPurchaseHistory()
                    self.plotBuyPoints(canvas)
                    self.plotSellPoints(canvas)
                    w, h = 200, 50
                    if self.tooltip:
                        canvas.create_rectangle(self.tooltipX - w/2,
                            self.tooltipY - h/2,
                            self.tooltipX + w/2,
                            self.tooltipY + h/2,
                            fill = "yellow")
                        canvas.create_text(self.tooltipX, self.tooltipY,
                            text = self.tooltipText)
            else:
                self.drawBanner(canvas)
                self.displayInstructions(canvas)
                self.displaySpotRateInCorner(canvas, spotRate)
    def drawWhenOKNotPressed(self, canvas):
        """Draw the pre-confirmation screen (prompt plus OK button).

        NOTE(review): drawWhenInstructionsNotShown already draws the OK
        button; the rectangle/text below paint it a second time with the
        label text anchored differently.
        """
        self.drawBanner(canvas)
        wby2, h = 50, 40
        self.drawWhenInstructionsNotShown(canvas)
        canvas.create_rectangle(self.okX1, self.okY1,
            self.okX2, self.okY2,
            fill = rgbString(30, 104, 255))
        canvas.create_text(self.pageWidth/2, self.pageHeight - 100,
            text = "OK!", fill = "snow")
        # because spacing = 100
    def getPurchaseHistory(self):
        """Parse tempDir/userData.txt into self.balance and
        self.purchaseHistory (a 2d list: [sign+amount, price, date])."""
        path = "tempDir" + os.sep + "userData.txt"
        with open(path, "rt") as fin:
            self.purchaseHistory = fin.read()
        self.purchaseHistory = self.purchaseHistory.split("\n")
        # First line of the file is the user's BTC balance.
        self.balance = float(self.purchaseHistory[0])
        for i in xrange(1, len(self.purchaseHistory)):
            self.purchaseHistory[i] = self.purchaseHistory[i].split(",")
            # "+1.3481@500.12,2014-11-23" -> ["+1.3481", "500.12", "2014-11-23"]
            newArray = (self.purchaseHistory[i][0].split("@") +
                [self.purchaseHistory[i][1]])
            self.purchaseHistory[i-1] = newArray
        # self.purchaseHistory is a 2d list.
        self.purchaseHistory = self.purchaseHistory[:-1]
        # to ignore last entry, which was copied to -2'th entry
    def plotBuyPoints(self, canvas):
        """Draw a red dot at each purchase ('+' entries) on the chart."""
        for i in xrange(len(self.purchaseHistory)):
            if self.purchaseHistory[i][0][0] == '+':
                priceBoughtAt = float(self.purchaseHistory[i][1])
                dateBoughtAt = self.purchaseHistory[i][2]
                recordedIndex = 0
                # Linear scan for the day matching the recorded date.
                for j in xrange(len(self.days)):
                    if str(self.days[j]) == dateBoughtAt:
                        recordedIndex = j
                        break
                canvas.create_oval(
                self.originX + recordedIndex * self.horizScalingFactor - 5,
                self.originY - priceBoughtAt * self.vertScalingFactor - 5,
                self.originX + recordedIndex * self.horizScalingFactor + 5,
                self.originY - priceBoughtAt * self.vertScalingFactor + 5,
                fill = "red"
                )
    def plotSellPoints(self, canvas):
        """Draw a green dot at each sale ('-' entries) on the chart."""
        for i in xrange(len(self.purchaseHistory)):
            if self.purchaseHistory[i][0][0] == '-':
                priceSoldAt = float(self.purchaseHistory[i][1])
                dateSoldAt = self.purchaseHistory[i][2]
                recordedIndex = 0
                for j in xrange(len(self.days)):
                    if str(self.days[j]) == dateSoldAt:
                        recordedIndex = j
                        break
                canvas.create_oval(
                self.originX + recordedIndex * self.horizScalingFactor - 5,
                self.originY - priceSoldAt * self.vertScalingFactor - 5,
                self.originX + recordedIndex * self.horizScalingFactor + 5,
                self.originY - priceSoldAt * self.vertScalingFactor + 5,
                fill = "green"
                )
    def plotResistanceLine(self, canvas):
        """Draw the horizontal resistance level as a thick green line."""
        self.resistanceLine = self.getResistanceLine()
        y = self.originY - self.resistanceLine * self.vertScalingFactor
        canvas.create_line(self.originX, y,
            self.originX + self.lengthOfXAxisInPixels, y,
            fill = "green", width = 4)
    def plotSupportLine(self, canvas):
        """Draw the horizontal support level as a thick red line."""
        self.supportLine = self.getSupportLine()
        y = self.originY - self.supportLine * self.vertScalingFactor
        canvas.create_line(self.originX, y,
            self.originX + self.lengthOfXAxisInPixels, y,
            fill = "red", width = 4)
    def drawWhenInstructionsNotShown(self, canvas):
        """Draw the data-entry prompt and the OK button."""
        dist = 100
        wby2, h = 50, 40
        message = ("Press H to know how to enter your purchase history\n" +
            "Click OK when you are done entering your data")
        canvas.create_text(self.pageWidth/2, self.pageHeight/2,
            text = message, font = "Helvetica 20 bold")
        canvas.create_rectangle(self.okX1, self.okY1, self.okX2, self.okY2,
            fill = rgbString(30, 104, 255))
        canvas.create_text(self.okX1 + wby2, self.okY1 + h/2,
            text = "OK!", fill = "snow")
    def displayInstructions(self, canvas):
        """Show the userData.txt format instructions (toggled with H)."""
        instructions = ("\n"+
            "Please enter your purchase history for the last one month\n" +
            " in the text file, userData.txt, in tempDir, as follows:\n"+
            "Enter your balance (in BTC) as the first line of the file.\n" +
            "If you bought BTC 1.3481 at $500.12 per BTC on Nov 23, 2014"+
            ",\n enter the details as follows:\n+1.3481@500.12,2014-11-23"+
            "\nIf you sold BTC 1.3481 at $500.12 per BTC on Nov 25, 2014," +
            "\n enter the details as follows: \n-1.3481@500.12,2014-11-25" +
            "\nPress H to go back")
        canvas.create_text(self.pageWidth/2, self.pageHeight/2,
            text = instructions, font = "Helvetica 20 bold")
    def onMousePressed(self, event):
        """Clicking OK confirms the data file; other clicks go to the base."""
        x, y = event.x, event.y
        if (not self.showInstructions and
            ((self.okX1 < x < self.okX2) and (self.okY1 < y < self.okY2))):
            self.okPressed = True
        else:
            super(PersonalizedCharts, self).onMousePressed(event)
    def onMouseMotion(self, event):
        """Show a tooltip when hovering over the support/resistance lines.

        NOTE(review): `index` is computed but never used here.
        """
        if self.okPressed:
            x, y = event.x, event.y
            if self.inChart(x, y):
                index = int(self.getDaysIndexFromChartX(x))
                if self.onResistanceLine(x, y):
                    self.mouseMotionOnResistanceLine(x, y)
                elif self.onSupportLine(x, y):
                    self.mouseMotionOnSupportLine(x, y)
                else:
                    self.tooltip = False
    def mouseMotionOnResistanceLine(self, x, y):
        """Prepare the tooltip describing the resistance level."""
        space = 30
        self.tooltipX = x
        self.tooltipY = (self.originY -
            self.resistanceLine * self.vertScalingFactor - space)
        self.tooltip = True
        self.tooltipText = ("Resistance Line: " + str(self.resistanceLine))
    def mouseMotionOnSupportLine(self, x, y):
        """Prepare the tooltip describing the support level."""
        space = 30
        self.tooltipX = x
        self.tooltipY = (self.originY -
            self.supportLine * self.vertScalingFactor - space)
        self.tooltip = True
        self.tooltipText = ("Support Line: " + str(self.supportLine))
    def onResistanceLine(self, x, y):
        """True if (x, y) lies within 4 px of the resistance line."""
        return (abs(y - int(self.originY -
            self.resistanceLine * self.vertScalingFactor)) < 4)
    def onSupportLine(self, x, y):
        """True if (x, y) lies within 4 px of the support line."""
        return (abs(y - int(self.originY -
            self.supportLine * self.vertScalingFactor)) < 4)
    def inChart(self, x, y):
        """True if (x, y) falls inside the plotted chart rectangle."""
        if (self.originX < x < self.originX + self.lengthOfXAxisInPixels and
            self.originY - self.lengthOfYAxisInPixels < y < self.originY):
            return True
        return False
    def onKeyPressed(self, event):
        """H toggles between the chart and the instructions screen."""
        if event.char == "h":
            self.showInstructions = not self.showInstructions
    def displaySpotRateInCorner(self, canvas, spotRate):
        """Show the current spot price in the page's top-right corner."""
        lineSpace = 100
        canvas.create_text(self.pageWidth, 0, anchor = NE,
            text = "$ " + spotRate, font = "Helvetica 50 bold",
            fill = "snow")
class Help(Page):
    """Help page: shows tempDir/about.txt, or tempDir/algoInfo.txt when the
    user presses A; B opens the Bitcoin Wikipedia article in a browser."""
    def __init__(self, change):
        super(Help, self).__init__(change)
        self.wantAlgorithm = False
    def draw(self, canvas, spotRate):
        """Render banner, spot rate, and either the help or algorithm text."""
        super(Help, self).draw(canvas, spotRate)
        self.drawBanner(canvas)
        self.displaySpotRateInCorner(canvas, spotRate)
        if self.wantAlgorithm:
            self.displayAlgorithmInfo(canvas)
        else:
            self.displayHelp(canvas)
    def displayHelp(self, canvas):
        """Load and display the general help text from tempDir/about.txt."""
        space = 20
        filename = "tempDir" + os.sep + "about.txt"
        with open(filename, "rt") as fin:
            self.helpMessage = fin.read()
        canvas.create_text(self.pageWidth/2, self.pageHeight/2 + space,
            text = self.helpMessage)
    def displayAlgorithmInfo(self, canvas):
        """Load and display the algorithm notes from tempDir/algoInfo.txt."""
        space = 50
        filename = "tempDir" + os.sep + "algoInfo.txt"
        with open(filename, "rt") as fin:
            self.algo = fin.read()
        canvas.create_text(self.pageWidth/2, self.pageHeight/2 + space,
            text = self.algo)
    def onMousePressed(self, event):
        """No page-specific click handling; defer to the base class."""
        super(Help, self).onMousePressed(event)
    def onMouseMotion(self, event):
        """No hover behavior on this page."""
        pass
    def onKeyPressed(self, event):
        """A toggles algorithm info; B opens the Bitcoin wiki article."""
        if event.char == "a":
            self.wantAlgorithm = not self.wantAlgorithm
        elif event.char == "b":
            browser = webbrowser.get()
            browser.open_new_tab("http://en.wikipedia.org/wiki/Bitcoin")
    def displaySpotRateInCorner(self, canvas, spotRate):
        """Show the current spot price in the page's top-right corner."""
        lineSpace = 100
        canvas.create_text(self.pageWidth, 0, anchor = NE,
            text = "$ " + spotRate, font = "Helvetica 50 bold",
            fill = "snow")
# SHORT TERM LINEAR REGRESSION IMPLEMENTED BELOW
# Entry point: construct the Tk application and enter its event loop.
# NOTE(review): run() blocks, so the self-tests defined below only execute
# after the application window is closed.
bitPredict = Application()
bitPredict.run()
def listsAlmostEqual(list1, list2):
    """Return True iff the lists are element-wise equal within tolerance.

    The lists must have the same length (asserted, as before).
    """
    assert len(list1) == len(list2)
    # Idiomatic rewrite of the index loop; short-circuits on first mismatch.
    return all(almostEqual(a, b) for a, b in zip(list1, list2))
def almostEqual(num1, num2, epsilon = 10 ** -6):
    """Return True iff num1 and num2 differ by less than epsilon."""
    return abs(num1 - num2) < epsilon
def testLeastSquares():
    """Check leastSquares against a known overdetermined system (Python 2)."""
    print "Testing leastSquares...",
    A, b = Matrix(3, 2, [[3, 1], [1, 1], [1, 2]]), Vector(3, [1, 1, 1])
    # Known least-squares solution of Ax ~= b for this system.
    assert listsAlmostEqual(leastSquares(A, b).entries, [1.0/5, 7.0/15])
    print "Passed!"
def testMatrixAndVectorClasses():
    """Driver for the Matrix/Vector unit checks below (Python 2)."""
    print "Testing Matrix and Vector Classes..."
    A = Matrix(3, 3,
        [
        [1, 2, 3],
        [4, 5, 6],
        [7, 8, 9]])
    B = Matrix(3, 3,
        [
        [1, 1, 2],
        [2, 3, 4],
        [5, 5, 7]])
    # I is the 3x3 identity; A is singular, B is invertible.
    I = Matrix(3, 3, [[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    scalar = 0.5
    testMatrixMultiplication(A, B)
    testMatrixScalarDivision(A, scalar)
    testDeterminant(A, B)
    testInverse(A, B, I)
def testMatrixMultiplication(A, B):
    """Check matrix*matrix, matrix*vector and matrix*scalar products (Python 2)."""
    C = A * B
    print "Testing matrix-matrix multiplication...",
    assert C.entries == [[20, 22, 31], [44, 49, 70], [68, 76, 109]]
    print "Passed!"
    print "Testing matrix-vector multiplication...",
    v = Vector(3, [9, 8, 7])
    # Matrix * Vector must yield a Vector with the expected entries.
    assert type(A * v) == Vector
    assert (A * v).entries == [46, 118, 190]
    print "Passed!"
    print "Testing matrix-scalar multiplication...",
    scalar = 0.5
    M = A * scalar
    assert type(M) == Matrix
    assert M.entries == [[0.5, 1.0, 1.5], [2.0, 2.5, 3.0], [3.5, 4.0, 4.5]]
    print "Passed!"
def testMatrixScalarDivision(A, scalar):
    """Check that dividing a Matrix by a scalar scales every entry (Python 2)."""
    print "Testing matrix-scalar division...",
    N = A / scalar
    assert type(N) == Matrix
    assert N.entries == [[2, 4, 6], [8, 10, 12], [14, 16, 18]]
    print "Passed!"
def testDeterminant(A, B):
    """Check determinant: A is singular (0), B has determinant -3 (Python 2)."""
    print "Testing determinant...",
    assert A.determinant() == 0
    assert B.determinant() == -3
    print "Passed!"
def testInverse(A, B, I):
print "Testing inverse...",
try:
A.inverse()
except:
pass
assert I.inverse().entries == I.entries
assert type(B.inverse()) == Matrix
assert (B.inverse().entries == [
[-1.0/3, -1, 2.0/3],
[-2, 1, 0],
[5.0/3, 0, -1.0/3]
])
print "Passed!"
# Run the least-squares self-test (executes after the GUI window closes).
testLeastSquares()
testMatrixAndVectorClasses() | [
"shantanuchhabra@Shantanus-MBP.wv.cc.cmu.edu"
] | shantanuchhabra@Shantanus-MBP.wv.cc.cmu.edu |
8c41b3c3d982dc9e35baa49bc9cc3669dade3e3f | c15f9b7da2476d26cbbfe23a9a4af158c8e902cb | /tests/test_utils.py | 8de1e037c40a9e461425db65f21787d9d96a3e68 | [
"MIT"
] | permissive | winterwolf32/JWT- | e0cff77a2a4c92b209d5a0f5cbfa40979818e692 | 34c552cab40c0f8c4370a26f1a0848182b8182f8 | refs/heads/debian | 2023-02-24T06:18:00.364230 | 2021-01-09T18:42:52 | 2021-01-09T18:42:52 | 331,849,653 | 0 | 0 | MIT | 2021-01-22T06:15:06 | 2021-01-22T05:49:56 | Python | UTF-8 | Python | false | false | 2,217 | py | """Test"""
import os
import pytest as pytest
from myjwt.Exception import InvalidJWT
from myjwt.Exception import InvalidJwtJson
from myjwt.utils import create_crt
from myjwt.utils import encode_jwt
from myjwt.utils import encoded_to_json
from myjwt.utils import HEADER
from myjwt.utils import is_valid_jwt
from myjwt.utils import is_valid_jwt_json
from myjwt.utils import jwt_to_json
from myjwt.utils import PAYLOAD
from myjwt.utils import SIGNATURE
# Shared fixtures: a malformed token, a valid unsigned ("alg": "none")
# token, and its decoded header/payload/signature parts.
invalid_jwt = "test.test"  # only two segments -> not a valid JWT
jwt = "eyJ0eXAiOiJKV1QiLCJhbGciOiJub25lIn0.eyJsb2dpbiI6ImF6In0."
encoded_string = "eyJ0eXAiOiJKV1QiLCJhbGciOiJub25lIn0"
header = {"typ": "JWT", "alg": "none"}
payload = {"login": "az"}
signature = ""
jwt_json = {
    HEADER: header,
    PAYLOAD: payload,
    SIGNATURE: signature,
}
def test_jwt_to_json_InvalidJWT():
    """
    jwt_to_json must raise InvalidJWT for a token without three segments.
    """
    with pytest.raises(InvalidJWT):
        jwt_to_json(invalid_jwt)
def test_jwt_to_json():
    """
    jwt_to_json must split a valid token into its header/payload/signature
    dict, matching the fixtures above.
    """
    jwt_json = jwt_to_json(jwt)
    assert type(jwt_json) == dict
    assert list(jwt_json.keys()) == [HEADER, PAYLOAD, SIGNATURE]
    assert jwt_json[HEADER] == header
    assert jwt_json[PAYLOAD] == payload
    # Unsigned token -> empty signature segment.
    assert jwt_json[SIGNATURE] == ""
def test_encoded_to_json():
    """
    encoded_to_json must decode a base64url segment into its dict form.
    """
    jsonDecoded = encoded_to_json(encoded_string)
    assert type(jsonDecoded) == dict
    assert jsonDecoded == header
def test_encode_jwt():
    """
    encode_jwt must reject an empty jwt-json dict and round-trip a valid
    one back to the original token (minus the trailing dot).
    """
    with pytest.raises(InvalidJwtJson):
        encode_jwt({})
    new_jwt = encode_jwt(jwt_json)
    assert new_jwt + "." == jwt
def test_is_valid_jwt():
    """
    is_valid_jwt must accept the well-formed fixture token.
    """
    assert is_valid_jwt(jwt)
def test_is_valid_jwt_json():
    """
    is_valid_jwt_json must accept the well-formed fixture dict.
    """
    assert is_valid_jwt_json(jwt_json)
def test_create_crt():
    """
    create_crt must write a self-signed certificate and its private key.
    NOTE(review): leaves selfsigned.crt and private.pem in the working
    directory; a teardown that removes them would keep runs isolated.
    """
    create_crt()
    assert os.path.exists("selfsigned.crt")
    assert os.path.exists("private.pem")
| [
"matthieubouamama@gmail.com"
] | matthieubouamama@gmail.com |
1b7bc60e5f96d7021876fa2984fee6dbd694c9b2 | 29eca8f09cda1b5a744d71405e866758b298eb3f | /sprint_01_[Types,Loops,Conditions,Collections,Modules]/Task_7.py | 8a3c59f68dec4b3091bc76449975614d7fa827df | [] | no_license | JustSayHelloWorld/python-coding-marathon | c8144b9832e7860b832cdfa223a872f460aaef64 | f2e4a60af668954c794112ad80eb13ae88f7cd76 | refs/heads/main | 2023-03-06T19:15:49.163394 | 2021-02-23T00:46:29 | 2021-02-23T00:46:29 | 313,949,762 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,680 | py | """Nicky and Dev work in a company where each member is given his income in the form of points. On Nicky's birthday, Dev decided to give some of his points as a gift. The number of points Dev is gifting is the total number of visible zeros visible in the string representation of the N points he received this month.
Let's say that Nicky got M points from Dev. By the company law, if M is even and greater than 0, Nicky must give one point to the company. If M is odd, the company gives Nicky one additional point.
Given the number of points N Dev received this month, calculate the number of points Nicky will receive as a gift and return this number in its binary form.
Note: visible zeros are calculated as follows:
0, 6 and 9 contain 1 visible zero each;
8 contains 2 visible zeros;
other digits do not contain visible zeros.
Example
For N = "565", the output should be
Cipher_Zeroes(N) = 10.
There's one visible zero in "565". Since one is odd, the company will give an additional point, so Nicky will receive 2 points.
210 = 102, so the output should be 10.
Input/Output
[input] string N
The number of points Dev received this month.
Constraints:
1 ≤ N ≤ 101000.
[output] integer
The number of points Nicky will receive in the binary representation."""
def Cipher_Zeroes(N):
zero_counter = N.count("0")
six_counter = N.count("6")
nine_counter = N.count("9")
eight_counter = N.count("8") * 2
given_point = zero_counter + six_counter + nine_counter + eight_counter
if given_point > 0 and given_point % 2 == 0:
given_point -= 1
elif given_point > 0:
given_point += 1
return bin(given_point).replace("0b", "") | [
"Useresps5007717f"
] | Useresps5007717f |
7153a4c17d679b6a69da201b4c53f56cfe0c5619 | 517a904955033092aec11288151d725548226abc | /pandas_tutorial/data_advance/df_column_order.py | bd0d1a7f8f2a20eab540746de7dffb1501d42be3 | [] | no_license | MinSu-Kim/python_tutorial | ae0a4e3570aa4cb411626cefbc031777364764d5 | ed0c08892822d7054161c9e8f98841370868e82d | refs/heads/master | 2021-06-16T16:15:30.349719 | 2021-05-26T04:59:47 | 2021-05-26T04:59:47 | 207,266,202 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import seaborn as sns
# Tutorial: select a 5-row 'survived'..'age' slice of the seaborn Titanic
# dataset and demonstrate several ways of reordering DataFrame columns.
print("# titanic 데이터셋의 부분을 선택하여 데이터프레임 만들기")
titanic = sns.load_dataset('titanic')
df = titanic.loc[0:4, 'survived':'age']
print(df, '\n')
print("# 열 이름의 리스트 만들기")
columns = list(df.columns.values) # original column names
# NOTE(review): stray debug print ("ssss") left in the tutorial output.
print("ssss", sorted(columns, reverse=True), type(sorted(columns, reverse=True)))
print("# 열 이름을 알파벳 순으로 정렬하기")
columns_sorted = sorted(columns) # sorted alphabetically
df_sorted = df[columns_sorted]
print(df_sorted, '\n')
print(columns_sorted, '\n')
print("# 열 이름을 기존 순서의 정반대 역순으로 정렬하기")
# NOTE(review): this is *descending alphabetical* order, not the reverse of
# the original column order that the printed heading describes.
columns_reversed = list(sorted(columns, reverse=True))
df_reversed = df[columns_reversed]
print(df_reversed, '\n')
print(columns_reversed, '\n')
print("# 열 이름을 사용자가 정의한 임의의 순서로 재배치하기")
columns_customed = ['pclass', 'sex', 'age', 'survived']
df_customed = df[columns_customed]
print(df_customed)
| [
"net94.teacher@gmail.com"
] | net94.teacher@gmail.com |
abfed8d8ae22cce4407a9b4c8daf17be14dc23d7 | 81fc35981a1a77f5a4af2f333be7a01e8c00959b | /NOT.py | 60dd196d039a901551a417d3b971dc001e4c1a94 | [] | no_license | mellow-d/GameOfLife | ba01ee508ff807f12dda64ec0b0c63133812947d | 2c00ae7d58ccfc4a4e1d04a8694081a72d855193 | refs/heads/master | 2021-10-08T19:56:39.758529 | 2018-12-17T04:54:44 | 2018-12-17T04:54:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from spawners import *
import sys
# N = 50
# Cell states for the Game of Life board: 255 = alive, 0 = dead.
ON = 255
OFF = 0
vals = [ON, OFF]
# populate grid with random on/off - more off than on
#grid = np.random.choice(vals, 80 * 158, p=[0.2, 0.8]).reshape(80, 158)
def numOfNeighbors(x, y):
    """Count the live (ON) neighbors of cell (x, y) on the global grid.

    The board is treated as bounded: positions outside the grid count as
    dead.
    """
    total = 0
    for i in range(x - 1, x + 2):
        for j in range(y - 1, y + 2):
            # Bug fix: a negative index silently wraps to the far edge of
            # the numpy array, so cells on the top/left border wrongly
            # counted neighbors from the opposite side.  Treat negative
            # positions as off-board instead.
            if i < 0 or j < 0:
                continue
            try:
                if grid[i, j] == ON:
                    total += 1
            except IndexError:
                pass
    # The loops above also visited (x, y) itself; remove its contribution.
    if grid[x, y] == ON:
        total -= 1
    return total
def update(data):
    """Advance the global 40x80 Game of Life board by one generation.

    Conway's rules: a live cell survives with 2 or 3 live neighbors; a
    dead cell becomes alive with exactly 3.  Pushes the new board into the
    matplotlib image and returns the artist list for FuncAnimation.
    """
    global grid
    newGrid = grid.copy()
    for i in range(40):
        for j in range(80):
            total = numOfNeighbors(i, j)
            if grid[i, j] == ON:
                # Under/over-population kills the cell.
                if (total < 2) or (total > 3):
                    newGrid[i, j] = OFF
            else:
                # Reproduction: exactly three neighbors spawns a cell.
                if total == 3:
                    newGrid[i, j] = ON
    mat.set_data(newGrid)
    grid = newGrid
    return [mat]
# set up animation
if __name__ == '__main__':
    # Seed the board with a few patterns from spawners; when argv[1] == 0
    # the "stopper" pattern is added as well.
    toSpawn = []
    toSpawn.append(spawnGlider(1, 0))
    toSpawn.append(spawnReverseGlider(3, 40))
    toSpawn.append(spawnEaterNot())
    if int(sys.argv[1]) == 0:
        toSpawn.append(spawnStopperNot())
    grid = np.zeros((40, 80)).reshape(40, 80)
    # Each spawner yields a list of (row, col) cells to switch on.
    for listemt in toSpawn:
        for emt in listemt:
            grid[emt[0], emt[1]] = ON
    fig, ax = plt.subplots()
    mat = ax.matshow(grid)
    ani = animation.FuncAnimation(fig, update, interval = 1, save_count = 5)
    plt.show()
| [
"majinsparks@gmail.com"
] | majinsparks@gmail.com |
7887f18e6d71f9eaf61d02aa2d134eb927a02aec | d3638776a2ce455eb42f29c9c06e267392b6815a | /reading/book/migrations/0007_auto_20180703_2156.py | d188bede58a1d3c4590c803e1b5a26ae3aa7e460 | [
"MIT"
] | permissive | Family-TreeSY/reading | abce1b5e6047c72867839303ab0181c7a4997913 | a35d1242ce3a7303fe125c11de8125bd9afbdb3c | refs/heads/master | 2020-03-20T04:53:18.089247 | 2018-07-09T08:51:32 | 2018-07-09T08:51:32 | 137,197,886 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-03 13:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Applies on top of the previous book-app migration.
    dependencies = [
        ('book', '0006_auto_20180619_2116'),
    ]
    # Adds two fields to the Story model: the rendered HTML of the body and
    # a flag saying whether the source text is markdown.
    operations = [
        migrations.AddField(
            model_name='story',
            name='html',
            field=models.TextField(default='', help_text='\u6b63\u6587\u53ef\u4ee5\u4f7f\u7528markdown', verbose_name='html\u6e32\u67d3\u540e\u7684\u9875\u9762'),
        ),
        migrations.AddField(
            model_name='story',
            name='is_markdown',
            field=models.BooleanField(default=True, verbose_name='\u4f7f\u7528markdown'),
        ),
    ]
| [
"286210002@qq.com"
] | 286210002@qq.com |
bda236931f532e5ace9057479265cd634180924d | dd3692b21cab2bf907775b6769cb015c887b8fe2 | /Admin/routes/categories.py | 5be1ca125908ae88ffc90fabc0b65f3bc69f7358 | [] | no_license | Nesquate/flaskBlog | e718bed9a2e36f31324cc240c6fa06dfb792f91b | 17e2077ddb048f5e08ca9b083227e453d07c579a | refs/heads/master | 2023-05-23T18:47:37.540339 | 2021-06-19T18:21:59 | 2021-06-19T18:21:59 | 344,166,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | from flask import render_template, url_for, escape, redirect, abort, request
from flask_login.utils import login_required
from app import core
from database import db, models
@core.route('/admin/categories', methods=['GET', 'POST'])
@login_required
def adminCategories():
    """List all categories; on POST, delete the category named in the form.

    ``status`` comes from delTag: 1 on successful deletion, -1 when the id
    was not found, and stays 0 when no deletion was requested.
    """
    status = 0
    if request.method == 'POST':
        print(request.form)  # NOTE(review): debug print; prefer logging
        status = delTag(request.form['delCategory'])
        categoriesList = db.Categories.query.all()
        return render_template('admin/categories/categories.html', categoriesList=categoriesList, status=status)
    categoriesList = db.Categories.query.all()
    return render_template('admin/categories/categories.html', categoriesList=categoriesList)
@core.route('/admin/categories/new', methods=['GET', 'POST'])
@login_required
def newCategory():
    """Render the new-category form; on POST create it and redirect to the list."""
    if request.method == 'POST':
        print(request.form)  # NOTE(review): debug print; prefer logging
        tag = models.Categories(name=request.form['title'], description=request.form['description'])
        db.db.session.add(tag)
        db.db.session.commit()
        return redirect(url_for('adminCategories'))
    categories = db.Categories.query.all()
    return render_template('admin/categories/newcategory.html', categoriesList=categories)
@core.route('/admin/categories/edit/<int:categoryid>', methods=['GET', 'POST'])
@login_required
def editCategory(categoryid):
    """Edit the category with the given id; POST saves name/description.

    NOTE(review): first() may return None for an unknown id (delTag checks
    for exactly this), which would make the attribute accesses below raise —
    consider a 404 guard.
    """
    if request.method == 'POST':
        category = db.Categories.query.filter_by(id=categoryid).first()
        print(category.id)  # NOTE(review): debug print
        category.name = request.form['title']
        category.description = request.form['description']
        print(category.description)  # NOTE(review): debug print
        db.db.session.commit()
        return redirect(url_for('adminCategories'))
    category = db.Categories.query.filter_by(id=categoryid).first()
    return render_template('admin/categories/editcategory.html', category=category)
def delTag(categoryID):
    """Delete the category with the given id.

    Returns 1 on success, -1 when no such category exists.
    """
    print(db.Categories.query.filter_by(id=categoryID).first() )  # NOTE(review): debug print
    if db.Categories.query.filter_by(id=categoryID).first() is None:
        return -1
    else:
        tag = db.Categories.query.filter_by(id=categoryID).first()
        db.db.session.delete(tag)
        db.db.session.commit()
        return 1
| [
"nesquate.100001@gmail.com"
] | nesquate.100001@gmail.com |
dc09c3c13f4ca2119ef4419cf567c1bbe2bf7f42 | 81bdc1dccfb95877e5f376527c23cb5c72a13922 | /pyl2extra/gui/debugger/remote_window.py | 05041c0221dd8c677c8b0d68e7c8136c0ee9f4e5 | [
"BSD-3-Clause"
] | permissive | TNick/pyl2extra | 1fb5be10448bc09018e2b0ac294b2e03fb146a57 | 323e1ecefeedc7d196de6d7ac6d8eceecb756333 | refs/heads/master | 2021-01-22T07:04:10.082374 | 2015-08-11T09:57:17 | 2015-08-11T09:57:17 | 34,400,301 | 0 | 1 | null | 2015-04-22T17:19:50 | 2015-04-22T15:58:21 | Python | UTF-8 | Python | false | false | 2,954 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Nicu Tofan <nicu.tofan@gmail.com>
"""
from PyQt4 import QtGui, QtCore
from pyl2extra.gui.guihelpers import center
class RemoteDialog(QtGui.QDialog):
    """
    Allows selecting remote in order to debug on that remote.

    Collects a host address plus a control ("receive") port and a
    broadcast ("publish") port; read the result with get_values().
    """
    def __init__(self, mw):
        """
        Constructor; `mw` is kept as the owning main window.
        """
        super(RemoteDialog, self).__init__()
        self.mw = mw
        self.init_ui()
    def init_ui(self):
        """
        Prepares the GUI.
        """
        self.resize(300, 200)
        self.setWindowTitle('Connect to remote')
        center(self)
        self.button_box = QtGui.QDialogButtonBox(self)
        self.button_box.setGeometry(QtCore.QRect(150, 250, 341, 32))
        self.button_box.setOrientation(QtCore.Qt.Horizontal)
        self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.button_box.setObjectName("button_box")
        # NOTE(review): "lbl_post_rcv" looks like a typo for "lbl_port_rcv".
        lbl_address = QtGui.QLabel('Address')
        lbl_post_rcv = QtGui.QLabel('Control port')
        lbl_port_sub = QtGui.QLabel('Broadcast port')
        le_address = QtGui.QLineEdit()
        le_address.setPlaceholderText('The address of the remote machine')
        le_address.setToolTip('This may also be an ip address.')
        le_address.setText('127.0.0.1')
        # NOTE(review): 65565 exceeds the maximum TCP/UDP port 65535.
        sp_port_rcv = QtGui.QSpinBox()
        sp_port_rcv.setMinimum(1024)
        sp_port_rcv.setMaximum(65565)
        sp_port_rcv.setValue(5955)
        sp_port_rcv.setToolTip('Port for command and control.')
        sp_port_sub = QtGui.QSpinBox()
        sp_port_sub.setMinimum(1024)
        sp_port_sub.setMaximum(65565)
        sp_port_sub.setValue(5956)
        sp_port_sub.setToolTip('Port where the remote debugger publishes information.')
        # Form layout: label/widget pairs stacked in a grid, buttons below.
        grid1 = QtGui.QGridLayout()
        grid1.setSpacing(10)
        grid1.addWidget(lbl_address, 1, 0)
        grid1.addWidget(le_address, 1, 1)
        grid1.addWidget(lbl_post_rcv, 2, 0)
        grid1.addWidget(sp_port_rcv, 2, 1)
        grid1.addWidget(lbl_port_sub, 3, 0)
        grid1.addWidget(sp_port_sub, 3, 1)
        grid = QtGui.QVBoxLayout()
        grid.setSpacing(10)
        grid.addLayout(grid1)
        grid.addWidget(self.button_box)
        self.setLayout(grid)
        QtCore.QObject.connect(self.button_box, QtCore.SIGNAL("accepted()"), self.accept)
        QtCore.QObject.connect(self.button_box, QtCore.SIGNAL("rejected()"), self.reject)
        QtCore.QMetaObject.connectSlotsByName(self)
        # Keep references so get_values() can read the user's input later.
        self.le_address = le_address
        self.sp_port_rcv = sp_port_rcv
        self.sp_port_sub = sp_port_sub
    def get_values(self):
        """
        Return the values selected by the user as a dict with keys
        'address', 'rport' (control port) and 'pport' (publish port).
        """
        values = {'address': self.le_address.text().strip(),
                  'rport': self.sp_port_rcv.value(),
                  'pport': self.sp_port_sub.value()}
        return values
| [
"nicu.tofan@gmail.com"
] | nicu.tofan@gmail.com |
7bd05db19bfc73b84e228e2d63a0457b0444eb53 | eef3b9b1796cda5342399c5ed286503bc0f92aee | /breast_cancer.py | 708e00a7ef1eeb90c5676eb80a383ba89979d187 | [] | no_license | RenanJochem98/RedesNeurais | 8112b890c8e22d0e21a892bb54a4dbe908f8385d | 5b09332540fc3d43351f4c36c168ab8da85963c2 | refs/heads/master | 2020-12-20T07:19:41.645345 | 2020-02-24T13:38:27 | 2020-02-24T13:38:27 | 235,999,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,446 | py | import numpy as np
from sklearn import datasets
from datetime import datetime
def calculaAtivacao(entradas, pesos):
    """Forward one layer: weighted input sums pushed through the sigmoid."""
    somas = np.dot(entradas, pesos)
    return sigmoid(somas)
def atualizaPesos(camada, delta, pesos, momento, taxaAprendizagem):
    """Backprop step: build the weight gradient and blend it into the weights.

    The transpose is required so the dot product lines up as layer^T . delta.
    """
    gradiente = camada.T.dot(delta)
    return calculaPeso(pesos, gradiente, momento, taxaAprendizagem)
def calculaPeso(pesos, pesosNovo, momento, taxaAprendizagem):
    """Combine old weights (scaled by momentum) with the learning-rate-scaled update."""
    termo_momento = pesos * momento
    termo_aprendizagem = pesosNovo * taxaAprendizagem
    return termo_momento + termo_aprendizagem
#funcao de ativacao
def sigmoid(soma):
    """Logistic activation: map any real input (scalar or array) into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-soma))
# funcao para calculo da descida do gradiente
def sigmoidDerivada(sig):
    """Derivative of the sigmoid, expressed via the sigmoid value itself: s*(1-s)."""
    return (1 - sig) * sig
def deltaSaida(sigmoidDerivada, erro):
    """Output-layer delta: elementwise product of the gradient and the error."""
    return erro * sigmoidDerivada
# Load the breast-cancer dataset; inputs have 30 features per sample
# (matching the (30, quantNeuronios) weight matrix below).
base = datasets.load_breast_cancer()
entradas = base.data
valoresSaidas = base.target
# Reshape the flat target vector into a column (n, 1) so it lines up with
# the single network output.
saidas = np.empty([len(valoresSaidas), 1], dtype=int)
for i in range(len(valoresSaidas)):
    saidas[i] = valoresSaidas[i]
# entradas = np.array([[0,0], [0,1],[1,0],[1,1]])
# saidas = np.array([[0],[1],[1],[0]])
quantNeuronios = 48
# Weights drawn uniformly from [-1, 1): 30 inputs -> 48 hidden -> 1 output.
pesos0 = 2*np.random.random((30,quantNeuronios)) - 1
pesos1 = 2*np.random.random((quantNeuronios,1)) - 1
# Snapshots of the starting weights (the loop rebinds pesos0/pesos1, so
# these keep pointing at the original arrays).
pesos0_inicial = pesos0
pesos1_inicial = pesos1
epocas = 1000000
taxaAprendizagem = 0.3
momento = 1 # momentum term, helps escape false local minima
# Train by plain batch backpropagation; Ctrl-C stops early and still prints
# the summary below.
try:
    inicio = datetime.now()
    for j in range(epocas):
        camadaEntrada = entradas
        camadaOculta = calculaAtivacao(camadaEntrada, pesos0)
        camadaSaida = calculaAtivacao(camadaOculta, pesos1)
        # Elementwise error; numpy handles the whole array, no loop needed.
        erroCamadaSaida = saidas - camadaSaida
        mediaAbsoluta = np.mean(np.abs(erroCamadaSaida))
        print("Erro: "+ str(mediaAbsoluta)+ " Epoca: "+ str(j))
        derivadaSaida = sigmoidDerivada(camadaSaida) # gradient
        # deltaSaida = deltaSaida(derivadaSaida, erroCamadaSaida) #TypeError: 'numpy.ndarray' object is not callable??
        # (The local variable below shadows the module-level deltaSaida
        # function, which is why the commented call above failed on the
        # second iteration.)
        deltaSaida = erroCamadaSaida * derivadaSaida
        pesos1Transposta = pesos1.T # transpose needed for the dot product
        deltaSaidaXPeso = deltaSaida.dot(pesos1Transposta)
        deltaCamadaOculta = deltaSaidaXPeso * sigmoidDerivada(camadaOculta)
        # Hidden-layer weight update (backpropagation).
        pesos1 = atualizaPesos(camadaOculta, deltaSaida, pesos1, momento, taxaAprendizagem)
        # Input-layer weight update (backpropagation).
        pesos0 = atualizaPesos(camadaEntrada, deltaCamadaOculta, pesos0, momento, taxaAprendizagem)
    fim = datetime.now()
    print()
    print("#"*20+" RESULTADO "+"#"*20)
    print()
    print("Epoca: "+str(j+1))
    print("Tempo: ", end=" ")
    print(fim-inicio)
    print("Erro medio: " + str(mediaAbsoluta))
    print("Pesos0 Inicial: ")
    print(pesos0_inicial)
    print()
    print("Pesos0: ")
    print(pesos0)
    print()
    print("Pesos1 Inicial:")
    print(pesos1_inicial)
    print()
    print("Pesos1:")
    print(pesos1)
    print()
    print("Saida:")
    print(camadaSaida)
    print()
    print("#"*50)
except KeyboardInterrupt:
    print("Interrompido via teclado!!")
finally:
    print("Final da execução")
| [
"renanjochem98@gmail.com"
] | renanjochem98@gmail.com |
3ec4aad73f67b02aaeb6f0ca72ece622d9168757 | eeab5c5f07331cf3fbccb5232a2825c90d4bf5b5 | /RestAPI/urls.py | 4f957d035b1b19d8e90f4e261ede85d759b5f76b | [] | no_license | AlbertBuluma/DjangoRestAPI | 8ab8c6e935aae12008643e813c73d6105ee98c80 | a1dc6ae4768219b590a25c4f8b8c1bca6c848ca6 | refs/heads/master | 2020-06-07T15:44:56.822383 | 2019-06-21T14:21:24 | 2019-06-21T14:21:24 | 193,051,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | """RestAPI URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route table: the Django admin UI plus the songz app's endpoints under /api/.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('songz.urls')),
]
| [
"albert.buluma1@gmail.com"
] | albert.buluma1@gmail.com |
a343c836cf8321f236486772dd7e2cff06976448 | 65b844b72641fcdbf14b640f101c7fb0c4bbef5a | /eventex/core/views.py | fae68c412e60530ef53ab48d9bb2b82a2968de91 | [] | no_license | victornovais/wttd | bcb6a53cf447e40062246c8b1ea4ea444aa64fb8 | 839d7ffa95809436d1c3da11e43651accb83ef14 | refs/heads/master | 2021-01-10T19:01:28.934467 | 2012-10-30T15:04:54 | 2012-10-30T15:04:54 | 6,015,049 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # -- coding:utf-8 --
from django.views.generic.simple import direct_to_template
def homepage(request):
    """Render the site landing page from the static ``index.html`` template."""
    template_name = 'index.html'
    return direct_to_template(request, template=template_name)
| [
"victorh.novaisr@gmail.com"
] | victorh.novaisr@gmail.com |
d580bb52936c3e6585d52824c4aa694524658047 | 7b593731dffd7be9034a7904eb46a8ac6771d3a4 | /FrozenLake-v0/Q_learning.py | 373db383257e0d22ef46b383cb523531f404fdbe | [] | no_license | hsdtlx/openAI-Gym | 1bfa32f885543744720860b551c487bd38502579 | b3c59788d29cab70388c614063a02e07d41b16d2 | refs/heads/master | 2020-03-22T07:05:30.572768 | 2018-09-22T04:02:03 | 2018-09-22T04:02:03 | 139,677,472 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | # results of 100000 episodes, 10000 tests:
# 74.72%
# 73.97%
# 74.18%
import gym
import numpy as np
env = gym.make('FrozenLake-v0')
LR = .01   # Q-learning step size (alpha)
y = .8     # discount factor (gamma)
MAX_EPISODES = 100000
MAX_TEST = 10000
# Q-table: one row per state, one column per action.
Q = np.zeros([env.observation_space.n, env.action_space.n])
test_reward = 0
training_reward = 0
epsilon = 0.9
# NOTE(review): decay_rate and min_epsilon are defined but never used, so
# epsilon stays at 0.9 for the whole run -- the agent takes a random action
# 90% of the time even late in training.
decay_rate = 0.001
min_epsilon = 0
# training
for i in range(MAX_EPISODES):
    s = env.reset()
    reward = 0  # NOTE(review): assigned but never read
    while True:
        # Epsilon-greedy action selection: exploit (argmax) with
        # probability 1 - epsilon, otherwise sample a random action.
        random = np.random.uniform()
        if random > epsilon:
            a = np.argmax(Q[s, :])
        else:
            a = env.action_space.sample()
        s_, r, done, _ = env.step(a)
        training_reward += r
        # Reward shaping: a terminal step with zero reward is treated as a
        # failure and penalised with -1.
        if done and (r == 0):
            r = -1
        # Standard Q-learning update: Q(s,a) += LR * (r + y*max_a' Q(s',a') - Q(s,a)).
        Q[s, a] = Q[s, a] + LR * (r + y * np.max(Q[s_, :]) - Q[s, a])
        s = s_
        if done:
            if i % 1000 == 0:
                print("Reward =", training_reward / 1000, 'in last 1000 episodes Episode: ', i)
                training_reward = 0
            break
# test of training
# Greedy evaluation: always take the argmax action, no exploration.
for i in range(MAX_TEST):
    s = env.reset()
    reward = 0
    while True:
        a = np.argmax(Q[s, :])
        s_, r, done, _ = env.step(a)
        s = s_
        if done:
            test_reward += r
            break
print(Q)
print(test_reward/MAX_TEST*100, '%') | [
"956895214@qq.com"
] | 956895214@qq.com |
d109de108b76bdf5b2a33d7d1a963f878e74c838 | 76964e4eedb8f2f8317dcac51ca458fe31af0de7 | /IITBxReportsProj/v_1_faculty/urls.py | 4c8d9791907fad24bdc172e3c1d3b1cc32e00515 | [] | no_license | Chirram/IITBombayXMAPP | 4e8e7a4086fc3bfa8147397e69b1a7cb6a447685 | ffea8b3e7168da9065a91363fcd50f3112953203 | refs/heads/master | 2021-01-10T18:12:11.468559 | 2016-02-21T07:08:22 | 2016-02-21T07:08:22 | 52,195,116 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Faculty dashboard and per-course discussion views (keyed by faculty id).
    url(r'^index/(?P<facultyid>(.+))$',views.index,name='index'),
    url(r'^course_unanswered_questions/(?P<facultyid>(.+))$',views.course_unanswered_questions,name='course_unanswered_questions'),
    url(r'^course_answered_questions/(?P<facultyid>(.+))$',views.course_answered_questions,name='course_answered_questions'),
    url(r'^course_discussions/(?P<facultyid>(.+))$',views.course_discussions,name='course_discussions'),
    # Students-of-course listing; the 7 positional groups presumably carry
    # filter values -- confirm against the view's signature.
    url(r'^stuofcrs/(.+)/(.+)/(.+)/(.+)/(.+)/(.+)/(.+)$',views.students_of_course_result_display,name='students_of_course_result_display'),
    url(r'^stuofcrs/(?P<facultyid>(.+))$',views.students_of_course,name='stuofcrs'),
    # url(r'^course_enrollment_details/(?P<facultyid>(.+))$',views.course_enrollment_details,name='course_enrollment_details'),
    # Grade drill-down: per-course first, then per-quiz for one student.
    url(r'^stugrades/(?P<courseid>(.+))/(?P<facultyid>(.+))$',views.students_grade_courselevel,name='stugrades'),
    url(r'^quizlevelgrades/(?P<student_id>(.+))/(?P<courseid>(.+))/(?P<facultyid>(.+))$',views.students_grade_quizlevel,name='quizlevelgrades'),
    # Cohort-wide reports (no URL parameters).
    url(r'^cohort_details$',views.cohort_details,name='cohort_details'),
    url(r'^cohort_detailed_discussions$',views.cohort_detailed_discussions,name='cohort_detailed_discussions'),
    url(r'^cohort_detailed_answered$',views.cohort_detailed_answered,name='cohort_detailed_answered'),
    url(r'^cohort_detailed_unanswered$',views.cohort_detailed_unanswered,name='cohort_detailed_unanswered'),
    url(r'^cohort_students_list$',views.cohort_students_list,name='cohort_students_list'),
]
| [
"chkumariiit123@gmail.com"
] | chkumariiit123@gmail.com |
99d7d18dcbf19d05926656b8d453cb74662d354b | c34805d6b2e9b4cd03feaa53feee93077e5efba6 | /common/migrations/0001_initial.py | 3b8413542aa7fe972d2e90bdf18d1b0460b7d05b | [] | no_license | ccpwcn/niu_she_bing | 4d98333693e21c2614998d1634a0f296f8c839e3 | 207302706d5632b62203a46ce9e6e69b62d8b5bf | refs/heads/master | 2022-09-01T04:49:34.542610 | 2020-05-25T12:50:55 | 2020-05-25T12:50:55 | 259,833,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,093 | py | # Generated by Django 3.0.5 on 2020-04-29 11:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: the article, category and tag tables."""
    initial = True
    dependencies = [
    ]
    # NOTE(review): the 'id' columns are plain IntegerFields with default=0
    # and no auto-increment, so unique ids must be supplied by the caller --
    # confirm this is intentional.
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.IntegerField(default=0, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='标题')),
                ('sub_title', models.CharField(max_length=200, verbose_name='副标题')),
                ('content', models.TextField(verbose_name='正文')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
            ],
            options={
                'db_table': 'article',
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.IntegerField(default=0, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='名称')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
            ],
            options={
                'db_table': 'category',
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.IntegerField(default=0, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='名称')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
            ],
            options={
                'db_table': 'tag',
            },
        ),
    ]
| [
"ccpwcn@gmail.com"
] | ccpwcn@gmail.com |
3768f4cd95d5fb4609086e286e82398041acb23f | dc112f7819ba3cba1c889d1a524fad054cdfb3a2 | /Write a Python script to generate and print a dictionary that contains a number in the form/main.py | 7c56342e13514313eae0b7a6b915d75c6cc6c9c6 | [] | no_license | Grozdanovsky/Dictionaries | fcc42558733864e780ead407c4e98f721da62201 | 72941bd1ae34f45a5d47b8c0bc768f5d14147723 | refs/heads/master | 2023-03-12T20:04:26.092581 | 2021-03-01T18:06:50 | 2021-03-01T18:06:50 | 343,498,322 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | counter = int(input("write a number: "))
# Map each number 1..counter to its square. A dict comprehension replaces the
# original loop, which built a throwaway one-entry dict per iteration via
# dic1.update({item: item*item}).
dic1 = {item: item * item for item in range(1, counter + 1)}
print(dic1) | [
"viktor.grozdanovski@outlook.com"
] | viktor.grozdanovski@outlook.com |
5e917b913b6974267f31b4a811c0056105fd1047 | 9625c975792f7a7bc2ad73f0a48fb478452cd15e | /search.py | e6b308aca4f4e37594890e8c0053a2e4c789756f | [] | no_license | nickallaire/CSE150Assignment1 | 9698038ed91526a8a258416b410fd87c64a4d6c6 | da5fe4baf7975f40b5d0b3a143fd27fca49d98a3 | refs/heads/master | 2020-03-10T08:54:32.514931 | 2018-04-13T02:49:31 | 2018-04-13T02:49:31 | 129,297,252 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,345 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    Every method raises via util.raiseNotDefined() until a concrete subclass
    overrides it.
    You do not need to change anything in this class, ever.
    """
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """Return the hard-coded move sequence that solves the tinyMaze layout.

    The sequence is only correct for tinyMaze; on any other maze it will be
    wrong, so never use it elsewhere.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.
    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.
    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    """
    '''print "Start:", problem.getStartState()
    print "Is the start a goal?", problem.isGoalState(problem.getStartState())
    print "Start's successors:", problem.getSuccessors(problem.getStartState())'''
    "*** YOUR CODE HERE ***"
    # use stack as data structure
    stack = util.Stack()
    successors = problem.getSuccessors(problem.getStartState())
    # parent map is used to determine the path to the goal
    # (maps a successor triple -> the triple it was reached from; the
    # sentinel value 0 marks a root, i.e. a direct successor of the start)
    parentMap = {}
    # NOTE(review): missing "()" -- this keys the bound method object, not
    # the start state; harmless in practice because path reconstruction
    # stops at the sentinel 0, but presumably a typo.
    parentMap[problem.getStartState] = 0
    # count is used to determine if a node had been visited before
    count = util.Counter()
    # actions is the list of actions to take to reach the goal
    actions = []
    count[problem.getStartState()] += 1
    #add start states successors to stack
    while len(successors) > 0 :
        suc = successors.pop(0)
        if count[suc[0]] == 0 :
            stack.push(suc)
            #count[suc[0]] += 1
            parentMap[suc] = 0
    #DFS algorithm
    # States are marked visited when popped (not when pushed); only
    # successors whose state is still unvisited are pushed below.
    while (stack.isEmpty() != 1):
        nextMove = stack.pop()
        count[nextMove[0]] += 1
        if problem.isGoalState(nextMove[0]) == True:
            # Walk the parent chain back to a root (sentinel 0), building
            # the action list in start-to-goal order.
            currNode = nextMove
            while (currNode != 0) :
                actions.insert(0, currNode[1])
                currNode = parentMap[currNode]
            return actions
        else:
            newSuccessors = problem.getSuccessors(nextMove[0])
            while len(newSuccessors) > 0:
                sucToAdd = newSuccessors.pop(0)
                if count[sucToAdd[0]] == 0:
                    stack.push(sucToAdd)
                    parentMap[sucToAdd] = nextMove
def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    "*** YOUR CODE HERE ***"
    # use queue as data structure
    queue = util.Queue()
    successors = problem.getSuccessors(problem.getStartState())
    # parent map is used to determine the path to the goal
    # (successor triple -> parent triple; sentinel 0 marks a root)
    parentMap = {}
    # NOTE(review): missing "()" -- keys the bound method, not the start
    # state; harmless since path reconstruction stops at the sentinel 0.
    parentMap[problem.getStartState] = 0
    # count is used to determine if a node had been visited before
    # (states are marked as soon as they are pushed onto the queue)
    count = util.Counter()
    # actions is the list of actions to take to reach the goal
    actions = []
    count[problem.getStartState()] += 1
    #add start states successors to queue
    while len(successors) > 0:
        suc = successors.pop(0)
        if count[suc[0]] == 0:
            queue.push(suc)
            count[suc[0]] += 1
            parentMap[suc] = 0
    #BFS algorithm
    while(queue.isEmpty() != 1) :
        nextMove = queue.pop()
        if problem.isGoalState(nextMove[0]) == True:
            # count > 0 always holds here (states are counted on push), so
            # this inner guard is effectively redundant.
            if count[nextMove[0]] > 0:
                currNode = nextMove
                while (currNode != 0):
                    actions.insert(0, currNode[1])
                    currNode = parentMap[currNode]
                return actions
        else:
            newSuccessors = problem.getSuccessors(nextMove[0])
            while len(newSuccessors) > 0:
                sucToAdd = newSuccessors.pop(0)
                if count[sucToAdd[0]] == 0:
                    queue.push(sucToAdd)
                    parentMap[sucToAdd] = nextMove
                    count[sucToAdd[0]] += 1
def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE ***"
    # use priority queue as data structure
    pqueue = util.PriorityQueue()
    successors = problem.getSuccessors(problem.getStartState())
    # parent map is used to determine the path to the goal
    # valueParentMap is used to determine the total cost to reach the node
    # (i.e. the best known path cost per *state*)
    parentMap = {}
    valueParentMap = {}
    parentMap[problem.getStartState()] = 0
    valueParentMap[problem.getStartState()] = 0
    # count is used to determine if a node had been visited before
    count = util.Counter()
    # actions is the list of actions to take to reach the goal
    actions = []
    count[problem.getStartState()] += 1
    #add start states successors to priority queue
    while len(successors) > 0:
        suc = successors.pop(0)
        if count[suc[0]] == 0:
            pqueue.push(suc, suc[2])
            count[suc[0]] += 1
            parentMap[suc] = 0
            valueParentMap[suc[0]] = suc[2]
    #UCS algorithm
    while (pqueue.isEmpty() != 1):
        nextMove = pqueue.pop()
        if problem.isGoalState(nextMove[0]) == True:
            if count[nextMove[0]] > 0:
                # Reconstruct the action path back to a root (sentinel 0).
                currNode = nextMove
                while (currNode != 0):
                    actions.insert(0, currNode[1])
                    currNode = parentMap[currNode]
                return actions
        else:
            newSuccessors = problem.getSuccessors(nextMove[0])
            while len(newSuccessors) > 0:
                sucToAdd = newSuccessors.pop(0)
                # Re-open a state when it is new or a cheaper path was found.
                if count[sucToAdd[0]] == 0 or sucToAdd[2] + valueParentMap[nextMove[0]] < valueParentMap[sucToAdd[0]] :
                    pqueue.push(sucToAdd, sucToAdd[2] + valueParentMap[nextMove[0]])
                    valueParentMap[sucToAdd[0]] = sucToAdd[2] + valueParentMap[nextMove[0]]
                    parentMap[sucToAdd] = nextMove
                    count[sucToAdd[0]] += 1
    # Reached only when the frontier empties without finding a goal.
    util.raiseNotDefined()
def nullHeuristic(state, problem=None):
    """Trivial heuristic: always estimate zero remaining cost to the goal.

    A heuristic estimates the cost from the current state to the nearest
    goal in the provided SearchProblem; returning 0 everywhere is the
    degenerate (but admissible) choice.
    """
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first.

    Graph-search A*: nodes are ordered by f(n) = g(n) + h(n), where g(n) is
    the cheapest known cost from the start to n (tracked in valueParentMap)
    and h(n) is the heuristic estimate from n to the goal. Returns the list
    of actions from the start state to a goal state.
    """
    # Frontier ordered by f = g + h.
    pqueue = util.PriorityQueue()
    successors = problem.getSuccessors(problem.getStartState())
    # parentMap maps a successor triple -> the triple it was reached from
    # (sentinel 0 marks a root); used to reconstruct the action path.
    # valueParentMap maps a state -> the best known g-cost to reach it.
    parentMap = {}
    valueParentMap = {}
    parentMap[problem.getStartState()] = 0
    valueParentMap[problem.getStartState()] = 0
    # count tracks whether a state has been seen before.
    count = util.Counter()
    # actions is the list of actions to take to reach the goal.
    actions = []
    count[problem.getStartState()] += 1
    # Seed the frontier with the start state's successors.
    while len(successors) > 0:
        suc = successors.pop(0)
        if count[suc[0]] == 0:
            pqueue.push(suc, suc[2] + heuristic(suc[0], problem))
            count[suc[0]] += 1
            parentMap[suc] = 0
            valueParentMap[suc[0]] = suc[2]
    # Main A* loop.
    while (pqueue.isEmpty() != 1):
        nextMove = pqueue.pop()
        if problem.isGoalState(nextMove[0]) == True:
            if count[nextMove[0]] > 0:
                # Walk parent pointers back to a root (sentinel 0) to build
                # the action sequence in start-to-goal order.
                currNode = nextMove
                while (currNode != 0):
                    actions.insert(0, currNode[1])
                    currNode = parentMap[currNode]
                return actions
        else:
            newSuccessors = problem.getSuccessors(nextMove[0])
            while len(newSuccessors) > 0:
                sucToAdd = newSuccessors.pop(0)
                # Re-open a state when it is new or a cheaper path was found.
                if count[sucToAdd[0]] == 0 or sucToAdd[2] + valueParentMap[nextMove[0]] < valueParentMap[sucToAdd[0]]:
                    # BUG FIX: the heuristic must be evaluated at the
                    # successor state (sucToAdd[0]), not at its parent
                    # (nextMove[0]); the priority is f = g(child) + h(child).
                    pqueue.push(sucToAdd, sucToAdd[2] + valueParentMap[nextMove[0]] + heuristic(sucToAdd[0], problem))
                    valueParentMap[sucToAdd[0]] = sucToAdd[2] + valueParentMap[nextMove[0]]
                    parentMap[sucToAdd] = nextMove
                    count[sucToAdd[0]] += 1
    # Reached only when the frontier empties without finding a goal.
    util.raiseNotDefined()
# Abbreviations
# Short aliases for the search functions defined above.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| [
"nick.allaire@gmail.com"
] | nick.allaire@gmail.com |
32ad761ef74eab1d9a05b46efafe7da15251b1e0 | f9aba4362f254ee094028b6c3fe1f2a8465a706e | /configs/SpecialRequests2013/gio3bis_STEP1_DIGI_L1_DIGI2RAW_HLT.py | 28102ab99f63ca4813b7db581b3e8f2e08d231dd | [] | no_license | cms-PdmV/wmcontrol | be28bf80eb022ceeb9ccb3b3c2906a66261f6536 | 6f564c325db5a9718f2aceb9e6f18f901ff04179 | refs/heads/master | 2022-07-28T17:46:14.908388 | 2022-06-22T10:10:13 | 2022-06-22T10:10:13 | 12,247,869 | 1 | 12 | null | 2021-03-16T12:25:06 | 2013-08-20T16:20:53 | Python | UTF-8 | Python | false | false | 3,396 | py | # Auto generated configuration file
# using:
# Revision: 1.381.2.13
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: STEP1 --step DIGI,L1,DIGI2RAW,HLT:7E33v2 --conditions START53_V7C::All --datamix NODATAMIXER --eventcontent RAWSIM --datatier GEN-SIM-RAW -n 100 --no_exec
import FWCore.ParameterSet.Config as cms
# NOTE: this file is auto-generated by cmsDriver.py/ConfigBuilder (see the
# header above for the exact command line); prefer regenerating it with that
# command over hand-editing.
process = cms.Process('HLT')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Digi_cff')
process.load('Configuration.StandardSequences.SimL1Emulator_cff')
process.load('Configuration.StandardSequences.DigiToRaw_cff')
process.load('HLTrigger.Configuration.HLT_7E33v2_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Process at most 100 events (cmsDriver option "-n 100").
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)
# Input source
process.source = cms.Source("PoolSource",
    secondaryFileNames = cms.untracked.vstring(),
    fileNames = cms.untracked.vstring('file:STEP1_SIM.root')
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.381.2.13 $'),
    annotation = cms.untracked.string('STEP1 nevts:100'),
    name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
    splitLevel = cms.untracked.int32(0),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    outputCommands = process.RAWSIMEventContent.outputCommands,
    fileName = cms.untracked.string('STEP1_DIGI_L1_DIGI2RAW_HLT.root'),
    dataset = cms.untracked.PSet(
        filterName = cms.untracked.string(''),
        dataTier = cms.untracked.string('GEN-SIM-RAW')
    )
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'START53_V7C::All', '')
# Path and EndPath definitions
process.digitisation_step = cms.Path(process.pdigi)
process.L1simulation_step = cms.Path(process.SimL1Emulator)
process.digi2raw_step = cms.Path(process.DigiToRaw)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
# Schedule definition
# The DIGI -> L1 -> DIGI2RAW paths run first, then the full HLT schedule,
# then the end-of-job and output end paths.
process.schedule = cms.Schedule(process.digitisation_step,process.L1simulation_step,process.digi2raw_step)
process.schedule.extend(process.HLTSchedule)
process.schedule.extend([process.endjob_step,process.RAWSIMoutput_step])
# customisation of the process.
# Automatic addition of the customisation function from HLTrigger.Configuration.customizeHLTforMC
from HLTrigger.Configuration.customizeHLTforMC import customizeHLTforMC
#call to customisation function customizeHLTforMC imported from HLTrigger.Configuration.customizeHLTforMC
process = customizeHLTforMC(process)
# End of customisation functions
| [
"franzoni@4525493e-7705-40b1-a816-d608a930855b"
] | franzoni@4525493e-7705-40b1-a816-d608a930855b |
ee714e86e308d91faaac2484d9ae4ef7adaa57e7 | 2e4d49fbcc29bf5a5d409e9c8b7565dd1624f5d4 | /ecom/api/user/models.py | 58ebf55199243862362faa425c7dd01bc56cbbe2 | [] | no_license | Vasanth-Korada/Ecommerce-Web-App-React-Django | b114ba1d007c3d81189efc9434c8028e50621635 | 7032a50a153719b4da37fea00c41ee9348c1c2fe | refs/heads/master | 2022-11-28T12:02:31.380058 | 2020-08-10T17:25:13 | 2020-08-10T17:25:13 | 286,537,329 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
    """Custom user model that authenticates with e-mail instead of a username."""
    name = models.CharField(max_length = 50, default = "Anonymous User")
    first_name = models.CharField(max_length = 50, default = "")
    last_name = models.CharField(max_length = 50, default = "")
    # E-mail doubles as the login identifier, hence unique.
    email = models.EmailField(max_length = 250, unique = True)
    # Drop the inherited username field entirely.
    username = None
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    # Optional profile fields.
    phone = models.CharField(max_length = 20, blank = True, null = True)
    gender = models.CharField(max_length = 10, blank = True, null = True)
    # Session token; default "0" presumably means "no active session" -- TODO confirm.
    session_token = models.CharField(max_length = 250, default = "0")
    created_at = models.DateTimeField(auto_now_add = True)
    updated_at = models.DateTimeField(auto_now = True)
| [
"vasanthkorada999@gmail.com"
] | vasanthkorada999@gmail.com |
91a6002050a2af7455c332231a47d933bb9a242e | 435585a1fb0d0c0e0a6029fc93526ef0797462dd | /scripts/setup_mysql.py | 79deba1ac2cbfb77239e45341c88d2592f18b9cb | [] | no_license | jhuang1996/seidm-hw7 | f86de860a06ad25803ab72ad860844caa6252246 | 3d449a133ece98f6bc4692828440bc35ada37cb6 | refs/heads/master | 2021-01-01T18:40:18.729930 | 2017-07-21T00:09:41 | 2017-07-21T00:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | #!/home/yeeede/pyenv/bin/python
# -*- coding: utf-8 -*-
import sys
import MySQLdb
import MySQLdb.cursors  # NOTE(review): imported but unused here
# One-shot provisioning script: connect as root to create the demo user and
# database, then reconnect as that user and create the schema.
# Narrow except clauses (MySQLdb.Error) so KeyboardInterrupt and programming
# errors are not silently swallowed by a bare "except:".
try:
    r_conn = MySQLdb.connect(host='127.0.0.1',
                             user='root',
                             passwd='root1234',
                             charset='utf8')
except MySQLdb.Error:
    print("Can't Connect Database via root: ", sys.exc_info()[0])
    sys.exit()
r_cursor = r_conn.cursor()
# create user and db, and grant privileges
# (NOTE: nothing is dropped first, despite what an older comment claimed --
# the script fails if the user or database already exists)
r_cursor.execute("CREATE USER 'demouser'@'localhost' IDENTIFIED BY 'demo1234'")
r_cursor.execute("CREATE DATABASE demo CHARACTER SET UTF8")
r_cursor.execute("GRANT ALL PRIVILEGES ON demo.* to 'demouser'@'localhost'")
r_cursor.execute("FLUSH PRIVILEGES")
r_cursor.close()
r_conn.close()
# connect demo db
try:
    conn = MySQLdb.connect(host='127.0.0.1',
                           user='demouser',
                           passwd='demo1234',
                           db='demo',
                           charset='utf8')
except MySQLdb.Error:
    print("Can't Connect Database via demouser: ", sys.exc_info()[0])
    sys.exit()
# create schema
cursor = conn.cursor()
# cursor.execute("DROP TABLE if EXISTS a136")
# cursor.execute("USE demo")
cursor.execute("""CREATE TABLE rainfall (
   rpk INT(12) NOT NULL AUTO_INCREMENT PRIMARY KEY,
   name CHAR(10) NOT NULL,
   sid CHAR(5) NOT NULL,
   timestamp CHAR(25) NOT NULL,
   r_10m FLOAT(6,1) DEFAULT NULL,
   r_1h FLOAT(6,1) DEFAULT NULL,
   r_3h FLOAT(6,1) DEFAULT NULL,
   r_6h FLOAT(6,1) DEFAULT NULL,
   r_12h FLOAT(6,1) DEFAULT NULL,
   r_24h FLOAT(6,1) DEFAULT NULL,
   r_td FLOAT(6,1) DEFAULT NULL,
   r_yd FLOAT(6,1) DEFAULT NULL,
   r_2d FLOAT(6,1) DEFAULT NULL
   ) ENGINE=InnoDB""")
cursor.execute("""CREATE TABLE station (
   spk INT(5) NOT NULL PRIMARY KEY,
   name CHAR(10) NOT NULL,
   sid CHAR(5) NOT NULL,
   county CHAR(3) NOT NULL,
   lon FLOAT(7,4) NOT NULL,
   lat FLOAT(7,4) NOT NULL
   ) ENGINE=InnoDB""")
cursor.close()
conn.close()
| [
"yeeede@gmail.com"
] | yeeede@gmail.com |
8bb060909a202550a078a45e2a9a1cd214cf9ab5 | 9941a2c8e6eac5ca86f369bc6f863edee181e79b | /routes/admin.py | 048ede46f56aeecae946f16211d4f0ab69d5bd48 | [] | no_license | InvokerAndrey/detect_route | 62bda06b6f8942808151e3a2641836aca0ea347f | 3b3b6618aa77f79d77e55616f0d2c25be2db008f | refs/heads/master | 2023-03-24T08:43:05.694189 | 2021-03-23T18:15:06 | 2021-03-23T18:15:06 | 343,092,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from django.contrib import admin
from .models import Route
# Make the Route model manageable through the Django admin site.
admin.site.register(Route)
"dydyshko1999@gmail.com"
] | dydyshko1999@gmail.com |
3e7419435bcf00e6e2ce2fac653376bc982e68eb | 1cc505f220f49ea59afc5d81a9d2dda45334e78c | /update.py | 0c5273f2c873478ea897cc7255def9da7723de15 | [] | no_license | padmeshnaik/Sentiment-Analysis | 7dd57ee5b10003f3c8f23c640f5d15ee881c66cd | b76586ab215919fdca186c32a6a98674192d21af | refs/heads/master | 2022-11-07T08:20:14.239534 | 2020-06-23T09:58:27 | 2020-06-23T09:58:27 | 274,345,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | import pickle
import sqlite3
import numpy as np
import os
# import HashingVectorizer from local dir
from vectorizer import vect
""" The update_model function will fetch entries from the SQLite database in batches of
10,000 entries at a time, unless the database contains fewer entries. Alternatively, we
could also fetch one entry at a time by using fetchone instead of fetchmany, which
would be computationally very inefficient. However, keep in mind that using the
alternative fetchall method could be a problem if we are working with large
datasets that exceed the computer or server's memory capacity. """
def update_model(db_path, model, batch_size=10000):
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('SELECT * from review_db')
results = c.fetchmany(batch_size)
while results:
data = np.array(results)
X = data[:, 0]
y = data[:, 1].astype(int)
classes = np.array([0, 1])
X_train = vect.transform(X)
model.partial_fit(X_train, y, classes=classes)
results = c.fetchmany(batch_size)
conn.close()
return model
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,
'pkl_objects',
'classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'reviews.sqlite') # Output: C:/....../../../reviews.sqlite
clf = update_model(db_path=db, model=clf, batch_size=10000)
# Uncomment the following lines if you are sure that
# you want to update your classifier.pkl file
# permanently.
# pickle.dump(clf, open(os.path.join(cur_dir,
# 'pkl_objects', 'classifier.pkl'), 'wb')
# , protocol=4)
| [
"paddy@orkut.com"
] | paddy@orkut.com |
bb89c0558e9830a7ba414e9cea296ffb578f8509 | e49b654d3db99773390c5b9686df9c99fbf92b2a | /linked_lists/linked_list.py | f018e58590e973d2a1aac0516779018498713c0c | [] | no_license | hao89/diary_of_programming_puzzles | 467e8264d0ad38768ba5ac3cfb45301293d79943 | 0e05d3716f28075f99bbd7b433d16a383209e57c | refs/heads/master | 2021-01-16T00:49:38.956102 | 2015-08-25T13:44:53 | 2015-08-25T13:44:53 | 41,692,587 | 1 | 0 | null | 2015-08-31T18:20:38 | 2015-08-31T18:20:36 | Python | UTF-8 | Python | false | false | 455 | py | class LinkedListNode:
def __init__(self, data):
self.next = None
self.data = data
def __str__(self):
node_str = ""
current_node = self
while current_node:
if current_node.next:
node_str = node_str + str(current_node.data) + ", "
else:
node_str = node_str + str(current_node.data)
current_node = current_node.next
return node_str | [
"me@davidadamojr.com"
] | me@davidadamojr.com |
edf6d30b532cf45a423e77182e923ca471f650bb | daa0a1df5c86b5eed882f50bb2f10be670f16bd7 | /utils/get_seq_from_fasta.py | 30258750f3538594fbbdb4f82d65133084cda6ee | [] | no_license | mengzhou/scripts | ded2d3ccf81905c444e1b4c11c09313fc7bca1ed | 6b7d2c184aaee6e6541e788d7098ba2548215638 | refs/heads/master | 2021-01-12T02:04:11.908707 | 2019-01-04T20:03:29 | 2019-01-04T20:03:29 | 78,463,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | #!/usr/bin/python
import sys
def get_length(inf):
  """Sample the first two lines of a FASTA handle and return a pair
  (average sequence-line length, header-line length), both counted in
  characters including the trailing newline. Lines starting with '>' are
  headers; header length stays 0 when none is seen."""
  body = []
  header_len = 0
  for _ in range(2):
    line = inf.readline()
    if line.startswith(">"):
      header_len = len(line)
    else:
      body.append(line)
  lengths = [len(s) for s in body]
  # NOTE: raises ZeroDivisionError if both sampled lines are headers.
  return (sum(lengths)/len(lengths), header_len)
def process_len(start, len_par):
  """Resolve the length argument: ``"+N"`` means a span of N bases,
  otherwise *len_par* is an absolute end coordinate and the span is
  end - start."""
  if len_par.startswith("+"):
    return int(len_par[1:])
  return int(len_par) - int(start)
def seeker(inf, start, length):
  # Translate a 0-based sequence offset into a byte offset in the file:
  # each sequence line appears to hold (l-1) bases plus a newline (l is the
  # sampled line length) with hl header bytes in front -- assumes
  # fixed-width sequence lines; TODO confirm.
  (l,hl) = get_length(inf)
  # NOTE(review): the arithmetic below relies on integer division; under
  # Python 3 "/" yields floats and inf.seek would fail, so this script
  # appears to target Python 2.
  block_size_start = start/(l-1)*l + start % (l-1)
  inf.seek(hl+block_size_start)
  length_tail = length - (l - start%(l-1))
  # Bytes to read: the remaining bases plus the embedded newlines.
  block_size_length = length_tail/(l-1)*l + length_tail % (l-1) + (l - start%(l-1)) + 1
  return inf.read(block_size_length).replace("\n",'')
def fold(string):
  """Break *string* into newline-terminated chunks of at most 500000 chars."""
  width = 500000
  chunks = []
  for offset in range(0, len(string), width):
    chunks.append(string[offset:offset + width] + "\n")
  return chunks
def main():
  # CLI entry point: <input fasta | "stdin"> <start coordinate> <end | +length>.
  if len(sys.argv) < 4:
    sys.stderr.write("Usage: %s <input fasta> <start coordinate> "%sys.argv[0] + \
        "<end coordinate | +length>\n")
    sys.stderr.write("Example 1: %s chr1.fa 100050 +50\n"%sys.argv[0])
    sys.stderr.write("Example 2: %s chr1.fa 100050 100150\n"%sys.argv[0])
    sys.exit(1)
  # "stdin" as the file argument reads the FASTA from standard input.
  if sys.argv[1] == "stdin":
    inf = sys.stdin
  else:
    inf = open(sys.argv[1],'r')
  length = process_len(sys.argv[2], sys.argv[3])
  # Extract the requested slice and print it folded into fixed-width lines.
  sys.stdout.write("".join(fold(seeker(inf,int(sys.argv[2]),length))))
if __name__ == '__main__':
  main()
| [
"mengzhou@usc.edu"
] | mengzhou@usc.edu |
a2c894266a18f801e6be30feb0966af7a35fcc59 | 1fbcb308d5431e60b50262b175a62ad935b833dd | /object_detection/1_data_creation/vid_to_img.py | 4c4c080659f54b7abbcc38416429854182cdca53 | [] | no_license | deveshasha/computer_vision | d0288554c440f9051e4e719447bd61ace74dc316 | 9cbd86c273a907355c9d06d6d118f67d2fb0009c | refs/heads/master | 2020-04-25T14:54:18.379126 | 2019-03-26T12:17:29 | 2019-03-26T12:17:29 | 172,858,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | import cv2
import os
vidcap = cv2.VideoCapture('1.mp4')
success,image = vidcap.read()
count = 0  # frames read so far
i=0        # frames written so far
# NOTE(review): absolute Windows path -- the script only works on this machine.
path = 'D:/D/win10_desktop/projects/tensorflow/bottle/images'
while success:
    # Keep every second frame only.
    if count%2 == 0:
        i += 1
        cv2.imwrite(os.path.join(path,'frame%d.jpg' % i), image) # save frame as JPEG file
    success,image = vidcap.read()
    print('Read a new frame: ', success)
count += 1 | [
"deveshasha.2@gmail.com"
] | deveshasha.2@gmail.com |
1bfb4f5d995aad01cce1ca871097d0778a1a7b0b | 2054c6debfb02196c79474d2bd72404c9e23129d | /09day/12-等腰三角形.py | b73ca5639a64362fef42af7570782bff1c56b055 | [] | no_license | yanzixiong/Python_Test | 7c5083d8fc2fef81f534c15d800e7b749668ea0d | 9cf53b42e72df6fa4b296bc78c0debadcf3dd625 | refs/heads/master | 2021-04-15T14:33:00.440187 | 2018-05-03T01:34:15 | 2018-05-03T01:34:15 | 126,916,873 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | i = 1
while i <= 5:
    # Growing right triangle: rows of 1..5 stars.
    print("* "*i)# "* " has a space after each star; a bare "*" would not
    i+=1
j = 4
# Shrinking triangle: rows of 4 down to 1 stars.
while j >= 1:
    print("* "*j)
    j-=1
| [
"569603978@qq.com"
] | 569603978@qq.com |
e69e7a9c4fffe8c06fd9032e770298561232b62a | 13bc9dc187a4714fb76b83c9385ff27c0b55fc99 | /HiggsAnalysis/CombinedLimit/python/HiggsCouplings.py | 2ddb6bd16770169add412d7b56fc9a1de447e982 | [] | no_license | bachtis/CMSDAS | 46bf05e896b6f01b797f1abfa9c04fe713a22d0b | 17796f62f03d1a2739977ff0180d65ed7650abac | refs/heads/V0 | 2016-09-16T05:01:26.062550 | 2014-01-23T11:20:54 | 2014-01-23T11:20:54 | 15,795,736 | 1 | 0 | null | 2014-01-14T16:05:58 | 2014-01-10T10:53:36 | C++ | UTF-8 | Python | false | false | 1,510 | py | # Benchmark Higgs models as defined in (put ref to LHCXSWG document)
# Registry of pre-instantiated benchmark coupling models: each module-level
# name below is a ready-to-use model instance, presumably selected by name
# elsewhere in the framework -- TODO confirm the lookup mechanism.
# the model equivalent to mu
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.CSquared import CSquaredHiggs
cSq = CSquaredHiggs()
# CVCF models
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.VectorsAndFermionsModels import CvCfHiggs, CvCfXgHiggs, CfXgHiggs
cVcF = CvCfHiggs()
#cVcFxG = CvCfXgHiggs()
#cFxG = CfXgHiggs()
# Models probing the Fermion sector
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.FermionSectorModels import C5qlHiggs, C5udHiggs, LambdaduHiggs, LambdalqHiggs
lambdadu = LambdaduHiggs()
lambdalq = LambdalqHiggs()
c5ql = C5qlHiggs()
c5ud = C5udHiggs()
# Models to test Custodial symmetry
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.CustodialSymmetryModels import CwzHiggs, CzwHiggs, RzwHiggs, RwzHiggs, LambdaWZHiggs
lambdaWZ = LambdaWZHiggs()
cWZ = CwzHiggs()
cZW = CzwHiggs()
rZW = RzwHiggs()
rWZ = RwzHiggs()
# Models probing the loops structure
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.LoopAndInvisibleModel import HiggsLoops, HiggsLoopsInvisible
higgsLoops = HiggsLoops()
higgsLoopsInvisible = HiggsLoopsInvisible()
# Minimal and maximal
from HiggsAnalysis.CombinedLimit.HiggsBenchmarkModels.MinimalModels import HiggsMinimal
higgsMinimal = HiggsMinimal()
#higgsMinimalInvisible = HiggsMinimalInvisible()
# Model with full LO parametrization
from HiggsAnalysis.CombinedLimit.LOFullParametrization import C5, C6
c5 = C5()
c6 = C6()
| [
"bachtis@cern.ch"
] | bachtis@cern.ch |
b34ecbc075ee5638798ac7fef871cffcc948730a | 039a8b5362c958ce58275ca80d9bb03b03bf9d70 | /warehouse-management-python-widget/p1_vodjenje_magacina/p_model.py | d54f7bdb55135c266db433fcd89e17aa24f13f25 | [] | no_license | djkrstovic/Python-warehouse-management-widget | 3c02641da90465baae3f52f225938a1e082be878 | 750bbd2b45718b611f820b7e3b1722d85bbe85f1 | refs/heads/master | 2020-09-22T15:49:19.665482 | 2019-12-02T02:02:21 | 2019-12-02T02:02:21 | 225,255,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,068 | py | from PySide2 import QtCore
import csv
import os
import re
class PModel(QtCore.QAbstractTableModel):
"""
Klasa koja predstavlja specijalizaciju QAbstractTableModel-a.
Koristimo tabelarni model, jer cemo podatke posmatrati kao tabelu, i u tabeli ih prikazivati.
Svaki tabelarni model ima redove i kolone. Red je jedan korisnik u imeniku, a kolone predstavalju
korisnikove pojedinacne podatke, poput imena, prezimena itd.
Datoteka na osnovu koje se populise model je CSV datoteka, gde su redovi modela zapravo redovi
iz datoteke, a kolone modela, su podaci koji su u redu u datoteci odvojeni separatorom (zarezom).
"""
def __init__(self ,path=None):
self.putanja_do_fajla = path
"""
Inicijalizator modela za kontakte.
Pri inicijalizaciji se na osnovu datoteke sa putanje path ucitavaju i populise se model.
:param path: putanja do datoteke u kojoj su smesteni podaci.
:type path: str
"""
super().__init__()
# matrica, redovi su liste, a unutar tih listi se nalaze pojedinacni podaci o korisniku iz imenika
self._data = []
self.load_data(self.putanja_do_fajla)
def rowCount(self, index):
"""
Vraca broj redova u modelu.
:param index: putanja do datoteke u kojoj su smesteni podaci.
:type index: QModelIndex
:returns: int -- broj redova modela.
"""
return len(self._data)
def columnCount(self, index):
"""
Vraca broj kolona u modelu. Posto znamo da nas korisnik iz imenika je opisan sa pet
podataka, vracamo fiksni broj kolona na osnovu datoteke.
:param index: indeks elementa modela.
:type index: QModelIndex
:returns: int -- broj kolona modela.
"""
return 3
def data(self, index, role):
"""
Vraca podatak smesten na datom indeksu sa datom ulogom.
:param index: indeks elementa modela.
:type index: QModelIndex
:param role: putanja do datoteke u kojoj su smesteni podaci.
:type role: QtCore.Qt.XXXRole (gde je XXX konkretna uloga)
:returns: object -- podatak koji se nalazi na zadatom indeksu sa zadatom ulogom.
"""
element = self.get_element(index)
if element is None:
return None
if role == QtCore.Qt.DisplayRole:
return element
def headerData(self, section, orientation, role):
"""
Vraca podatak koji ce popuniti sekciju zaglavlja tabele.
:param section: sekcija koja u zavisnosti od orijentacije predstavlja redni broj kolone ili reda.
:type section: int
:param orientation: odredjuje polozaj zaglavlja.
:type orientation: QtCore.Qt.Vertical ili QtCore.Qt.Horizontal
:param role: putanja do datoteke u kojoj su smesteni podaci.
:type role: QtCore.Qt.XXXRole (gde je XXX konkretna uloga)
:returns: str -- naziv sekcije zaglavlja.
"""
if orientation != QtCore.Qt.Vertical:
if (section == 0) and (role == QtCore.Qt.DisplayRole):
return "Naziv"
elif (section == 1) and (role == QtCore.Qt.DisplayRole):
return "Rok Upotrebe"
elif (section == 2) and (role == QtCore.Qt.DisplayRole):
return "Temperatura"
def setData(self, index, value, role):
"""
Postavlja vrednost na zadatom indeksu.
Ova metoda je vazna ako zelimo da nas model moze da se menja.
:param index: indeks elementa modela.
:type index: QModelIndex
:param value: nova vrednost koju zelimo da postavimo.
:type value: str -- vrednost koja ce biti dodeljena, za sada radimo samo sa stringovima
:param role: putanja do datoteke u kojoj su smesteni podaci.
:type role: QtCore.Qt.XXXRole (gde je XXX konkretna uloga)
:returns: bool -- podatak o uspesnosti izmene.
"""
try:
if value == "":
return False
#elif index.column() == 0: #menja se naziv
# return
elif index.column() == 1: #znaci da je rok upotrebe
if not (re.search("^([1-9]([0-9])?[\/]){2}[1-9][0-9]{3}$",value.strip())):
return False
self._data[index.row()][index.column()] = value.strip()
self.save_data() #posle svake promene pamti je u nas CSV
self.dataChanged()
return True
except:
return False
def flags(self, index):
"""
Vraca flagove koji su aktivni za dati indeks modela.
Ova metoda je vazna ako zelimo da nas model moze da se menja.
:param index: indeks elementa modela.
:type index: QModelIndex
:returns: object -- flagovi koji treba da budu aktivirani.
"""
# ne damo da menja TEMPERATURA PROIZVODA (primera radi)
if index.column() != 2:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable
# sve ostale podatke korisnik moze da menja
else:
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
def get_element(self, index : QtCore.QModelIndex):
"""
Dobavlja podatak smesten na zadatom indeksu, ako je indeks validan.
Pomocna metoda nase klase.
:param index: indeks elementa modela.
:type index: QModelIndex
:returns: object -- vrednost na indeksu.
"""
if index.isValid():
element = self._data[index.row()][index.column()]
if element:
return element
return None
def load_data(self, path=""):
"""
Ucitava podatke iz CSV datoteke na zadatoj path putanji uz pomoc CSV reader-a.
Pomocna metoda nase klase.
:param path: putanja do CSV datoteke.
:type path: str
"""
with open(path, "r", encoding="utf-8") as fp:
self._data = list(csv.reader(fp, dialect=csv.unix_dialect))
| [
"noreply@github.com"
] | noreply@github.com |
fd69e5c0ad13bddd3665e157cdd85e17f6da1920 | d25003d4e1a1cd3b5eca1525c0119da47579f294 | /scripts/sort_double.py | 51093694d8a595573520419157b7d218af437429 | [] | no_license | rd37/GooglePracticeProjects | ceabcb838bd4bd50397b8fdf775e810db320dbb1 | b3543ada39b8c24f688a41cf0b745482013a93d9 | refs/heads/master | 2016-09-06T16:50:41.303580 | 2014-12-12T03:23:23 | 2014-12-12T03:23:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | '''
Created on Dec 10, 2014
@author: ronaldjosephdesmarais
'''
ints = [5,8.2,1,7,4.1,13,12,4.1,8.2]
print "------use python sorted------"
print sorted(ints)
print "------use dictionary ------"
srt_dict = {}
srt_arr = []
for i in ints:
if i not in srt_dict:
srt_dict[i]=1
else:
srt_dict[i]=srt_dict[i]+1
for i_key in srt_dict:
for i in range(0,srt_dict[i_key]):
srt_arr.append(i_key)
print srt_arr
| [
"ron.desmarais@gmail.com"
] | ron.desmarais@gmail.com |
17b011426ea5dd281920f3b73b76457056e5bd1b | 4ce6fb5c49ee6ec4b5df9e056040382812a8a591 | /product/migrations/0029_auto_20191001_0528.py | 2120012f6b7045350592076be1c5027236969a78 | [] | no_license | yantrashalait/Multronics | 198c807a0bb2b8c1ae7bcc2325436467ee8a90b3 | c85b5a263fe1507c994236bba26ad12d93157622 | refs/heads/master | 2021-02-14T18:28:25.984830 | 2021-01-18T09:19:21 | 2021-01-18T09:19:21 | 244,825,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # Generated by Django 2.2.4 on 2019-10-01 05:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('product', '0028_aboutiteam'),
]
operations = [
migrations.AddField(
model_name='product',
name='visibility',
field=models.BooleanField(default=True, verbose_name='Make this product visibile?'),
),
migrations.AlterField(
model_name='aboutiteam',
name='logo',
field=models.ImageField(help_text='Image size: width=192px height=31px', upload_to='logo/'),
),
]
| [
"saneprijal@gmail.com"
] | saneprijal@gmail.com |
cd19029f0c39b283a11f7e1d3128085545b137cf | f6e6aa28ec179090d93da2fa6e3ab32ad448a17c | /login.py | b86829fb5fec7d51699f501ef6006647f37c0ae8 | [] | no_license | Feelian/pythonVkChat | 6d644c3ad3ca91bc393ee4730461114f186a6764 | 8ce9ce144f7cf77c12f8cdb87be42784248f397d | refs/heads/master | 2020-04-04T02:41:18.854789 | 2014-08-18T09:59:48 | 2014-08-18T09:59:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | import time
from urllib import unquote
from PyQt4 import QtGui, QtCore, uic, QtWebKit
from PyQt4.QtGui import QDialog
from PyQt4.QtCore import QUrl
from loginDialog import Ui_LoginDialog
class LoginExc(Exception):
def __init__(self, arror_msg):
self.error_msg = error_msg
def __str__(self):
return repr(self.error_msg)
class loginDialog(QtGui.QDialog, Ui_LoginDialog):
def __init__(self, parent=None):
super(loginDialog, self).__init__(parent)
self.parent = parent
self.setupUi(self)
self.setWindowTitle('Login')
self.webView = QtWebKit.QWebView(self)
self.webView.setGeometry(6, 6, 800, 400)
self.webView.setObjectName("webView")
self.webView.connect(self.webView, QtCore.SIGNAL("urlChanged(const QUrl&)"), self.evurlChanged)
self.webView.load(QUrl("""https://oauth.vk.com/authorize?client_id=4509481&scope=4096&
redirect_uri=http://oauth.vk.com/blank.html&display=popup&response_type=token"""))
def closeLoginDialog(self):
self.close()
def evurlChanged(self):
path = str(self.webView.url().path())
if path == '/blank.html':
self.webView.url().path()
self.returnSession(unquote(unicode(self.webView.url().toString())).split('='))
elif path == '/api/login_failure.html':
raise LoginExc('Login failure')
def returnSession(self, session):
session = "&".join(session).split('&')
session = {session[i]:session[i + 1] for i in range(0, len(session), 2)}
self.parent.expires = time.time() + int(session["expires_in"])
self.parent.userId = session[u"user_id"]
self.parent.token = session[u"https://oauth.vk.com/blank.html#access_token"]
self.closeLoginDialog()
| [
"feelianp@gmail.com"
] | feelianp@gmail.com |
ddfd7bf5af10cd3f6fccd9b4bb92f1766db97e72 | a1d5290470d5a8beb99846d62d8539a13021470e | /exercicios/PythonBrasilWiki/exe016.descisao.py | 7a33791db9916adabfbecfbfee5c60f676bcb34e | [] | no_license | Jackroll/aprendendopython | 26007465b42666f0a9aff43e8229b24aef418d4c | 9211612a1be8015bcf8d23d8cdfbd11d9df38135 | refs/heads/master | 2021-02-19T19:40:22.993033 | 2020-04-04T21:35:08 | 2020-04-04T21:35:08 | 245,319,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,415 | py | #Faça um programa que calcule as raízes de uma equação do segundo grau, na forma ax2 + bx + c.
# O programa deverá pedir os valores de a, b e c e fazer as consistências, informando ao usuário nas seguintes situações:
#Se o usuário informar o valor de A igual a zero, a equação não é do segundo grau e o programa não deve fazer pedir os demais valores, sendo encerrado;
#Se o delta calculado for negativo, a equação não possui raizes reais. Informe ao usuário e encerre o programa;
#Se o delta calculado for igual a zero a equação possui apenas uma raiz real; informe-a ao usuário;
#Se o delta for positivo, a equação possui duas raiz reais; informe-as ao usuário;
a = float(input('Valor de A :'))
if a == 0 :
print('A = 0 a equação não é de segundo grau, calculo encerrado !!')
else :
b = float(input('Valor de B :'))
c = float(input('Valor de C :'))
delta = (b**2)-(4*a*c)
if delta < 0 :
print(f'Delta = {delta} valor negativo, portanto a equação não possui raizes reais, calculo encerrado !!')
elif delta == 0:
x1 = ((-b)+ (delta ** 0.5))/(2*a)
print(f'Delta = {delta}, a equação possui apenas uma raiz real, x = {x1}')
else :
x1 = ((-b)+ (delta ** 0.5))/(2*a)
x2 = ((-b)- (delta ** 0.5))/(2*a)
print(f'Delta = {delta}, a equação possui duas raizes reais, x1 = {x1}, x2 = {x2}') | [
"jeremias.jacson@gmail.com"
] | jeremias.jacson@gmail.com |
70ea07fbc9a71f9c1baec3a6bf4c6d9ddb095cba | 80ba3b75b6080cc166e629e4170869dee54b7e6a | /WebScraping/CharityNavigator/Scrape.py | ecacf77c681e446777dcb212cc88d99beb866e7c | [] | no_license | bloodtypebpos/Python | 1df10e6ff18c403a402f086a2ed9bffedb5f98bc | 28b9f107b065643239d897670d1c2afd56c3ca8d | refs/heads/master | 2023-07-21T19:40:23.756432 | 2023-07-19T12:07:48 | 2023-07-19T12:07:48 | 49,387,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | import urllib2
import re
import urllib
import csv
f = open('File.txt', 'r')
x = f.readlines()
urls = x
i = 0
regex = '<td align="right">(.+?)</td>'
regex2 = '<h1 class="charityname">(.+?)</h1>'
y = []
pattern = re.compile(regex)
pattern2 = re.compile(regex2)
while i < 100:
argh = 0
htmlfile = urllib.urlopen(urls[i])
htmltext = htmlfile.read()
titles = re.findall(pattern2, htmltext)
information = re.findall(pattern,htmltext)
while argh < len(information):
information[argh] = information[argh].replace("'","")
information[argh] = information[argh].replace(",","")
information[argh] = information[argh].replace("%","")
information[argh] = information[argh].replace("<strong>","")
information[argh] = information[argh].replace("</strong>","")
information[argh] = information[argh].replace("$","")
information[argh] = information[argh].replace(" ","")
information[argh] = information[argh].replace(" ","")
information[argh] = information[argh].replace(";","")
titles.append(information[argh])
argh+=1
y.append(titles)
i+=1
i = 0
while i < len(y):
print(y[i])
i+=1
myFile = open('File.csv', 'w')
myFile.truncate()
theNextLine = "\n"
myFile.close()
i = 0
with open('File.csv', 'wb') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL, quotechar='|')
while i<len(y):
spamwriter.writerow(y[i])
i+=1
| [
"matt.tigrett@gmail.com"
] | matt.tigrett@gmail.com |
ea49f983a7f76448d0096f7f3db4167a050bb333 | 914169f306e4d4a4662d5e6d9503cef29dbf1747 | /104.二叉树的最大深度.py | caac420138092b50deec6bcf6ceb2425d257a333 | [] | no_license | weshao/LeetCode | 55a6bd16ce6496928748d0f8f0de3cdf40fd1087 | 6abef0e78db48ad2d1bd69e3991a14784c6fb813 | refs/heads/master | 2023-03-06T09:58:19.740092 | 2021-02-09T10:47:16 | 2021-02-09T10:47:16 | 284,904,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | #
# @lc app=leetcode.cn id=104 lang=python3
#
# [104] 二叉树的最大深度
#
# https://leetcode-cn.com/problems/maximum-depth-of-binary-tree/description/
#
# algorithms
# Easy (75.29%)
# Likes: 738
# Dislikes: 0
# Total Accepted: 304.9K
# Total Submissions: 404.9K
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# 给定一个二叉树,找出其最大深度。
#
# 二叉树的深度为根节点到最远叶子节点的最长路径上的节点数。
#
# 说明: 叶子节点是指没有子节点的节点。
#
# 示例:
# 给定二叉树 [3,9,20,null,null,15,7],
#
# 3
# / \
# 9 20
# / \
# 15 7
#
# 返回它的最大深度 3 。
#
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxDepth(self, root: TreeNode) -> int:
return self.maxDepthRecur(root)
def maxDepthRecur(self, root):
if root is None:
return 0
return max(self.maxDepthRecur(root.left), self.maxDepthRecur(root.right)) + 1
# @lc code=end
| [
"shihao.wei@doneenterpsies.fi"
] | shihao.wei@doneenterpsies.fi |
dbe400a1b3020905a30368837a0dd906638b22cc | 8f9ba4a18bd61490a12f25940dd346dd99090b45 | /election/src/attacks.py | 852dfb17c292297907ff72e630189578c07e2618 | [
"MIT"
] | permissive | KiFoundation/ki-simulator | f658045c9060f2868aca22279bddcd4a367a7d5c | 291c9217f4555d4bebd4f7a86b802badfeaffe5a | refs/heads/master | 2020-04-22T16:18:58.184572 | 2019-09-19T13:17:05 | 2019-09-19T13:17:05 | 170,503,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | import os
import pandas as pd
from election.src.generator import *
from dotenv import load_dotenv
# Load dotenv file
load_dotenv()
# Load config
weight_stake = float(os.getenv("weight_stake"))
stake_operator = float(os.getenv("stake_operator"))
num_rounds = int(os.getenv("num_rounds"))
num_validators_per_round = int(os.getenv("num_validators_per_round"))
Xs_conf = list(map(int, os.getenv("eligible_validators_for_one_operator_range").strip(' ').split(',')))
Ys_conf = list(map(int, os.getenv("eligible_validators_range").strip(' ').split(',')))
Xs = np.arange(Xs_conf[0], Xs_conf[1], Xs_conf[2])
Ys = np.arange(Ys_conf[0], Ys_conf[1], Ys_conf[2])
def att_monopole(validators_list, res_folder):
# From the validator list sample Y validators with X belonging to the same person
# Select V validators R times
# measure A = X'/V where X' = intersection (X, V)
# avg A for R
# store Max (A)
# repeat for values of X
# repeat fot values of Y
df = pd.DataFrame(index=Xs, columns=Ys)
df_v = pd.DataFrame(index=Xs, columns=Ys)
for X in Xs:
for Y in Ys:
val_sampled = controled_sample(validators_list, X, Y, 'a')
val, val_rep = fill_reputation(val_sampled, weight_stake, 'a', stake_operator, 1)
tmp_A_ = []
for R in range(10):
tmp_round_val = select_validators_rounds(val_rep, num_rounds, num_validators_per_round)[0]
tmp_count_op = count_validator_per_operator(tmp_round_val, 'a')
tmp_count_op /= num_validators_per_round
tmp_A_.append(tmp_count_op)
df[Y][X] = np.mean(tmp_A_)
df_v[Y][X] = np.sqrt(np.var(tmp_A_))
print(df)
print(df_v)
df.fillna(value=np.nan, inplace=True)
ax = plt.axes()
sns.heatmap(df, vmin=0, vmax=1, annot=True, ax=ax)
ax.set_title('Collusion risk for VPR = ' + str(num_validators_per_round))
ax.set_ylabel('Num of eligible validators for one operator')
ax.set_xlabel('Num of eligible validators')
plt.savefig(res_folder + '-'.join([str(num_validators_per_round), str(stake_operator), str(weight_stake)]) + ".pdf")
| [
"tarek.awwad.36@gmail.com"
] | tarek.awwad.36@gmail.com |
3580cafd6143fcbe243e5532adc80b03980dee44 | bf383bfdbeefe059ee5e9145b3515ccab428433a | /Week 2 - Task 1.py | 88d174f0f0d3ef4c4f3ea640d575c14b199560ba | [] | no_license | Tstaite/210CT | 1a13c9ba4b61edb9f94e8547889b913b82a4f0a4 | 896307b2e2d888571aaecc310a7996548a82999a | refs/heads/master | 2021-05-01T00:46:49.205611 | 2016-11-29T17:29:29 | 2016-11-29T17:29:29 | 71,224,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | #function to calculate the highest perfect square
def Perfect_Square(Mynumber):
#check datatype of input
try:
Mynumber = int(Mynumber)
except ValueError:
print("Incorrect datatype")
return
Integer = 1
Answer = Integer*Integer
if (Mynumber == 1):
print("The highest perfect square before your number is " + str(Integer))
elif (Mynumber < 1):
print("number must be greater than 0")
else:
#multiply each number by itself and see if the result is bigger
while (Answer <= Mynumber):
Integer = Integer + 1
Answer = Integer **2
Integer = Integer - 1
Integer = Integer**2
print("The highest perfect square before your number is " + str(Integer))
myNumber = input("Enter a number ")
Perfect_Square(myNumber)
| [
"noreply@github.com"
] | noreply@github.com |
d012a11c686f43885568788458bc63886376f692 | 1df81e3f797e8e22e8dd9a6da3571ac55bf6eb01 | /setup.py | 51e7c45dc21f1c07fdd38d62c23cd96aa2dfd25a | [] | no_license | xavitorne/python_skeleton | 48ce3709d4610838f8c36deb9a87c3d8c811466a | 93379c878e4522e1ca6c612db9a9d9ac78a498d1 | refs/heads/master | 2021-01-20T22:44:31.363008 | 2015-07-05T14:50:32 | 2015-07-05T14:50:32 | 38,571,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | import os
from setuptools import find_packages
from setuptools import setup
version = '0.1'
project = 'project_name'
install_requires=[
'Babel',
'lingua',
'sqlalchemy-i18n',
],
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
setup(name=project,
version=version,
description="description",
long_description=README,
classifiers=[
"Programming Language :: Python",
"Framework :: ",
"License :: ",
],
keywords='',
author='Xavi',
author_email='xavitorne@gmail.com',
url='http://pypi.python.org/pypi/',
license='bsd',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=[],
entry_points={},
extras_require={},
message_extractors={'project_name': [
('**.py', 'lingua_python', None),
('**.zcml', 'lingua_xml', None),
('**.pt', 'lingua_xml', None),
]},
)
| [
"xavitorne@gmail.com"
] | xavitorne@gmail.com |
1fde76fe6d7ee15fb2be82a427394b347a48cc92 | 24b2ddd58721395b2dc921404da65beacac500bd | /where_are_you_going/middlewares.py | e77c48d064445bc4e6f094725a95f4e04d7d3d13 | [
"MIT"
] | permissive | matheuslc/where_are_you_going | a033b770301c763063e4a4f649a52042a914324d | 3a86f91ba2d36829abf7f424606d39d9fb519d06 | refs/heads/master | 2021-01-18T16:12:16.341250 | 2017-03-30T16:38:44 | 2017-03-30T16:40:32 | 86,724,889 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,890 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class WhereAreYouGoingSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"mematheuslc@gmail.com"
] | mematheuslc@gmail.com |
92c9509a0a3d567feaa03af380141533adf3ec39 | 1f417a284f040ffd1936877228e9a8978d15158d | /hexrd/ui/interactive_template.py | f6b6f12668d4440185f012953a29ba05117a2f7b | [
"BSD-3-Clause"
] | permissive | aniru919/hexrdgui | 732302543c579bf1c47b30125e620b8d77795fab | ff178c4733f2c6837a1e11595ae8762c8a95564e | refs/heads/master | 2023-06-03T07:13:33.746381 | 2021-06-18T23:23:16 | 2021-06-18T23:23:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,739 | py | import numpy as np
from PySide2.QtCore import Qt
from matplotlib import patches
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from skimage.draw import polygon
from hexrd.ui.create_hedm_instrument import create_hedm_instrument
from hexrd.ui import resource_loader
from hexrd.ui.hexrd_config import HexrdConfig
class InteractiveTemplate:
def __init__(self, parent=None):
self.parent = parent.image_tab_widget.image_canvases[0]
self.ax = self.parent.axes_images[0]
self.raw_axes = self.parent.raw_axes[0]
self.panels = create_hedm_instrument().detectors
self.img = None
self.shape = None
self.press = None
self.total_rotation = 0
self.translating = True
self.shape_styles = []
self.parent.setFocusPolicy(Qt.ClickFocus)
def update_image(self, img):
self.img = img
def rotate_shape(self, angle):
angle = np.radians(angle)
self.rotate_template(self.shape.xy, angle)
self.redraw()
def create_shape(self, module, file_name, det, instr):
with resource_loader.resource_path(module, file_name) as f:
data = np.loadtxt(f)
verts = self.panels['default'].cartToPixel(data)
verts[:, [0, 1]] = verts[:, [1, 0]]
self.shape = patches.Polygon(verts, fill=False, lw=1, color='cyan')
self.shape_styles.append({'line': '-', 'width': 1, 'color': 'cyan'})
self.center = self.get_midpoint()
self.update_position(instr, det)
self.connect_translate()
self.raw_axes = self.parent.raw_axes[0]
self.raw_axes.add_patch(self.shape)
self.redraw()
def update_style(self, style, width, color):
self.shape_styles[-1] = {'line': style, 'width': width, 'color': color}
self.shape.set_linestyle(style)
self.shape.set_linewidth(width)
self.shape.set_edgecolor(color)
self.redraw()
def update_position(self, instr, det):
pos = HexrdConfig().boundary_position(instr, det)
if pos is not None:
self.shape.set_xy(pos)
self.center = self.get_midpoint()
elif instr == 'PXRDIP':
self.rotate_shape(angle=90)
@property
def template(self):
return self.shape
@property
def masked_image(self):
mask = self.mask()
return self.img, mask
@property
def bounds(self):
l, r, b, t = self.ax.get_extent()
x0, y0 = np.nanmin(self.shape.xy, axis=0)
x1, y1 = np.nanmax(self.shape.xy, axis=0)
return np.array([max(np.floor(y0), t),
min(np.ceil(y1), b),
max(np.floor(x0), l),
min(np.ceil(x1), r)]).astype(int)
def cropped_image(self, height, width):
y0, y1, x0, x1 = self.bounds
y1 = y0+height if height else y1
x1 = x0+width if width else x1
self.img = self.img[y0:y1, x0:x1]
self.cropped_shape = self.shape.xy - np.array([x0, y0])
return self.img
@property
def rotation(self):
return self.total_rotation
def clear(self):
if self.shape in self.raw_axes.patches:
self.raw_axes.patches.remove(self.shape)
self.redraw()
def save_boundary(self, color):
if self.shape in self.raw_axes.patches:
self.shape.set_linestyle('--')
self.redraw()
def toggle_boundaries(self, show):
if show:
self.raw_axes = self.parent.raw_axes[0]
for patch, style in zip(self.patches, self.shape_styles):
shape = patches.Polygon(
patch.xy,
fill=False,
ls='--',
lw=style['width'],
color=style['color']
)
self.raw_axes.add_patch(shape)
if self.shape:
self.shape = self.raw_axes.patches.pop()
self.shape.set_linestyle(self.shape_styles[-1]['line'])
self.raw_axes.add_patch(self.shape)
if self.translating:
self.connect_translate()
else:
self.connect_rotate()
self.redraw()
else:
if self.shape:
self.disconnect()
self.patches = self.raw_axes.patches
self.redraw()
def disconnect(self):
if self.translating:
self.disconnect_translate()
else:
self.disconnect_rotate()
def completed(self):
self.disconnect()
self.img = None
self.shape = None
self.press = None
self.total_rotation = 0
def mask(self):
col, row = self.cropped_shape.T
col_nans = np.where(np.isnan(col))[0]
row_nans = np.where(np.isnan(row))[0]
cols = np.split(col, col_nans)
rows = np.split(row, row_nans)
master_mask = np.zeros(self.img.shape, dtype=bool)
for c, r in zip(cols, rows):
c = c[~np.isnan(c)]
r = r[~np.isnan(r)]
rr, cc = polygon(r, c, shape=self.img.shape)
mask = np.zeros(self.img.shape, dtype=bool)
mask[rr, cc] = True
master_mask = np.logical_xor(master_mask, mask)
self.img[~master_mask] = 0
return master_mask
def get_paths(self):
all_paths = []
points = []
codes = []
for coords in self.shape.get_path().vertices[:-1]:
if np.isnan(coords).any():
codes[0] = Path.MOVETO
all_paths.append(Path(points, codes))
codes = []
points = []
else:
codes.append(Path.LINETO)
points.append(coords)
codes[0] = Path.MOVETO
all_paths.append(Path(points, codes))
return all_paths
def redraw(self):
self.parent.draw_idle()
def scale_template(self, sx=1, sy=1):
xy = self.shape.xy
# Scale the shape
scaled_xy = Affine2D().scale(sx, sy).transform(xy)
self.shape.set_xy(scaled_xy)
# Translate the shape back to where it was
diff = np.array(self.center) - np.array(self.get_midpoint())
new_xy = scaled_xy + diff
self.shape.set_xy(new_xy)
self.redraw()
def connect_translate(self):
self.button_press_cid = self.parent.mpl_connect(
'button_press_event', self.on_press_translate)
self.button_release_cid = self.parent.mpl_connect(
'button_release_event', self.on_release)
self.motion_cid = self.parent.mpl_connect(
'motion_notify_event', self.on_translate)
self.key_press_cid = self.parent.mpl_connect(
'key_press_event', self.on_key_translate)
self.parent.setFocus()
self.translating = True
def on_key_translate(self, event):
dx, dy = 0, 0
if event.key == 'right':
dx = 1
elif event.key == 'left':
dx = -1
elif event.key == 'up':
dy = -1
elif event.key == 'down':
dy = 1
else:
return
self.shape.set_xy(self.shape.xy + np.array([dx, dy]))
self.redraw()
def on_press_translate(self, event):
if event.inaxes != self.shape.axes:
return
contains, info = self.shape.contains(event)
if not contains:
return
self.press = self.shape.xy, event.xdata, event.ydata
def on_translate(self, event):
if self.press is None or event.inaxes != self.shape.axes:
return
xy, xpress, ypress = self.press
dx = event.xdata - xpress
dy = event.ydata - ypress
self.center = self.get_midpoint()
self.shape.set_xy(xy + np.array([dx, dy]))
self.redraw()
def on_release(self, event):
if self.press is None:
return
xy, xpress, ypress = self.press
dx = event.xdata - xpress
dy = event.ydata - ypress
self.shape.set_xy(xy + np.array([dx, dy]))
self.press = None
self.redraw()
def disconnect_translate(self):
self.parent.mpl_disconnect(self.button_press_cid)
self.parent.mpl_disconnect(self.button_release_cid)
self.parent.mpl_disconnect(self.motion_cid)
self.parent.mpl_disconnect(self.key_press_cid)
def connect_rotate(self):
self.button_press_cid = self.parent.mpl_connect(
'button_press_event', self.on_press_rotate)
self.button_drag_cid = self.parent.mpl_connect(
'motion_notify_event', self.on_rotate)
self.button_release_cid = self.parent.mpl_connect(
'button_release_event', self.on_rotate_release)
self.key_press_cid = self.parent.mpl_connect(
'key_press_event', self.on_key_rotate)
self.parent.setFocus()
self.translating = False
def on_press_rotate(self, event):
if event.inaxes != self.shape.axes:
return
contains, info = self.shape.contains(event)
if not contains:
return
self.center = self.get_midpoint()
self.shape.set_transform(self.ax.axes.transData)
self.press = self.shape.xy, event.xdata, event.ydata
def rotate_template(self, points, angle):
x = [np.cos(angle), np.sin(angle)]
y = [-np.sin(angle), np.cos(angle)]
verts = np.dot(points - self.center, np.array([x, y])) + self.center
self.shape.set_xy(verts)
def on_rotate(self, event):
if self.press is None:
return
x, y = self.center
xy, xpress, ypress = self.press
angle = self.get_angle(event)
self.rotate_template(xy, angle)
self.redraw()
def on_key_rotate(self, event):
angle = 0.01
if event.key == 'left' or event.key == 'up':
angle *= -1
elif event.key != 'right' and event.key != 'down':
return
self.rotate_template(self.shape.xy, angle)
self.redraw()
def get_midpoint(self):
x0, y0 = np.nanmin(self.shape.xy, axis=0)
x1, y1 = np.nanmax(self.shape.xy, axis=0)
return [(x1 + x0)/2, (y1 + y0)/2]
def mouse_position(self, e):
xmin, xmax, ymin, ymax = self.ax.get_extent()
x, y = self.get_midpoint()
xdata = e.xdata
ydata = e.ydata
if xdata is None:
if e.x < x:
xdata = 0
else:
xdata = xmax
if ydata is None:
if e.y < y:
ydata = 0
else:
ydata = ymax
return xdata, ydata
def get_angle(self, e):
xy, xdata, ydata = self.press
v0 = np.array([xdata, ydata]) - np.array(self.center)
v1 = np.array(self.mouse_position(e)) - np.array(self.center)
v0_u = v0/np.linalg.norm(v0)
v1_u = v1/np.linalg.norm(v1)
angle = np.arctan2(np.linalg.det([v0_u, v1_u]), np.dot(v0_u, v1_u))
return angle
def on_rotate_release(self, event):
if self.press is None:
return
angle = self.get_angle(event)
self.total_rotation += angle
y, x = self.center
xy, xpress, ypress = self.press
self.press = None
self.rotate_template(xy, angle)
self.redraw()
def disconnect_rotate(self):
self.parent.mpl_disconnect(self.button_press_cid)
self.parent.mpl_disconnect(self.button_drag_cid)
self.parent.mpl_disconnect(self.button_release_cid)
self.parent.mpl_disconnect(self.key_press_cid)
| [
"brianna.major@kitware.com"
] | brianna.major@kitware.com |
f1d81dc67ee2ad81eee744acd413b10b94d601ef | d28677015c35b03abcb316746761eb4757dd9fba | /isentia/items.py | 11058f05d163010c89dee18f9a3fa3af320985a2 | [] | no_license | saianger/isentia | 42ae3774b3aa090deb85e2489ebc5283dc7f4e1f | 25b9c856b247b55d5a8ccca19bab52f2d87d7acc | refs/heads/master | 2021-01-10T03:55:45.165106 | 2016-03-29T02:51:20 | 2016-03-29T02:51:20 | 54,907,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class IsentiaItem(scrapy.Item):
headline = scrapy.Field()
link = scrapy.Field()
article = scrapy.Field()
author = scrapy.Field()
| [
"guyangyu.li@hotmail.com"
] | guyangyu.li@hotmail.com |
24567018d6cc56c197cd0f52a9cf7d6b9311506f | 349d6ff272a4a113cee5b0ab7849f46305ebfb13 | /sc2/game_data.py | 2e69241ddf4f3f9265fd5ee0cf9aa760d4ddda4e | [
"MIT"
] | permissive | raimohanska/python-sc2 | dafec03d73b905b092c92aefd5ee9d896e8df5e1 | fb936be1618b4c8b8bf453d76d3f9894780a0f21 | refs/heads/master | 2021-09-03T04:04:46.630550 | 2018-01-05T12:50:11 | 2018-01-05T12:50:11 | 116,264,519 | 0 | 0 | null | 2018-01-04T13:41:56 | 2018-01-04T13:41:55 | null | UTF-8 | Python | false | false | 2,856 | py | from functools import lru_cache
from .data import Attribute
from .ids.unit_typeid import UnitTypeId
from .ids.ability_id import AbilityId
class GameData(object):
    """Index of ability/unit/upgrade metadata parsed from the raw API data."""

    def __init__(self, data):
        self.abilities = {a.ability_id: AbilityData(self, a) for a in data.abilities}
        self.units = {u.unit_id: UnitTypeData(self, u) for u in data.units if u.available}
        self.upgrades = {u.upgrade_id: UpgradeData(self, u) for u in data.upgrades}
        # Per-instance memo for calculate_ability_cost().  The previous
        # @functools.lru_cache on the method keyed on `self` and kept every
        # GameData instance alive for the cache's lifetime (flake8-bugbear
        # B019); a plain dict owned by the instance has neither problem.
        self._ability_cost_cache = {}

    def calculate_ability_cost(self, ability):
        """Return the Cost of whatever `ability` creates or researches.

        Scans units first, then upgrades; falls back to Cost(0, 0) when the
        ability produces nothing.  Results are memoized per instance.
        """
        if ability in self._ability_cost_cache:
            return self._ability_cost_cache[ability]
        cost = Cost(0, 0)
        for unit in self.units.values():
            if unit.creation_ability == ability:
                cost = unit.cost
                break
        else:
            for upgrade in self.upgrades.values():
                if upgrade.research_ability == ability:
                    cost = upgrade.cost
                    break
        self._ability_cost_cache[ability] = cost
        return cost
class AbilityData(object):
    """Read-only wrapper around one ability entry of the raw game data."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def id(self):
        """AbilityId of this ability, following a remap when one is set."""
        remap = self._proto.remaps_to_ability_id
        if remap:
            return AbilityId(remap)
        return AbilityId(self._proto.ability_id)

    @property
    def cost(self):
        """Cost of whatever this ability creates or researches."""
        return self._game_data.calculate_ability_cost(self.id)

    def __repr__(self):
        return f"AbilityData(name={self._proto.button_name})"
class UnitTypeData(object):
    """Read-only wrapper around one unit-type entry of the raw game data."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def name(self):
        return self._proto.name

    @property
    def creation_ability(self):
        """AbilityData of the ability that creates this unit."""
        return self._game_data.abilities[self._proto.ability_id]

    @property
    def attributes(self):
        return self._proto.attributes

    def has_attribute(self, attr):
        """Return True if this unit type carries the given Attribute.

        BUG FIX: this was decorated with @property while also taking an
        `attr` argument, so *any* access of `has_attribute` raised
        TypeError (the property getter was invoked without `attr`).
        It must be a plain method.
        """
        assert isinstance(attr, Attribute)
        return attr in self.attributes

    @property
    def has_minerals(self):
        return self._proto.has_minerals

    @property
    def has_vespene(self):
        return self._proto.has_vespene

    @property
    def cost(self):
        return Cost(
            self._proto.mineral_cost,
            self._proto.vespene_cost
        )
class UpgradeData(object):
    """Read-only wrapper around one upgrade entry of the raw game data."""

    def __init__(self, game_data, proto):
        self._game_data = game_data
        self._proto = proto

    @property
    def name(self):
        """Display name of the upgrade."""
        return self._proto.name

    @property
    def research_ability(self):
        """AbilityData of the ability that researches this upgrade."""
        return self._game_data.abilities[self._proto.ability_id]

    @property
    def cost(self):
        """Mineral/vespene Cost to research this upgrade."""
        return Cost(self._proto.mineral_cost, self._proto.vespene_cost)
class Cost(object):
    """Simple mineral/vespene price record, with an optional build time."""

    def __init__(self, minerals, vespene, time=None):
        self.minerals, self.vespene, self.time = minerals, vespene, time

    def __repr__(self):
        # `time` is deliberately not part of the repr (matches historical output).
        return "Cost({}, {})".format(self.minerals, self.vespene)
| [
"hannes.karppila@gmail.com"
] | hannes.karppila@gmail.com |
577e72e6c72d74c083f0ab47e43009c0b8368618 | 7579540f8f08666d8dc01d295162f94d7b0d9510 | /env/bin/django-admin.py | d6710b7781ef67e89705f283fc0920ed8f7c9aa9 | [
"MIT"
] | permissive | CanOzcan93/TriviaServer | a6d98865828ecc261094ccb0b3db779da21eb9ca | 64bdffb91198a123860047ba46e3577078bdf2b8 | refs/heads/master | 2022-11-22T16:17:10.933845 | 2019-10-30T00:26:41 | 2019-10-30T00:26:41 | 218,407,122 | 0 | 0 | MIT | 2022-11-04T19:14:42 | 2019-10-30T00:06:00 | Python | UTF-8 | Python | false | false | 190 | py | #!/Users/canozcan/Desktop/Projeler/Python/django-example-channels/env/bin/python3.6
from django.core import management

# Thin virtualenv wrapper: delegate straight to Django's command-line
# dispatcher (runserver, migrate, shell, ...).
if __name__ == "__main__":
    management.execute_from_command_line()
| [
"can.ozcan@redblac.net"
] | can.ozcan@redblac.net |
28336385c105ef33bf482732e43d24abbfadd45e | 78dad7cae02eb5291bb6f6fefad773dff4d01d73 | /No1WeekMisssion_Two.py | 372d1ecd4c48d14d7a5a38395c007b41d8f97fa2 | [] | no_license | jasmintung/sz-hello-world | 34af376661c946a58dbe5ca87b032bb4a4d90cf3 | c51ab3f400f616ed3ab960c88ceb61190d0b010e | refs/heads/master | 2020-09-15T23:41:21.998958 | 2017-07-18T02:42:55 | 2017-07-18T02:42:55 | 66,353,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,064 | py | # Author : 张桐
# Create Date: 2017-06-27
# instruction: three-level menu showing province (municipality) / city / district
import json

# NOTE(review): backslashes in this Windows path are not escaped; a raw
# string (r"F:\...") would be safer -- left unchanged here.
fileDst = "F:\CTO_week_mission\OneWeek\source\cities.txt"
# Loop flags: `recycle` keeps the whole session alive; the other three
# select which menu level (province / city / district) is active.
recycle = True
highRecycle = True
middleRecycle = True
lowRecycle = True
while recycle:
    print("退出使用请输入大些字母 : Q")
    with open(fileDst, 'r', encoding='utf-8') as f:
        loadList = json.load(f)  # parse the JSON data from the file
    # print(loadList)
    while highRecycle:
        for i in range(len(loadList)):
            loadListCity = loadList[i].get('city')
            print("->%s" % (loadList[i].get('name')))  # list every province name
        print("请输入要查看的省直辖市的完整名称")
        province = input("输入: ")
        if province == 'Q':
            recycle = False
            middleRecycle = False
            lowRecycle = False
            break
        else:
            for j in range(len(loadList)):
                if province == loadList[j].get('name'):  # input matched a province
                    loadListCity = loadList[j].get('city')  # point at this province's cities
                    for k in range(len(loadListCity)):
                        print("-->", loadListCity[k].get('name'))  # each city
                    highRecycle = False
                    middleRecycle = True
                    break
                else:
                    if j == len(loadList) - 1:
                        print("不存在!")  # no province matched the input
                        break
    while middleRecycle:
        print("请输入要查看的市的完整名称,返回上一级请输入: back")
        choice = input("请输入: ")
        if choice != "back" and choice != 'Q':
            for m in range(len(loadListCity)):
                if choice == loadListCity[m].get('name'):  # input matched a city
                    for key in loadListCity[m].get('area'):
                        print("--->", key)
                    middleRecycle = False
                    lowRecycle = True
                    break
                else:
                    if m == len(loadListCity) - 1:
                        print("不存在!")  # no city matched the input
                        break
        else:
            if choice == 'back':
                highRecycle = True
                lowRecycle = False
                break
            elif choice == 'Q':
                recycle = False
                middleRecycle = False
                lowRecycle = False
                break
            else:
                print("无效输入!")
                break
    while lowRecycle:
        print("继续查看XXX区的信息请输入完整区县名称,返回上一级请输入: back,回到顶级请输入: roll")
        endChoice = input("输入: ")
        if endChoice == 'back':
            highRecycle = False
            middleRecycle = True
            lowRecycle = False
            break
        elif endChoice == 'roll':
            highRecycle = True
            middleRecycle = False
            break
        elif endChoice == 'Q':
            recycle = False
            highRecycle = False
            middleRecycle = False
            lowRecycle = False
            break
        else:
            for m in range(len(loadListCity)):
                if endChoice in loadListCity[m].get('area'):
                    for idArea, areaInfo in enumerate(loadListCity[m].get('area').get(endChoice)):
                        print(idArea + 1, areaInfo)
                    break
                else:
                    if m == len(loadListCity) - 1:
                        print("无效输入!")
| [
"noreply@github.com"
] | noreply@github.com |
38653fdd3af2752f4418005c7726f391d92dcefd | b1b0387bca58c30f62e02ca983cd1f9367ba4452 | /superset/sqllab/query_render.py | b03b21d83ce3a7150efec363c869e0220aec4842 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | fakegit/incubator-superset | 32e347fddf87ccafdef950d8316e1c40548f4cb5 | 96f44219615a287db267f9dc1c0073aa038e0ed0 | refs/heads/master | 2023-09-03T00:13:06.357918 | 2021-10-18T18:09:23 | 2021-10-18T18:09:23 | 240,325,790 | 0 | 0 | Apache-2.0 | 2023-01-26T21:04:15 | 2020-02-13T17:56:13 | Python | UTF-8 | Python | false | false | 6,015 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-self-use, too-few-public-methods, too-many-arguments
from __future__ import annotations
from typing import Any, Callable, Dict, Optional, TYPE_CHECKING
from flask_babel import gettext as __, ngettext
from jinja2 import TemplateError
from jinja2.meta import find_undeclared_variables
from superset import is_feature_enabled
from superset.errors import SupersetErrorType
from superset.sqllab.command import SqlQueryRender
from superset.sqllab.exceptions import SqlLabException
from superset.utils import core as utils
MSG_OF_1006 = "Issue 1006 - One or more parameters specified in the query are missing."
if TYPE_CHECKING:
from superset.sqllab.sqllab_execution_context import SqlJsonExecutionContext
from superset.jinja_context import BaseTemplateProcessor
PARAMETER_MISSING_ERR = (
"Please check your template parameters for syntax errors and make sure "
"they match across your SQL query and Set Parameters. Then, try running "
"your query again."
)
class SqlQueryRenderImpl(SqlQueryRender):
    """Renders a SQL Lab query's Jinja template and validates the result."""

    _sql_template_processor_factory: Callable[..., BaseTemplateProcessor]

    def __init__(
        self, sql_template_factory: Callable[..., BaseTemplateProcessor]
    ) -> None:
        self._sql_template_processor_factory = sql_template_factory  # type: ignore

    def render(self, execution_context: SqlJsonExecutionContext) -> str:
        """Render the query's SQL with its template params; raise on bad templates."""
        query_model = execution_context.query
        try:
            sql_template_processor = self._sql_template_processor_factory(
                database=query_model.database, query=query_model
            )
            rendered_query = sql_template_processor.process_template(
                query_model.sql, **execution_context.template_params
            )
            self._validate(execution_context, rendered_query, sql_template_processor)
            return rendered_query
        except TemplateError as ex:
            # _raise_template_exception always raises, so the return below is
            # unreachable; it only satisfies static "missing return" analysis.
            self._raise_template_exception(ex, execution_context)
            return "NOT_REACHABLE_CODE"

    def _validate(
        self,
        execution_context: SqlJsonExecutionContext,
        rendered_query: str,
        sql_template_processor: BaseTemplateProcessor,
    ) -> None:
        """When template processing is enabled, reject queries that still
        reference undeclared Jinja variables after rendering."""
        if is_feature_enabled("ENABLE_TEMPLATE_PROCESSING"):
            # pylint: disable=protected-access
            syntax_tree = sql_template_processor._env.parse(rendered_query)
            undefined_parameters = find_undeclared_variables(  # type: ignore
                syntax_tree
            )
            if undefined_parameters:
                self._raise_undefined_parameter_exception(
                    execution_context, undefined_parameters
                )

    def _raise_undefined_parameter_exception(
        self, execution_context: SqlJsonExecutionContext, undefined_parameters: Any
    ) -> None:
        """Raise a MISSING_TEMPLATE_PARAMS error listing the undefined names."""
        raise SqlQueryRenderException(
            sql_json_execution_context=execution_context,
            error_type=SupersetErrorType.MISSING_TEMPLATE_PARAMS_ERROR,
            reason_message=ngettext(
                "The parameter %(parameters)s in your query is undefined.",
                "The following parameters in your query are undefined: %(parameters)s.",
                len(undefined_parameters),
                parameters=utils.format_list(undefined_parameters),
            ),
            suggestion_help_msg=PARAMETER_MISSING_ERR,
            extra={
                "undefined_parameters": list(undefined_parameters),
                "template_parameters": execution_context.template_params,
                "issue_codes": [{"code": 1006, "message": MSG_OF_1006,}],
            },
        )

    def _raise_template_exception(
        self, ex: Exception, execution_context: SqlJsonExecutionContext
    ) -> None:
        """Wrap a Jinja TemplateError in an INVALID_TEMPLATE_PARAMS error."""
        raise SqlQueryRenderException(
            sql_json_execution_context=execution_context,
            error_type=SupersetErrorType.INVALID_TEMPLATE_PARAMS_ERROR,
            reason_message=__(
                "The query contains one or more malformed template parameters."
            ),
            suggestion_help_msg=__(
                "Please check your query and confirm that all template "
                "parameters are surround by double braces, for example, "
                '"{{ ds }}". Then, try running your query again.'
            ),
        ) from ex
class SqlQueryRenderException(SqlLabException):
    """SqlLabException variant that carries an extra error-context payload."""

    _extra: Optional[Dict[str, Any]]

    def __init__(
        self,
        sql_json_execution_context: SqlJsonExecutionContext,
        error_type: SupersetErrorType,
        reason_message: Optional[str] = None,
        exception: Optional[Exception] = None,
        suggestion_help_msg: Optional[str] = None,
        extra: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(
            sql_json_execution_context,
            error_type,
            reason_message,
            exception,
            suggestion_help_msg,
        )
        self._extra = extra

    @property
    def extra(self) -> Optional[Dict[str, Any]]:
        """Extra context attached to the error (or None)."""
        return self._extra

    def to_dict(self) -> Dict[str, Any]:
        """Serialize, appending "extra" only when a non-empty payload exists."""
        payload = super().to_dict()
        if self._extra:
            payload["extra"] = self._extra
        return payload
| [
"noreply@github.com"
] | noreply@github.com |
567d7710808a7acfc4aeaefead48b3e49edd8d77 | dae19f063c84698a92e8ab435c7563a6231dd1ea | /friends/urls.py | 9bcd02610286c52366cb50f16fb8bc9c0f733bb2 | [] | no_license | amirsedghi/friends | dd4861881db21c76bb46e0562c38aca98c2833fe | 668da04786e50add497bcb327d2a8fc02ed72551 | refs/heads/master | 2020-12-25T14:23:23.498342 | 2016-08-19T20:14:07 | 2016-08-19T20:14:07 | 66,109,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | """friends URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include

# Delegate every URL to the friendship app's URLconf.
urlpatterns = [
    url(r'^', include('apps.friendship.urls')),
]
| [
"amir.sepultura@gmail.com"
] | amir.sepultura@gmail.com |
2d25644bf3cb309db61e58afcb3896a9c1e89aa9 | 0dc7ed076f70b28f2546f2e266783fd7dee5c5a3 | /8/myStuff.py | f69e8bb0c170966ca029132250b3f3de591a9cae | [] | no_license | Kristjan-O-Ragnarsson/vef2t-05cu_verkefni | 5145f8ef920af9221d53c139039d094349f77016 | 24f6ab6d1b0f282d3a0d964dbf9a6542789f80d0 | refs/heads/master | 2021-01-21T11:30:51.251314 | 2017-11-07T09:07:44 | 2017-11-07T09:07:44 | 102,003,336 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | """
Kristjan O.
myStuff 0.1.x
"""
DEBUG = True
PORT = 81
HOST = 'localhost'
session_opts = {
'session.type': 'memory',
'session.cookie_expires': True,
'session.auto': True
}
def debug(*args, **kwargs):
    """Print the given positional/keyword arguments, but only when DEBUG is on."""
    if not DEBUG:
        return
    print(args, kwargs)
def msg(msg, title):
    """ a msg page for bottle """
    # Stub: intended to render a message page with bottle's template();
    # currently always returns None.  NOTE(review): `title` is unused and the
    # parameter `msg` shadows the function name -- confirm intended design.
    return None#template()
| [
"korri1313@gmail.com"
] | korri1313@gmail.com |
9f217ec36d8e23186e431bd2f0f1ae0b6ba58f28 | b7e6cdf094baaee9d6e5034c2355641fbf9138d7 | /test2.py | 1d278e6887286acf6ad8073e7a7038fe7af2e13f | [] | no_license | heshibo1994/leetcode-python-2 | 04296c66cd6d1fe58880062aeafdbe9d474b7d2e | 3ea32f03bd453743b9b81de9871fad7ac67ced90 | refs/heads/master | 2020-05-23T21:49:01.367969 | 2019-09-30T03:31:27 | 2019-09-30T03:31:27 | 186,961,972 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | print(int("343")) | [
"csuheshibo@163.com"
] | csuheshibo@163.com |
4ff9e42fa0d257aff951b9cb1b37e0e6c62952fc | 30884d1ee1767448528e0229f6adf7bc548cc1d4 | /archive/old_4IAR/beck_old/config.py | 09c7f5852b9a66f69767d8d1c0aa99285c54b30d | [] | no_license | jctops/4IAR-RL | 6fc0cbdc6c4ca772f35139dabacb6742b3088efa | c110c5c7daad754b3ce726d20965a02583d03637 | refs/heads/master | 2021-08-08T09:40:36.696763 | 2020-12-10T09:25:50 | 2020-12-10T09:25:50 | 229,827,229 | 0 | 0 | null | 2020-10-05T18:32:53 | 2019-12-23T21:46:14 | Python | UTF-8 | Python | false | false | 2,213 | py | MCTS_ARGS = {
'parallel_threads': 1,
'cpuct': 1,
'mcts_iterations': 60
}
SELFPLAY_ARGS = {
'DETERMINISTIC_PLAY': 8,
'GAMES_PER_SUBMISSION': 3,
}
TRAINING_ARGS = {
'MAX_MEMORY_SIZE': int(1e6),
'MAX_EXAMPLES_PER_RECEIVE': 10,
'START_TRAINING_THRESHOLD': 4096 * 3 / 1e6,
'TRAINING_SAMPLE_SIZE': 4096,
'BATCH_SIZE': 128,
'EPOCHS': 2,
'EVAL_GAMES': 40,
'PROMOTION_THRESHOLD': 0.55,
'PREVIOUS_CHECKPOINT': None,
'TRAINING_ROUNDS_PER_EVAL': 10,
'CHECKPOINT_DIR': '../checkpoints',
'CHECKPOINT_PREFIX': 'model_ex1_',
}
MAIN_ARGS = {
# 'NUM_OF_GPUS': 4,
# 'USE_GPUS': True,
'NUM_OF_GPUS': 0,
'USE_GPUS': False,
'NUM_OF_SELFPLAY_PROCESSES': 3,
'RUNNING_TIME': 252000
}
#################################
## Beck data (game_state.py) ##
#################################
m = 4
n = 9
k = 4
#####################################################
## MCTS constants (parallel_mcts_nothreading.py) ##
#####################################################
# MCTS_ARGS = {
# 'parallel_threads': 1,
# 'cpuct': 1,
# 'mcts_iterations': 40
# }
ALPHA = 0.3
C_PUCT = 3.0
EPSILON = 0.25
TAU = 1
NUMBER_OF_PASSES = 400
NUMBER_OF_THREADS = 8
POINT_OF_DETERMINISM = 10
IS_TWO_PLAYER_GAME = True
PASSES = 800
##################################
## Memory constants (memory.py) ##
##################################
STARTING_MEMORY_SIZE = 40000
####################################
## Network constants (network.py) ##
####################################
NNET_ARGS = {
'REG_CONST': 0.0001,
'LEARNING_RATE': 0.2,
'MOMENTUM': 0.9,
'INPUT_DIM': (3,4,9),
'OUTPUT_DIM': (4,9),
'NUM_OF_RESIDUAL_LAYERS': 4,
'CONV_FILTERS': 128,
'CONV_KERNEL_SIZE': (4,4),
'RES_FILTERS': 128,
'RES_KERNEL_SIZE': (4,4),
'POLICY_HEAD_FILTERS': 32,
'POLICY_HEAD_KERNEL_SIZE': (1,1),
'VALUE_HEAD_FILTERS': 32,
'VALUE_HEAD_KERNEL_SIZE': (1,1),
'VALUE_HEAD_DENSE_NEURONS': 20,
}
MAX_GENERATIONS = 40000
LEARNING_RATES_TO_TRY = [0.02, 0.002, 0.0002]
TRAINING_LOOPS = 10
BATCH_SIZE = 2048
MINIBATCH_SIZE = 32
EPOCHS = 1
INITIAL_NNET_WEIGHTS_FILENAME = 'classes/initial_nnet_weights.pkl' | [
"email@jaketopping.co.uk"
] | email@jaketopping.co.uk |
a68ba341c2091ba7c4bc3e301c4a02c3a80db0b6 | d9db6b00d0d0d570fce3e89cecd8b2c67b2ed9d8 | /aicsmlsegment/bin/curator/curator_takeall.py | aba192f8c000dd886999c927c6ed3ff456ba30cc | [
"BSD-2-Clause"
] | permissive | AllenInstitute/aics-ml-segmentation | 84afcf52085f36b1672a58828abc81479c806686 | 6ee1713100666ff4cd32380a75cc628124d9c2dc | refs/heads/master | 2021-06-09T07:32:49.827954 | 2020-12-16T05:46:22 | 2020-12-16T05:46:22 | 149,820,330 | 24 | 7 | NOASSERTION | 2020-11-16T14:40:31 | 2018-09-21T21:30:48 | Python | UTF-8 | Python | false | false | 6,669 | py | #!/usr/bin/env python
import os
import sys
import logging
import argparse
import traceback
import importlib
import pathlib
import csv
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from random import shuffle
from scipy import stats
from skimage.io import imsave
from skimage.draw import line, polygon
from scipy import ndimage as ndi
from aicssegmentation.core.utils import histogram_otsu
from aicsimageio import AICSImage
from aicsimageio.writers import OmeTiffWriter
from aicsmlsegment.utils import input_normalization
####################################################################################################
# global settings
button = 0
flag_done = False
pts = []
draw_img = None
draw_mask = None
draw_ax = None
log = logging.getLogger()
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s - %(name)s - %(lineno)3d][%(levelname)s] %(message)s')
#
# Set the default log level for other modules used by this script
# logging.getLogger("labkey").setLevel(logging.ERROR)
# logging.getLogger("requests").setLevel(logging.WARNING)
# logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.INFO)
####################################################################################################
class Args(object):
    """
    Use this to define command line arguments and use them later.
    For each argument do the following
    1. Create a member in __init__ before the self.__parse call.
    2. Provide a default value here.
    3. Then in p.add_argument, set the dest parameter to that variable name.
    See the debug parameter as an example.
    """
    def __init__(self, log_cmdline=True):
        # Defaults; __parse() overwrites these from sys.argv because argparse
        # writes parsed values directly onto this instance (namespace=self).
        self.debug = False
        self.output_dir = '.' + os.sep
        self.struct_ch = 0
        self.xy = 0.108
        #
        self.__parse()
        #
        if self.debug:
            log.setLevel(logging.DEBUG)
            log.debug("-" * 80)
            self.show_info()
            log.debug("-" * 80)

    @staticmethod
    def __no_args_print_help(parser):
        """
        This is used to print out the help if no arguments are provided.
        Note:
            - You need to remove it's usage if your script truly doesn't want arguments.
            - It exits with 1 because it's an error if this is used in a script with no args.
              That's a non-interactive use scenario - typically you don't want help there.
        """
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)

    def __parse(self):
        """Declare the CLI and parse sys.argv into attributes of this object."""
        p = argparse.ArgumentParser()
        # Add arguments
        p.add_argument('--d', '--debug', action='store_true', dest='debug',
                       help='If set debug log output is enabled')
        p.add_argument('--raw_path', required=True, help='path to raw images')
        p.add_argument('--data_type', required=True, help='the type of raw images')
        p.add_argument('--input_channel', default=0, type=int)
        p.add_argument('--seg_path', required=True, help='path to segmentation results')
        p.add_argument('--train_path', required=True, help='path to output training data')
        p.add_argument('--mask_path', help='[optional] the output directory for masks')
        p.add_argument('--Normalization', default=0, help='the normalization method to use')
        self.__no_args_print_help(p)
        p.parse_args(namespace=self)  # parsed values land directly on self

    def show_info(self):
        """Log working directory, the raw command line, and all parsed args."""
        log.debug("Working Dir:")
        log.debug("\t{}".format(os.getcwd()))
        log.debug("Command Line:")
        log.debug("\t{}".format(" ".join(sys.argv)))
        log.debug("Args:")
        for (k, v) in self.__dict__.items():
            log.debug("\t{}: {}".format(k, v))
###############################################################################
class Executor(object):
    """Turns raw image + segmentation pairs into training triples
    (normalized image, ground truth, cost map) written to args.train_path."""

    def __init__(self, args):
        pass

    def execute(self, args):
        """Process every raw image in args.raw_path into training data."""
        if not args.data_type.startswith('.'):
            args.data_type = '.' + args.data_type
        filenames = glob(args.raw_path + os.sep + '*' + args.data_type)
        filenames.sort()
        existing_files = glob(args.train_path + os.sep + 'img_*.ome.tif')
        print(len(existing_files))
        # Three files (img, _GT, _CM) are written per sample, so divide by 3
        # to resume numbering after previously generated data.
        training_data_count = len(existing_files) // 3
        for _, fn in enumerate(filenames):
            training_data_count += 1
            # load raw image and normalize the selected channel
            reader = AICSImage(fn)
            struct_img = reader.get_image_data("CZYX", S=0, T=0, C=[args.input_channel]).astype(np.float32)
            # BUG FIX: this previously called input_normalization(img, args)
            # where `img` is undefined (NameError at runtime); normalize the
            # image that was just loaded.
            struct_img = input_normalization(struct_img, args)
            # load the segmentation result and binarize it to {0, 1}
            seg_fn = args.seg_path + os.sep + os.path.basename(fn)[:-1 * len(args.data_type)] + '_struct_segmentation.tiff'
            reader = AICSImage(seg_fn)
            seg = reader.get_image_data("ZYX", S=0, T=0, C=0) > 0.01
            seg = seg.astype(np.uint8)
            seg[seg > 0] = 1
            # cost map: 1 everywhere, 0 inside the (optional) excluding mask
            cmap = np.ones(seg.shape, dtype=np.float32)
            mask_fn = args.mask_path + os.sep + os.path.basename(fn)[:-1 * len(args.data_type)] + '_mask.tiff'
            if os.path.isfile(mask_fn):
                reader = AICSImage(mask_fn)
                mask = reader.get_image_data("ZYX", S=0, T=0, C=0)
                cmap[mask == 0] = 0
            with OmeTiffWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '.ome.tif') as writer:
                writer.save(struct_img)
            with OmeTiffWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_GT.ome.tif') as writer:
                writer.save(seg)
            with OmeTiffWriter(args.train_path + os.sep + 'img_' + f'{training_data_count:03}' + '_CM.ome.tif') as writer:
                writer.save(cmap)
def main():
    """CLI entry point: parse args, run the Executor, exit 1 on any failure."""
    dbg = False
    try:
        args = Args()
        dbg = args.debug
        # Do your work here - preferably in a class or function,
        # passing in your args. E.g.
        exe = Executor(args)
        exe.execute(args)
    except Exception as e:
        log.error("=============================================")
        if dbg:
            # full traceback only in debug mode
            log.error("\n\n" + traceback.format_exc())
        log.error("=============================================")
        log.error("\n\n" + str(e) + "\n")
        log.error("=============================================")
        sys.exit(1)
if __name__ == "__main__":
main()
| [
"jxchen.ustc@gmail.com"
] | jxchen.ustc@gmail.com |
4c6b68fbb70b544b5a6a1cded622f00c20ca5dc4 | 2c1e7bbc853097b3b7cc5ea5676a807a8abc84b2 | /env/bin/ndg_httpclient | 1c1ddc4828ed8ffd2296640ca45d52ab7630a3df | [
"MIT"
] | permissive | NickDST/Interactive-Assistant-Winter | 4c304d791f14d6b1bb8c60c47cfdeff76b1bcf8c | 7b4ea5bea45201a8a091134cdfab9e8bd3419d65 | refs/heads/master | 2023-01-20T09:08:32.618098 | 2020-01-06T15:39:22 | 2020-01-06T15:39:22 | 232,119,652 | 0 | 0 | MIT | 2023-01-09T12:06:44 | 2020-01-06T14:35:51 | Python | UTF-8 | Python | false | false | 261 | #!/Users/nicholasho/Desktop/GCPWinterV2/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from ndg.httpsclient.utils import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"learningnickk@gmail.com"
] | learningnickk@gmail.com | |
34e5075f02a2189ea0067c1e33f54c3292c1a874 | 56577a32dafc513c85dd02050a3ed93834bc2f83 | /com/yanglf/main/doutu.py | b0825a3071f356d024ccee500ed23f51027a30c8 | [] | no_license | yanglangfei/image | 50ee00f370259534d4e31385544e18a6472a70a4 | 333c7b7e5bda41e83a63cc85f46390b6d0408784 | refs/heads/master | 2020-07-21T00:58:02.022299 | 2020-03-17T09:58:52 | 2020-03-17T09:58:52 | 206,734,418 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | # _*_coding:utf8_*_
# Project: spider
# File: main.py
# Author: ClassmateLin
# Email: 406728295@qq.com
# 有项目的可以滴滴我, Python/Java/PHP/Go均可。WX: ClassmateYue
# Time: 2020/2/21 4:54 下午
# DESC:
import requests
import os
from bs4 import BeautifulSoup
def get_html_text(url):
    """Fetch the page at `url` and return its decoded HTML text."""
    response = requests.get(url)
    return response.text
def get_images_urls(html_text):
    """
    Extract the sticker-image URLs from a post page.
    :param html_text: raw HTML of the post page
    :return: list of image URLs
    """
    urls = []  # collected image URLs
    soup = BeautifulSoup(html_text, 'html.parser')  # parse the page
    div_tag = soup.find('div', {'id': 'post_content'})  # the post-body container
    img_tag_list = div_tag.find_all_next('img')  # all <img> tags inside it
    # the last four images are not stickers on this site, so drop them
    for img_tag in img_tag_list[:-4]:
        url = img_tag.attrs['src']  # the image address
        urls.append(url)
    return urls
def save_images(dir, urls):
    """Download every URL in `urls` into directory `dir`.

    Files are named 1.<ext>, 2.<ext>, ... in download order.
    (`dir` shadows the builtin but is kept for keyword-call compatibility.)
    """
    os.makedirs(dir, exist_ok=True)  # create the target directory if needed
    # enumerate replaces the previous manual `count += 1` bookkeeping
    for count, url in enumerate(urls, start=1):
        print('正在下载第{}张图片...'.format(str(count)))
        ext = url.split('.')[-1]  # image extension from the URL
        filename = dir + '/' + str(count) + '.' + ext  # target path
        content = requests.get(url).content  # binary image payload
        with open(filename, 'wb') as f:
            f.write(content)
if __name__ == '__main__':
    # Demo entry point: scrape one post page and download its sticker images.
    url = 'http://www.bbsnet.com/xiongmaoren-18.html'
    html_text = get_html_text(url)
    image_urls = get_images_urls(html_text)
    save_images('./images', image_urls)
| [
"文字899117"
] | 文字899117 |
7a286bf190f3a7ccafa0b6a2278c68f4aebdc583 | 40280c446e21c07ac3ffd20c5eda064a05093698 | /easy_module_attribute_getter/custom_transforms.py | 4eb9cbf5ed62de0c0da0fdd380f0b4112685e08a | [
"MIT"
] | permissive | KevinMusgrave/easy-module-attribute-getter | 884fdee1960b792db49e09edc5de0d268fd6ac8a | e0a733c02f2e6a969191a75c79159f45440c969f | refs/heads/master | 2021-07-04T02:18:17.113242 | 2021-01-21T03:32:22 | 2021-01-21T03:32:22 | 218,787,854 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | import torchvision.transforms.functional as F
from PIL import Image
class ConvertToBGR(object):
    """Transform that swaps the R and B channels of an RGB PIL image."""

    def __init__(self):
        pass

    def __call__(self, img):
        red, green, blue = img.split()
        return Image.merge("RGB", (blue, green, red))

    def __repr__(self):
        return self.__class__.__name__ + "()"
class Multiplier(object):
    """Transform that scales its input by a fixed factor."""

    def __init__(self, multiple):
        self.multiple = multiple

    def __call__(self, img):
        return img * self.multiple

    def __repr__(self):
        return "{}(multiple={})".format(type(self).__name__, self.multiple)
"tkm45@cornell.edu"
] | tkm45@cornell.edu |
456651b819e2657166a381ed7218a062acf17c5c | 973c0e2155372d3e03ff8043a404fca07fdb49e1 | /accounts/migrations/0001_initial.py | f6867e6c216087438b9ace028733420cb89f089c | [] | no_license | CalebNash/full-stack-chatapp | 5221e75b77a5d20f1e4ed8a3889ccb6a5c40d136 | 2dac447d617783df51c4c2fa905b5da92463d657 | refs/heads/main | 2022-12-31T14:36:06.535441 | 2020-10-20T20:10:10 | 2020-10-20T20:10:10 | 305,493,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | # Generated by Django 3.1.2 on 2020-10-19 20:28
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| [
"calebnash@Calebs-MacBook-Pro.local"
] | calebnash@Calebs-MacBook-Pro.local |
b953899f2ab5ded79933d11b7d4da92f268e3ba3 | 49d1cc538dc2c97304f545834afaecd32a0b57d4 | /proyectos/tienda_frutas_excel.py | 90885798111c337aca2f0a054f3f2b070ad1320f | [] | no_license | Misachel/python-master | ea70926e2570a638c55e1dddf7faecc0eab596bb | f1145ec0bf7d986f45bfe234fa0db04e11d9ee17 | refs/heads/master | 2023-07-10T13:33:54.166756 | 2021-08-07T00:11:39 | 2021-08-07T00:11:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # TODO: Abre la hoja de Excel que contiene el DataFrame de frutas (NOMBRE | PRECIO)
# TODO: Abre la hoja de Excel que contiene el DataFrame de ventas (FRUTA | PRECIO | CANTIDAD | TOTAL | FECHA)
def obtenerFrutas():
pass
# TODO: Recorre cada fruta del DataFrame de frutas
# TODO: haz un yield sobre df_frutas["NOMBRE"], df_frutas["PRECIO"]
def agregarFruta(nombre, precio):
pass
# TODO: Agrega una fila más al DataFrame de frutas con el `nombre` y `precio`
# Hint: df_frutas.append(pd.DataFrame({ "NOMBRE": nombre, "PRECIO": precio }))
# TODO: Guarda el DataFrame de frutas de vuelta a la hoja de Excel
# Hint: df_frutas.to_excel(<ruta al archivo xlsx>, sheet_name="...", ...)
def buscarFruta(nombre):
# TODO: Determina si la fruta con el `nombre` está en el DataFrame de frutas
# TODO: En caso de que no devuelve None
# TODO: Recupera el precio de la fruta con el `nombre`
precio = None # SUSTITUIR POR EL REAL
# Regresa el nombre y precio de la fruta
return {
"nombre": nombre,
"precio": precio
}
def editarFruta(nombre, precio):
pass
# TODO: Determina si la fruta con el `nombre` está en el DataFrame de frutas
# TODO: En caso de que no regresa
# TODO: Actualiza el precio de la fruta con el `nombre`
# TODO: Guarda el DataFrame de frutas de vuelta a la hoja de Excel
# Hint: df_frutas.to_excel(<ruta al archivo xlsx>, sheet_name="...", ...)
def eliminarFruta(nombre):
pass
# TODO: Determina si la fruta con el `nombre` está en el DataFrame de frutas
# TODO: En caso de que no regresa
# TODO: Elimina el registro asociado a la fruta con el `nombre`
# TODO: Guarda el DataFrame de frutas de vuelta a la hoja de Excel
# Hint: df_frutas.to_excel(<ruta al archivo xlsx>, sheet_name="...", ...)
def agregarVenta(fruta, precio, cantidad, total, fecha):
pass
# TODO: Agrega una fila más al DataFrame de ventas con la `fruta`, `precio`, `cantidad`, `total` y `fecha`
# TODO: Guarda el DataFrame de ventas de vuelta a la hoja de Excel
# Hint: df_ventas.to_excel(<ruta al archivo xlsx>, sheet_name="...", ...)
def obtenerVentas():
    """Yield every sale record from the sales DataFrame (stub)."""
    pass
    # TODO: Iterate over each sale of the sales DataFrame and yield its columns
# TODO: haz un yield sobre df_ventas["FRUTA"], df_ventas["PRECIO"], df_ventas["CANTIDAD"], df_ventas["TOTAL"], df_ventas["FECHA"] | [
"dragonnomada123@gmail.com"
] | dragonnomada123@gmail.com |
e9aaecada9a17d7d8b636210f8d990f11a900e07 | 16631cf7cd4a70f2cd2750851649d3eff5e17724 | /2022/day15/part2.py | 00daf3f196e692462e068c11a24b226c3febf106 | [] | no_license | kynax/AdventOfCode | 1dd609a3308d733f2dd7d4ea00508d2da73180b9 | 36a339241dd7a31ebe08a73e5efa599e5faeea1a | refs/heads/master | 2022-12-21T13:32:52.591068 | 2022-12-16T22:41:30 | 2022-12-16T22:41:30 | 48,439,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,241 | py | import sys
# Advent of Code 2022 day 15 part 2: find the single position not covered by
# any sensor in a 4_000_000 x 4_000_000 field and print its tuning frequency
# (x * 4000000 + y).
grid = {}
sensitivity = []  # (sensor_x, sensor_y, manhattan_radius) per sensor
sensors = []
beacons = []
for l in sys.stdin:
    # line format: "Sensor at x=2, y=18: closest beacon is at x=-2, y=15"
    l = l.strip().split(' ')
    sx, sy, bx, by = int(l[2][2:-1]), int(l[3][2:-1]), int(l[8][2:-1]), int(l[9][2:])
    grid[(sx, sy)] = 'S'
    grid[(bx, by)] = 'B'
    dx, dy = abs(sx - bx), abs(sy - by)
    md = dx + dy  # Manhattan distance to the closest beacon = coverage radius
    sensitivity.append((sx, sy, md))
    sensors.append((sx, sy))
    if (bx, by) not in beacons: beacons.append((bx, by))
# overall x-extent of the covered area (computed but not used below)
minx = min([i[0] - i[2] for i in sensitivity]) - 1
maxx = max([i[0] + i[2] for i in sensitivity]) + 1
for row in range(4000000):
    # per sensor: the x-interval its diamond covers on this row (if any)
    intervals = []
    for s in sensitivity:
        d = abs(s[1] - row)
        if d > s[2]:
            continue  # this sensor's diamond does not reach the row
        w = s[2] - d  # half-width of the covered span on this row
        b, e = s[0] - abs(w), s[0] + abs(w)
        if e < b: b, e = e, b
        intervals.append((b, e))
    # merge sorted intervals; spans touching within a gap of 1 are fused
    ints = sorted(intervals)
    nints = [ints[0]]
    for i in range(1, len(ints)):
        if ints[i][0] <= nints[-1][1] + 1:
            if ints[i][1] <= nints[-1][1]:
                pass  # fully included
            else:
                nints[-1] = (nints[-1][0], ints[i][1])
        else:
            nints.append(ints[i])
    # more than one merged interval => exactly one uncovered x on this row:
    # the distress beacon sits at x = nints[0][1] + 1
    if len(nints) > 1:
        print(nints, nints[0][1] + 1, row)
        print(4000000 * (nints[0][1] + 1) + row)
break | [
"guilemay@gmail.com"
] | guilemay@gmail.com |
17acf1996c328537a151dac3bb5658feba2a8a72 | 8f743e29d503e775fc19b2897ae23105b92a3659 | /fairplay/competition/migrations/0039_auto_20161014_2312.py | cd481237e6a57352f4f5a7109edd955b73261c62 | [] | no_license | Greymalkin/fairplay | 3bd3bd1e93008f4b0da07d79da9dd5baa5d7ce2d | 0c3280050e1caa34f42d350dfab00fd3b1dbe5ad | refs/heads/master | 2020-03-23T15:23:07.815912 | 2018-08-05T19:14:16 | 2018-08-05T19:14:16 | 141,743,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-10-15 03:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the MensArtisticGymnast multi-table-inheritance model."""

    dependencies = [
        ('competition', '0038_auto_20161014_2249'),
    ]

    operations = [
        # the OneToOne parent link must be dropped before the model itself
        migrations.RemoveField(
            model_name='mensartisticgymnast',
            name='gymnast_ptr',
        ),
        migrations.DeleteModel(
            name='MensArtisticGymnast',
        ),
    ]
| [
"plee@automatastudios.com"
] | plee@automatastudios.com |
d911a1de75e301eed643472356197ac68faf3647 | b0fab024e9b7e7bd51c18c5578f0f45314808592 | /sine_competition_url/competition_url.py | 692142fbbe0f49ea5ac2e79373fac3914a120a1b | [] | no_license | dhecar/SINERGIA | a34d98fda84ce8ca8d2f67b89680bbf19c15fe1b | 678cfd41df8045645be130d2f3d51399908b15fd | refs/heads/master | 2021-01-23T11:07:48.724177 | 2017-04-20T10:51:07 | 2017-04-20T10:51:07 | 33,182,317 | 1 | 7 | null | 2015-08-31T20:59:43 | 2015-03-31T11:45:11 | Python | UTF-8 | Python | false | false | 471 | py | import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
from openerp.osv import fields, osv
import urllib
import re
class competition_url(osv.osv):
    """Legacy OpenERP (osv) model storing competitor price-page URLs together
    with the regular expression used to scrape a price from each page."""
    _name = 'competition.url'            # model identifier
    _description = 'URL for competition'
    _table = 'competition_url'           # explicit DB table name
    _rec_name = 'url_competition'        # field used as the record display name
    _columns = {
        'url_competition': fields.char('Url ', size=150),
        'regex': fields.char('Expression', size=300),
    }
competition_url()  # legacy OpenERP pattern: instantiate once to register the model
| [
"dhecar@gmail.com"
] | dhecar@gmail.com |
4a9b0390b225568ad02d0406e914e19e6b4dd947 | 13153747fb37c39b51c6a9af9a7c06f92cc303a8 | /FeatureExtraction/TF_ISF.py | 6926056f03df07905a4c0b306d7820f0787c471b | [] | no_license | cs60050/ML-JusticeLeague | f824eab180ac96a6ac84303cf7aef8700a6d4c3f | ffe1cec8e251af531f22e2f3620c139e92d76f62 | refs/heads/master | 2021-01-11T19:41:33.483780 | 2016-11-16T09:40:14 | 2016-11-16T09:40:14 | 69,039,943 | 2 | 11 | null | 2016-11-11T11:11:04 | 2016-09-23T16:00:20 | Perl | UTF-8 | Python | false | false | 392 | py | def calcMeanTF_ISF(VSM, index):
def calcMeanTF_ISF(VSM, index):
    """Return the mean TF-ISF of the sentence at VSM[index].

    VSM is a sentence x term matrix of term frequencies. For every term with
    tf > 0 in the chosen sentence, tf is weighted by the inverse of the number
    of sentences containing that term; the weighted values are averaged over
    the number of contributing terms (0 when the sentence has no terms).
    """
    row = VSM[index]
    total = 0.0
    hits = 0
    for term_idx, tf in enumerate(row):
        if tf > 0:
            hits += 1
            # number of sentences in which this term occurs at least once
            sent_freq = sum(1 for sentence in VSM if sentence[term_idx] > 0)
            total += (tf) * (1.0 / sent_freq)
    return total / hits if hits > 0 else 0
| [
"annepuharsha@gmail.com"
] | annepuharsha@gmail.com |
07f7b2b64d310595be6ea13a62a182660b842ec9 | 4cac54bd6b326f9fe2bd280aba0bb7df5b3ada72 | /src/adas/datasets.py | cfea4f614195a084c7cdc88b89e5932160cb8e68 | [
"MIT"
] | permissive | llucid-97/AdaS | d16e7990bc6a0d79cc53cceb42ea6302576e8b03 | 298beca98d5b432460c9f268364c0fe7ce8323a6 | refs/heads/master | 2022-12-17T13:48:59.097898 | 2020-09-21T19:43:02 | 2020-09-21T19:45:27 | 297,433,885 | 0 | 0 | MIT | 2020-09-21T18:58:43 | 2020-09-21T18:58:42 | null | UTF-8 | Python | false | false | 12,871 | py | from contextlib import contextmanager
from pathlib import Path
import warnings
import tempfile
import shutil
import os
import torch
from torchvision.datasets.utils import check_integrity,\
extract_archive, verify_str_arg, download_and_extract_archive
from torchvision.datasets.folder import ImageFolder
class TinyImageNet(ImageFolder):
    """`TinyImageNet <http://cs231n.stanford.edu/tiny-imagenet-200.zip>`_ Dataset.

    Args:
        root (string): Directory containing the extracted 'tiny-imagenet-200'
            content (train/, val/, test/, wnids.txt).
        split (string, optional): The dataset split, supports ``train`` or ``val``.
        download (bool, optional): Not supported; passing True raises ValueError
            with manual-download instructions.
        **kwargs: forwarded to ImageFolder (transform, target_transform, loader, ...).

    Attributes:
        classes (list): class names as discovered by ImageFolder (WordNet IDs).
        class_to_idx (dict): items (class_name, class_index) set by ImageFolder.
        imgs (list): list of (image path, class_index) tuples.
        targets (list): the class_index value for each image in the dataset.
    """
    url = 'http://cs231n.stanford.edu/tiny-imagenet-200.zip'
    filename = 'tiny-imagenet-200.zip'
    meta_file = 'wnids.txt'  # file listing the 200 WordNet IDs, one per line

    def __init__(self, root, split='train', download=False, **kwargs):
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        self.root = root
        if download:
            # self.download()
            raise ValueError(
                "Downloading of TinyImageNet is not supported. " +
                "You must manually download the 'tiny-imagenet-200.zip' from" +
                f" {self.url} and extract the 'tiny-imagenet-200' folder " +
                "into the folder specified by 'root'. That is, once the" +
                "'tiny-imagenet-200' folder is extracted, specify the data " +
                "directory for this program as the path for to that folder")
        self.parse_archives()  # normalize the on-disk layout once
        self.classes = self.load_meta_file()
        # NOTE(review): iterating a wnid *string* yields single characters, so
        # this maps characters to indices; both attributes are overwritten by
        # ImageFolder.__init__ below -- confirm whether this is intentional.
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
        super(TinyImageNet, self).__init__(self.split_folder, **kwargs)

    def _check_integrity(self):
        """Return True when train/val/test folders and wnids.txt are all present."""
        dirs = [d.name for d in Path(self.root).iterdir()]
        if 'train' not in dirs or 'test' not in dirs or 'val' not in dirs:
            return False
        if not (Path(self.root) / 'wnids.txt').exists():
            return False
        return True

    def download(self):
        """Download and extract the archive (unreachable via __init__, which
        rejects download=True)."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            # BUG FIX: previously fell through and re-downloaded the archive
            # even though the files were already present.
            return
        download_and_extract_archive(
            self.url, self.root,
            filename=self.filename, md5=None)

    def load_meta_file(self):
        """Return the list of WordNet IDs read from wnids.txt (one per line),
        or None when the on-disk layout is incomplete."""
        if self._check_integrity():
            with (Path(self.root) / self.meta_file).open('r') as f:
                lines = [line.strip() for line in f.readlines()]
            return lines

    def parse_archives(self):
        """Flatten the freshly-extracted layout into ImageFolder's expected
        <split>/<class>/<image> structure (idempotent: only runs the moves
        when the intermediate 'images' folders still exist)."""
        if self._check_integrity():
            name = (Path(self.root) / 'train')
            if (name / 'images').exists():
                # train/<wnid>/images/* -> train/<wnid>/*, drop the boxes file
                for c in name.iterdir():
                    os.remove(str(c / f'{c.name}_boxes.txt'))
                    for f in (c / 'images').iterdir():
                        shutil.move(str(f), c)
                    shutil.rmtree(str(c / 'images'))
            name = (Path(self.root) / 'val')
            if (name / 'images').exists():
                # val_annotations.txt maps each val image to its wnid
                with (name / 'val_annotations.txt').open('r') as f:
                    for line in f.readlines():
                        line = line.replace('\t', ' ').strip().split(' ')
                        (name / line[1]).mkdir(exist_ok=True)
                        shutil.move(str(name / 'images' / line[0]),
                                    str(name / line[1]))
                shutil.rmtree(str(name / 'images'))
                os.remove(name / 'val_annotations.txt')

    @property
    def split_folder(self):
        """Directory of the currently selected split."""
        return os.path.join(self.root, self.split)

    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
class ImageNet(ImageFolder):
    # archive filenames and their md5 checksums, keyed by split / 'devkit'
    archive_meta = {
        'train': ('ILSVRC2012_img_train.tar',
                  '1d675b47d978889d74fa0da5fadfb00e'),
        'val': ('ILSVRC2012_img_val.tar',
                '29b22e2961454d5413ddabcf34fc5622'),
        'devkit': ('ILSVRC2012_devkit_t12.tar.gz',
                   'fa75699e90414af021442c21a62c3abf')
    }
    meta_file = "meta.bin"  # cached (wnid_to_classes, val_wnids), written by parse_devkit_archive
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        transform (callable, optional): A function/transform that takes in an
            PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes
            in the target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """
    def __init__(self, root, split='train', download=None, **kwargs):
        # the archives are no longer downloadable; reject/deprecate the flag
        if download is True:
            msg = ("The dataset is no longer publicly accessible. You need to "
                   "download the archives externally and place them in the "
                   "root directory.")
            raise RuntimeError(msg)
        elif download is False:
            msg = ("The use of the download flag is deprecated, since the "
                   "dataset is no longer publicly accessible.")
            warnings.warn(msg, RuntimeWarning)
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        self.parse_archives()  # extract/parse archives on first use only
        wnid_to_classes = load_meta_file(self.root)[0]
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # ImageFolder discovered wnid folder names; remap them to class names
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}
    def parse_archives(self):
        # parse the devkit once (creates meta.bin), then extract the image
        # archive for the requested split if its folder does not exist yet
        if not check_integrity(os.path.join(self.root, self.meta_file)):
            parse_devkit_archive(self.root)
        if not os.path.isdir(self.split_folder):
            if self.split == 'train':
                parse_train_archive(self.root)
            elif self.split == 'val':
                parse_val_archive(self.root)
    @property
    def split_folder(self):
        # directory of the currently selected split
        return os.path.join(self.root, self.split)
    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
def load_meta_file(root, file=None):
    """Load the (wnid_to_classes, val_wnids) tuple cached in root/meta.bin.

    Raises RuntimeError when the file is missing or corrupted; it is created
    automatically by the ImageNet dataset (parse_devkit_archive).
    """
    name = ImageNet.meta_file if file is None else file
    file = os.path.join(root, name)
    if not check_integrity(file):
        msg = ("The meta file {} is not present in the root directory or is "
               "corrupted. This file is automatically created by the"
               " ImageNet dataset.")
        raise RuntimeError(msg.format(file, root))
    return torch.load(file)
def _verify_archive(root, file, md5):
    """Raise RuntimeError unless root/file exists and matches the given md5."""
    if not check_integrity(os.path.join(root, file), md5):
        # FIX: added the missing trailing space -- the adjacent string
        # literals previously rendered as "...or iscorrupted."
        msg = ("The archive {} is not present in the root directory or is "
               "corrupted. You need to download it externally and place it"
               " in {}.")
        raise RuntimeError(msg.format(file, root))
def parse_devkit_archive(root, file=None):
    """Parse the devkit archive of the ImageNet2012 classification dataset and save
    the meta information in a binary file (root/meta.bin).
    Args:
        root (str): Root directory containing the devkit archive
        file (str, optional): Name of devkit archive. Defaults to
            'ILSVRC2012_devkit_t12.tar.gz'
    """
    import scipy.io as sio  # local import: scipy only needed for this step

    def parse_meta_mat(devkit_root):
        # read meta.mat and keep only leaf synsets (num_children == 0)
        metafile = os.path.join(devkit_root, "data", "meta.mat")
        meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
        nums_children = list(zip(*meta))[4]
        meta = [meta[idx] for idx, num_children in enumerate(nums_children)
                if num_children == 0]
        idcs, wnids, classes = list(zip(*meta))[:3]
        classes = [tuple(clss.split(', ')) for clss in classes]
        idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
        wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
        return idx_to_wnid, wnid_to_classes

    def parse_val_groundtruth_txt(devkit_root):
        # one 1-based class index per validation image, in file order
        file = os.path.join(devkit_root, "data",
                            "ILSVRC2012_validation_ground_truth.txt")
        with open(file, 'r') as txtfh:
            val_idcs = txtfh.readlines()
        return [int(val_idx) for val_idx in val_idcs]

    @contextmanager
    def get_tmp_dir():
        # temporary extraction dir, removed even if parsing raises
        tmp_dir = tempfile.mkdtemp()
        try:
            yield tmp_dir
        finally:
            shutil.rmtree(tmp_dir)

    archive_meta = ImageNet.archive_meta["devkit"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]
    _verify_archive(root, file, md5)
    with get_tmp_dir() as tmp_dir:
        extract_archive(os.path.join(root, file), tmp_dir)
        devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12")
        idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root)
        val_idcs = parse_val_groundtruth_txt(devkit_root)
        val_wnids = [idx_to_wnid[idx] for idx in val_idcs]
        # cache the parsed mapping so later loads can skip the archive
        torch.save((wnid_to_classes, val_wnids),
                   os.path.join(root, ImageNet.meta_file))
def parse_train_archive(root, file=None, folder="train"):
    """Parse the train images archive of the ImageNet2012 classification
    dataset and prepare it for usage with the ImageNet dataset.
    Args:
        root (str): Root directory containing the train images archive
        file (str, optional): Name of train images archive. Defaults to
            'ILSVRC2012_img_train.tar'
        folder (str, optional): Optional name for train images folder.
            Defaults to 'train'
    """
    archive_meta = ImageNet.archive_meta["train"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]
    _verify_archive(root, file, md5)
    train_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), train_root)
    # the outer tar contains one tar per class; extract each into a folder
    # named after the class archive
    archives = [os.path.join(train_root, archive)
                for archive in os.listdir(train_root)]
    for archive in archives:
        extract_archive(archive, os.path.splitext(
            archive)[0], remove_finished=False)
def parse_val_archive(root, file=None, wnids=None, folder="val"):
    """Parse the validation images archive of the ImageNet2012 classification
    dataset and prepare it for usage with the ImageNet dataset.
    Args:
        root (str): Root directory containing the validation images archive
        file (str, optional): Name of validation images archive. Defaults to
            'ILSVRC2012_img_val.tar'
        wnids (list, optional): List of WordNet IDs of the validation images.
            If None is given, the IDs are loaded from the meta file in the root
            directory
        folder (str, optional): Optional name for validation images folder.
            Defaults to 'val'
    """
    archive_meta = ImageNet.archive_meta["val"]
    if file is None:
        file = archive_meta[0]
    md5 = archive_meta[1]
    if wnids is None:
        wnids = load_meta_file(root)[1]
    _verify_archive(root, file, md5)
    val_root = os.path.join(root, folder)
    extract_archive(os.path.join(root, file), val_root)
    # images are extracted flat and ordered by name; wnids is in the same
    # order, so zip() pairs each image with its class folder
    images = sorted([os.path.join(val_root, image)
                     for image in os.listdir(val_root)])
    for wnid in set(wnids):
        os.mkdir(os.path.join(val_root, wnid))
    for wnid, img_file in zip(wnids, images):
        shutil.move(img_file, os.path.join(
            val_root, wnid, os.path.basename(img_file)))
| [
"tuli.mathieu@gmail.com"
] | tuli.mathieu@gmail.com |
29f1b6a21401ee236b971d6979bebb602294ee1b | 89967e55f8ab4037368972dcf30d2aa2cd8cb0f3 | /oop_pedia_classifier.py | 8a0939f92a57883e84b9301842adc075e6e28583 | [] | no_license | petkraw/classifier | 5f487dd51ef70023aa502d69ec402b14bfe6019c | b813ff17013caf4d6d5aa036d6cb45c6c745e3ef | refs/heads/master | 2021-01-21T20:54:22.315291 | 2017-05-18T14:59:12 | 2017-05-18T14:59:12 | 92,289,428 | 0 | 0 | null | 2017-05-24T12:23:13 | 2017-05-24T12:23:13 | null | UTF-8 | Python | false | false | 57,249 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 20 09:56:23 2017
@author: Martin
"""
import json, os
import warnings
import numpy as np
import sys
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn import preprocessing
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
warnings.filterwarnings("ignore", category=DeprecationWarning)
class Sample(object):
    """A single (case, gene) score vector used by the PEDIA classifier.

    Attributes:
        case (str): ID of the case this vector belongs to.
        gene (str): symbol of the gene this vector describes.
        gestalt (float): FDNA gestalt score (portrait photo analysis).
        feature (float): FDNA feature score (annotated symptoms).
        cadd_phred (float): highest CADD phred score of the gene (0 if none passed filtering).
        phenomizer (float): phenomizer p-value * -1, classical mode.
        boqa (float): phenomizer score in BOQA mode.
        pathogenicity (int): class label, 1 = pathogenic, 0 = neutral.
        pedia (float): slot for the PEDIA decision value (-5 until classified).
        extom (float): slot for the symptoms+exome decision value (-5 until classified).
    """

    def __init__(self, case='?', gene='?', gestalt=0, feature=0, cadd_phred=0, phenomizer=0, boqa=0, pathogenicity=0, pedia=-5, extom=-5):
        # identifiers
        self.case = case
        self.gene = gene
        # raw evidence scores
        self.gestalt = gestalt
        self.feature = feature
        self.cadd_phred = cadd_phred
        self.phenomizer = phenomizer
        self.boqa = boqa
        # label and classifier output slots
        self.pathogenicity = pathogenicity
        self.pedia = pedia
        self.extom = extom

    def classify(self):
        """Score this sample with the persisted SVM and print the decision value.

        Expects 'pedia_classifier.pkl' and 'pedia_scaler.pkl' (written by
        Data.save_SVM) in the current working directory.
        """
        scaler = joblib.load('pedia_scaler.pkl')
        clf = joblib.load('pedia_classifier.pkl')
        raw = np.array([self.gestalt, self.feature, self.cadd_phred, self.phenomizer, self.boqa])
        pedia = float(clf.decision_function(scaler.transform(raw)))
        print(pedia)
class Data:
"""Common class for a list of instances of the class Samples
Attributes:
name: name of the data as a string
samples: a list of samples as instances of class Sample
casedisgene: a list of lists [[case,gene]] containing each case in samples and the respective disease causing gene
"""
def __init__(self, samples=[], casedisgene=[]):
#self.name = name
self.samples = list(samples)
self.casedisgene = casedisgene
def load(self, path):
    """Populate self.samples with one Sample per (case, gene) pair found in
    the *.json case files under `path`, and self.casedisgene with one
    [case, disease_gene] entry per case ('healthy?' when no pathogenic
    gene was annotated)."""
    print('loading data')
    for file_name in os.listdir(path):
        # two known-bad case files are skipped explicitly
        if file_name.endswith(".json") and file_name != '34159.json' and file_name != '40536.json':
            file_name = os.path.join(path, file_name)
            vectors = {}  # (case id + gene symbol) -> Sample, per file
            with open(file_name, encoding='utf-8', errors='ignore') as json_data:
                data = json.load(json_data)
            pathogene = '?'  # disease-causing gene of this case, if annotated
            #gene_omimID='?'
            if len(data['genomicData']) > 0:
                if 'Gene Name' in data['genomicData'][0]['Test Information']:
                    pathogene = data['genomicData'][0]['Test Information']['Gene Name']
            # normalize legacy/alias gene symbols to their current names
            if pathogene == 'MLL2':
                pathogene = 'KMT2D'
            elif pathogene == 'MLL':
                pathogene = 'KMT2A'
            elif pathogene == 'B3GALTL':
                pathogene = 'B3GLTC'
            elif pathogene == 'CASKIN1':
                pathogene = 'KIAA1306'
            #gene_omimID=data['genomicData'][0]['Test Information']['Gene Name']
            case = data['case_id']
            for entry in data['geneList']:
                gscore = 0  # gestalt score
                if 'gestalt_score' in entry:  # and entry['gestalt_score']>0: sometimes negative; mistake by FDNA?
                    gscore = entry['gestalt_score']
                fscore = 0  # feature score
                if 'feature_score' in entry:  # and entry['feature_score']>0: sometimes negative; mistake by FDNA?
                    fscore = entry['feature_score']
                vscore = 0  # variant score (CADD phred)
                if 'cadd_phred_score' in entry:
                    vscore = entry['cadd_phred_score']
                pscore = 0  # phenomizer score
                if 'pheno_score' in entry:
                    pscore = entry['pheno_score']
                bscore = 0  # boqa score
                if 'boqa_score' in entry:
                    bscore = entry['boqa_score']
                gene = entry['gene_symbol']
                patho = 0  # class label: 1 = pathogenic mutation, 0 = neutral variant
                if gene == pathogene:
                    patho = 1
                if pathogene != '?':  # and (gscore!=0 or fscore!=0 or ...): nullvectors not allowed
                    # if a gene appears several times in one case's gene list,
                    # only its highest value per score is kept
                    if case + gene in vectors:
                        smpl = vectors[case + gene]
                        if gscore > smpl.gestalt:
                            smpl.gestalt = gscore
                        if fscore > smpl.feature:
                            smpl.feature = fscore
                        if vscore > smpl.cadd_phred:
                            smpl.cadd_phred = vscore
                        if pscore > smpl.phenomizer:
                            smpl.phenomizer = pscore
                        if bscore > smpl.boqa:
                            smpl.boqa = bscore
                    if case + gene not in vectors:
                        vectors[case + gene] = Sample(case=case, gene=gene, gestalt=gscore, feature=fscore, cadd_phred=vscore, phenomizer=pscore, boqa=bscore, pathogenicity=patho)
            for vector in vectors:
                self.samples.append(vectors[vector])  # loads samples with instances of the class Sample
    # derive the per-case disease-gene list from the pathogenic samples
    casedisgene = []
    cases = []
    for smpl in self.samples:
        if smpl.pathogenicity == 1:
            casedisgene.append([smpl.case, smpl.gene])
            cases.append(smpl.case)
    for smpl in self.samples:
        if smpl.case not in cases:
            cases.append(smpl.case)
            casedisgene.append([smpl.case, 'healthy?'])
    self.casedisgene = casedisgene
def load2(self, path):
    """Like load(), but ignores the gestalt score entirely and only keeps
    geneList entries carrying at least one of feature/pheno/boqa/cadd
    scores. Populates self.samples and self.casedisgene."""
    print('loading data')
    # FIX: was os.listdir() (current working directory); the names are joined
    # onto `path` below, so listing must use `path` as well -- as in load().
    for file_name in os.listdir(path):
        if file_name.endswith(".json") and file_name != '34159.json' and file_name != '40536.json':
            file_name = os.path.join(path, file_name)
            vectors = {}  # (case id + gene symbol) -> Sample, per file
            with open(file_name, encoding='utf-8', errors='ignore') as json_data:
                data = json.load(json_data)
            pathogene = '?'  # disease-causing gene of this case, if annotated
            if len(data['genomicData']) > 0:
                if 'Gene Name' in data['genomicData'][0]['Test Information']:
                    pathogene = data['genomicData'][0]['Test Information']['Gene Name']
            # normalize legacy/alias gene symbols to their current names
            if pathogene == 'MLL2':
                pathogene = 'KMT2D'
            elif pathogene == 'MLL':
                pathogene = 'KMT2A'
            elif pathogene == 'B3GALTL':
                pathogene = 'B3GLTC'
            elif pathogene == 'CASKIN1':
                pathogene = 'KIAA1306'
            case = data['case_id']
            for entry in data['geneList']:
                # only entries with at least one non-gestalt score are used
                if 'feature_score' in entry or 'pheno_score' in entry or 'boqa_score' in entry or 'cadd_phred_score' in entry:
                    gscore = 0  # gestalt score is deliberately left at 0 here
                    fscore = 0  # feature score
                    if 'feature_score' in entry:
                        fscore = entry['feature_score']
                    vscore = 0  # variant score (CADD phred)
                    if 'cadd_phred_score' in entry:
                        vscore = entry['cadd_phred_score']
                    pscore = 0  # phenomizer score
                    if 'pheno_score' in entry:
                        pscore = entry['pheno_score']
                    bscore = 0  # boqa score
                    if 'boqa_score' in entry:
                        bscore = entry['boqa_score']
                    gene = entry['gene_symbol']
                    patho = 0  # class label: 1 = pathogenic, 0 = neutral
                    if gene == pathogene:
                        patho = 1
                    if pathogene != '?':
                        # keep only the highest value per score when a gene
                        # appears several times in one case's gene list
                        if case + gene in vectors:
                            smpl = vectors[case + gene]
                            if gscore > smpl.gestalt:
                                smpl.gestalt = gscore
                            if fscore > smpl.feature:
                                smpl.feature = fscore
                            if vscore > smpl.cadd_phred:
                                smpl.cadd_phred = vscore
                            if pscore > smpl.phenomizer:
                                smpl.phenomizer = pscore
                            if bscore > smpl.boqa:
                                smpl.boqa = bscore
                        if case + gene not in vectors:
                            vectors[case + gene] = Sample(case=case, gene=gene, gestalt=gscore, feature=fscore, cadd_phred=vscore, phenomizer=pscore, boqa=bscore, pathogenicity=patho)
            for vector in vectors:
                self.samples.append(vectors[vector])
    # one [case, disease gene] entry per case; 'healthy?' when none was scored
    casedisgene = []
    cases = []
    for smpl in self.samples:
        if smpl.pathogenicity == 1:
            casedisgene.append([smpl.case, smpl.gene])
            cases.append(smpl.case)
    for smpl in self.samples:
        if smpl.case not in cases:
            cases.append(smpl.case)
            casedisgene.append([smpl.case, 'healthy?'])
    self.casedisgene = casedisgene
def filter_gestalt(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.cadd_phred!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def filter_cadd(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.gestalt!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def filter_cadd_gestalt(self):
new_samples=[]
for smpl in self.samples:
if smpl.feature!=0 or smpl.phenomizer!=0 or smpl.boqa!=0:
new_samples.append(smpl)
self.samples = new_samples
def threshold(self, value, score='gestalt'):
for smpl in self.samples:
if getattr(smpl, score)<value:
setattr(smpl, score, 0)
def numhit(self, num):
""" filters self.samples for only those samples that have num scores featuring
values higher than 0, self.samples will be adjusted accordingly"""
newsamples = []
for smpl in self.samples:
num_scores = 0
if smpl.gestalt != 0:
num_scores += 1
if smpl.feature != 0:
num_scores += 1
if smpl.cadd_phred > 0:
num_scores += 1
if smpl.phenomizer > 0:
num_scores += 1
if smpl.boqa > 0:
num_scores += 1
if num_scores >= num:
newsamples.append(smpl)
self.samples = []
self.samples = newsamples
def bucketize_data(self):
    """A function to prepare 10x cross validation.

    Returns a list of 10 buckets, each containing case IDs. All case IDs
    featuring a pathogenic mutation in the same gene land in the same bucket
    (so no gene is split across train and test), and bucket sizes are kept
    as similar as possible by always filling the currently smallest bucket.
    """
    print('creating 10 buckets - same gene same bucket')
    self.casedisgene.sort()
    buckets = []
    for i in range(10):
        buckets.append([])
    allgenes = []  # names of all genes with a pathogenic entry
    # NOTE(review): numgenes (per-gene frequencies, index-aligned with
    # allgenes) is computed below but never used afterwards.
    numgenes = []
    for entry in self.casedisgene:
        case = entry[0]
        gene = entry[1]
        # unseen gene: register it with frequency 1
        if gene not in allgenes:
            allgenes.append(gene)
            numgenes.append(1)
        elif gene in allgenes:
            # known gene: locate its index and bump its frequency
            x = 0
            for i in allgenes:
                if i == gene:
                    numgenes[x] += 1
                x += 1
    for gene in allgenes:
        # minbucket starts above any possible bucket size, then is reduced
        # to the size of the smallest bucket
        # NOTE(review): under Python 3 this initial value is a float
        # (true division); min() with the int lengths still works.
        minbucket = len(self.casedisgene) / 10 + 2
        for bucket in buckets:
            minbucket = min(len(bucket), minbucket)
        for bucket in buckets:
            if len(bucket) == minbucket:  # (one of) the smallest bucket(s)
                # append every case whose disease gene is `gene`
                for entry in self.casedisgene:
                    case = entry[0]
                    dgene = entry[1]
                    if dgene == gene:
                        bucket.append(case)
                break  # only fill one bucket per gene
    #print(buckets)
    return buckets
def classify_10xSVM(self, C=1):
    """10x cross-validated SVM classification of all samples.

    For each bucket from bucketize_data(), a polynomial SVM is trained on the
    MinMax-scaled 5-score vectors of all OTHER buckets; each held-out sample
    then gets the decision value written to its `pedia` attribute.

    Args:
        C: SVM regularization parameter (default 1).
    """
    buckets = self.bucketize_data()
    print('10 x cross validation')
    bn = 1  # bucket number (progress reporting only)
    for bucket in buckets:
        print('computing results for bucket ' + str(bn))
        X = []
        y = []
        for smpl in self.samples:
            # train only on cases that are NOT in the held-out bucket
            if smpl.case not in bucket:
                X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa])
                y.append(smpl.pathogenicity)
        X = np.array(X)
        scaler = preprocessing.MinMaxScaler().fit(X)
        X = scaler.transform(X)  # scale features into [0, 1]
        y = np.array(y)
        # balanced weights: neutral genes (class 0) vastly outnumber the one
        # pathogenic gene per case (class 1)
        clf = svm.SVC(kernel='poly', C=C, degree=2, probability=False, class_weight='balanced')
        clf.fit(X, y)
        for smpl in self.samples:
            # score only the held-out cases of this bucket
            if smpl.case in bucket:
                # FIX: pass a 2-D single-row matrix; 1-D input to transform/
                # decision_function is deprecated and rejected by
                # scikit-learn >= 0.19 (same decision value either way)
                row = np.array([[smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]])
                smpl.pedia = float(clf.decision_function(scaler.transform(row))[0])
        bn += 1
def save_SVM(self, C=1):
    """Train an SVM (and its MinMax scaler) on ALL samples and persist both
    to 'pedia_classifier.pkl' / 'pedia_scaler.pkl' for reuse by
    Sample.classify()."""
    print('loading data')
    X = []
    y = []
    for smpl in self.samples:
        X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa])
        # NOTE(review): labels are appended as 1-element lists (column
        # vector) here, while classify_10xSVM appends scalars; sklearn
        # ravels this shape with a DataConversionWarning -- confirm intent.
        y.append([smpl.pathogenicity])
    X = np.array(X)
    scaler = preprocessing.MinMaxScaler().fit(X)
    X = scaler.transform(X)  # scale features into [0, 1]
    y = np.array(y)
    print('training classifier')
    # balanced: neutral genes (class 0) vastly outnumber pathogenic ones
    clf = svm.SVC(kernel='poly', C=C, degree=2, probability=False, class_weight='balanced')
    clf.fit(X, y)
    print('saving classifier')
    joblib.dump(clf, 'pedia_classifier.pkl', compress=9)
    print('saving scaler')
    joblib.dump(scaler, 'pedia_scaler.pkl', compress=9)
    print('done saving')
def classify_10xSVM_extom(self):
    """Like classify_10xSVM, but trains on the four non-gestalt scores only
    (feature, cadd_phred, phenomizer, boqa) and writes the decision value to
    each held-out sample's `extom` attribute (symptoms + exome, no biometry)."""
    buckets = self.bucketize_data()
    print('10 x cross validation')
    bn = 1  # bucket number
    for bucket in buckets:  # split follows the case IDs in each bucket
        print('computing results for bucket ' + str(bn))
        X = []
        y = []
        for smpl in self.samples:
            if smpl.case not in bucket:  # train only on cases NOT in this bucket
                X.append([smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa])  # feature vector
                y.append(smpl.pathogenicity)  # class labels
        X = np.array(X)  # the clf function needs np arrays
        scaler = preprocessing.MinMaxScaler().fit(X)
        X = scaler.transform(X)  # scale features into [0, 1]
        y = np.array(y)
        # balanced: class 0 (neutral genes) vastly outnumbers class 1
        clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced')
        clf.fit(X, y)
        for smpl in self.samples:
            if smpl.case in bucket:  # score only the held-out cases
                # NOTE(review): 1-D input to transform/decision_function is
                # deprecated (removed in scikit-learn >= 0.19) -- confirm the
                # pinned sklearn version or reshape to (1, -1)
                smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
        bn += 1
def classify_10xSVM_sgt(self):  # sgt: specific gene testing
    """Like classify_10xSVM, but trains on feature, gestalt, phenomizer and
    boqa (no CADD variant score) and writes the decision value to each
    held-out sample's `extom` attribute."""
    buckets = self.bucketize_data()
    print('10 x cross validation')
    bn = 1  # bucket number
    for bucket in buckets:  # split follows the case IDs in each bucket
        print('computing results for bucket ' + str(bn))
        X = []
        y = []
        for smpl in self.samples:
            if smpl.case not in bucket:  # train only on cases NOT in this bucket
                X.append([smpl.feature, smpl.gestalt, smpl.phenomizer, smpl.boqa])  # feature vector
                y.append(smpl.pathogenicity)  # class labels
        X = np.array(X)  # the clf function needs np arrays
        scaler = preprocessing.MinMaxScaler().fit(X)
        X = scaler.transform(X)  # scale features into [0, 1]
        y = np.array(y)
        # balanced: class 0 (neutral genes) vastly outnumbers class 1
        clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced')
        clf.fit(X, y)
        for smpl in self.samples:
            if smpl.case in bucket:  # score only the held-out cases
                # NOTE(review): 1-D input to transform/decision_function is
                # deprecated (removed in scikit-learn >= 0.19)
                smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.gestalt, smpl.phenomizer, smpl.boqa]))))
        bn += 1
def classify_10xSVM_sympt(self): #sgt: specific gene testing
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia atrribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.feature,smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X = np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y)#the clf function needs np arrays
clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced') #the classifier is balanced because class 0 exceeds class 1 by far, (only one pathogenic mutation per case,but several hundred genes per case)
clf.fit(X, y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.extom = float(clf.decision_function(scaler.transform(np.array([smpl.feature, smpl.phenomizer, smpl.boqa]))))
bn += 1
def classify_10xMLP(self):
""" a 10x validation SVM classification of all samples in the instance of Data. The samples' pedia atrribute will be adjusted accordingly. """
buckets = self.bucketize_data()
print('10 x cross validation')
bn = 1 #bucket number
for bucket in buckets: #10x cross validation, data will be split according to the ID entries in each bucket, that were created by bucketize_data()
print('computing results for bucket ' + str(bn))
X = []
y = []
for smpl in self.samples:
if smpl.case not in bucket: #only the data will be used for training that are of cases that are NOT in the topical bucket
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) #class labels
X=np.array(X) #the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X=scaler.transform(X) #data is scaled to values between 1 and 0 using minmax scaler
y=np.array(y)#the clf function needs np arrays
clf = MLPClassifier(hidden_layer_sizes=(4, 3), max_iter=10, alpha=1e-4,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
clf.fit(X,y)
for smpl in self.samples:
if smpl.case in bucket: #only those samples are tested with the classifier that ARE in the bucket
smpl.pedia = float(clf.predict(scaler.transform(np.array([smpl.gestalt, smpl.feature , smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
bn+=1
def classify_real(self, training_data):
""" SVM classification of all samples in the instance of Data against a given training
data set that is also an instance of class Data """
print('classification')
X = []
y = []
for smpl in training_data.samples:
X.append([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]) #feature vector
y.append(smpl.pathogenicity) # class labels
X = np.array(X) # the clf function needs np arrays
scaler = preprocessing.MinMaxScaler().fit(X)
X = scaler.transform(X) # data is scaled to values between 1 and 0 using minmax scaler
y = np.array(y) # the clf function needs np arrays
# the classifier is balanced because class 0 exceeds class 1 by far,
# (only one pathogenic mutation per case,but several hundred genes per case)
clf = svm.SVC(kernel='poly', C=1, degree=2, probability=False, class_weight='balanced')
clf.fit(X, y)
for smpl in self.samples:
smpl.pedia = float(clf.decision_function(scaler.transform(np.array([smpl.gestalt, smpl.feature, smpl.cadd_phred, smpl.phenomizer, smpl.boqa]))))
    def manhattan(self, ID='all', score='pedia'):
        """ Displays the information in Data as a manhattan plot (score vs. genomic
        position).  If the optional variable ID is set to a string matching a case
        ID, only the results of this case will be displayed; with the default
        'all', every sample is plotted.

        ID -- case ID to restrict the plot to, or 'all'
        score -- name of the sample attribute plotted on the y-axis

        Reads gene coordinates from 'allgenepositions.txt' in the current
        working directory (tab-separated: transcript, chromosome, position,
        gene symbol).  Draws onto the current matplotlib figure.
        """
        genepos={}
        # hg19 chromosome lengths for chr1..22, X, Y, M — used to lay the
        # chromosomes end-to-end on the x-axis.
        chr_sizes=[249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663, 146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540, 102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566, 155270560, 59373566, 16571]
        for line in open('allgenepositions.txt'):
            fields = line[:-1].split('\t')
            nm = fields[0]  # transcript accession (unused)
            chro = fields[1]
            pos = fields[2]
            name = fields[3]
            # keep only the first position seen per gene symbol
            if name not in genepos:
                genepos[name]=[chro,pos]
        # neutral samples, split into two lists so that adjacent chromosomes
        # alternate colours
        sanos=[]
        sanos2=[]
        pathos=[]   # pathogenic samples' scores
        s_pos=[]    # x-positions of sanos
        s_pos2=[]   # x-positions of sanos2
        p_pos=[]    # x-positions of pathos
        names=[]    # pathogenic gene labels (one per gene)
        names_x=[]  # label x-positions
        names_y=[]  # label y-positions (highest score seen for that gene)
        for smpl in self.samples:
            if smpl.case==ID or ID=='all':
                # report pathogenic genes with unknown coordinates
                if smpl.gene not in genepos and smpl.pathogenicity == 1:
                    print(smpl.gene)
                if smpl.gene in genepos:
                    chrom=genepos[smpl.gene][0][3:]  # strip the 'chr' prefix
                    if chrom=='X':
                        chrom=23
                    elif chrom=='Y':
                        chrom=24
                    elif chrom=='M':
                        chrom=25
                    else:
                        chrom=int(chrom)
                    # absolute x-position: sum of preceding chromosome lengths
                    # (plus a 1 Mb gap each) plus the within-chromosome position
                    pos=0
                    for i in range(chrom-1):
                        pos+=chr_sizes[i]+10**6
                    pos+=int(genepos[smpl.gene][1])
                    if smpl.pathogenicity==0:
                        # alternate colour by chromosome parity
                        if chrom%2==0:
                            sanos2.append(getattr(smpl, score))
                            s_pos2.append(pos)
                        else:
                            sanos.append(getattr(smpl, score))
                            s_pos.append(pos)
                    if smpl.pathogenicity==1:
                        pathos.append(getattr(smpl, score))
                        p_pos.append(pos)
                        # keep one label per pathogenic gene, at its highest score
                        if smpl.gene in names:
                            for i in range(len(names)):
                                if names[i]==smpl.gene:
                                    if names_y[i]<getattr(smpl, score):
                                        names_y[i]=getattr(smpl, score)
                        if smpl.gene not in names:
                            names.append(smpl.gene)
                            names_x.append(pos)
                            names_y.append(getattr(smpl, score))
        plt.scatter(s_pos,sanos, color='#70ACC0', alpha=0.6, marker='o', s=400, label=('neutrals')) #s=30
        plt.scatter(s_pos2,sanos2, color='#008B8B', alpha=0.6, marker='o', s=400, label=('neutrals')) #s=30 #385660
        plt.scatter(p_pos,pathos, color='#AA1C7D', alpha=0.6, marker='o', s=400, label='pathogenic') #s=30
        for i in range(len(names)):
            plt.annotate(names[i], xy = (names_x[i], names_y[i]), xytext = (names_x[i], names_y[i]), fontsize=70, color='#AA1C7D')#, textcoords = 'offset points')
        plt.xlabel('chromosomal position', fontsize=30)
        # one tick in the middle of each chromosome
        ticks=[]
        tick=0
        for i in chr_sizes:
            tick+=i/2
            ticks.append(tick)
            tick+=(i/2)+10**6
        plt.xticks(ticks)
        plt.ylabel( score+' score', fontsize=30)
        plt.legend(loc='upper left', fontsize=25)
        frame1=plt.gca()
        # human-readable chromosome names for the tick labels
        chr_names=[]
        for i in range(1,26):
            if i==23:
                chr_names.append('X')
            elif i==24:
                chr_names.append('Y')
            elif i==25:
                chr_names.append('M')
            else:
                chr_names.append(str(i))
        frame1.axes.xaxis.set_ticklabels(chr_names, fontsize=25)
        frame1.axes.tick_params(axis='x',length=0)
        frame1.axes.tick_params(axis='y', labelsize=25)
        # NOTE(review): min()/max() below raise ValueError if any of the three
        # lists is empty (e.g. a case with no pathogenic sample) — confirm
        # callers always provide all three categories.
        y_min=min([min(sanos),min(sanos2),min(pathos)])
        y_max=max([max(sanos),max(sanos2),max(pathos)])
        plt.ylim(y_min, y_max+(y_max/10)) #ymin-(ymax/30)
        plt.xlim(0,ticks[-1]+(chr_sizes[-1]/2)+10**6)
    def ranker(self, col, lab):
        """A function to evaluate (rank) the results of the classification and put into a plot.
        only to be used after data was classified.

        col -- colour of the plotted sensitivity curve
        lab -- label of the plotted curve
        """
        # data is what is to be analyzed, it must have the structure of alldatascored in classify()
        # col is the color of the plot
        # lab is the label of the plot
        print('ranking results based on',lab)
        data = []
        for smpl in self.samples:
            data.append([smpl.case, smpl.pedia, smpl.gestalt, smpl.pathogenicity])
        n_cases = len(self.casedisgene)
        # sorts the data by case ID because it has index 0 in alldatascored, and then by pedia score,
        # because it has index 1
        data.sort()
        # reverses the data so that each scases starts with the entry with the highest pedia score
        data.reverse()
        # a list that will contain lists of the IDs of each case and the rank of the respective
        # pathogenic variant, ranked by the pedia-score
        combined_rank = []
        rank = 1
        case = data[0][0]
        pathoamongdata = False # is the pathogenic gene still among the data (it had not been filtered out?)
        npf = 0 # number passed filter
        for entry in data:
            currcase = entry[0]
            patho = entry[-1]
            if currcase != case:
                if not pathoamongdata:
                    combined_rank.append([case, 102])
                    # if the pathogenic gene had not been in that case anymore the case will be assigned
                    # a rank of 102 (so that it is higher than 100 will be regarded as having failed)
                pathoamongdata = False #pathoamongdata is set back to false
                rank = 1
                case = currcase
            if patho == 1:
                combined_rank.append([case, rank]) # assignes the rank of the pathogenic gene to the case
                pathoamongdata = True # true because there was a pathogenic gene in that case
                npf += 1
            rank += 1 # increased by 1 for each iteration, because the list is sorted by case and than pedia score
        # NOTE(review): the very last case in the sorted list is never flushed, so
        # if its pathogenic gene was filtered out it is silently missing from
        # combined_rank.  This is benign for the sensitivity plot below (rank 102
        # never contributes anyway) but worth confirming if combined_rank is
        # ever used elsewhere.
        combined_performance = []
        for i in range(101): # will evalute ranks in range 0 to 101)
            sens = 0
            for j in combined_rank:
                rank = j[1]
                if rank <= i:
                    sens += 1 # how many cases have a patho rank lower than or eqal to i
            # the absolute number is divided by the total number of cases,
            # so that one has the fraction of cases having a patho rank not higher than i
            sens = (sens/n_cases)
            # appends sens to i, so that combined rank is a list of floats, each float describing the
            # fraction of cases that have a pathorank lower or eqaul to its index
            combined_performance.append(sens)
        plt.plot(range(1, len(combined_performance)), combined_performance[1:],
                 color=col, alpha=0.6, label=lab, linewidth=3)
        plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]],
                    color=col, alpha=0.6, marker='o', s=50)
        print(lab, [combined_performance[1], combined_performance[10], combined_performance[100]],
              'fraction passed filter:', (npf/n_cases))
        plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
        plt.xlim(0, 100.5)
        plt.xlabel('rank-cut-off')
        plt.ylabel('Sensitivity')
        plt.title('Sensitivity-rank-cut-off-correlation')
        plt.legend(loc='lower right')
def ranker2(self,col,lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified."""
#data is what is to be analyzed, it must have the structure of alldatascored in classify()
#col is the color of the plot
#lab is the label of the plot
print('ranking results based on',lab)
cases={}
combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
n_cases = len(self.casedisgene)
npf=0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
for i in ranks:
rank=102
if i[1][1]==1:
rank=i[0]+1
npf+=1
combined_rank.append([case,rank])
combined_performance=[]
for i in range(101): #will evalute ranks in range 0 to 101)
sens=0
for j in combined_rank:
rank=j[1]
if rank<=i:
sens+=1 #how many cases have a patho rank lower than or eqal to i
sens=(sens/n_cases) #the absolute number is divided by the total number of cases, so that one has the fraction of cases having a patho rank not higher than i
combined_performance.append(sens) #appends sens to i, so that combined rank is a list of floats, each float describing the fraction of cases that have a pathorank lower or eqaul to its index
plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
plt.xlim(0, 100.5)
plt.xlabel('rank-cut-off')
plt.ylabel('Sensitivity')
plt.title('Sensitivity-rank-cut-off-correlation')
plt.legend(loc='lower right')
def ranker_returner(self, lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot.
only to be used after data was classified."""
# data is what is to be analyzed, it must have the structure of alldatascored in classify()
# col is the color of the plot
# lab is the label of the plot
print('ranking results based on', lab)
cases = {}
# a list that will contain lists of the IDs of each case and the rank of the respective
# pathogenic variant, ranked by the pedia-score
combined_rank = []
n_cases = len(self.casedisgene)
npf = 0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity * (-1)])
if smpl.case not in cases:
cases[smpl.case] = [[getattr(smpl, score), smpl.pathogenicity * (-1)]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks = (list(enumerate(cases[case])))
for i in ranks:
rank = 102
if i[1][1] == -1:
rank = i[0] + 1
npf += 1
combined_rank.append([case, rank])
combined_performance = []
for i in range(101): # will evalute ranks in range 0 to 101
sens = 0
for j in combined_rank:
rank = j[1]
if rank <= i:
sens += 1 # how many cases have a patho rank lower than or eqal to i
# the absolute number is divided by the total number of cases, so that one has
# the fraction of cases having a patho rank not higher than i
sens = (sens/n_cases)
# appends sens to i, so that combined rank is a list of floats, each float describing
# the fraction of cases that have a pathorank lower or eqaul to its index
combined_performance.append(sens)
# plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
# plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
# print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
# plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
# plt.xlim(0, 100.5)
# plt.xlabel('rank-cut-off')
# plt.ylabel('Sensitivity')
# plt.title('Sensitivity-rank-cut-off-correlation')
# plt.legend(loc='lower right')
return([combined_performance[1], combined_performance[10], combined_performance[100]])
def ranker_returner2(self,lab, score='pedia'):
"""A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified."""
#data is what is to be analyzed, it must have the structure of alldatascored in classify()
#col is the color of the plot
#lab is the label of the plot
print('ranking results based on',lab)
cases={}
combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
n_cases = len(self.casedisgene)
npf=0 #number passed filter
for smpl in self.samples:
if smpl.case in cases:
cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity*(-1)])
if smpl.case not in cases:
cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity*(-1)]]
for case in cases:
cases[case].sort()
cases[case].reverse()
ranks=(list(enumerate(cases[case])))
print(ranks)
for i in ranks:
rank=102
if i[1][1]==-1:
rank=i[0]+1
npf+=1
combined_rank.append([case,rank])
combined_performance=[]
for i in range(101): #will evalute ranks in range 0 to 101)
sens=0
for j in combined_rank:
rank=j[1]
if rank<=i:
sens+=1 #how many cases have a patho rank lower than or eqal to i
sens=(sens/n_cases) #the absolute number is divided by the total number of cases, so that one has the fraction of cases having a patho rank not higher than i
combined_performance.append(sens) #appends sens to i, so that combined rank is a list of floats, each float describing the fraction of cases that have a pathorank lower or eqaul to its index
# plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
# plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
# print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
# plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
# plt.xlim(0, 100.5)
# plt.xlabel('rank-cut-off')
# plt.ylabel('Sensitivity')
# plt.title('Sensitivity-rank-cut-off-correlation')
# plt.legend(loc='lower right')
print([combined_performance[1],combined_performance[10],combined_performance[100]])
    def compare(self, score1='pedia', score2='gestalt', score3='extom'):
        """Print every case in which the pathogenic gene ranks better (lower)
        under `score1` than under `score3`.

        Builds, per case, the pathogenic gene's rank under each of the three
        scores (ranking[case] == [rank_by_score1, rank_by_score2, rank_by_score3])
        and prints the cases where rank_by_score1 < rank_by_score3.
        """
        cases={}
        # NOTE(review): rank1/rank2/rank5 are never read — apparently leftovers.
        rank1=1000
        rank2=1000
        rank5=1000
        ranking={}
        # Pass 1: rank by score1; the assignment below overwrites, so with
        # several pathogenic entries per case the last one wins.
        for smpl in self.samples:
            if smpl.case in cases:
                cases[smpl.case].append([getattr(smpl, score1), smpl.pathogenicity])
            if smpl.case not in cases:
                cases[smpl.case]=[[getattr(smpl, score1), smpl.pathogenicity]]
        for case in cases:
            cases[case].sort()
            cases[case].reverse()
            ranks=(list(enumerate(cases[case])))
            for i in ranks:
                if i[1][1]==1:
                    #print(i)
                    ranking[case]=[i[0]+1]
                    #print(case,ranking[case],[i[0]+1])
        # Pass 2: rank by score2 and append.  NOTE(review): if a case had no
        # pathogenic entry in pass 1, ranking[case] is missing here and the
        # append raises KeyError — presumably every case has exactly one
        # pathogenic gene; confirm.
        cases={}
        for smpl in self.samples:
            if smpl.case in cases:
                cases[smpl.case].append([getattr(smpl, score2), smpl.pathogenicity])
            if smpl.case not in cases:
                cases[smpl.case]=[[getattr(smpl, score2), smpl.pathogenicity]]
        for case in cases:
            cases[case].sort()
            cases[case].reverse()
            ranks=(list(enumerate(cases[case])))
            for i in ranks:
                if i[1][1]==1:
                    ranking[case].append(i[0]+1)
        # Pass 3: rank by score3 and append.
        cases={}
        for smpl in self.samples:
            if smpl.case in cases:
                cases[smpl.case].append([getattr(smpl, score3), smpl.pathogenicity])
            if smpl.case not in cases:
                cases[smpl.case]=[[getattr(smpl, score3), smpl.pathogenicity]]
        for case in cases:
            cases[case].sort()
            cases[case].reverse()
            ranks=(list(enumerate(cases[case])))
            for i in ranks:
                if i[1][1]==1:
                    ranking[case].append(i[0]+1)
        # Report cases where score1 ranks the pathogenic gene better than score3.
        for case in ranking:
            if ranking[case][0]<ranking[case][2]:
                print(str(case),ranking[case])
    def ranker3(self,col,lab, score='pedia'):
        """A function to evaluate (rank) the results of the classification and put into a plot. only to be used after data was classified.

        Unlike ranker/ranker2 this variant aggregates per *disease gene*: for
        every gene in self.casedisgene it averages the pathogenic ranks over
        all cases with that gene, then plots the sensitivity curve over genes.

        col -- colour of the plotted curve
        lab -- label of the plotted curve
        score -- name of the sample attribute to rank by (default 'pedia')
        """
        #data is what is to be analyzed, it must have the structure of alldatascored in classify()
        #col is the color of the plot
        #lab is the label of the plot
        print('ranking results based on',lab)
        genes={}  # disease gene -> list of pathogenic ranks over its cases
        cdg={}    # case ID -> disease gene
        for entry in self.casedisgene:
            genes[entry[1]]=[]
            cdg[entry[0]]=entry[1]
        cases={}
        combined_rank=[] #a list that will contain lists of the IDs of each case and the rank of the respective pathogenic variant, ranked by the pedia-score
        n_cases = len(self.casedisgene)
        npf=0 #number passed filter
        for smpl in self.samples:
            if smpl.case in cases:
                cases[smpl.case].append([getattr(smpl, score), smpl.pathogenicity, smpl.gene])
            if smpl.case not in cases:
                cases[smpl.case]=[[getattr(smpl, score), smpl.pathogenicity, smpl.gene]]
        for case in cases:
            cases[case].sort()
            cases[case].reverse()
            ranks=(list(enumerate(cases[case])))
            rank=102  # >100 counts as "failed"; correctly initialised once per case
            for i in ranks:
                if i[1][1]==1:
                    rank=i[0]+1
                    npf+=1
            genes[cdg[case]].append(rank)
        #print('genes:',genes)
        for gene in genes:
        # if genes[gene]==[]:
        # genes[gene]=[102]
            # NOTE(review): the guard above is commented out — if a disease gene
            # collects no ranks (no sample for any of its cases), the division
            # below raises ZeroDivisionError.  Confirm whether the guard should
            # be re-enabled.
            ranksum=0
            for rank in genes[gene]:
                ranksum+=rank
            ranksum/=len(genes[gene])  # mean pathogenic rank for this gene
            combined_rank.append([gene,ranksum])
            print(gene, genes[gene], ranksum)
        combined_performance=[]
        for i in range(101): #will evalute ranks in range 0 to 101)
            sens=0
            for j in combined_rank:
                rank=j[1]
                if rank<=i:
                    sens+=1 #how many cases have a patho rank lower than or eqal to i
            sens=(sens/len(genes)) #the absolute number is divided by the total number of cases, so that one has the fraction of cases having a patho rank not higher than i
            combined_performance.append(sens) #appends sens to i, so that combined rank is a list of floats, each float describing the fraction of cases that have a pathorank lower or eqaul to its index
        plt.plot(range(1,len(combined_performance)),combined_performance[1:], color=col, alpha=0.6, label=lab, linewidth=3)
        plt.scatter([1,10,100],[combined_performance[1],combined_performance[10],combined_performance[100]], color=col, alpha=0.6, marker='o', s=50)
        print(lab,[combined_performance[1],combined_performance[10],combined_performance[100]],'fraction passed filter:',(npf/n_cases))
        plt.ylim(0, 1.01) #the last lines of code are only needed to display the results
        plt.xlim(0, 100.5)
        plt.xlabel('rank-cut-off')
        plt.ylabel('Sensitivity')
        plt.title('Sensitivity-rank-cut-off-correlation')
        plt.legend(loc='lower right')
def save_jsons(self,path):
'''a function to save the pedia scores in their respective jsons'''
cwd=os.getcwd()
os.chdir(path)
print('saving results')
for file in os.listdir():
if file[-5:]=='.json':
print(file)
with open(file) as json_data:
casedata = json.load(json_data)
for smpl in self.samples:
for i in casedata['geneList']:
if i['gene_symbol']==smpl.gene:
i['pedia_score']=smpl.pedia
with open(file, 'w') as f:
json.dump(casedata, f)
os.chdir(cwd)
print('finished saving')
def hyper_search_helper(self, start=-5, stop=5, step=10, maximum=0, attempts=2, best=[0,[0,0,0]]):
for i in range(0, step + 1, 1):
exp = start + (i / step * (stop - start))
print('evaluating c-value of 10**' + str(exp) + '\nstep ' + str(i + 1) + ' of ' + str(step))
c_value=10**exp
self.classify_10xSVM(c_value)
performance = [exp, self.ranker_returner(lab=('c_value = 10**' + str(exp)))]
if performance[1][1] > best[1][1]:
best = performance
elif performance[1][1] == best[1][1]:
if performance[1][0] > best[1][0]:
best = performance
elif performance[1][0] == best[1][0] and performance[1][2] > best[1][2]:
best = performance
#results.append(performance)
print('best', best)
#print(results)
print('best',best)
if best[0] == maximum:
attempts -= 1
if best[0] != start and best[0] != stop:
result = [best[0] - (2 * ((stop - start) / step)), best[0] + (2 * ((stop-start) / step)), step, attempts, best]
else:
result=[start - ((stop - start)), stop + ((stop - start)), step, attempts, best]
return(result)
def hyper_search(self, start=-5, stop=5, step=10, maximum=0, attempts=2, best=[0,[0,0,0]]):
iteration = 1
while attempts > 0:
print('hyperparameter search round: ' + str(iteration) + ' \nremaining determination attempts ' + str(attempts))
new = self.hyper_search_helper(start, stop, step, maximum, attempts, best)
start = new[0]
stop = new[1]
step = new[2] # not really necessary as step doesnt change in hyper_search_helper
attempts = new[3]
maximum = new[4][0]
best = new[4]
iteration += 1
print('hyperparameter search determined best c-value at ' + str(best))
def main():
    """Experiment driver: loads the simulation data sets, runs the various
    classification/ranking experiments and writes the result plots.

    Usage: python <script> <path to simulation data>

    NOTE(review): most sections below look like accumulated one-off
    experiments (objects are repeatedly re-created and re-classified,
    plt.show() blocks execution several times) — presumably individual
    sections were toggled by commenting; confirm before running end-to-end.
    """
    if len(sys.argv) < 2:
        sys.exit('Usage: python %s path(simulation data)' % sys.argv[0])
    path = sys.argv[1]
    # --- Experiment 1: train on the three "real" training sets, test on /real/test ---
    print('loading 1KG')
    onekg = Data()
    onekg.load(path + '/real/train/1KG')
    onekg.numhit(0)
    print('loading ExAC')
    exac = Data()
    exac.load(path + '/real/train/ExAC')
    exac.numhit(0)
    print('loading Iranian')
    iran = Data()
    iran.load(path + '/real/train/IRAN')
    iran.numhit(0)
    print('loading test data')
    test = Data()
    test.load(path + '/real/test')
    test.numhit(0)
    print('classifying against 1KG')
    test.classify_real(onekg)
    test.ranker('red', '1KG')
    print('classifying against ExAC')
    test.classify_real(exac)
    test.ranker('blue', 'EXAC')
    print('classifying against Iranian')
    test.classify_real(iran)
    test.ranker('purple', 'Iran')
    plt.savefig('sensitivity_rank_cor.png')
    # --- Experiment 2: cross-validation on 1KG, hyperparameter search, manhattan plot ---
    results = []
    best = [None, [0, 0, 0]]
    print('loading 1KG')
    onekg = Data()
    onekg.load(path + '/1KG/CV')
    onekg.save_SVM(C=10 ** (-1.45))
    onekg.hyper_search(start=-3, stop=3)
    # single hand-made sample, presumably a smoke test of Sample.classify()
    smpl = Sample()
    smpl.boqa = 0.5
    smpl.phenomizer = 0.7
    smpl.cadd_phred = 17
    smpl.feature = 0
    smpl.gestalt = 1.5
    smpl.classify()
    print('classifying 1KG by SVM')
    onekg.classify_10xSVM()
    onekg.manhattan('40639')
    plt.savefig('10xSVM.png')
    onekg.ranker2('red', 'PEDIA')
    onekg.classify_10xSVM_extom()
    onekg.ranker2('blue', 'extom', score = 'extom')
    onekg.compare()
    onekg.classify_10xSVM_extom()
    onekg.ranker2('blue', 'extom no filter', score = 'extom')
    onekg.ranker2('green', 'gestalt no filter', score = 'gestalt')
    onekg2 = Data()
    onekg2.load2(path + '/1KG/CV')
    print('classifying 1KG by SVM')
    onekg2.classify_10xSVM()
    onekg2.ranker2('black', 'extom sep. loaded')
    plt.savefig('10xSVM_extom.png')
    #onekg.ranker2('purple','gestalt',score='gestalt')
    #onekg.ranker2('orange','cadd_phred',score='cadd_phred')
    # --- Experiment 3: effect of the gestalt filter ---
    onekg.filter_gestalt()
    onekg.classify_10xSVM_extom()
    onekg.ranker3('blue', 'extom post filter', score = 'extom')
    onekg = Data()
    onekg.load(path + '/real/train/1KG')
    test = Data()
    test.load(path + '/real/test')
    test.classify_real(onekg)
    test.ranker2('red', 'PEDIA')
    onekg.filter_gestalt()
    test.classify_real(onekg)
    test.ranker2('blue', 'extom')
    plt.show()
    # --- Experiment 4: numhit(2) variant on 1KG ---
    onekgnum = Data()
    onekgnum.load(path + '/1KG/CV')
    onekgnum.numhit(2)
    print('classifying 1KGnum by SVM')
    onekgnum.classify_10xSVM()
    onekgnum.ranker3('green', 'PEDIA num')
    onekgnum.filter_gestalt()
    onekgnum.classify_10xSVM_extom()
    onekgnum.ranker3('orange', 'extomnum', score = 'extom')
    onekg.compare()
    plt.show()
    # quick sanity check of the sort/enumerate ranking idiom
    scores=[[30, 0], [7, 0], [346, 0], [9, 0], [65, 0], [39, 0], [87, 0], [124, 0], [39, 1], [30, 0], [-1, 0]]
    scores.sort()
    scores.reverse()
    print(list(enumerate(scores)))
    # --- Experiment 5: 10x CV on the three simulated cohorts ---
    print('loading 1KG')
    onekg = Data()
    onekg.load(path + '/1KG/CV')
    onekg.numhit(0)
    print('loading ExAC')
    exac = Data()
    exac.load(path + '/ExAC/CV')
    exac.numhit(0)
    print('loading Iranian')
    iran = Data()
    iran.load(path + '/IRAN/CV')
    iran.numhit(0)
    print('classifying 1KG')
    onekg.classify_10xSVM()
    onekg.ranker('red','1KG')
    print('classifying ExAC')
    exac.classify_10xSVM()
    exac.ranker('blue','EXAC')
    print('classifying Iranian')
    iran.classify_10xSVM()
    iran.ranker('purple','Iran')
    plt.show()
    test=Data()
    test.load(path + '/1KG/CV')
    test.classify_10xSVM()
    test.manhattan('97147')
    plt.show()
    # --- Inspection: dump the stored combined ranks from the case JSONs ---
    os.chdir(path + '/1KG/CV')
    for i in os.listdir():
        with open(i) as json_data:
            data = json.load(json_data)
            print(data['ranks']['combined_rank'], i[:-5])
    # --- Inspection: list test cases without a pathogenic sample and check
    # whether their disease gene is at least present in the gene list ---
    test=Data()
    test.load(path + '/real/test')
    cases=[]
    patho=0
    for smpl in test.samples:
        if smpl.case not in cases:
            cases.append(smpl.case)
    for smpl in test.samples:
        if smpl.pathogenicity == 1:
            cases.pop(cases.index(smpl.case))
    os.chdir(path + '/real/test')
    for case in cases:
        with open(str(case)+'.json') as json_data:
            data = json.load(json_data)
        disgene=data['genomicData'][0]['Test Information']['Gene Name']
        print(disgene)
        for entry in data['geneList']:
            #print(entry['gene_symbol'])
            if entry['gene_symbol']==disgene:
                print('here')
# Entry point: run the full analysis pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| [
"la60312@gmail.com"
] | la60312@gmail.com |
63b5c0c591f597c17991b2aed4f3f12c4bde6812 | 5603c820064b57b2ebffbebcf05a53ba83e0364c | /cases_increase_visulization.py | 13e31c37acb44ccc01075f5d833cb296b0bf5815 | [
"MIT"
] | permissive | Yiheng1999/COVID-19-Quebec-Data-Analysis | a3cecea4eb8de9a0ea8ee1cdd12889a79f0c1f93 | fee8a8a85d8b8f30d961decf8fbc3ec2158e96d7 | refs/heads/master | 2022-12-11T20:12:32.923428 | 2020-09-08T19:08:47 | 2020-09-08T19:08:47 | 287,821,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | from read_data import *
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.ticker as ticker

# Columns of the Quebec COVID-19 case table (qc_cases_data comes from
# `from read_data import *` at the top of the file).
date = qc_cases_data["date_report"]
cases = qc_cases_data["cases"]
cumulative_cases = qc_cases_data["cumulative_cases"]
# NOTE(review): these three calls draw on an implicit figure that is then
# superseded by the plt.subplots() figure below — this first plot is never
# saved or shown; presumably leftover from an earlier iteration.
plt.plot(date, cases, color='r', label="Cases Increased Each Day")
plt.plot(date, cumulative_cases, color='b', label="Cumulative Cases")
plt.legend(loc='upper left')
# my_x_ticks = np.arange(0, date.size, 5)
# plt.xticks(my_x_ticks)
# plt.xticks(np.arange(0,date.size+1, 10.0))
# plt.xticks(rotation=90)
# Actual figure: daily and cumulative curves with a major tick every 10 days.
fig, ax = plt.subplots()
ax.plot(date, cases, color='r', label="Cases Increased Each Day")
ax.plot(date, cumulative_cases, color='b', label="Cumulative Cases")
plt.legend(loc='upper left')
plt.xticks(rotation=45)
ax.xaxis.set_major_locator(ticker.MultipleLocator(10))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(1))
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
plt.title("The COVID-19 Cases Increase Trend From January 25")
plt.savefig("cases.jpg")
plt.show()
| [
"lluuyyiihh@icloud.com"
] | lluuyyiihh@icloud.com |
5a9660779063959ecef329d1b58ac42c1dc13e5e | 0da3ebae606295ee3c1613004c6f21650e914841 | /codestreak/extensions.py | 07761de1820e73e03a2ea21169597925d9435969 | [] | no_license | mfwarren/codestreak.io | 38bac87f2ddc6e7cff56a4bc95b6b1ca4a41ef1a | bd37dd7ad55c9926e7a4752afca5986c08145d34 | refs/heads/master | 2020-06-11T06:21:27.012529 | 2019-03-03T15:43:32 | 2019-03-03T15:43:32 | 75,747,414 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 448 | py | # -*- coding: utf-8 -*-
"""Extensions module. Each extension is initialized in the app factory located in app.py."""
from flask_debugtoolbar import DebugToolbarExtension
from flask_migrate import Migrate
from raven.contrib.flask import Sentry
from flask_sqlalchemy import SQLAlchemy
from flask_wtf.csrf import CsrfProtect
csrf_protect = CsrfProtect()
db = SQLAlchemy()
migrate = Migrate()
debug_toolbar = DebugToolbarExtension()
sentry = Sentry()
| [
"matt.warren@gmail.com"
] | matt.warren@gmail.com |
91b1a189b3f728d01fda709574934827203d64d8 | cfacde67a64b40f01aa4395f79c89681084d14f5 | /3/DrawingRectangle.py | bfcbe28d754d61e7bdc24f4eb5675df4123326c7 | [] | no_license | dzjfromChina/opencv | 5625be655fba806e2c632ad9677584bf34bf5d82 | 9ac790247cabd25903fdcb4f37ea6d347ab3b764 | refs/heads/master | 2020-04-19T22:47:55.771063 | 2019-02-09T07:57:15 | 2019-02-09T07:57:15 | 168,479,308 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | """""""""""""""""""""""""""""""""""""""""""""""
@author: duzj
@contact: dzj0574@163.com
@software: PyCharm
@file: DrawingRectangle.py
@time: 2019/1/31 17:06
"""""""""""""""""""""""""""""""""""""""""""""""
"""
To draw a rectangle, you need top-left corner and bottom-right corner of rectangle.
This time we will draw a green rectangle at the top-right corner of image.
要绘制矩形,需要矩形的左上角和右下角。这次我们将在图像的右上角绘制一个绿色矩形。
"""
import numpy as np
import cv2 as cv
# Create a black image
# 生成一个512*512的举证 矩阵的每一个元素是一个3*3的0矩阵
img = np.zeros((512,512,3), np.uint8)
# Draw a diagonal blue line with thickness of 5 px
# 取(384,0) 和 (510,128) 两个点 (0,255,0)表示颜色 5表示粗细
cv.rectangle(img,(384,0),(510,128),(0,255,0),3)
#显示
cv.imshow('image',img)
cv.waitKey(0)
cv.destroyAllWindows() | [
"dzj0574@163.com"
] | dzj0574@163.com |
61d2852f4a8db76980d9c0c705e83ec85c83c828 | 8d92399f4b1aa961aba60c55d97d79b701fbe725 | /Remove_all_occurances_of_any_element_for_maxium_array_sum.py | 8f16617c2d1bbb37c6ed538337aedcf6497f2a8b | [] | no_license | shankarshastr/main | 858939bbbecdec2a268d59cdd6d4100744aa9f91 | 9815dc93e95335b2535e673f9218c655dc406db5 | refs/heads/master | 2020-04-29T10:26:16.545022 | 2019-04-25T12:51:53 | 2019-04-25T12:51:53 | 176,061,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | py | a = [1, 1, 3, 3, 2, 2, 1, 1, 1]
sum =0
dict = {}
max = 0
prod = 0
for i in a:
sum = sum + i
if i not in dict:
dict[i] = 1
else:
dict[i] += 1
for i in dict.keys():
prod = i * dict[i]
diff = sum - prod
if diff > max:
max = diff
print max
| [
"shankarnarayanshastri@gmail.com"
] | shankarnarayanshastri@gmail.com |
f051952d00de8a9fcedd3c7f62105461285f8d97 | e740862e1f335731bc537f3be5dbd48145e1775d | /01.DeepLearningForEveryone/SingleLinearRegression.py | 92c990223be819244568e78cf57d86f048499a8c | [] | no_license | inseok1121/tensorflow_2 | 0c8209beee3986362f8df84fd1ea4cba2c27bd00 | ed3c1efcfae1b91e73c9938231e0aa5cb33bf75c | refs/heads/master | 2020-04-18T07:07:00.148631 | 2019-02-01T12:36:59 | 2019-02-01T12:36:59 | 167,349,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import tensorflow as tf
x_data = [2., 4., 6., 8.]
y_data = [81., 93., 91., 97.]
X = tf.placeholder(dtype=tf.float32, shape=[None])
Y = tf.placeholder(dtype=tf.float32, shape=[None])
a = tf.Variable(tf.random.uniform([1], 0, 10, dtype=tf.float32, seed=0))
b = tf.Variable(tf.random.uniform([1], 0, 100, dtype=tf.float32, seed=0))
y = a * x_data + b
cost = tf.sqrt(tf.reduce_mean(tf.square(y - y_data)))
learning_rate = 0.1
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(20001):
sess.run(train, feed_dict={X:x_data, Y:y_data})
if step % 100 == 0:
print("%.f, cost = %.04f, LEAN = %.4f, b = %.4f" % (step, sess.run(cost), sess .run(a), sess.run(b))) | [
"inseck1121@gmail.com"
] | inseck1121@gmail.com |
f6741c2252c2ee9fb6272ae320b6439a7870f901 | c0d47f66bb88026c79286857ad94729ba7bf369c | /Exercices/Photos/organize_photos.py | 95667a98bf7a900537af44dd3f71f130350cdf60 | [] | no_license | tahia910/PythonProjects | a44434640376be2a3f7a8bc53842b966fe078377 | 05963c023e06e27caa63f61bd270b929e4df975a | refs/heads/main | 2023-03-20T11:55:12.212950 | 2021-03-14T07:57:48 | 2021-03-14T07:57:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | import os
def extract_place(filename):
return filename.split("_")[1]
def make_place_directories(places):
for place in places:
os.mkdir(place)
def organize_photos(directory):
# First, extract place names
os.chdir(directory)
originals = os.listdir()
places = []
for filename in originals:
place = extract_place(filename)
if place not in places:
places.append(place)
# Second, make place directories
make_place_directories(places)
# Finally, move files to directories
for filename in originals:
place = extract_place(filename)
os.rename(filename, os.path.join(place, filename))
organize_photos("Photos") | [
"37906654+ootahiaoo@users.noreply.github.com"
] | 37906654+ootahiaoo@users.noreply.github.com |
3c2393b7cc3380369cbc4d1b55810fd9d9d82ed4 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/cloud/clouddms/v1/clouddms-v1-py/google/cloud/clouddms_v1/services/data_migration_service/transports/grpc.py | 27875f61ce4a3548b8ad8719aabf93eec1c8092e | [
"Apache-2.0"
] | permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,544 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.clouddms_v1.types import clouddms
from google.cloud.clouddms_v1.types import clouddms_resources
from google.longrunning import operations_pb2 # type: ignore
from .base import DataMigrationServiceTransport, DEFAULT_CLIENT_INFO
class DataMigrationServiceGrpcTransport(DataMigrationServiceTransport):
"""gRPC backend transport for DataMigrationService.
Database Migration service
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(self, *,
host: str = 'datamigration.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(cls,
host: str = 'datamigration.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_migration_jobs(self) -> Callable[
[clouddms.ListMigrationJobsRequest],
clouddms.ListMigrationJobsResponse]:
r"""Return a callable for the list migration jobs method over gRPC.
Lists migration jobs in a given project and location.
Returns:
Callable[[~.ListMigrationJobsRequest],
~.ListMigrationJobsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_migration_jobs' not in self._stubs:
self._stubs['list_migration_jobs'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ListMigrationJobs',
request_serializer=clouddms.ListMigrationJobsRequest.serialize,
response_deserializer=clouddms.ListMigrationJobsResponse.deserialize,
)
return self._stubs['list_migration_jobs']
@property
def get_migration_job(self) -> Callable[
[clouddms.GetMigrationJobRequest],
clouddms_resources.MigrationJob]:
r"""Return a callable for the get migration job method over gRPC.
Gets details of a single migration job.
Returns:
Callable[[~.GetMigrationJobRequest],
~.MigrationJob]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_migration_job' not in self._stubs:
self._stubs['get_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GetMigrationJob',
request_serializer=clouddms.GetMigrationJobRequest.serialize,
response_deserializer=clouddms_resources.MigrationJob.deserialize,
)
return self._stubs['get_migration_job']
@property
def create_migration_job(self) -> Callable[
[clouddms.CreateMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the create migration job method over gRPC.
Creates a new migration job in a given project and
location.
Returns:
Callable[[~.CreateMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_migration_job' not in self._stubs:
self._stubs['create_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/CreateMigrationJob',
request_serializer=clouddms.CreateMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_migration_job']
@property
def update_migration_job(self) -> Callable[
[clouddms.UpdateMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the update migration job method over gRPC.
Updates the parameters of a single migration job.
Returns:
Callable[[~.UpdateMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_migration_job' not in self._stubs:
self._stubs['update_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/UpdateMigrationJob',
request_serializer=clouddms.UpdateMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_migration_job']
@property
def delete_migration_job(self) -> Callable[
[clouddms.DeleteMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete migration job method over gRPC.
Deletes a single migration job.
Returns:
Callable[[~.DeleteMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_migration_job' not in self._stubs:
self._stubs['delete_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/DeleteMigrationJob',
request_serializer=clouddms.DeleteMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_migration_job']
@property
def start_migration_job(self) -> Callable[
[clouddms.StartMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the start migration job method over gRPC.
Start an already created migration job.
Returns:
Callable[[~.StartMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'start_migration_job' not in self._stubs:
self._stubs['start_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/StartMigrationJob',
request_serializer=clouddms.StartMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['start_migration_job']
@property
def stop_migration_job(self) -> Callable[
[clouddms.StopMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the stop migration job method over gRPC.
Stops a running migration job.
Returns:
Callable[[~.StopMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'stop_migration_job' not in self._stubs:
self._stubs['stop_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/StopMigrationJob',
request_serializer=clouddms.StopMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['stop_migration_job']
@property
def resume_migration_job(self) -> Callable[
[clouddms.ResumeMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the resume migration job method over gRPC.
Resume a migration job that is currently stopped and
is resumable (was stopped during CDC phase).
Returns:
Callable[[~.ResumeMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'resume_migration_job' not in self._stubs:
self._stubs['resume_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ResumeMigrationJob',
request_serializer=clouddms.ResumeMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['resume_migration_job']
@property
def promote_migration_job(self) -> Callable[
[clouddms.PromoteMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the promote migration job method over gRPC.
Promote a migration job, stopping replication to the
destination and promoting the destination to be a
standalone database.
Returns:
Callable[[~.PromoteMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'promote_migration_job' not in self._stubs:
self._stubs['promote_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/PromoteMigrationJob',
request_serializer=clouddms.PromoteMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['promote_migration_job']
@property
def verify_migration_job(self) -> Callable[
[clouddms.VerifyMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the verify migration job method over gRPC.
Verify a migration job, making sure the destination
can reach the source and that all configuration and
prerequisites are met.
Returns:
Callable[[~.VerifyMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'verify_migration_job' not in self._stubs:
self._stubs['verify_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/VerifyMigrationJob',
request_serializer=clouddms.VerifyMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['verify_migration_job']
@property
def restart_migration_job(self) -> Callable[
[clouddms.RestartMigrationJobRequest],
operations_pb2.Operation]:
r"""Return a callable for the restart migration job method over gRPC.
Restart a stopped or failed migration job, resetting
the destination instance to its original state and
starting the migration process from scratch.
Returns:
Callable[[~.RestartMigrationJobRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'restart_migration_job' not in self._stubs:
self._stubs['restart_migration_job'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/RestartMigrationJob',
request_serializer=clouddms.RestartMigrationJobRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['restart_migration_job']
@property
def generate_ssh_script(self) -> Callable[
[clouddms.GenerateSshScriptRequest],
clouddms.SshScript]:
r"""Return a callable for the generate ssh script method over gRPC.
Generate a SSH configuration script to configure the
reverse SSH connectivity.
Returns:
Callable[[~.GenerateSshScriptRequest],
~.SshScript]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'generate_ssh_script' not in self._stubs:
self._stubs['generate_ssh_script'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GenerateSshScript',
request_serializer=clouddms.GenerateSshScriptRequest.serialize,
response_deserializer=clouddms.SshScript.deserialize,
)
return self._stubs['generate_ssh_script']
@property
def list_connection_profiles(self) -> Callable[
[clouddms.ListConnectionProfilesRequest],
clouddms.ListConnectionProfilesResponse]:
r"""Return a callable for the list connection profiles method over gRPC.
Retrieve a list of all connection profiles in a given
project and location.
Returns:
Callable[[~.ListConnectionProfilesRequest],
~.ListConnectionProfilesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_connection_profiles' not in self._stubs:
self._stubs['list_connection_profiles'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/ListConnectionProfiles',
request_serializer=clouddms.ListConnectionProfilesRequest.serialize,
response_deserializer=clouddms.ListConnectionProfilesResponse.deserialize,
)
return self._stubs['list_connection_profiles']
@property
def get_connection_profile(self) -> Callable[
[clouddms.GetConnectionProfileRequest],
clouddms_resources.ConnectionProfile]:
r"""Return a callable for the get connection profile method over gRPC.
Gets details of a single connection profile.
Returns:
Callable[[~.GetConnectionProfileRequest],
~.ConnectionProfile]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_connection_profile' not in self._stubs:
self._stubs['get_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/GetConnectionProfile',
request_serializer=clouddms.GetConnectionProfileRequest.serialize,
response_deserializer=clouddms_resources.ConnectionProfile.deserialize,
)
return self._stubs['get_connection_profile']
@property
def create_connection_profile(self) -> Callable[
[clouddms.CreateConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the create connection profile method over gRPC.
Creates a new connection profile in a given project
and location.
Returns:
Callable[[~.CreateConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_connection_profile' not in self._stubs:
self._stubs['create_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/CreateConnectionProfile',
request_serializer=clouddms.CreateConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['create_connection_profile']
@property
def update_connection_profile(self) -> Callable[
[clouddms.UpdateConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the update connection profile method over gRPC.
Update the configuration of a single connection
profile.
Returns:
Callable[[~.UpdateConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_connection_profile' not in self._stubs:
self._stubs['update_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/UpdateConnectionProfile',
request_serializer=clouddms.UpdateConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_connection_profile']
@property
def delete_connection_profile(self) -> Callable[
[clouddms.DeleteConnectionProfileRequest],
operations_pb2.Operation]:
r"""Return a callable for the delete connection profile method over gRPC.
Deletes a single Database Migration Service
connection profile. A connection profile can only be
deleted if it is not in use by any active migration
jobs.
Returns:
Callable[[~.DeleteConnectionProfileRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_connection_profile' not in self._stubs:
self._stubs['delete_connection_profile'] = self.grpc_channel.unary_unary(
'/google.cloud.clouddms.v1.DataMigrationService/DeleteConnectionProfile',
request_serializer=clouddms.DeleteConnectionProfileRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_connection_profile']
__all__ = (
'DataMigrationServiceGrpcTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
9dd9d05f4cd541b85501ac0159eb87d7229d4dae | fed1c1865e98ed17fd5b871f2ac4ee54e892647f | /coronaCount.py | acfb80ca2aaf44f571bd1082c7c610bc1c5c43e9 | [] | no_license | cjws/Corona | 118137cb887d28802e3761d4eceaa88a704183f2 | cdc807b2e9bfc78d47ae804b8f04ceece659ab3f | refs/heads/master | 2021-03-29T07:17:57.809680 | 2020-03-19T07:19:07 | 2020-03-19T07:19:07 | 247,930,598 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | #!/usr/bin/python
import re
from bs4 import BeautifulSoup
import requests
page = requests.get('https://www.worldometers.info/coronavirus/')
soup = BeautifulSoup(page.content, 'html.parser')
# print(soup)
bat_soup = soup.find_all("div", {"id": "maincounter-wrap"})
titles=[]
for block in bat_soup:
titles.append(block.find("h1").get_text())
counts=[]
for block in bat_soup:
counts.append(block.find("span").get_text())
cases_count=re.sub('\D','',counts[0])
death_count=re.sub('\D','',counts[1])
print(cases_count, death_count)
| [
"christopher.smith@gmx.com"
] | christopher.smith@gmx.com |
6111b42cd4ab33f03d68f6ef6755a02237f6a2d9 | 141ca83daf77e83c2150b16c827db7b1e212e64c | /code/crossDet/main_video.py | 3a73aceb49aff92fb2ff5e924cbef839d6f74e31 | [] | no_license | zldodo/seed2020_vehicle_cross_lane | cbde356b66640b56a3469f589b7c59e7cfef6bad | f059910fd8a7b38c07c6da749acb03e4022c9cc8 | refs/heads/master | 2023-01-03T03:33:59.114042 | 2020-10-30T07:37:02 | 2020-10-30T07:37:02 | 308,556,804 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py |
import json
import cv2
from .utils import display,detect_vehicle_cross_line,draw_points,\
IsMatched,update_tracks,save_json,lane_tracking,fit_lane_points,\
draw_lane,remove_occluded_vehicle,match_vehicle
from .classify_lane_type import *
from .ekf_track import *
import time
import math
def main(ori_frame,vehicle_data,lanes_data,frame_time,trackers,pre_res):
vehicle_data_filtered = remove_occluded_vehicle(vehicle_data)
fitted_lane,meas_z = fit_lane_points(lanes_data,ori_frame)
lanes_type = classify_lane_type(ori_frame,fitted_lane)
if not trackers:
for z, style in zip(meas_z,lanes_type):
# theta = math.atan(z[0])
# X0 = np.array([[theta],[z[1]],[0],[0]])
X0 = np.array([[z[0]],[z[1]],[0],[0]])
tracker = init_lane_tracker(X0,style,500)
trackers.append(tracker)
# print('init lane tracks')
else:
trackers = lane_tracking(trackers,lanes_data,meas_z,lanes_type)
# img_lane = draw_lane(ori_frame,trackers)
match_res = match_vehicle(vehicle_data,pre_res)
out,cross,trackers = detect_vehicle_cross_line(ori_frame,vehicle_data_filtered,trackers,match_res)
res = []
for one in cross:
t = frame_time
mx,my,w,h= one[0],one[1],one[2],one[3]
lane_type = 1 if one[4]=='solid' else 0
#print(one[4])
res.append([t,mx,my,w,h,lane_type])
# # fused with previous frames
new_res = update_tracks(res,pre_res)
# display(ori_frame,lanes_data,vehicle_data)
return trackers, new_res, out
| [
"quelongkai2@csvw.com"
] | quelongkai2@csvw.com |
6b2d8024e715360e23bbbf8b3078b125720b0785 | bde716f863c2f473e8db1e5eceecf8f9b8b5762e | /tests/unit/types/arrays/test_documentarray.py | 8bed39befa0fb6200cc6d792ffbe52a3de86a2f5 | [
"Apache-2.0"
] | permissive | igsova/jina | 30f07e3cc0adc49f764fa9e76a58ef98df67c370 | 0c4b4a02cbce6938d53a54e8c54279fd9c35ca37 | refs/heads/master | 2023-06-19T11:12:48.999557 | 2021-07-06T23:05:17 | 2021-07-06T23:05:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,373 | py | import os
from copy import deepcopy
import pytest
import numpy as np
from scipy.sparse import coo_matrix
from jina import Document, DocumentArray
from jina.logging.profile import TimeContext
from jina.types.document.graph import GraphDocument
from tests import random_docs
# Unused within this chunk; presumably controls how many documents are
# generated per granularity level elsewhere in the file — verify before reuse.
DOCUMENTS_PER_LEVEL = 1
@pytest.fixture(scope='function')
def document_factory():
    """Provide a factory object whose ``create(idx, text)`` builds a Document."""

    class _Factory:
        def create(self, idx, text):
            # Build inside the context manager so the document is finalized on exit.
            with Document(id=idx) as doc:
                doc.tags['id'] = idx
                doc.text = text
            return doc

    return _Factory()
@pytest.fixture
def docs(document_factory):
    """Three sample documents; the first two intentionally share the same text."""
    specs = ((1, 'test 1'), (2, 'test 1'), (3, 'test 3'))
    return [document_factory.create(idx, text) for idx, text in specs]
@pytest.fixture
def docarray(docs):
    """Wrap the sample documents in a DocumentArray."""
    array = DocumentArray(docs)
    return array
@pytest.fixture
def docarray_with_scipy_sparse_embedding(docs):
    """DocumentArray whose documents all share one 1x10 scipy COO embedding."""
    values = np.array([1, 2, 3, 4, 5, 6])
    rows = np.array([0, 0, 0, 0, 0, 0])
    cols = np.array([0, 2, 2, 0, 1, 2])
    # Duplicate (row, col) coordinates are summed by the COO constructor.
    sparse_emb = coo_matrix((values, (rows, cols)), shape=(1, 10))
    for d in docs:
        d.embedding = sparse_emb
    return DocumentArray(docs)
def test_length(docarray, docs):
    """The array reports the same length as the source list: three documents."""
    assert len(docarray) == len(docs) == 3
def test_append(docarray, document_factory):
    """An appended document ends up at the tail of the array."""
    new_doc = document_factory.create(4, 'test 4')
    docarray.append(new_doc)
    assert docarray[-1].id == new_doc.id
def test_union(docarray, document_factory):
    """``+`` concatenates: originals first, then the extras, order preserved."""
    extra = DocumentArray([])
    for idx in range(4, 10):
        extra.append(document_factory.create(idx, f'test {idx}'))
    union = docarray + extra
    for pos, doc in enumerate(docarray):
        assert union[pos].id == doc.id
    for pos, doc in enumerate(extra):
        assert union[pos + 3].id == doc.id
def test_union_inplace(docarray, document_factory):
    """``+=`` extends a copy in place without disturbing relative order."""
    extra = DocumentArray([])
    for idx in range(4, 10):
        extra.append(document_factory.create(idx, f'test {idx}'))
    union = deepcopy(docarray)
    union += extra
    for pos, doc in enumerate(docarray):
        assert union[pos].id == doc.id
    for pos, doc in enumerate(extra):
        assert union[pos + 3].id == doc.id
def test_extend(docarray, document_factory):
    """Extending with two documents grows the array and keeps the last intact."""
    new_docs = [document_factory.create(idx, f'test {idx}') for idx in (4, 5)]
    docarray.extend(new_docs)
    assert len(docarray) == 5
    last = docarray[-1]
    assert last.tags['id'] == 5
    assert last.text == 'test 5'
def test_clear(docarray):
    """Clearing removes every document from the array."""
    docarray.clear()
    assert len(docarray) == 0
def test_delete_by_index(docarray, document_factory):
    """`del arr[i]` removes a doc by integer index."""
    doc = document_factory.create(4, 'test 4')
    docarray.append(doc)
    del docarray[-1]
    assert len(docarray) == 3
    assert docarray == docarray
def test_delete_by_id(docarray: DocumentArray, document_factory):
    """`del arr[doc_id]` removes a doc by its id string."""
    doc = document_factory.create(4, 'test 4')
    docarray.append(doc)
    del docarray[doc.id]
    assert len(docarray) == 3
    assert docarray == docarray
def test_array_get_success(docarray, document_factory):
    """Item assignment works via both integer index and document id."""
    doc = document_factory.create(4, 'test 4')
    doc_id = 2
    docarray[doc_id] = doc
    assert docarray[doc_id].text == 'test 4'
    doc_0_id = docarray[0].id
    docarray[doc_0_id] = doc
    assert docarray[doc_0_id].text == 'test 4'
def test_array_get_from_slice_success(docs, document_factory):
    """Slicing clamps out-of-range bounds instead of raising."""
    docarray = DocumentArray(docs)
    assert len(docarray[:1]) == 1
    assert len(docarray[:2]) == 2
    assert len(docarray[:3]) == 3
    assert len(docarray[:100]) == 3
    assert len(docarray[1:]) == 2
    assert len(docarray[2:]) == 1
    assert len(docarray[3:]) == 0
    assert len(docarray[100:]) == 0
def test_array_get_fail(docarray, document_factory):
    """Unsupported index types (e.g. float) raise IndexError on get and set."""
    with pytest.raises(IndexError):
        docarray[0.1] = 1  # Set fail, not a supported type
    with pytest.raises(IndexError):
        docarray[0.1]  # Get fail, not a supported type
def test_docarray_init(docs, docarray):
    """Constructing from a list preserves count, ids, and text."""
    # we need low-level protobuf generation for testing
    assert len(docs) == len(docarray)
    for d, od in zip(docs, docarray):
        assert isinstance(d, Document)
        assert d.id == od.id
        assert d.text == od.text
def test_docarray_iterate_twice(docarray):
    """Iteration is repeatable: nested loops see every element each pass."""
    j = 0
    for _ in docarray:
        for _ in docarray:
            j += 1
    assert j == len(docarray) ** 2
def test_docarray_reverse(docs, docarray):
    """reverse() flips element order in place."""
    ids = [d.id for d in docs]
    docarray.reverse()
    ids2 = [d.id for d in docarray]
    assert list(reversed(ids)) == ids2
def test_match_chunk_array():
    """Appending matches/chunks sets granularity/adjacency relative to parent.

    As exercised below: a match keeps the parent's granularity and gets
    adjacency = parent + 1; a chunk keeps adjacency and gets
    granularity = parent + 1.
    """
    with Document() as d:
        d.content = 'hello world'
    m = Document()
    d.matches.append(m)
    assert m.granularity == d.granularity
    assert m.adjacency == 0
    assert d.matches[0].adjacency == d.adjacency + 1
    assert len(d.matches) == 1
    c = Document()
    d.chunks.append(c)
    assert c.granularity == 0
    assert d.chunks[0].granularity == d.granularity + 1
    assert c.adjacency == d.adjacency
    assert len(d.chunks) == 1
def add_chunk(doc):
    # Helper: attach a new chunk one granularity level below ``doc``.
    with Document() as chunk:
        chunk.granularity = doc.granularity + 1
        chunk.adjacency = doc.adjacency
        doc.chunks.append(chunk)
        return chunk
def add_match(doc):
    # Helper: attach a new match one adjacency level beyond ``doc``.
    with Document() as match:
        match.granularity = doc.granularity
        match.adjacency = doc.adjacency + 1
        doc.matches.append(match)
        return match
def test_doc_array_from_generator():
    """DocumentArray can be built from a generator, consuming it fully."""
    NUM_DOCS = 100
    def generate():
        for _ in range(NUM_DOCS):
            yield Document()
    doc_array = DocumentArray(generate())
    assert len(doc_array) == NUM_DOCS
@pytest.mark.slow
@pytest.mark.parametrize('method', ['json', 'binary'])
def test_document_save_load(method, tmp_path):
    """Round-trip save/load preserves ids, embeddings, and content.

    Covers both a bulk-constructed array and one built via append, for both
    the 'json' and 'binary' file formats.
    """
    da1 = DocumentArray(random_docs(1000))
    da2 = DocumentArray()
    for doc in random_docs(10):
        da2.append(doc)
    for da in [da1, da2]:
        tmp_file = os.path.join(tmp_path, 'test')
        with TimeContext(f'w/{method}'):
            da.save(tmp_file, file_format=method)
        with TimeContext(f'r/{method}'):
            da_r = DocumentArray.load(tmp_file, file_format=method)
        assert len(da) == len(da_r)
        for d, d_r in zip(da, da_r):
            assert d.id == d_r.id
            np.testing.assert_equal(d.embedding, d_r.embedding)
            assert d.content == d_r.content
def test_documentarray_filter():
    """A plain list comprehension filters docs by score value."""
    da = DocumentArray([Document() for _ in range(6)])
    for j in range(6):
        da[j].scores['score'].value = j
    da = [d for d in da if d.scores['score'].value > 2]
    assert len(DocumentArray(da)) == 3
    for d in da:
        assert d.scores['score'].value > 2
def test_da_with_different_inputs():
    """Mixing Document objects and raw protos yields Documents throughout."""
    docs = [Document() for _ in range(10)]
    da = DocumentArray(
        [docs[i] if (i % 2 == 0) else docs[i].proto for i in range(len(docs))]
    )
    assert len(da) == 10
    for d in da:
        assert isinstance(d, Document)
def test_da_sort_by_document_interface_not_in_proto():
    """sort() works with a key using the high-level .embedding.shape API."""
    docs = [Document(embedding=np.array([1] * (10 - i))) for i in range(10)]
    da = DocumentArray(
        [docs[i] if (i % 2 == 0) else docs[i].proto for i in range(len(docs))]
    )
    assert len(da) == 10
    assert da[0].embedding.shape == (10,)
    da.sort(key=lambda d: d.embedding.shape[0])
    assert da[0].embedding.shape == (1,)
def test_da_sort_by_document_interface_in_proto():
    """sort() also works with a key reaching into the proto (.dense) field."""
    docs = [Document(embedding=np.array([1] * (10 - i))) for i in range(10)]
    da = DocumentArray(
        [docs[i] if (i % 2 == 0) else docs[i].proto for i in range(len(docs))]
    )
    assert len(da) == 10
    assert da[0].embedding.shape == (10,)
    da.sort(key=lambda d: d.embedding.dense.shape[0])
    assert da[0].embedding.shape == (1,)
def test_da_reverse():
    """reverse() works on a mixed Document/proto-constructed array."""
    docs = [Document(embedding=np.array([1] * (10 - i))) for i in range(10)]
    da = DocumentArray(
        [docs[i] if (i % 2 == 0) else docs[i].proto for i in range(len(docs))]
    )
    assert len(da) == 10
    assert da[0].embedding.shape == (10,)
    da.reverse()
    assert da[0].embedding.shape == (1,)
def test_da_sort_by_score():
    """sort() orders docs ascending by a per-document score value.

    NOTE: this function was previously defined twice back-to-back with
    byte-identical bodies; the second definition silently shadowed the
    first at import time, so pytest only ever collected one. The
    duplicate has been removed.
    """
    da = DocumentArray(
        [Document(id=i, copy=True, scores={'euclid': 10 - i}) for i in range(10)]
    )
    assert da[0].id == '0'
    assert da[0].scores['euclid'].value == 10
    da.sort(key=lambda d: d.scores['euclid'].value)  # sort matches by their values
    assert da[0].id == '9'
    assert da[0].scores['euclid'].value == 1
def test_traversal_path():
    """traverse/traverse_flat accept a list of paths and reject a bare str.

    NOTE: the original body repeated the ``da.traverse(['r'])`` +
    ``pytest.raises`` stanza twice verbatim (copy-paste duplication);
    the duplicate has been removed.
    """
    da = DocumentArray([Document() for _ in range(6)])
    assert len(da) == 6
    da.traverse_flat(['r'])
    with pytest.raises(ValueError):
        da.traverse_flat('r')
    da.traverse(['r'])
    with pytest.raises(ValueError):
        # traverse returns a generator, so the error surfaces on iteration
        for _ in da.traverse('r'):
            pass
| [
"noreply@github.com"
] | noreply@github.com |
c9fa1a91e0f3e7cee9d024d8640788163d795026 | eebb9e36127b34592b1200aa3a4a4c25ea36568c | /tel_params/BINOSPEC.py | 3371b6a957a5e17b81983b0b5e26ed2cbc512829 | [] | no_license | souvikmanik/Imaging_pipelines | c20177d9f693d1608acbfea5738854656416a1a0 | b3eda61523b13ce18af79685cf4faa71d712e9de | refs/heads/master | 2023-06-26T13:10:33.002846 | 2021-07-30T15:27:13 | 2021-07-30T15:27:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,764 | py | #parameter file for BINOSPEC/MMT
import os
import astropy
import datetime
import numpy as np
from photutils import make_source_mask, Background2D, MeanBackground
from astropy.stats import SigmaClip
from astropy.io import fits
from astropy.time import Time
from astropy.nddata import CCDData
import astropy.units.astrophys as u
import astropy.units as u
import astropy.wcs as wcs
import ccdproc
from astropy.modeling import models
import create_mask
from utilities import util
__version__ = 1.3 #last edited 28/07/2021
def static_mask(proc):
    """Return [left, right] static-mask file paths for BINOSPEC.

    Masks are currently disabled (empty strings) for both processed and
    raw data; the commented paths record the previous mask files:
      proc=True : './staticmasks/bino_proc_left.trim.staticmask.fits',
                  './staticmasks/bino_proc_right.trim.staticmask.fits'
      proc=False: './staticmasks/bino_left.staticmask.fits',
                  './staticmasks/bino_right.staticmask.fits'
    """
    paths = ['', ''] if proc else ['', '']
    return paths
def run_wcs():
    # Whether to run an astrometric (WCS) solution for this instrument.
    return True
def wcs_extension():
    # FITS extension index holding the WCS solution.
    return 0
def pixscale():
    # Detector pixel scale in arcsec/pixel.
    return 0.24
def saturation(hdr):
    # Saturation level in electrons; the header argument is currently unused.
    return 65000 #default hdr['DATAMAX']*hdr['GAIN']
def WCS_keywords_old(): #WCS keywords
    # Legacy WCS header keywords (including the alternate 'A' system).
    return ['PC1_1','PC1_2','PC2_1','PC2_2','WCSNAMEA','CUNIT1A','CUNIT2A','CTYPE1A','CTYPE2A','CRPIX1A','CRPIX2A','CRVAL1A','CRVAL2A','CD1_1A','CD1_2A','CD2_1A','CD2_2A']
def WCS_keywords(): #WCS keywords
    # WCS header keywords used by the pipeline.
    return ['CRPIX1','CRPIX2','PC1_1','PC1_2','PC2_1','PC2_2']
def cal_path():
    # Path to the BINOSPEC calibration directory under $PIPELINE_HOME.
    # NOTE(review): if PIPELINE_HOME is unset this yields 'None/...' -- confirm
    # the environment variable is guaranteed to be set by the pipeline.
    return str(os.getenv("PIPELINE_HOME"))+'/Imaging_pipelines/BINOSPEC_calib/'
def raw_format(proc):
    """Glob pattern matching raw science frames.

    ``proc=True`` selects observatory-processed files; otherwise the
    pattern excludes filenames containing 'proc'.
    """
    return 'sci_img_*proc.fits' if proc else 'sci_img*[!proc].fits'
# --- calibration availability and header-keyword accessors ------------------
def dark():
    # No dark-frame calibration for this instrument.
    return False
def bias():
    # No bias-frame calibration for this instrument.
    return False
def flat():
    # Flats are not created by this pipeline (pre-made flats are loaded).
    return False
def raw_header_ext():
    # FITS extension holding the primary science header in raw files.
    return 1
def science_keyword():
    # Header keywords used to identify science frames...
    return ['MASK','SCRN']
def science_files():
    # ...and the values those keywords must take.
    return ['imaging','stowed']
def flat_keyword():
    # Header keywords used to identify flat frames...
    return ['MASK','SCRN']
def flat_files():
    # ...and the values those keywords must take.
    return ['imaging','deployed']
def bias_keyword():
    return []
def bias_files():
    return []
def dark_keyword():
    return []
def dark_files():
    return []
def spec_keyword():
    # Keyword/value pair identifying spectroscopic frames (to be skipped).
    return ['MASK']
def spec_files():
    return ['spectroscopy']
def bad_keyword():
    # Keyword/value pair identifying frames to reject outright.
    return ['MASK']
def bad_files():
    return ['mira']
def target_keyword():
    # Header keyword giving the target name.
    return 'OBJECT'
def filter_keyword(hdr):
    # Filter name: strip spaces and keep the part before the first underscore.
    return hdr['FILTER'].replace(' ','').split('_')[0]
def amp_keyword(hdr):
    # Amplifier configuration; fixed to '1' for BINOSPEC.
    return '1'
def bin_keyword(hdr):
    # CCD binning string with spaces removed, e.g. '11'.
    return hdr['CCDSUM'].replace(' ','')
def time_format(hdr):
    # Observation time as MJD straight from the header.
    return hdr['MJD']
def wavelength():
    # Wavelength regime label (optical).
    return 'OPT'
def flat_name(cpath, fil, amp, binn):
    """Return [left, right] master-flat paths for filter ``fil``.

    ``amp`` and ``binn`` are accepted for interface compatibility with the
    other instrument modules but do not affect the filename.
    """
    return ['%s/mflat_%s_%s.fits' % (cpath, fil, side) for side in ('left', 'right')]
def load_flat(flat):
    # Load each master-flat file (extension 1, electron units) into CCDData.
    mflat = []
    for f in flat:
        mflat.append(CCDData.read(f,hdu=1,unit=u.electron))
    return mflat
def gain():
    # Per-amplifier gains (e-/ADU) for the 8 BINOSPEC amplifiers, in HDU order.
    return [1.085, 1.04649118, 1.04159151, 0.97505369, 1.028, 1.16341855, 1.04742053, 1.0447564]
def process_science(sci_list,fil,amp,binn,red_path,mbias=None,mflat=None,proc=None,log=None):
    """Reduce BINOSPEC science frames for both detector sides.

    For each file in ``sci_list`` this flat-fields, trims, source-masks,
    background-subtracts, and exposure-normalizes the left and right images.
    ``proc=True`` expects observatory-processed 2-extension files;
    otherwise the 8 raw amplifier extensions are gain/overscan corrected,
    trimmed, mosaicked, and an approximate WCS is written from the header
    pointing (RA/DEC/POSANG).

    Returns ``([left_list, right_list], [left_mask, right_mask])`` where
    each element is a per-input-file list.

    NOTE(review): ``masks``/``processed`` below are never used, the
    cosmic-ray cleaning step is commented out, and the left/right paths
    are near-duplicates that could be factored into a helper.
    """
    masks = []
    processed = []
    flat_left = mflat[0]
    flat_right = mflat[1]
    left_list = []
    right_list = []
    left_mask = []
    right_mask = []
    if proc:
        # Observatory-processed input: HDU 1 = left side, HDU 2 = right side.
        for j,sci in enumerate(sci_list):
            log.info('Loading file: '+sci)
            log.info('Applying flat correction and trimming left image.')
            left = CCDData.read(sci, hdu=1, unit=u.electron)
            left = ccdproc.flat_correct(left,flat_left)
            left = ccdproc.ccd_process(left, trim=left.header['DATASEC'])
            log.info('Left image proccessed and trimmed.')
            log.info('Cleaning cosmic rays and creating mask.')
            mask = make_source_mask(left, nsigma=3, npixels=5)
            left_mask.append(mask)
            # clean, com_mask = create_mask.create_mask(sci,left,'_mask_left.fits',static_mask(proc)[0],mask,saturation(left.header),binning(proc,'left'),rdnoise(left.header),cr_clean_sigclip(),cr_clean_sigcfrac(),cr_clean_objlim(),log)
            # left.data = clean
            log.info('Calculating 2D background.')
            bkg = Background2D(left, (120, 120), filter_size=(3, 3),sigma_clip=SigmaClip(sigma=3), bkg_estimator=MeanBackground(), mask=mask, exclude_percentile=80)
            log.info('Median background for left iamge: '+str(np.median(bkg.background)))
            fits.writeto(sci.replace('/raw/','/red/').replace('.fits','_bkg_left.fits'),np.array(bkg.background),overwrite=True)
            left = left.subtract(CCDData(bkg.background,unit=u.electron),propagate_uncertainties=True,handle_meta='first_found')
            log.info('Exposure time of left image is '+str(left.header['EXPTIME']))
            left = left.divide(left.header['EXPTIME']*u.second,propagate_uncertainties=True,handle_meta='first_found')
            log.info('Background subtracted and image divided by exposure time.')
            left.header['DATASEC'] = '[1:'+str(np.shape(left)[1])+',1:'+str(np.shape(left)[0])+']'
            left_list.append(left)
            log.info('Applying flat correction and trimming right image.')
            right = CCDData.read(sci, hdu=2, unit=u.electron)
            right = ccdproc.flat_correct(right,flat_right)
            right = ccdproc.ccd_process(right, trim=right.header['DATASEC'])
            log.info('Right image proccessed and trimmed.')
            log.info('Cleaning cosmic rays and creating mask.')
            mask = make_source_mask(right, nsigma=3, npixels=5)
            right_mask.append(mask)
            # clean, com_mask = create_mask.create_mask(sci,right,'_mask_right.fits',static_mask(proc)[1],mask,saturation(right.header),binning(proc,'right'),rdnoise(right.header),cr_clean_sigclip(),cr_clean_sigcfrac(),cr_clean_objlim(),log)
            # right.data = clean
            log.info('Calculating 2D background.')
            bkg = Background2D(right, (120, 120), filter_size=(3, 3),sigma_clip=SigmaClip(sigma=3), bkg_estimator=MeanBackground(), mask=mask, exclude_percentile=80)
            log.info('Median background for right image : '+str(np.median(bkg.background)))
            fits.writeto(sci.replace('/raw/','/red/').replace('.fits','_bkg_right.fits'),np.array(bkg.background),overwrite=True)
            right = right.subtract(CCDData(bkg.background,unit=u.electron),propagate_uncertainties=True,handle_meta='first_found')
            log.info('Exposure time of right image is '+str(right.header['EXPTIME']))
            right = right.divide(right.header['EXPTIME']*u.second,propagate_uncertainties=True,handle_meta='first_found')
            log.info('Background subtracted and image divided by exposure time.')
            right.header['DATASEC'] = '[1:'+str(np.shape(right)[1])+',1:'+str(np.shape(right)[0])+']'
            right_list.append(right)
    else:
        # Raw input: 8 amplifier HDUs; process each then mosaic left/right.
        for j,sci in enumerate(sci_list):
            log.info('Loading file: '+sci)
            log.info('Applying gain correction, overscan correction, flat correction, and trimming image.')
            with fits.open(sci) as hdr:
                header_left = hdr[1].header
                header_right = hdr[6].header
            data_list = []
            for i in range(8):
                data = ccdproc.CCDData.read(sci,hdu=i+1,unit=u.adu)
                red = ccdproc.ccd_process(data, oscan=data[:,0:50], oscan_model=models.Chebyshev1D(3), trim='[1200:2098,210:2056]', gain=gain()[i]*u.electron/u.adu, readnoise=4*u.electron)
                data_list.append(np.asarray(red).astype(np.float32))
            # Amplifiers 0-3 form the left mosaic (with flips to align readout).
            top_left = np.concatenate([data_list[0],np.fliplr(data_list[1])],axis=1)
            bot_left = np.flipud(np.concatenate([data_list[3],np.fliplr(data_list[2])],axis=1))
            left = CCDData(np.concatenate([top_left,bot_left]),unit=u.electron,header=header_left)
            left = ccdproc.flat_correct(left,flat_left[209:3903,1149:2947])
            log.info('Left image proccessed and trimmed.')
            log.info('Cleaning cosmic rays and creating mask.')
            mask = make_source_mask(left, nsigma=3, npixels=5)
            left_mask.append(mask)
            # clean, com_mask = create_mask.create_mask(sci,left,static_mask(proc)[0],mask,saturation(left.header),binning(proc,'left'),rdnoise(left.header),cr_clean_sigclip(),cr_clean_sigcfrac(),cr_clean_objlim(),log)
            # processed_data.data = clean
            log.info('Calculating 2D background.')
            bkg = Background2D(left, (120, 120), filter_size=(3, 3),sigma_clip=SigmaClip(sigma=3), bkg_estimator=MeanBackground(), mask=mask, exclude_percentile=80)
            log.info('Median background for left image : '+str(np.median(bkg.background)))
            fits.writeto(sci.replace('/raw/','/red/').replace('.fits','_bkg_left.fits'),bkg.background,overwrite=True)
            left = left.subtract(CCDData(bkg.background,unit=u.electron),propagate_uncertainties=True,handle_meta='first_found')
            log.info('Exposure time of left image is '+str(left.header['EXPTIME']))
            left = left.divide(left.header['EXPTIME']*u.second,propagate_uncertainties=True,handle_meta='first_found')
            log.info('Background subtracted and image divided by exposure time.')
            # Write an approximate tangent-plane WCS from the header pointing.
            left.header['DATASEC'] = '[1:1798,1:3694]'
            left.header['RADECSYS'] = 'ICRS'
            left.header['CUNIT1'] = 'deg'
            left.header['CUNIT2'] = 'deg'
            left.header['CTYPE1'] = 'RA---TAN'
            left.header['CTYPE2'] = 'DEC--TAN'
            left.header['CRPIX1'] = 2301
            left.header['CRPIX2'] = 1846
            coord = util.parse_coord(left.header['RA'],left.header['DEC'])
            left.header['CRVAL1'] = coord.ra.deg
            left.header['CRVAL2'] = coord.dec.deg
            left.header['PC1_1'] = -pixscale()/3600*np.sin(np.pi/180.*(left.header['POSANG']+90))
            left.header['PC1_2'] = pixscale()/3600*np.cos(np.pi/180.*(left.header['POSANG']+90))
            left.header['PC2_1'] = -pixscale()/3600*np.cos(np.pi/180.*(left.header['POSANG']+90))
            left.header['PC2_2'] = pixscale()/3600*np.sin(np.pi/180.*(left.header['POSANG']+90))
            left.write(sci.replace('/raw/','/red/').replace('.fits','_left.fits'),overwrite=True)
            left_list.append(left)
            # Amplifiers 4-7 form the right mosaic.
            top_right = np.concatenate([data_list[6],np.fliplr(data_list[7])],axis=1)
            bot_right = np.flipud(np.concatenate([data_list[5],np.fliplr(data_list[4])],axis=1))
            right = CCDData(np.concatenate([top_right,bot_right]),unit=u.electron,header=header_right)
            right = ccdproc.flat_correct(right,flat_right[209:3903,1149:2947])
            log.info('Right image proccessed and trimmed.')
            log.info('Cleaning cosmic rays and creating mask.')
            mask = make_source_mask(right, nsigma=3, npixels=5)
            right_mask.append(mask)
            # clean, com_mask = create_mask.create_mask(sci,right,static_mask(proc)[1],mask,saturation(right.header),binning(proc,'right'),rdnoise(right.header),cr_clean_sigclip(),cr_clean_sigcfrac(),cr_clean_objlim(),log)
            # processed_data.data = clean
            log.info('Calculating 2D background.')
            bkg = Background2D(right, (120, 120), filter_size=(3, 3),sigma_clip=SigmaClip(sigma=3), bkg_estimator=MeanBackground(), mask=mask, exclude_percentile=80)
            log.info('Median background for right image : '+str(np.median(bkg.background)))
            fits.writeto(sci.replace('/raw/','/red/').replace('.fits','_bkg_right.fits'),bkg.background,overwrite=True)
            right = right.subtract(CCDData(bkg.background,unit=u.electron),propagate_uncertainties=True,handle_meta='first_found')
            log.info('Exposure time of right image is '+str(right.header['EXPTIME']))
            right = right.divide(right.header['EXPTIME']*u.second,propagate_uncertainties=True,handle_meta='first_found')
            log.info('Background subtracted and image divided by exposure time.')
            right.header['DATASEC'] = '[1:1798,1:3694]'
            right.header['RADECSYS'] = 'ICRS'
            right.header['CUNIT1'] = 'deg'
            right.header['CUNIT2'] = 'deg'
            right.header['CTYPE1'] = 'RA---TAN'
            right.header['CTYPE2'] = 'DEC--TAN'
            right.header['CRPIX1'] = -504
            right.header['CRPIX2'] = 1845
            coord = util.parse_coord(right.header['RA'],right.header['DEC'])
            right.header['CRVAL1'] = coord.ra.deg
            right.header['CRVAL2'] = coord.dec.deg
            right.header['PC1_1'] = -pixscale()/3600*np.sin(np.pi/180.*(right.header['POSANG']+90))
            right.header['PC1_2'] = pixscale()/3600*np.cos(np.pi/180.*(right.header['POSANG']+90))
            right.header['PC2_1'] = -pixscale()/3600*np.cos(np.pi/180.*(right.header['POSANG']+90))
            right.header['PC2_2'] = pixscale()/3600*np.sin(np.pi/180.*(right.header['POSANG']+90))
            right.write(sci.replace('/raw/','/red/').replace('.fits','_right.fits'),overwrite=True)
            right_list.append(right)
    return [left_list,right_list], [left_mask,right_mask]
def stacked_image(tar, red_path):
    """Return [left, right] output stack filenames for target ``tar``."""
    return [red_path + tar + '_%s.fits' % side for side in ('left', 'right')]
def suffix():
    """Per-side filename suffixes for reduced single frames."""
    return ['_red_%s.fits' % side for side in ('left', 'right')]
def rdnoise(header):
    """Read noise in electrons (constant for BINOSPEC; header unused)."""
    return 4.0
def binning(proc, side):
    """Return the [x, y] binning for the given detector ``side``.

    Observatory-processed data ('proc') carry different per-side trims,
    hence the asymmetric values; raw data are 4x4 on both sides. An
    unrecognized side yields None (matching the original fall-through).
    """
    if proc:
        return {'left': [4, 5], 'right': [4, 7]}.get(side)
    return {'left': [4, 4], 'right': [4, 4]}.get(side)
def cr_clean_sigclip():
    # Sigma-clip parameter for the (currently commented-out) cosmic-ray
    # cleaning step in process_science.
    return 50
def cr_clean_sigcfrac():
    # Fractional sigma-clip parameter for cosmic-ray cleaning.
    return 0.1
def cr_clean_objlim():
    # Object-limit parameter for cosmic-ray cleaning.
    return 100
def run_phot():
    # Whether to run photometry on the reduced images.
    return True
def catalog_zp():
    # Catalogs usable for photometric zero-point calibration.
    return ['SDSS','PS1']
def exptime(hdr):
    # Exposure time read directly from the FITS header.
    return hdr['EXPTIME']
def fringe_correction(fil):
    """Whether to apply fringe correction: only for the z band."""
    return fil == 'z'
"kerry.paterson@northwestern.edu"
] | kerry.paterson@northwestern.edu |
1d8fbcb32fcc90105c32c2371521f7a102651765 | a7c9dfae07ca780e9522981477942e44e7043db4 | /config_files/__init__.py | 2193549b1ec23adb546f1edb977a51d8751d02e7 | [
"MIT"
] | permissive | cnrmrphy/crossReference | 2a563380dc8eceb9ae80321c4bc4e5925ed116e7 | 00948be4caca4329e6d7883d43b9c746c5e4ba13 | refs/heads/master | 2021-03-22T22:49:32.146550 | 2020-05-29T18:46:27 | 2020-05-29T18:46:27 | 247,403,694 | 1 | 0 | MIT | 2020-03-18T02:39:26 | 2020-03-15T04:52:18 | Python | UTF-8 | Python | false | false | 118 | py | import config_files.providers.load_providers
def __init__(self):
    # NOTE(review): this is a module-level function, not a class method --
    # nothing ever calls it as a constructor (package import only triggers
    # the module-level import above). Looks like leftover/debug code;
    # confirm whether it can be removed.
    print('you shoulda just loaded load_providers')
| [
"conor.p.murphy52@gmail.com"
] | conor.p.murphy52@gmail.com |
5ebce1a0d30b4d6400f079d238ee11b7b6211139 | f47c540d5e8d1b773de57a9e66e0ad2af9b13e9c | /mwana/apps/reports/webreports/management/commands/tracing_analysis.py | 942b9e4a2337f872b45eeefdf8c6f08b59a0adb8 | [] | no_license | mwana/mwana | e70d1b0c59f2f3ad1300452f58b908a57210415d | 698c2f693f2c49f6dc41c1c0b6669300b619b579 | refs/heads/develop | 2021-01-17T10:11:17.502708 | 2016-02-15T10:56:56 | 2016-02-15T10:56:56 | 600,738 | 5 | 6 | null | 2016-02-03T19:12:45 | 2010-04-08T13:24:18 | Python | UTF-8 | Python | false | false | 5,670 | py | # vim: ai ts=4 sts=4 et sw=4
"""
"""
from django.core.management.base import CommandError
from django.core.management.base import LabelCommand
from mwana.apps.locations.models import Location
from mwana.apps.reminders.models import SentNotification
from rapidsms.contrib.messagelog.models import Message
class Command(LabelCommand):
    """Management command analysing patient-tracing message intervals.

    Usage: ``tracing_analysis KEYWORD DISTRICT_NAMES`` where KEYWORD is one
    of TOLD / TOLD2 / CONFIRM and the remaining arguments are district
    names. Results are printed as pipe-delimited rows.

    NOTE(review): this file is Python 2 (print statements,
    ``except Exception, e`` syntax) and several handlers swallow all
    exceptions silently -- rows that fail to parse are dropped without
    any logging.
    """
    help = ("\nUsage: tracing_analysis KEYWORD DISTRICT_NAMES"
            '\nE.g. tracing_analysis TOLD Monze Mazabuka')
    def handle(self, * args, ** options):
        # Dispatch on the keyword; facilities are all clinics whose parent
        # district matches any of the supplied names (case-insensitive regex).
        if len(args) < 2:
            raise CommandError('Please specify Keyword followed by District Name(s).\n'
                               'E.g. tracing_analysis TOLD Monze Mazabuka')
        keyword = args[0]
        print args[1:]
        district_names = args[1:]
        facilities = Location.objects.filter(parent__slug__endswith='00',
                                             parent__name__iregex='|'.join(name for name in district_names))
        print "_" * 60
        print "Processing %s for the following %s facilities: %s" % (keyword, len(facilities), ", ".join(fac.slug + ": " + fac.name for fac in facilities))
        if keyword.lower() == 'told':
            self.notification_told_interval(facilities)
        elif keyword.lower() == 'told2':
            self.notification_told_interval_exact(facilities)
        elif keyword.lower() == 'confirm':
            self.told_confirm_interval_exact(facilities)
    def deidentify(self, name, deid=True):
        # Reduce each name part to its first two characters when deid is on.
        if not deid:
            return name
        return "***** ".join(n[:2] for n in name.split())
    def last_notified(self, cba_conn, toldtime):
        # Most recent notification sent via this CBA connection before toldtime.
        # Raises IndexError if none exists (caught by callers' bare excepts).
        notification = SentNotification.objects.filter(date_logged__lt=toldtime,
                                                       patient_event__cba_conn=cba_conn).\
            order_by('-date_logged')[0]
        return notification.date_logged, notification.patient_event.patient.name
    def last_notified_exact(self, cba_conn, toldtime, who):
        # Same as last_notified but restricted to a specific patient name.
        notification = SentNotification.objects.filter(date_logged__lt=toldtime,
                                                       patient_event__cba_conn=cba_conn,
                                                       patient_event__patient__name__icontains=who).\
            order_by('-date_logged')[0]
        return notification.date_logged, notification.patient_event.patient.name
    def notification_told_interval(self, facilities):
        # Interval between any last notification and each incoming TOLD message
        # (keyword regex tolerates common misspellings of 'told').
        msgs = Message.objects.filter(direction='I',
                                      contact__location__parent__in=facilities,
                                      text__iregex='^told|^toll|^teld|^tod|^telld|^t0ld|^TOLD|^t01d|^t0ld').distinct()
        for msg in msgs:
            try:
                last_notified, remind_who = self.last_notified(msg.connection, msg.date)
                interval = msg.date - last_notified
                print "%s|%s|%s" % (interval, self.deidentify(remind_who, False), self.deidentify(msg.text[msg.text.index(' '):].strip(), False))
            except:
                # NOTE(review): bare except silently drops unmatched messages.
                pass
    def notification_told_interval_exact(self, facilities):
        # Like notification_told_interval, but matches the notification to the
        # patient named in the TOLD message text.
        msgs = Message.objects.filter(direction='I',
                                      contact__location__parent__in=facilities,
                                      text__iregex='^told|^toll|^teld|^tod|^telld|^t0ld|^TOLD|^t01d|^t0ld').distinct()
        for msg in msgs:
            try:
                told_who = msg.text[msg.text.index(' '):].strip()
                last_notified, remind_who = self.last_notified_exact(msg.connection, msg.date, told_who)
                interval = msg.date - last_notified
                facility = msg.contact.location.parent.name
                district = msg.contact.location.parent.parent.name
                print "%s|%s|%s|%s|%s|%s|%s" % (district, facility, last_notified, msg.date, interval, self.deidentify(remind_who, False), self.deidentify(told_who, False))
            except Exception, e:
                # NOTE(review): exception bound but never logged.
                pass
    def last_told_exact(self, connection, confirm_time, who):
        # Most recent TOLD message about this patient before confirm_time.
        return Message.objects.filter(direction='I',
                                      date__lt=confirm_time,
                                      text__icontains=who,
                                      text__iregex='^told|^toll|^teld|^tod|^telld|^t0ld|^TOLD|^t01d|^t0ld',
                                      ).order_by('-date')[0].date
    def told_confirm_interval_exact(self, facilities):
        # Chain notification -> TOLD -> CONFIRM timestamps for each patient.
        keyword = "^cofirm|^confirm|^conferm|^confhrm|^cnfrm|^CONFIRM|^Confirm|^C0nfirm|^comfirm|^c0mfirm|^comferm|^comfhrm|^cmfrm|^CONFIRM|^C0NFIRM|^Comfirm|^C0mfirm|^confirmed|^confermed|^confhrmed|^cnfrmed|^CONFIRMed|^Confirmed|^comfirmed|^comfermed|^comfhrmed|^cmfrmed|^CONFIRMed|^Comfirmed"
        msgs = Message.objects.filter(direction='I',
                                      contact__location__parent__in=facilities,
                                      text__iregex=keyword).exclude(text__icontains='loveness').distinct()
        for msg in msgs:
            try:
                confirmed_who = msg.text[msg.text.index(' '):].strip()
                last_told = self.last_told_exact(msg.connection, msg.date, confirmed_who)
                last_notified, remind_who = self.last_notified_exact(msg.connection, last_told, confirmed_who)
                facility = msg.contact.location.parent.name
                district = msg.contact.location.parent.parent.name
                print "%s|%s|%s|%s|%s" % (district, facility, last_notified, last_told, msg.date)
            except Exception, e:
                # print e
                pass
    def __del__(self):
        # No cleanup required; defined explicitly (likely a leftover stub).
        pass
| [
"sinkalation@gmail.com"
] | sinkalation@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.