id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3387396 | <gh_stars>10-100
# Copyright (C) 2008 <NAME>, Science and Technology Facilities Council,
# Daresbury Laboratory.
# All rights reserved.
#
# Developed by: <NAME>
# Science and Technology Facilities Council
# Daresbury Laboratory
# Computational Science and Engineering Department
# Computational Chemistry Group
# http://www.cse.clrc.ac.uk/ccg
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal with the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimers.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the documentation
# and/or other materials provided with the distribution.
# Neither the names of the Science and Technology Facilities Council,
# Daresbury Laboratory, the Computational Science and Engineering Department,
# the Computational Chemistry Group, nor the names of its contributors may be
# used to endorse or promote products derived from this Software without
# specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS WITH THE SOFTWARE.
import string
import sys
import math
import toldiff_lcs
class tokenized_file:
    """
    Container for the data of a tokenized reference or data file.

    Two lookup tables are kept: `token` maps token numbers to the
    actual token tuples, and `line2token` maps a line number to the
    number of the first token found on that line.
    """
    def __init__(self):
        """Create the (initially empty) token and line lookup tables."""
        self.token = dict()
        self.line2token = dict()
def change_tol(list):
    """
    Convert a list of four strings into a single change-tolerance token,
    i.e. a tuple (type, tolerance, line no, token no in line).

    The tolerance string is cast according to the first character of
    the type field: "f" casts to float, "i" to int, "c" to complex and
    "t" keeps the text unchanged.  Any other leading character indicates
    corrupt input and currently leaves the tolerance untouched (a proper
    error report is still to be added).
    """
    toktype = list[0]
    tol = list[1]
    converters = {"f": float, "i": int, "c": complex}
    convert = converters.get(toktype[0])
    if convert is not None:
        tol = convert(tol)
    return (toktype, tol, int(list[2]), int(list[3]))
def string2token(t,nl,nt):
    """
    Turn the string *t* into a token: a tuple whose first element tags
    the type of the data stored in the second element.

    The type tags are:
      i - integer
      f - float/real
      c - complex
      s - string
    For navigational purposes the line number *nl* the token was on and
    the token number *nt* within that line are carried along as the
    third and fourth elements.
    """
    # Try the numeric interpretations from the most restrictive to the
    # least restrictive one and fall back to a plain string token.
    try:
        value = int(t)
        # Toldiff should treat -0 and 0 as the same token; a text based
        # comparison does not do that automatically, so force it.
        if value == 0:
            value = 0
        return ("i", value, nl, nt)
    except ValueError:
        pass
    # Fortran often prints double precision exponents with a "D"
    # (1.0D+01 instead of 1.0E+01).  Python does not know that
    # convention, so map any "d"/"D" onto "e" before converting.
    z = t.replace("d", "e").replace("D", "e")
    try:
        value = float(z)
        # Force -0.0 and 0.0 to compare as the same token.
        if value == 0.0:
            value = 0.0
        return ("f", value, nl, nt)
    except ValueError:
        pass
    # Complex handling is unlikely to trigger in practice: complex
    # numbers are usually printed as (1.0,2.0) rather than 1.0+2.0j, so
    # they cannot reliably be told apart from a pair of reals.
    try:
        value = complex(z)
        # Normalise a signed zero in either component, for the same
        # reason as above.
        if value.real == 0.0:
            value = complex(0.0, value.imag)
        if value.imag == 0.0:
            value = complex(value.real, 0.0)
        return ("c", value, nl, nt)
    except ValueError:
        return ("s", t, nl, nt)
def line2strlist(l,separators):
    """
    Break the line *l* up into a list of strings.

    The line is first split on whitespace; every fragment is then
    repeatedly split at each of the *separators*, which are themselves
    kept as stand-alone strings in the result.

    Blank lines yield an empty list on purpose.  Earlier experiments
    with a dummy "#newline#" token produced confusing diffs: deleting a
    line just before a whitespace line showed up as a change on two
    lines, and a line holding a single token became ambiguous.  So the
    whitespace lines are dealt with later, in the token-to-line snake
    list conversion, instead of here.
    """
    fragments = l.split()
    if not fragments:
        # Blank line: deliberately return no tokens at all (see above).
        return fragments
    for sep in separators:
        pieces = []
        for fragment in fragments:
            remainder = fragment
            # partition() removes one occurrence per call, so counting
            # the occurrences up front gives the exact number of splits.
            for _ in range(fragment.count(sep)):
                head, found, remainder = remainder.partition(sep)
                if head != "":
                    pieces.append(head)
                if found != "":
                    pieces.append(found)
            if remainder != "":
                pieces.append(remainder)
        fragments = pieces
    return fragments
def line2tokens(l,nline,separators):
    """
    Tokenize the line *l* and return the tokens as a list.

    *separators* lists the non-whitespace characters at which strings
    are split; *nline* is the line number recorded in every token, and
    the tokens are numbered within the line starting from 1.
    """
    return [string2token(word, nline, ntok)
            for ntok, word in enumerate(line2strlist(l, separators), 1)]
def compare_tokens(ref,dat,tol,feps,ieps):
    """
    Compare two tokens taking a potential tolerance into account.
    The number returned is the number of characters that are the same.

    For string tokens the score literally counts matching character
    positions.  For numeric tokens it is a logarithmic measure of the
    relative agreement (roughly the number of matching decimal digits,
    capped at -log10(feps)).  A return value of -1 signals that the
    token types differ, so the tokens cannot match at all.

    ref  -- reference token tuple (type, data, line no, token no)
    dat  -- data token tuple (type, data, line no, token no)
    tol  -- tolerance token tuple, or "" when no tolerance is stored
    feps -- relative precision used for float/complex comparisons
    ieps -- smallest significant difference for integer comparisons
    """
    # str(tol) is "" only when no tolerance token was stored.
    tmp = str(tol)
    (tr,dr,lnr,tnr) = ref
    (td,dd,lnd,tnd) = dat
    result = 0
    if tmp == "":
        if tr == td:
            if tr == "s":
                # Count the character positions that agree.
                length = min(len(dr),len(dd))
                i = 0
                while (i < length):
                    if dr[i] == dd[i]:
                        result = result + 1
                    i = i + 1
            elif (tr == "f") or (tr == "c"):
                denom = abs(dr)
                #
                # The error enume is divided by 2 to introduce a bonus for matching
                # signs. So that if everything else is equal matching signs will
                # be preferred.
                #
                enume = abs(dr-dd)/2.0
                enuma = abs(abs(dr)-abs(dd))
                enum = min(enume,enuma)
                if enum <= 0.0:
                    # Identical values: use the largest representable score.
                    inverr = 1.0/feps
                else:
                    inverr = denom/enum
                if inverr <= 0.0:
                    # Guard log10 against a non-positive argument (possible
                    # when the reference value itself is zero).
                    inverr = 1.1
                result = result + min(-math.log10(feps),max(math.log10(inverr),0))
            elif tr == "i":
                #
                # The factor 10.0 is there to ensure a non-zero if the reference
                # number is exactly zero.
                #
                # The factor 5.0 is there to ensure that the result is
                # 0 if the difference is 1 order of magnitude larger than the
                # reference value.
                #
                denom = max(float(abs(dr)),10.0*ieps)*5.0
                #
                # The error enume is divided by 2 to introduce a bonus for matching
                # signs. So that if everything else is equal matching signs will
                # be preferred.
                #
                enume = max(float(abs(dr-dd)),ieps)/2.0
                enuma = max(float(abs(abs(dr)-abs(dd))),ieps)
                inverr = max(denom/enume,denom/enuma)
                result = result + max(math.log10(inverr),0)
            else:
                #
                # This must be a guide so if they match they match exactly
                #
                result = result + -math.log10(feps)
        else:
            # Different token types: no match possible.
            result = -1
    else:
        (tt,dt,lnt,tnt) = tol
        if tr != tt[0]:
            # The type of the tolerance and the reference token do not match!?
            sys.stdout.write("error mechanism needed here!\n")
        if tr == td:
            if tr == "s":
                # Mask out the tolerated character positions before comparing.
                tmpref = toldiff_lcs.tol_decode(dt,dr)
                tmpdat = toldiff_lcs.tol_decode(dt,dd)
                length = min(len(tmpref),len(tmpdat))
                i = 0
                while (i < length):
                    if tmpref[i] == tmpdat[i]:
                        result = result + 1
                    i = i + 1
            elif (tr == "f") or (tr == "c"):
                denom = abs(dr)
                if tt[1] == "a":
                    # An "a" tolerance compares absolute values, so a sign
                    # change is tolerated.
                    enum = abs(abs(dr)-abs(dd))
                else:
                    # Compare the plain values.  The divide by 2 introduces
                    # a bonus for matching signs.
                    enume = abs(dr-dd)/2.0
                    enuma = abs(abs(dr)-abs(dd))
                    enum = min(enume,enuma)
                if enum <= 0.0:
                    inverr = 1.0/feps
                else:
                    inverr = denom/enum
                if inverr <= 0.0:
                    inverr = 1.1
                result = result + min(-math.log10(feps),max(math.log10(inverr),0))
            elif tr == "i":
                #
                # The factor 10.0 is there to ensure a non-zero if the reference
                # number is exactly zero.
                #
                # The factor 5.0 is there to ensure that the result is
                # 0 if the difference is 1 order of magnitude larger than the
                # reference value.
                #
                denom = max(float(abs(dr)),10.0*ieps)*5.0
                if tt[1] == "a":
                    # Compare absolute values.
                    enume = max(float(abs(abs(dr)-abs(dd))),ieps)
                else:
                    # Compare the plain values.  The additional term ieps
                    # introduces a small penalty for ignoring the sign change
                    # so that if everything else is equal the signs will tend
                    # to match up.
                    enume = max(float(abs(dr-dd)),ieps)/2.0
                    enuma = max(float(abs(abs(dr)-abs(dd))),ieps)
                    enume = min(enume,enuma)
                inverr = denom/enume
                result = result + max(math.log10(inverr),0)
            else:
                #
                # This must be a guide so if they match they match exactly
                #
                result = result + -math.log10(feps)
        else:
            result = -1
    return result
def tokens_match(ref,dat,tol,feps,ieps):
    """
    Decide whether the reference token *ref* and the data token *dat*
    match, honouring the tolerance token *tol* when one is stored.

    Returns True when the tokens match and False otherwise.  *feps*
    and *ieps* are accepted for interface compatibility with
    compare_tokens; an exact comparison does not need them.
    """
    (tr, dr, lnr, tnr) = ref
    (td, dd, lnd, tnd) = dat
    if str(tol) == "":
        # No tolerance recorded: the tokens have to agree exactly.
        if tr != td:
            return False
        if tr == "s":
            return dr == dd
        if (tr == "f") or (tr == "c"):
            return abs(dr - dd) <= 0.0
        if tr == "i":
            return abs(dr - dd) == 0
        # A guide token: matching types means matching guides.
        return True
    (tt, dt, lnt, tnt) = tol
    if tr != tt[0]:
        # The tolerance type and the reference token type disagree.
        sys.stdout.write("error mechanism needed here!\n")
    if tr != td:
        return False
    if tr == "s":
        # Mask out the tolerated character positions before comparing.
        return toldiff_lcs.tol_decode(dt, dr) == toldiff_lcs.tol_decode(dt, dd)
    if (tr == "f") or (tr == "c") or (tr == "i"):
        if tt[1] == "a":
            # An "a" tolerance compares absolute values, tolerating a
            # sign change.
            err = abs(abs(dr) - abs(dd))
        else:
            err = abs(dr - dd)
        return err <= dt
    # A guide token: matching types means matching guides.
    return True
def tolerance(ref,dat,tol,feps,ieps,itol_scale,ftol_scale,ctol_scale):
    """
    This function generates the tolerance needed to tolerate the difference
    between the reference and the data value, taking any pre-existing
    tolerances into account.
    The tolerance may be scaled by a scale factor Xtol_scale where X refers
    to the type of tolerance (i for integer, f for float, c for complex).

    Returns a tolerance token (type, data, line no, token no), or the
    empty string when no tolerance is needed or when the token types do
    not match (in which case no tolerance can help).

    Tolerance types ending in "a" compare absolute values (a sign change
    is tolerated); types ending in "d" compare the plain difference.
    For strings the tolerance data is an encoded mask marking the
    character positions that are allowed to differ.
    """
    tmp = str(tol)
    (tr,dr,lnr,tnr) = ref
    (td,dd,lnd,tnd) = dat
    result = ""
    #
    # Increase the value for the precision to ensure that tolerances are
    # rounded up to guarantee that accepted values are within the tolerances
    #
    if tr == td:
        if tmp == "":
            # No pre-existing tolerance: derive one from scratch.
            if tr == "s":
                # Build a mask with "#" on every differing character
                # position, including the length difference at the end.
                nmin = min(len(dr),len(dd))
                nmax = max(len(dr),len(dd))
                i = 0
                tol_ln = ""
                while (i < nmin):
                    if dr[i] != dd[i]:
                        tol_ln = tol_ln + "#"
                    else:
                        tol_ln = tol_ln + " "
                    i = i + 1
                while (i < nmax):
                    tol_ln = tol_ln + "#"
                    i = i + 1
                dt = toldiff_lcs.tol_encode(tol_ln)
                if dt != "":
                    result = ("s",dt,lnr,tnr)
            elif tr == "f":
                # The (1.0+10.0*feps) factor rounds the tolerance up so the
                # accepted value is guaranteed to lie within it.
                enumn = abs(dr-dd)*(1.0+10.0*feps)*ftol_scale
                enuma = abs(abs(dr)-abs(dd))*(1.0+10.0*feps)*ftol_scale
                if max(enuma,enumn) > 0.0:
                    if enuma < 0.9*enumn:
                        # The absolute-value tolerance is clearly tighter.
                        result = ("fa",enuma,lnr,tnr)
                    else:
                        result = ("fd",enumn,lnr,tnr)
            elif tr == "i":
                # The +0.5 rounds the scaled difference to the nearest int.
                diffa = int(abs(abs(dr)-abs(dd))*itol_scale+0.5)
                diffn = int(abs(dr-dd)*itol_scale+0.5)
                if max(diffa,diffn) > 0:
                    if diffa < 0.9*diffn:
                        result = ("ia",diffa,lnr,tnr)
                    else:
                        result = ("id",diffn,lnr,tnr)
            elif tr == "c":
                enumn = abs(dr-dd)*(1.0+10.0*feps)*ctol_scale
                enuma = abs(abs(dr)-abs(dd))*(1.0+10.0*feps)*ctol_scale
                if max(enuma,enumn) > 0.0:
                    if enuma < 0.9*enumn:
                        result = ("ca",enuma,lnr,tnr)
                    else:
                        result = ("cd",enumn,lnr,tnr)
        else:
            # A tolerance already exists: widen it so it also covers the
            # current difference.
            (tt,dt,lnt,tnt) = tol
            if tr == "s":
                nmin = min(len(dr),len(dd))
                nmax = max(len(dr),len(dd))
                i = 0
                # Start from the existing mask and add the new differences.
                tol_ln = toldiff_lcs.tol_decode(dt,"")
                while (i < nmin):
                    if dr[i] != dd[i]:
                        tol_ln = tol_ln[:i] + "#" + tol_ln[i+1:]
                    i = i + 1
                while (i < nmax):
                    # Extend the mask to cover the length difference.
                    tol_ln = tol_ln[:i] + "#" + tol_ln[i+1:]
                    tol_ln = tol_ln + "#"
                    i = i + 1
                result = ("s",toldiff_lcs.tol_encode(tol_ln),lnt,tnt)
            elif tr == "f":
                enumn = abs(dr-dd)*(1.0+10.0*feps)
                enuma = abs(abs(dr)-abs(dd))*(1.0+10.0*feps)
                # NOTE(review): tt == "fa" compares the full type string, so
                # the existing "a" flavour is only preserved for exactly "fa".
                if enuma < 0.9*enumn or tt == "fa":
                    err = enuma
                    if err > dt:
                        # The old tolerance is too small: replace it with the
                        # (scaled) new error.
                        err = err*ftol_scale
                        result = ("fa",err,lnt,tnt)
                    else:
                        # The old tolerance already covers this difference.
                        result = ("fa",dt,lnt,tnt)
                else:
                    err = enumn
                    if err > dt:
                        err = err*ftol_scale
                        result = ("fd",err,lnt,tnt)
                    else:
                        result = ("fd",dt,lnt,tnt)
            elif tr == "i":
                diffa = abs(abs(dr)-abs(dd))
                diffn = abs(dr-dd)
                if diffa < 0.9*diffn or tt == "ia":
                    if diffa > dt:
                        diffa = int(diffa*itol_scale+0.5)
                        result = ("ia",diffa,lnt,tnt)
                    else:
                        result = ("ia",dt,lnt,tnt)
                else:
                    if diffn > dt:
                        diffn = int(diffn*itol_scale+0.5)
                        result = ("id",diffn,lnt,tnt)
                    else:
                        result = ("id",dt,lnt,tnt)
            elif tr == "c":
                enumn = abs(dr-dd)*(1.0+10.0*feps)
                enuma = abs(abs(dr)-abs(dd))*(1.0+10.0*feps)
                if enuma < 0.9*enumn or tt == "ca":
                    err = enuma
                    if err > dt:
                        err = err*ctol_scale
                        result = ("ca",err,lnt,tnt)
                    else:
                        result = ("ca",dt,lnt,tnt)
                else:
                    err = enumn
                    if err > dt:
                        err = err*ctol_scale
                        result = ("cd",err,lnt,tnt)
                    else:
                        result = ("cd",dt,lnt,tnt)
    return result
def reconstruct_line(dat,tokno,nguides):
    """
    Reconstruct, as well as possible, the text of the line holding the
    token *tokno* of the tokenized file *dat*.

    All whitespace information was lost during tokenisation, so the
    tokens are simply joined with single spaces (the line starts with
    one).  Guide tokens are suppressed by stepping *nguides*+1 tokens
    at a time, so only the tokens from the original file are reproduced.

    Returns a tuple of the reconstructed line and the number of the
    first token on the next line; ("", -1) if the file has no tokens.
    """
    if not dat.token:
        return ("", -1)
    lineno = dat.token[tokno][2]
    first = lineno2tokenno(dat, lineno)
    last = lineno2tokenno(dat, lineno + 1) - 1
    parts = []
    pos = first
    while pos <= last:
        parts.append(str(dat.token[pos][1]))
        pos = pos + nguides + 1
    line = "".join(" " + p for p in parts)
    return (line, lineno2tokenno(dat, lineno + 1))
def reconstruct_line_old(dat,tokno):
    """
    Old-style line reconstruction (kept for reference).

    Rebuilds the text of the line holding token *tokno* by walking the
    token dictionary forward from the first token on that line until a
    token from a different line (or the end of the file) is reached.
    Whitespace information was lost in the tokenisation, so the tokens
    are joined with single spaces.

    Returns a tuple of the reconstructed line and the number of the
    first token past that line.
    """
    lineno = dat.token[tokno][2]
    pos = dat.line2token[lineno]
    ntokens = len(dat.token)
    pieces = [str(dat.token[pos][1])]
    pos = pos + 1
    while pos <= ntokens and dat.token[pos][2] == lineno:
        pieces.append(str(dat.token[pos][1]))
        pos = pos + 1
    return (" ".join(pieces), pos)
def tokenno2lineno(dat,tokenno):
    """
    Map a token number to the number of the line it appears on.

    Needed to produce the diff output, which is expressed in line
    numbers, whereas the Longest Common Subsequence is expressed in
    token numbers.  Out-of-range token numbers map to the line just
    before the first (or just after the last) known line, so that
    changes at either end of the file can still be reported.
    """
    ntoken = len(dat.token)
    if ntoken == 0:
        # An empty file has no lines at all.
        lineno = 0
    elif tokenno <= 0:
        # Before the first token: the line just before the first line.
        # sorted() replaces the old keys().sort(), which shadowed the
        # builtin `list` and fails on Python 3 dict views.
        lineno = sorted(dat.line2token.keys())[0] - 1
    elif tokenno > ntoken:
        # Past the last token: the line just after the last line.
        lineno = sorted(dat.line2token.keys())[-1] + 1
    else:
        # In range: read the line number stored in the token itself.
        lineno = dat.token[tokenno][2]
    return lineno
def lineno2tokenno(dat,lineno):
    """
    Map a line number to the number of the first token on that line.

    Line 0 maps to token 0 and a line past the end of the file maps to
    one past the last token, mirroring tokenno2lineno.
    """
    if lineno == 0:
        return 0
    if lineno > len(dat.line2token):
        return len(dat.token) + 1
    return dat.line2token[lineno]
def max(a,b):
    """
    Return the larger of A and B.

    Note: intentionally shadows the builtin, restricting this module
    to the two-argument form.
    """
    return a if a > b else b
def min(a,b):
    """
    Return the smaller of A and B.

    Note: intentionally shadows the builtin, restricting this module
    to the two-argument form.
    """
    return b if a > b else a
| StarcoderdataPython |
1778995 | <filename>adjacentes.py
# Report the first pair of equal adjacent digits of a number
# (output messages are in Portuguese, as in the original program).
def adjacentes(n):
    """Print whether *n* contains two equal adjacent digits."""
    digito = -2
    encontrado = False  # becomes True once a matching pair is seen
    while n > 0 and not encontrado:
        digito = n % 10            # last digit of the number
        vizinho = (n // 10) % 10   # the digit just before it
        encontrado = digito == vizinho
        n = n // 10                # drop the last digit
    if encontrado:
        print("O número tem dois '" + str(digito) + "' como dígitos adjacentes iguais.")
    else:
        print("O número não possui dígitos adjacentes iguais.")
| StarcoderdataPython |
16160 | #coding:utf-8
#gaussian plot (position category)
#<NAME> 2016/06/16
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from __init__ import *
from numpy.random import multinomial,uniform,dirichlet
from scipy.stats import multivariate_normal,invwishart,rv_discrete
trialname = "testss"#raw_input("trialname?(folder) >")
start = "1"#raw_input("start number?>")
end = "40"#raw_input("end number?>")
filename = raw_input("learning trial name?>")#"001"#
sn = int(start)
en = int(end)
Data = int(en) - int(sn) +1
foldername = datafolder + trialname+"("+str(sn).zfill(3)+"-"+str(en).zfill(3)+")"
Mu_p = [ np.array([0 for i in xrange(dim_p)]) for k in xrange(Kp) ]
Sig_p = [ np.eye(dim_p)*sig_p_init for k in xrange(Kp) ]
#p_dm = [[[-0.3945, 0.0165]], [[-0.3555, -0.006], [-0.336, 0.18]], [[-0.438, -0.0315], [-0.315, 0.0225], [-0.2355, 0.18]], [[-0.453, -0.018], [-0.3, -0.1005], [-0.258, -0.0255]], [[-0.438, 0.036], [-0.318, 0.1875], [-0.3, 0.0795]], [[-0.5535, 0.0675], [-0.336, -0.0465]], [[-0.3885, 0.0555], [-0.3465, -0.126]], [[-0.3555, -0.1425], [-0.324, -0.039], [-0.273, 0.0825]], [[-0.3885, 0.135]], [[-0.285, -0.0135]], [[-0.5265, 0.045], [-0.33, 0.18], [-0.2685, 0.0165]], [[-0.453, 0.015], [-0.3795, 0.231]], [[-0.3825, -0.231]], [[-0.327, -0.18], [-0.309, -0.0075]], [[-0.3735, -0.1455]], [[-0.2685, -0.0135]], [[-0.438, 0.033], [-0.36, 0.204], [-0.2955, 0.0855]], [[-0.45, 0.048]], [[-0.447, -0.006], [-0.3735, 0.1785]], [[-0.4005, 0.1755], [-0.2655, -0.0705]]]
p_temp = []
#for d in xrange(D):
# p_temp = p_temp + p_dm[d]
#[[-0.319936213, 0.117489433],[-0.345566772, -0.00810185],[-0.362990185, -0.042447971],[-0.277759177, 0.083363745]]
#Sig_p = [[] , [], [] ,[]]
#Sig_p[0] = [[0.010389635, 0.001709343],[0.001709343, 0.018386732]]
#[[0.005423979, 0.000652657],[0.000652657, 0.001134736]]
#Sig_p[1] = [[0.001920786, -0.001210214],[-0.001210214, 0.002644612]]
#Sig_p[2] = [[0.003648299, -0.000312398],[-0.000312398, 0.001518234]]
#Sig_p[3] = [[0.001851727, -0.000656013],[-0.000656013, 0.004825636]]
k=0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_Mu_p.csv', 'r'):
itemList = line[:-1].split(',')
#for i in xrange(len(itemList)):
Mu_p[k] = [float(itemList[0]),float(itemList[1])]
k = k + 1
k=0
i=0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_Sig_p.csv', 'r'):
itemList = line[:-1].split(',')
if k < Kp:
if (i == 0):
#for i in xrange(len(itemList)):
print itemList
Sig_p[k][0][0] = float(itemList[0])
Sig_p[k][0][1] = float(itemList[1])
i = i + 1
elif (i == 1):
#for i in xrange(len(itemList)):
print itemList
Sig_p[k][1][0] = float(itemList[0])
Sig_p[k][1][1] = float(itemList[1])
i = i + 1
elif (i == 2):
i = 0
k = k + 1
zp = []
pi_p = [0.0 for k in range(Kp)] #[0.017826621173443864,0.28554229470170217,0.041570976925928926,0.1265347852145472,0.52852532198437785]
dm = 0
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_zp.csv', 'r'):
itemList = line[:-1].split(',')
for i in range(len(itemList)):
if itemList[i] != '':
#print dm,itemList[i]
zp = zp + [int(itemList[i])]
dm = dm + 1
for line in open(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_pi_p.csv', 'r'):
itemList = line[:-1].split(',')
for i in range(len(pi_p)):
pi_p[i] = float(itemList[i])
colors = ['b', 'g', 'm', 'r', 'c', 'y', 'k', 'orange', 'purple', 'brown']
color_iter = itertools.cycle(colors)
splot = plt.subplot(1, 1,1)
for k,(mean,covar,color) in enumerate(zip(Mu_p,Sig_p,color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse([mean[1],mean[0]], v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
#splot.add_artist(ell)
#ガウス分布から大量にサンプリングしてプロットする場合
for i in range(int(5000*2*pi_p[k])):#)):#
X = multivariate_normal.rvs(mean=mean, cov=covar)
plt.scatter(X[1],X[0], s=5, marker='.', color=color, alpha=0.2)
#データをクラスごとに色分けしてプロットする場合
#for i in range(len(p_temp)):
# plt.scatter(p_temp[i][1],p_temp[i][0], marker='x', c=colors[zp[i]])
"""
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
#gmm = mixture.GMM(n_components=5, covariance_type='full')
#gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
#for i, (clf, title) in enumerate([#(gmm, 'GMM'),
# (dpgmm, 'Dirichlet Process GMM')]):
"""
#clf=dpgmm
title = 'Position category'#data'
#Y_ = clf.predict(X)
#print Y_
"""
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
print covar
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
#if not np.any(Y_ == i):
# continue
#plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
"""
plt.ylim(-0.2, -0.8)
plt.xlim(-0.3, 0.3)
#plt.xticks([-0.8+0.1*i for i in range(7)])
#plt.yticks([-0.3+0.1*i for i in range(7)])
plt.title(title)
#w, h = plt.get_figwidth(), plt.get_figheight()
#ax = plt.add_axes((0.5 - 0.5 * 0.8 * h / w, 0.1, 0.8 * h / w, 0.8))
#aspect = (ax.get_xlim()[1] - ax.get_xlim()[0]) / (ax.get_ylim()[1] - ax.get_ylim()[0])
#ax.set_aspect(aspect)
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_position_data_plot_p1nd.eps', dpi=150)
plt.savefig(foldername +'/' + filename + '/' + trialname + '_'+ filename +'_position_data_plot_p1nd.png', dpi=150)
plt.show()
| StarcoderdataPython |
100464 | import copy
if __name__ == '__main__':
    # Finite-difference study of the sensitivity of augmented_objective
    # with respect to design variable x0[4], over a log-spaced range of
    # perturbation sizes.
    # NOTE(review): np, plt, x0 and augmented_objective are presumably
    # provided by the enclosing module or a star import -- confirm
    # before running this file stand-alone.
    epss = np.logspace(-10, -1, 30)
    baseline_objective = augmented_objective(x0)
    # Build one perturbed copy of the design vector per step size,
    # perturbing only component 4.
    xis = []
    for eps in epss:
        xi = copy.copy(x0)
        xi[4] += eps
        xis.append(xi)
    objs = [augmented_objective(xi) for xi in xis]
    # pool = mp.Pool(mp.cpu_count())
    # objs = pool.map(augmented_objective, xis)
    # pool.close()
    objs = np.array(objs)
    # One-sided finite-difference derivative estimate for each step size.
    derivs = (objs - baseline_objective) / epss
    fig, ax = plt.subplots(1, 1, figsize=(6.4, 4.8), dpi=200)
    # Log-log plot shows at which step size the estimate stabilises.
    plt.loglog(epss, np.abs(derivs), ".-")
    plt.show()
# When True, print each flip so the algorithm can be followed step by step.
tutor = False

def pancakesort(array):
    """
    Sort *array* in place using pancake sort (prefix reversals).

    For each suffix size, the largest remaining element is flipped to
    the front (if it is not already there) and then flipped into its
    final position.  Lists of length <= 1 are returned unchanged;
    longer lists are sorted in place.

    Fixes the original `lamdba` typo, which made the key function a
    NameError/SyntaxError and the routine unusable.
    """
    if len(array) <= 1:
        return array
    if tutor:
        print()
    for size in range(len(array), 1, -1):
        # Index of the largest element within the unsorted prefix.
        maxindex = max(range(size), key=lambda i: array[i])
        if maxindex+1 != size:
            # Bring the largest element to the front first, if needed.
            if maxindex != 0:
                if tutor:
                    print(
                        'With: %r doflip %i' % (
                            ' '.join(str(x) for x in array), maxindex+1)
                    )
                array[:maxindex+1] = reversed(array[:maxindex+1])
            # Then flip it into its final position at the end of the prefix.
            if tutor:
                print(
                    'With: %r doflip %i' % (
                        ' '.join(str(x) for x in array), size
                    )
                )
            array[:size] = reversed(array[:size])
    if tutor:
        print()
print()
| StarcoderdataPython |
1797937 | <gh_stars>0
from sssom import parse, collapse, export_ptable
import unittest
import os
import logging
# Directory of this test module and the test data shipped next to it.
cwd = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.join(cwd, 'data')
class TestCollapse(unittest.TestCase):
    """Smoke tests for sssom.parse, collapse and export_ptable."""
    def setUp(self) -> None:
        # Every test starts from the same parsed mapping table.
        self.df = parse(f'{data_dir}/basic.tsv')
    def test_df(self):
        # Parsing produced a frame we can slice and print.
        df = self.df
        print(df[0:20])
        self.assertTrue(True)
    def test_collapse(self):
        # collapse() runs on the parsed frame without raising.
        df = collapse(self.df)
        print(df[0:20])
    def test_ptable(self):
        # The probability-table export runs without raising.
        export_ptable(self.df)
| StarcoderdataPython |
3249330 | #!/usr/bin/env python3
import datetime
import time
import unicornhathd
# Colour used to draw the digits (dim red, as an RGB triple).
COLOR = (128, 0, 0)
# Glyph bitmaps for the digits 0-9 and the colon.
# Each glyph is a 3 pixels wide x 6 pixels high = 18 pixel font,
# packed into an 18-bit integer, three bits per row, top row first.
NUMBERS = (
    0b111101101101101111, # 0
    0b110010010010010111, # 1
    0b111001001111100111, # 2
    0b111001111001001111, # 3
    0b100100100101111001, # 4
    0b111100111001001111, # 5
    0b111100100111101111, # 6
    0b111001001001001001, # 7
    0b111101111101101111, # 8
    0b111101111001001111, # 9
    0b000010000000010000, # :
)
# Set the display orientation (0 degrees).
unicornhathd.rotation(0)
# Glyph drawing helper.
def render_numeric(x, y, number):
    """
    Draw glyph *number* (0-9, or 10 for the colon) from NUMBERS with
    its anchor at (x, y); returns None for an unknown glyph index.
    """
    # Process the glyph one row at a time.
    for row_number in range(0, 6):
        try:
            # Extract the 3 bits of this row from the packed bitmap.
            row = NUMBERS[number] >> ((5 - row_number) * 3) & 0b111
        except IndexError:
            # NUMBERS is a tuple, so an out-of-range glyph raises
            # IndexError.  (The original caught KeyError, which a tuple
            # never raises, so unknown glyphs crashed instead of being
            # skipped.)
            return None
        # Process the row one column at a time.
        for col_number in reversed(range(0, 3)):
            # Set a pixel for every 1 bit in the row.
            if row & (0b1 << col_number):
                # Compute the x position by subtracting from the anchor,
                # which mirrors the x axis of the Unicorn HAT HD.
                x_point = x - (2 - col_number)
                # Compute the y position.
                y_point = y + row_number
                # Light the pixel in the buffer.
                unicornhathd.set_pixel(x_point, y_point, *COLOR)
# Main loop: redraw the clock forever.
while True:
    # Clear the frame buffer.
    unicornhathd.clear()
    # Fetch the current time.
    now = datetime.datetime.now()
    # Draw the tens digit of the hour only when it is non-zero.
    if now.hour >= 10:
        render_numeric(15, 0, now.hour // 10)
    # Draw the ones digit of the hour.
    render_numeric(12, 0, now.hour % 10)
    # Draw the colon, blinking: shown only on odd seconds.
    if now.second % 2:
        # The colon glyph is stored at NUMBERS[10].
        render_numeric(9, 0, 10)
    # Draw the tens and ones digits of the minutes.
    render_numeric(6, 0, now.minute // 10)
    render_numeric(2, 0, now.minute % 10)
    # Push the buffer to the display.
    unicornhathd.show()
    # Wait 0.1 seconds before the next refresh.
    time.sleep(0.1)
| StarcoderdataPython |
3244572 | <gh_stars>0
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text # Registers the ops.
from benchmarking_tools.model.prediction_model import PredictionModel
# Peculiar models that need more time to code:
# https://tfhub.dev/google/LaBSE/1
# Other models that do not fit our needs
# question-answer models
# https://tfhub.dev/google/universal-sentence-encoder-multilingual-qa/3
# https://tfhub.dev/google/universal-sentence-encoder-qa/3
# Specific for medical field
# https://tfhub.dev/google/experts/bert/pubmed/2
# https://tfhub.dev/google/experts/bert/pubmed/squad2/2
class HubPredictionModelWithPreprocessor(PredictionModel):
    """
    Sentence-embedding model loaded from TF Hub together with its
    matching text-preprocessing model.

    Subclasses only override the class attributes below; `build` wires
    preprocessor and encoder into a Keras model whose output is the
    encoder's pooled (sentence-level) embedding.
    """
    source="tf.hub"
    # TF Hub URL of the text preprocessing model (set by subclasses).
    preprocessor_url=""
    # TF Hub URL of the encoder itself (set by subclasses).
    tf_hub_url=""
    # Model family label, e.g. "BERT" (set by subclasses).
    family=""
    def build(self):
        # The model takes a batch of raw strings as input.
        text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
        preprocessor = hub.KerasLayer(self.preprocessor_url)
        encoder_inputs = preprocessor(text_input)
        # The encoder weights are frozen; this is inference only.
        encoder = hub.KerasLayer(self.tf_hub_url, trainable=False)
        outputs = encoder(encoder_inputs)
        pooled_output = outputs["pooled_output"] # [batch_size, hidden_size]
        # Token-level output; extracted for reference but not wired into
        # the model below (only the pooled output is used).
        sequence_output = outputs["sequence_output"] # [batch_size, seq_length, hidden_size]
        self.model = tf.keras.Model(text_input, pooled_output)
    def predict(self, sentences):
        # Return the pooled sentence embeddings as a numpy array.
        sentences_tensor = tf.constant(sentences)
        output_tensor = self.model(sentences_tensor)
        return output_tensor.numpy()
    def additional_infos(self):
        # Metadata reported alongside benchmark results.
        return {
            "source":self.source,
            "preprocessor_url":self.preprocessor_url,
            "tf_hub_url":self.tf_hub_url,
            "family":self.family,
            "word_level_output_available":True
        }
# ---------------------------------------------------------------------------
# Concrete TF Hub configurations.  Each subclass only pins the model family,
# the preprocessor URL and the encoder URL; all behaviour lives in
# HubPredictionModelWithPreprocessor.
# ---------------------------------------------------------------------------
class talkheads_ggelu_bert_en_large(HubPredictionModelWithPreprocessor):
    # BERT-large variant with talking-heads attention and gated GELU.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_large/1"
class bert_en_uncased_L12_H768_A12(HubPredictionModelWithPreprocessor):
    # Uncased BERT base: 12 layers, hidden size 768, 12 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3"
class small_bert_en_uncased_L4_H512_A8(HubPredictionModelWithPreprocessor):
    # Small BERT: 4 layers, hidden size 512, 8 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1"
class bert_en_uncased_L2_H128_A2(HubPredictionModelWithPreprocessor):
    # Tiny BERT: 2 layers, hidden size 128, 2 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1"
class bert_en_uncased_L24_H1024_A16(HubPredictionModelWithPreprocessor):
    # Uncased BERT large: 24 layers, hidden size 1024, 16 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3"
class bert_en_cased_L12_H768_A12(HubPredictionModelWithPreprocessor):
    # Cased BERT base: 12 layers, hidden size 768, 12 attention heads.
    # Fixed to use the *cased* preprocessor: this class previously paired
    # the cased encoder with bert_en_uncased_preprocess, which lower-cases
    # the input and mismatches a cased vocabulary (the other cased classes
    # in this file already use bert_en_cased_preprocess).
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3"
class bert_en_uncased_L2_H512_A8(HubPredictionModelWithPreprocessor):
    # Small BERT: 2 layers, hidden size 512, 8 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1"
class bert_en_uncased_L4_H128_A2(HubPredictionModelWithPreprocessor):
    # Small BERT: 4 layers, hidden size 128, 2 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1"
class bert_en_uncased_L2_H768_A12(HubPredictionModelWithPreprocessor):
    # Small BERT: 2 layers, hidden size 768, 12 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1"
class bert_en_uncased_L2_H256_A4(HubPredictionModelWithPreprocessor):
    # Small BERT: 2 layers, hidden size 256, 4 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1"
class lambert_en_uncased_L24_H1024_A16(HubPredictionModelWithPreprocessor):
    # "lambert" BERT-large variant (uncased, 24 layers).
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/1"
class small_bert_en_uncased_L12_H768_A12(HubPredictionModelWithPreprocessor):
    # small_bert checkpoint with BERT-base dimensions (12/768/12).
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1"
class bert_en_uncased_L4_H256_A4(HubPredictionModelWithPreprocessor):
    # Small BERT: 4 layers, hidden size 256, 4 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1"
class bert_en_uncased_L4_H768_A12(HubPredictionModelWithPreprocessor):
    # Small BERT: 4 layers, hidden size 768, 12 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1"
class bert_en_uncased_L8_H512_A8(HubPredictionModelWithPreprocessor):
    # Small BERT: 8 layers, hidden size 512, 8 attention heads.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1"
class bert_en_cased_L24_H1024_A16(HubPredictionModelWithPreprocessor):
    # Cased BERT large, paired with the cased preprocessor.
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3"
class bert_en_wwm_cased_L24_H1024_A16(HubPredictionModelWithPreprocessor):
    # Cased BERT large trained with whole-word masking (wwm).
    family="BERT"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3"
    tf_hub_url="https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3"
class bert_en_uncased_L8_H256_A4(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1"
class bert_en_uncased_L6_H256_A4(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1"
class bert_en_uncased_L12_H512_A8(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1"
class bert_en_uncased_L8_H128_A2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1"
class bert_en_wwm_uncased_L24_H1024_A16(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3"
class bert_en_uncased_L12_H256_A4(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1"
class talkheads_ggelu_bert_en_base(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1"
class bert_en_uncased_L8_H768_A12(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1"
class bert_en_uncased_L6_H768_A12(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1"
class bert_en_uncased_L6_H512_A8(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1"
class bert_en_uncased_L6_H128_A2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1"
class bert_en_uncased_L12_H128_A2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1"
class bert_en_uncased_L10_H768_A12(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1"
class bert_en_uncased_L10_H512_A8(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1"
class bert_en_uncased_L10_H256_A4(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1"
class bert_en_uncased_L10_H128_A2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1"
class bert_wiki_books(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url='https://tfhub.dev/google/experts/bert/wiki_books/2'
class bert_wiki_books_stt2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/google/experts/bert/wiki_books/sst2/2"
class bert_wiki_books_squad2(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/google/experts/bert/wiki_books/squad2/2"
class bert_wiki_books_qqp(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/google/experts/bert/wiki_books/qqp/2"
class bert_wiki_books_qnli(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/google/experts/bert/wiki_books/qnli/2"
class bert_wiki_books_mnli(HubPredictionModelWithPreprocessor):
family="BERT"
preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
tf_hub_url="https://tfhub.dev/google/experts/bert/wiki_books/mnli/2"
class HubPredictionModelWithPreprocessorAndDefaultSignature(PredictionModel):
    """Hub encoder + preprocessor pair whose sentence embedding is exposed
    under the encoder's "default" output signature (rather than
    "pooled_output")."""
    source = "tf.hub"
    preprocessor_url = ""
    tf_hub_url = ""
    family = ""

    def build(self):
        """Chain preprocessor and frozen encoder into a Keras model."""
        raw_text = tf.keras.layers.Input(shape=(), dtype=tf.string)
        tokenized = hub.KerasLayer(self.preprocessor_url)(raw_text)
        encoded = hub.KerasLayer(self.tf_hub_url, trainable=False)(tokenized)
        # "default" holds the [batch_size, emb_size] sentence embedding.
        self.model = tf.keras.Model(raw_text, encoded["default"])

    def predict(self, sentences):
        """Embed a batch of sentences; returns a numpy array."""
        return self.model(tf.constant(sentences)).numpy()

    def additional_infos(self):
        """Metadata describing this model (used for reporting)."""
        infos = dict(
            source=self.source,
            preprocessor_url=self.preprocessor_url,
            tf_hub_url=self.tf_hub_url,
            family=self.family,
        )
        infos["word_level_output_available"] = False
        return infos
# Universal Sentence Encoder CMLM variants (all use the "default" signature).
class UniversalSentenceEncoderCmlmEnBase(HubPredictionModelWithPreprocessorAndDefaultSignature):
    family="universal sentence encoder"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-base/1"
class UniversalSentenceEncoderCmlmMultilingualBaseBr(HubPredictionModelWithPreprocessorAndDefaultSignature):
    family="universal sentence encoder"
    preprocessor_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-preprocess/2"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base-br/1"
class UniversalSentenceEncoderCmlmMultilingualBase(HubPredictionModelWithPreprocessorAndDefaultSignature):
    family="universal sentence encoder"
    preprocessor_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-preprocess/2"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/multilingual-base/1"
class UniversalSentenceEncoderCmlm(HubPredictionModelWithPreprocessorAndDefaultSignature):
    family="universal sentence encoder"
    preprocessor_url="https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-cmlm/en-large/1"
class SimpleHubPredctionModel(PredictionModel):
    """Wrapper for hub modules callable directly on a batch of strings
    (no separate preprocessing model).

    NOTE(review): "Predction" is a typo for "Prediction"; kept because the
    subclasses below inherit from this exact name.
    """
    source = "tf.hub"
    tf_hub_url = ""
    family = ""

    def build(self):
        """Load the hub module; the loaded object is directly callable."""
        self.model = hub.load(self.tf_hub_url)

    def predict(self, sentences):
        """Return the embeddings of *sentences* as a numpy array."""
        embeddings = self.model(sentences)
        return embeddings.numpy()

    def additional_infos(self):
        """Metadata describing this model (used for reporting)."""
        return dict(
            source=self.source,
            tf_hub_url=self.tf_hub_url,
            family=self.family,
            word_level_output_available=False,
        )
# Declarative registry of "simple" hub models (string batch -> embeddings).
class UniversalSentenceEncoder(SimpleHubPredctionModel):
    family="universal sentence encoder"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder/4"
class UniversalSentenceEncoderMultilingual(SimpleHubPredctionModel):
    family="universal sentence encoder"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
class UniversalSentenceEncoderLarge(SimpleHubPredctionModel):
    family="universal sentence encoder"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-large/5"
class UniversalSentenceEncoderMultilingualLarge(SimpleHubPredctionModel):
    family="universal sentence encoder"
    tf_hub_url="https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3"
class NnlmEnDim128(SimpleHubPredctionModel):
    family="NNLM"
    tf_hub_url="https://tfhub.dev/google/nnlm-en-dim128/2"
class NnlmEnDim128WithNormalization(SimpleHubPredctionModel):
    family="NNLM"
    tf_hub_url="https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"
class NnlmEnDim50(SimpleHubPredctionModel):
    family="NNLM"
    tf_hub_url="https://tfhub.dev/google/nnlm-en-dim50/2"
class NnlmEnDim50WithNormalization(SimpleHubPredctionModel):
    family="NNLM"
    tf_hub_url="https://tfhub.dev/google/nnlm-en-dim50-with-normalization/2"
class GnewsSwivel20dim(SimpleHubPredctionModel):
    family="Swivel matrix factorization"
    tf_hub_url="https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
class WikiWords250(SimpleHubPredctionModel):
    family="Skipgram model"
    tf_hub_url="https://tfhub.dev/google/Wiki-words-250/2"
class WikiWords250WithNormalization(SimpleHubPredctionModel):
    family="Skipgram model"
    tf_hub_url="https://tfhub.dev/google/Wiki-words-250-with-normalization/2"
class WikiWords500(SimpleHubPredctionModel):
    family="Skipgram model"
    tf_hub_url="https://tfhub.dev/google/Wiki-words-500/2"
class WikiWords500WithNormalization(SimpleHubPredctionModel):
    """500-dim Wiki-words Skipgram embeddings with text normalization."""
    # Fix: stray dataset-marker text fused onto the end of the URL line
    # made this a syntax error; removed.
    family="Skipgram model"
    tf_hub_url="https://tfhub.dev/google/Wiki-words-500-with-normalization/2"
########
# Copyright (c) 2014-2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import requests_mock
import json
import os
from mock import MagicMock, patch
import logging
from cloudify.exceptions import RecoverableError, NonRecoverableError
from cloudify.mocks import MockCloudifyContext
from cloudify.state import current_ctx
from cloudify.manager import DirtyTrackingDict
from cloudify_rest import tasks
class TestPlugin(unittest.TestCase):
    """Unit tests for cloudify_rest.tasks (execute / bunch_execute).

    Each test builds a MockCloudifyContext, feeds a YAML template through
    ctx.get_resource, and either mocks the whole template processor
    (cloudify_rest.tasks.utility.process) or the REST layer (requests_mock).
    """

    def test_execute_mock_sdk(self):
        """bunch_execute forwards merged params/runtime-attrs/auth to
        utility.process; a second call checks that ``auth`` overrides hosts."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['--fake.cake--',
                                                         'test123.test'],
                                               'port': -1,
                                               'ssl': False,
                                               'verify': False,
                                               'params': {'f': 'e'}})
        _ctx.instance._runtime_properties = DirtyTrackingDict(
            {'b': {'c': 'd'}})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template1.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode('utf-8'))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        check_mock = MagicMock(return_value={})
        with patch(
            "cloudify_rest.tasks.utility.process", check_mock
        ):
            tasks.bunch_execute(templates=[{
                'params': {'USER': 'testuser'},
                'template_file': 'mock_param',
                'save_to': 'saved_params',
                'params_attributes': {
                    'a': ['b', 'c']}}])
        check_mock.assert_called_with(
            {'f': 'e', 'ctx': _ctx, 'a': 'd', 'USER': 'testuser'},
            template,
            {'params': {'f': 'e'}, 'verify': False, 'ssl': False,
             'port': -1, 'hosts': ['--fake.cake--', 'test123.test']},
            prerender=None, resource_callback=_ctx.get_resource)

        # overwrite hosts
        _ctx.instance._runtime_properties = DirtyTrackingDict(
            {'b': {'c': 'd'}})
        check_mock = MagicMock(return_value={})
        with patch(
            "cloudify_rest.tasks.utility.process", check_mock
        ):
            tasks.bunch_execute(
                templates=[{
                    'params': {'USER': 'testuser'},
                    'template_file': 'mock_param',
                    'save_to': 'saved_params',
                    'params_attributes': {
                        'a': ['b', 'c']}}],
                # new hosts
                auth={'hosts': ['over_write']})
        check_mock.assert_called_with(
            {'f': 'e', 'ctx': _ctx, 'a': 'd', 'USER': 'testuser'},
            template,
            {'params': {'f': 'e'}, 'verify': False, 'ssl': False,
             'port': -1, 'hosts': ['over_write']},
            prerender=None, resource_callback=_ctx.get_resource)

    def test_execute_bunch_http_no_exception(self):
        """bunch_execute against a mocked REST API stores the merged
        result_properties under the configured ``save_to`` key."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['--fake.cake--',
                                                         'test123.test'],
                                               'port': -1,
                                               'ssl': False,
                                               'verify': False})
        _ctx.instance._runtime_properties = DirtyTrackingDict(
            {'b': {'c': 'd'}})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template1.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode('utf-8'))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        with requests_mock.mock(
                real_http=True) as m:  # real_http to check fake uri and get ex
            # call 1
            with open(
                os.path.join(__location__, 'get_response1.json'), 'r'
            ) as f:
                m.get('http://test123.test:80/testuser/test_rest/get',
                      json=json.load(f),
                      status_code=200)

            def _match_request_text(request):
                return '101' in (request.text or '')

            # call 2
            m.post('http://test123.test:80/test_rest/posts',
                   additional_matcher=_match_request_text,
                   request_headers={'Content-type': 'test/type'},
                   text='resp')

            # call 1
            with open(
                os.path.join(__location__, 'get_response2.json'), 'r'
            ) as f:
                m.get('http://test123.test:80/get',
                      json=json.load(f),
                      headers={'Content-Type': 'application/json'},
                      status_code=200)

            tasks.bunch_execute(templates=[{
                'params': {'USER': 'testuser'},
                'template_file': 'mock_param',
                'save_to': 'saved_params',
                'params_attributes': {
                    'a': ['b', 'c']}}])

        self.assertDictEqual(
            _ctx.instance.runtime_properties.get(
                'saved_params', {}).get('result_properties', {}),
            {'nested_key0': u'nested_value1',
             'nested_key1': u'nested_value2',
             'id0': u'1',
             'id1': u'101',
             'owner1': {'id': 'Bob'},
             'owner2': {'colour': 'red', 'name': 'bed', 'id': 'Carol'},
             'owner0': {'colour': 'black', 'name': 'book'}})

    def test_execute_http_no_exception(self):
        """execute against a mocked REST API stores result_properties
        directly in the instance runtime properties."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['--fake.cake--',
                                                         'test123.test'],
                                               'port': -1,
                                               'ssl': False,
                                               'verify': False})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template1.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode('utf-8'))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)
        params = {'USER': 'testuser'}

        with requests_mock.mock(
                real_http=True) as m:  # real_http to check fake uri and get ex
            # call 1
            with open(
                os.path.join(__location__, 'get_response1.json'), 'r'
            ) as f:
                m.get('http://test123.test:80/testuser/test_rest/get',
                      json=json.load(f),
                      status_code=200)

            def _match_request_text(request):
                return '101' in (request.text or '')

            # call 2
            m.post('http://test123.test:80/test_rest/posts',
                   additional_matcher=_match_request_text,
                   request_headers={'Content-type': 'test/type'},
                   text='resp')

            # call 1
            with open(
                os.path.join(__location__, 'get_response2.json'), 'r'
            ) as f:
                m.get('http://test123.test:80/get',
                      json=json.load(f),
                      headers={'Content-Type': 'application/json'},
                      status_code=200)

            tasks.execute(params=params, template_file='mock_param')
        # _ctx = current_ctx.get_ctx()
        self.assertDictEqual(
            _ctx.instance.runtime_properties.get('result_properties'),
            {'nested_key0': u'nested_value1',
             'nested_key1': u'nested_value2',
             'id0': u'1',
             'id1': u'101',
             'owner1': {'id': 'Bob'},
             'owner2': {'colour': 'red', 'name': 'bed', 'id': 'Carol'},
             'owner0': {'colour': 'black', 'name': 'book'}})

    def test_execute_https_port_reco(self):
        """An HTTP 477 reply (listed as recoverable in template2) makes
        execute raise RecoverableError."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'host': 'test123.test',
                                               'port': 12345,
                                               'ssl': 'true',
                                               'verify': True})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template2.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode('utf-8'))
        current_ctx.set(_ctx)

        with requests_mock.mock() as m:
            m.delete('https://test123.test:12345/v1/delete',
                     text='resp',
                     status_code=477)

            with self.assertRaises(RecoverableError) as context:
                tasks.execute(params={}, template_file='mock_param')
            self.assertTrue(
                'Response code 477 '
                'defined as recoverable' in str(context.exception))

    def test_execute_overwrite_host_response_expecation(self):
        """Host defined inside template3 overrides the ctx host; a response
        not matching response_expectation triggers a recoverable retry."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['test123.test'],
                                               'port': 12345,
                                               'ssl': 'true',
                                               'verify': True})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template3.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode('utf-8'))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        with requests_mock.mock() as m:
            with open(
                os.path.join(__location__, 'put_response3.json'), 'r'
            ) as f:
                m.put(
                    'https://hostfrom_template.test:12345/v1/put_%20response3',
                    json=json.load(f),
                    status_code=200)

            with self.assertRaises(RecoverableError) as context:
                tasks.execute(params={}, template_file='mock_param')
            self.assertSequenceEqual(
                'Trying one more time...\n'
                "Response value:wrong_value "
                "does not match regexp: proper_value|good"
                " from response_expectation",
                str(context.exception))

    def test_execute_nonrecoverable_response(self):
        """A response matching nonrecoverable_response (template4) raises
        NonRecoverableError — no retry."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['test123.test'],
                                               'port': 12345,
                                               'ssl': 'true',
                                               'verify': True})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template4.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode("utf-8"))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        with requests_mock.mock() as m:
            with open(
                os.path.join(__location__, 'get_response1.json'), 'r'
            ) as f:
                m.get('https://test123.test:12345/v1/get_response1',
                      json=json.load(f),
                      status_code=200)

            with self.assertRaises(NonRecoverableError) as context:
                tasks.execute(params={}, template_file='mock_param')
            self.assertSequenceEqual(
                'Giving up... \n'
                "Response value: active matches "
                "regexp:active from nonrecoverable_response. ",
                str(context.exception))

    def test_execute_http_xml(self):
        """An XML response body (template5) is parsed and projected into
        result_properties."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['test123.test'],
                                               'port': -1,
                                               'ssl': False,
                                               'verify': False})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template5.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode("utf-8"))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        with requests_mock.mock() as m:
            with open(
                os.path.join(__location__, 'get_response5.xml'), 'r'
            ) as f:
                m.get('http://test123.test:80/v1/get_response5',
                      text=f.read(),
                      status_code=200)

            tasks.execute(params={}, template_file='mock_param')
        # _ctx = current_ctx.get_ctx()
        self.assertDictEqual(
            _ctx.instance.runtime_properties.get('result_properties'),
            {'UUID': '111111111111111111111111111111',
             'CPUID': 'ABS:FFF222777'})

    def test_execute_jinja_block_parse(self):
        """Jinja blocks in template6 round-trip complex params (nested
        dicts/lists) unchanged into the request payload."""
        _ctx = MockCloudifyContext('node_name',
                                   properties={'hosts': ['test123.test'],
                                               'port': -1,
                                               'ssl': False,
                                               'verify': False})
        _ctx.instance._runtime_properties = DirtyTrackingDict({})

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))

        with open(os.path.join(__location__, 'template6.yaml'), 'r') as f:
            template = f.read()
        _ctx.get_resource = MagicMock(return_value=template.encode("utf-8"))
        _ctx.logger.setLevel(logging.DEBUG)
        current_ctx.set(_ctx)

        custom_list = [{'key1': 'val1'},
                       {'key2': 'val2'},
                       ['element1', 'element2']]
        params = {'custom_list': custom_list}
        with requests_mock.mock(
                real_http=True) as m:
            m.post('http://test123.test:80/v1/post_jinja_block',
                   text="resp")

            tasks.execute(params=params, template_file='mock_param')
        parsed_list = _ctx.instance.runtime_properties.get(
            'calls')[0].get('payload').get('jinja_block')
        self.assertListEqual(parsed_list, custom_list)
# examples/dhc/rule_example.py (from fruttasecca/hay_checker)
#!/usr/bin/python3
# Example: computing the "rule" data-quality metric with hay_checker on Spark.
from pyspark.sql import SparkSession

from haychecker.dhc.metrics import rule

spark = SparkSession.builder.appName("rule_example").getOrCreate()

df = spark.read.format("csv").option("header", "true").load("examples/resources/employees.csv")
df.show()

# Single-condition rule, computed immediately on the dataframe.
condition1 = {"column": "salary", "operator": "gt", "value": 2100}
conditions = [condition1]
r1 = rule(conditions, df)[0]
print("Rule salary>2100: {}".format(r1))

# Two multi-condition rules built as deferred tasks, combined and run once.
condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "title", "operator": "eq", "value": "Sales Representative"}
conditions = [condition1, condition2]
task1 = rule(conditions)
condition1 = {"column": "salary", "operator": "lt", "value": 2100}
condition2 = {"column": "city", "operator": "eq", "value": "London"}
conditions = [condition1, condition2]
task2 = rule(conditions)

task3 = task1.add(task2)
result = task3.run(df)
r1 = result[0]["scores"][0]
r2 = result[1]["scores"][0]
# Fix: stray dataset-marker text fused onto the end of this line removed.
print("Rule salary<2100 and title=\"Sales Representative\": {},"
      " rule salary<2100 and city=\"London\": {}".format(r1, r2))
"""
functions for bin/desi_compute_nightly_bias script
"""
import argparse
from desispec.ccdcalib import compute_nightly_bias
from desispec.io.util import decode_camword, parse_cameras
def parse(options=None):
    """Parse command line options for desi_compute_nightly_bias.

    Args:
        options: optional list of argument strings; when None, sys.argv
            is parsed (useful for calling from tests or wrappers).

    Returns:
        argparse.Namespace, with ``.cameras`` already expanded from a
        camword string into a list of individual cameras.
    """
    p = argparse.ArgumentParser(
        description="Compute nightly bias from ZEROs")
    p.add_argument('-n', '--night', type=int, required=True,
                   help='YEARMMDD to process')
    p.add_argument('-c', '--cameras', type=str,
                   default='a0123456789', help='list of cameras to process')
    p.add_argument('-o', '--outdir', type=str,
                   help='output directory')
    p.add_argument('--nzeros', type=int, default=25,
                   help='number of input ZEROS to use (saves memory)')
    p.add_argument('--minzeros', type=int, default=20,
                   help='minimum number of good ZEROs required')
    p.add_argument('--mpi', action='store_true',
                   help='use_mpi')

    args = p.parse_args(options)  #- uses sys.argv if options is None

    #- Convert cameras into list
    args.cameras = decode_camword(parse_cameras(args.cameras))

    return args
def main(args=None, comm=None):
    """Entry point: compute the nightly bias.

    Args:
        args: None (parse sys.argv), a list/tuple of option strings, or a
            pre-parsed argparse.Namespace.
        comm: optional MPI communicator; created on demand when --mpi is set.
    """
    if args is None:
        args = parse()
    elif isinstance(args, (list, tuple)):
        args = parse(args)

    # Work on a copy so we don't destructively mutate a Namespace owned by
    # the caller (the previous code deleted 'mpi' from args itself).
    kwargs = vars(args).copy()
    # 'mpi' is a CLI-only flag, not an argument of compute_nightly_bias();
    # pop with a default so Namespaces that never defined it also work.
    use_mpi = kwargs.pop('mpi', False)

    if comm is None and use_mpi:
        from mpi4py import MPI
        comm = MPI.COMM_WORLD

    compute_nightly_bias(**kwargs, comm=comm)
# Fixes: stray dataset-marker text fused onto the first import line removed;
# exact-duplicate `declarative_base` import (repeated four lines below) dropped.
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Module-level engine / session bound to the local SQLite database of GOSTs.
engine = create_engine('sqlite:///gosts.db')
Session = sessionmaker(bind=engine)
session = Session()
Base = declarative_base()
# TODO: ideally all of this setup should be moved into the package __init__.
class Gost(Base):
    """ORM-mapped GOST standard record (name + description)."""
    __tablename__ = 'gosts'

    id = Column(Integer, primary_key=True)
    name = Column(String)
    description = Column(String)

    def __str__(self):
        """Human-readable representation: the standard's name."""
        # Fix: stray dataset-marker text fused onto this line made it a
        # syntax error; removed.
        return self.name
class Solution:
    # Fix: stray dataset-marker prefix fused onto the `class` line removed.
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x.

        Binary search over [0, x]: after the loop, ``right`` is the largest
        value whose square is <= x (and ``left == right + 1``).
        """
        left, right = 0, x
        while left <= right:
            mid = left + (right - left) // 2
            if mid * mid <= x:
                left = mid + 1
            else:
                # No need to recompute or re-test the square here; the
                # original `elif square > x` branch was just `else`.
                right = mid - 1
        return right
# n : the number of input value
## Time Complexity: O( log n )
#
# The overhead in time is the upper-bound of binary search, which is of O( log n ).
## Space Complexity: O( 1 )
#
# The overhead in space is the variable for mathematical computation, which is of O( 1 )
def test_bench():
    """Print floor-sqrt for a fixed set of sample inputs.

    Expected output, one value per line: 0, 1, 8, 7, 6.
    """
    samples = (0, 1, 80, 63, 48)
    solver = Solution()
    for sample in samples:
        print(solver.mySqrt(sample))
if __name__ == '__main__':
    # Fix: stray dataset-marker text fused onto this call removed.
    test_bench()
# settings.py
# define some colors (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)

# game settings
WIDTH = 1024   # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768   # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60
TITLE = "Tilemap Demo"
BGCOLOR = DARKGREY

# defines tile size
TILESIZE = 32
# Grid dimensions in tiles (floats due to true division: 32.0 x 24.0).
GRIDWIDTH = WIDTH / TILESIZE
# Fix: stray dataset-marker text fused onto this line made it invalid; removed.
GRIDHEIGHT = HEIGHT / TILESIZE
# pylint: disable=C0121
"""http://www.logilab.org/ticket/124337"""
import gtk
def print_some_constant(arg=gtk.BUTTONS_OK):
    """crash because gtk.BUTTONS_OK, a gtk enum type, is returned by
    astroid as a constant

    NOTE(review): this is a regression test for the astroid/pylint crash in
    ticket 124337 — the gtk enum used as a default argument is the essential
    ingredient and must not be refactored away.
    """
    print(arg)
# from scikit-hep/statutils
# -*- coding: utf-8 -*-
from typing import Union
from ..calculators.basecalculator import BaseCalculator
from ..parameters import POI, POIarray
"""
Module defining the base class for hypothesis tests.
"""
class BaseTest(object):
    def __init__(
        self,
        calculator: BaseCalculator,
        poinull: Union[POI, POIarray],
        poialt: Union[POI, POIarray, None] = None,
    ):
        """Base class for hypothesis tests.

        Args:
            calculator: calculator to use for computing the pvalues
            poinull: parameters of interest for the null hypothesis
            poialt: parameters of interest for the alternative hypothesis

        Raises:
            TypeError: if calculator is not a BaseCalculator instance
        """
        if not isinstance(calculator, BaseCalculator):
            msg = "Invalid type, {0}, for calculator. Calculator required."
            # Fix: the message was raised with the "{0}" placeholder never
            # formatted; include the offending type.
            raise TypeError(msg.format(type(calculator)))
        self._calculator = calculator

        self.calculator.check_pois(poinull)
        if poialt:
            self.calculator.check_pois(poialt)
            self.calculator.check_pois_compatibility(poinull, poialt)

        self._poinull = poinull
        self._poialt = poialt

    @property
    def poinull(self):
        """Returns the POI for the null hypothesis."""
        return self._poinull

    @property
    def poialt(self):
        """Returns the POI for the alternative hypothesis."""
        return self._poialt

    @property
    def calculator(self):
        """Returns the calculator."""
        return self._calculator
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from exploration_policies.exploration_policy import *
class Boltzmann(ExplorationPolicy):
    def __init__(self, tuning_parameters):
        """
        :param tuning_parameters: A Preset class instance with all the running parameters
        :type tuning_parameters: Preset
        """
        ExplorationPolicy.__init__(self, tuning_parameters)
        self.temperature = tuning_parameters.exploration.initial_temperature
        self.final_temperature = tuning_parameters.exploration.final_temperature
        # Linear decay step applied once per action selection while training.
        self.temperature_decay_delta = (
            tuning_parameters.exploration.initial_temperature - tuning_parameters.exploration.final_temperature) \
            / float(tuning_parameters.exploration.temperature_decay_steps)

    def decay_temperature(self):
        """Linearly anneal the temperature towards final_temperature."""
        if self.temperature > self.final_temperature:
            self.temperature -= self.temperature_decay_delta

    def get_action(self, action_values):
        """Sample an action index from the Boltzmann (softmax) distribution
        over ``action_values`` at the current temperature."""
        if self.phase == RunPhase.TRAIN:
            self.decay_temperature()
        # softmax calculation; subtracting the max logit keeps np.exp from
        # overflowing for large action values and leaves the resulting
        # probabilities mathematically unchanged
        logits = action_values / self.temperature
        exp_probabilities = np.exp(logits - np.max(logits))
        probabilities = exp_probabilities / np.sum(exp_probabilities)
        probabilities[-1] = 1 - np.sum(probabilities[:-1])  # make sure probs sum to 1
        # choose actions according to the probabilities
        return np.random.choice(range(self.action_space_size), p=probabilities)

    def get_control_param(self):
        """Current exploration temperature (reported for monitoring)."""
        return self.temperature
# Generated by Django 2.0.3 on 2018-04-09 20:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.3): adds ``reviewed_at`` to
    selectedenrollmenttype and alters ``reviewed_by`` (nullable FK to the
    user model).

    NOTE(review): generated code — change schema via a new migration, not
    by editing this one.
    """

    dependencies = [
        ('entrance', '0070_auto_20180409_2028'),
    ]

    operations = [
        migrations.AddField(
            model_name='selectedenrollmenttype',
            name='reviewed_at',
            field=models.DateTimeField(blank=True, default=None, help_text='Когда заявка была рассмотрена', null=True, verbose_name='время модерации'),
        ),
        migrations.AlterField(
            model_name='selectedenrollmenttype',
            name='reviewed_by',
            field=models.ForeignKey(help_text='Пользователь, который одобрил или отклонил заявку', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='модератор'),
        ),
    ]
'''
Created on 2020-09-22 16:07:04
Last modified on 2020-09-23 07:10:39
@author: <NAME> (<EMAIL>))
'''
# imports
from abaqus import session
# variable initialization and odb opening
job_name = 'Simul_SUPERCOMPRESSIBLE_RIKS'
odb_name = '{}.odb'.format(job_name)
odb = session.openOdb(name=odb_name)
riks_results = {}
# reference point data
variables = ['U', 'UR', 'RF', 'RM']
set_name = 'ZTOP_REF_POINT'
step = odb.steps[odb.steps.keys()[-1]]
directions = (1, 2, 3)
nodes = odb.rootAssembly.nodeSets[set_name].nodes[0]
# get variables
for variable in variables:
y = []
for node in nodes:
instance_name = node.instanceName if node.instanceName else 'ASSEMBLY'
name = 'Node ' + instance_name + '.' + str(node.label)
historyOutputs = step.historyRegions[name].historyOutputs
node_data = []
for direction in directions:
node_data.append([data[1] for data in historyOutputs['%s%i' % (variable, direction)].data])
y.append(node_data)
riks_results[variable] = y[0]
# deformation
frames = step.frames
nodeSet = odb.rootAssembly.elementSets[' ALL ELEMENTS']
directions = (1, 3,)
variable = 'E'
values = []
for frame in frames:
varFieldOutputs = frame.fieldOutputs[variable]
outputs = varFieldOutputs.getSubset(region=nodeSet).values
output_frame = []
for direction in directions:
output_frame.append([output.data[direction - 1] for output in outputs])
values.append(output_frame)
riks_results[variable] = values
"""Items models description."""
from colorfield.fields import ColorField
from django.core.validators import MinValueValidator
from django.db import models
class ItemStatuses:
    """Constant item statuses."""

    # Individual status values; stored value and display label are identical.
    NEVER = "Never"
    ONCE = "Once"
    SELDOM = "Seldom"
    OFTEN = "Often"
    DAILY = "Daily"
    WEEKLY = "Weekly"
    MONTHLY = "Monthly"
    YEARLY = "Yearly"

    # (value, label) pairs in the format expected by Django's `choices`.
    ITEM_STATUSES_CHOICES = [
        (NEVER, NEVER),
        (ONCE, ONCE),
        (SELDOM, SELDOM),
        (OFTEN, OFTEN),
        (DAILY, DAILY),
        (WEEKLY, WEEKLY),
        (MONTHLY, MONTHLY),
        (YEARLY, YEARLY),
    ]
class Item(models.Model):
    """Item model description."""

    # City the item belongs to; also used as the string representation.
    city = models.CharField(
        max_length=200,
        verbose_name="Item city name",
    )
    # Validity period of the item; both ends indexed for range queries.
    start_date = models.DateField(
        db_index=True,
        verbose_name="Item start date",
    )
    end_date = models.DateField(
        db_index=True,
        verbose_name="Item end date",
    )
    # Price, restricted to non-negative values.
    price = models.FloatField(
        validators=[MinValueValidator(0.0)],
        verbose_name="Item price",
    )
    # Recurrence status, restricted to the ItemStatuses choices.
    status = models.CharField(
        max_length=50,
        choices=ItemStatuses.ITEM_STATUSES_CHOICES,
        verbose_name="Item status",
    )
    # Display colour stored as a hex string (django-colorfield).
    color = ColorField(
        format="hex",
        verbose_name="Item color",
    )

    class Meta:
        verbose_name = "Item"
        verbose_name_plural = "Items"

    def __str__(self):
        """Return string view of a city field."""
        return self.city
| StarcoderdataPython |
92245 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 24 16:57:31 2017
@author: Jean-Michel
"""
import AstarClass as AC
import sys
sys.path.append("../model")
from WeatherClass import Weather
import numpy as np
import SimulatorTLKT as SimC
from SimulatorTLKT import Simulator
import matplotlib.pyplot as plt
import mpl_toolkits
from mpl_toolkits.basemap import Basemap
import matplotlib
from matplotlib import animation
matplotlib.rcParams.update({'font.size': 16})
import copy
import pickle
import sys
sys.path.append("../solver")
from MyTree import Tree

# We load the forecast files
mydate = '20170519'
modelcycle = '00'
pathToSaveObj = '../data/' + mydate + '_' + modelcycle + '.obj'
Wavg = Weather.load(pathToSaveObj)

# We shift the times so that all times are in the correct bounds for interpolations
Tini = Wavg.time[0]
Wavg.time = Wavg.time - Tini

# We set up the parameters of the simulation
# times=np.arange(0,min([Wavg.time[-1],Wspr.time[-1]]),1*HOURS_TO_DAY)
# Tf=len(times)
Tf = 24 * 5  # simulation horizon: 5 days expressed in hours
HOURS_TO_DAY = 1/24
times = np.arange(0, Tf * HOURS_TO_DAY, 1 * HOURS_TO_DAY)
lats = np.arange(Wavg.lat[0], Wavg.lat[-1], 0.05)
lons = np.arange(Wavg.lon[0], Wavg.lon[-1], 0.05)
stateInit = [0, 47.5, -3.5 + 360]  # [time, lat, lon], longitude mapped into [0, 360)
SimC.Boat.UNCERTAINTY_COEFF = 0  # deterministic boat behaviour for these tests
Sim = Simulator(times, lats, lons, Wavg, stateInit)

# We set up the parameters of the simulation : destination, obtained by
# sailing a fixed heading for a few steps from the initial state.
heading = 230
tra = []
for t in Sim.times[0:5]:
    tra.append(list(Sim.doStep(heading)))
destination = copy.copy(Sim.state[1:3])
#destination = [47.45, 356.40]

# %% Unit tests for the AstarClass class
Sim.reset(stateInit)
solver_iso = AC.Pathfinder(Sim, stateInit[1:3], destination)

# %%
list_voisins = solver_iso.currentvoisin()
for noeud in list_voisins:
    print(noeud)
# should print 6 nodes

solver_iso.openlist = list_voisins
print(solver_iso.petitfopen())
# should print the node with the smallest f among the previous list

#solver_iso.openlist = [noeud]
#for noeud in list_voisins:
#    solver_iso.ajout_openlist_trie_par_f_et_g(noeud)
#for noeud in solver_iso.openlist:
#    print(noeud)
# should print the previous list sorted by increasing f and, on ties, by decreasing g

# %%
noeud_faux = AC.Node(4, 5, 6)
noeud_vrai = AC.Node(8, list_voisins[3].lat, list_voisins[3].lon)
solver_iso.openlist = list_voisins
fait, noeud_id = solver_iso.testpresopen(noeud_faux)
print(fait)
print(noeud_id)
# should print "false" and "None"
fait, noeud_id = solver_iso.testpresopen(noeud_vrai)
print(fait)
print(noeud_id, '\n', noeud_vrai)
# should print "true" and 2 nodes with the same lat. and lon. but different time and value
# (the first one from the first printed list)
# testpresclose() being identical to testpresopen(), those tests are not repeated
solver_iso.reset()

# %% We do the simulation of isochrone solving
Sim.reset(stateInit)
solver_iso.reset(stateInit[1:3], destination)
politique1 = solver_iso.solver()
print(politique1)
Sim.reset(stateInit)
solver_iso.reset(stateInit[1:3], destination)
#politique2 = solver_iso.solverplus()
#print(politique2)
# the result should be the same for the 2 policies, but the computation time may differ
# max boat speed = 3 m/s ???
26846 | from stronghold.views import StrongholdPublicMixin
import django
from django.views.generic import View
from django.views.generic.base import TemplateResponseMixin
if django.VERSION[:2] < (1, 9):
from django.utils import unittest
else:
import unittest
class StrongholdMixinsTests(unittest.TestCase):
    """Tests for the StrongholdPublicMixin view mixin."""

    def test_public_mixin_sets_attr(self):
        # Mixing StrongholdPublicMixin into a view must mark its dispatch
        # method as public so stronghold skips the login requirement.
        class TestView(StrongholdPublicMixin, View):
            pass

        self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)

    def test_public_mixin_sets_attr_with_multiple_mixins(self):
        # The public marker must survive when other mixins are in the MRO too.
        class TestView(StrongholdPublicMixin, TemplateResponseMixin, View):
            template_name = 'dummy.html'

        self.assertTrue(TestView.dispatch.STRONGHOLD_IS_PUBLIC)
| StarcoderdataPython |
65946 | #!/usr/bin/env python
# coding: utf-8
# In[5]:
# Operator precedence: multiplication binds tighter than addition -> 3.
1 + 1 * 2

# In[4]:
# Floor division and exponentiation: 20 // 3 = 6, 20 // 7**2 = 20 // 49 = 0 -> 6.
20 // 3 + 20 // 7 ** 2

# In[2]:
# Add a random integer from [10, 100] (bounds inclusive) to 4; varies per run.
import random
4 + random.randint(10, 100)

# In[5]:
# Same expression re-evaluated in a later notebook cell.
import random
4 + random.randint(10, 100)

# In[ ]:
| StarcoderdataPython |
1662947 | '''
File name: test_predictPlantStatistics
Date created: 27/11/2018
Feature: #Enter feature description here
'''
from unittest import TestCase
import pytest
from elecsim.plants.plant_costs.estimate_costs.estimate_modern_power_plant_costs.predict_modern_plant_costs import \
PredictModernPlantParameters
# Module metadata.
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
class TestPredictPlantParameters(TestCase):
    """Tests for PredictModernPlantParameters.

    Covers full 2018 CCGT parameter estimation, mapping of simulation years
    onto the available cost-data years (2018/2020/2025), non-interpolatable
    parameter lookups, payment-spread estimation, and the cost-data
    existence check. Repeated assertion patterns are factored into private
    helpers; every public test method from the original suite is preserved.
    """

    def _assert_ccgt_parameter_estimation(self, capacity_mw, fixed_o_and_m_per_mw,
                                          insurance_cost_per_mw):
        """Run a full 2018 CCGT estimation and check every predicted parameter.

        Only fixed O&M and insurance costs vary with capacity between the
        tested plant sizes; all other expected values are identical.
        """
        estimated = PredictModernPlantParameters("CCGT", capacity_mw, 2018).parameter_estimation()
        expected = {
            'connection_cost_per_mw': 3300,
            'construction_cost_per_mw': 500000,
            'fixed_o_and_m_per_mw': fixed_o_and_m_per_mw,
            'infrastructure': 15100,
            'insurance_cost_per_mw': insurance_cost_per_mw,
            'pre_dev_cost_per_mw': 10000,
            'variable_o_and_m_per_mwh': 3.00,
            'pre_dev_period': 3,
            'operating_period': 25,
            'construction_period': 3,
            'efficiency': 0.54,
            'average_load_factor': 0.93,
            'construction_spend_years': [0.4, 0.4, 0.2],
            'pre_dev_spend_years': [0.44, 0.44, 0.12],
        }
        for parameter, value in expected.items():
            assert estimated[parameter] == value

    def test_parameter_estimation_for_ccgt_1200(self):
        self._assert_ccgt_parameter_estimation(1200, fixed_o_and_m_per_mw=12200,
                                               insurance_cost_per_mw=2100)

    def test_parameter_estimation_for_ccgt_1335_5(self):
        self._assert_ccgt_parameter_estimation(1335.5, fixed_o_and_m_per_mw=11800,
                                               insurance_cost_per_mw=2000)

    def setup_method(self, module):
        # Stubs of the seven cost-parameter names; _create_parameter_names is
        # expected to append the relevant cost-data year to each of them.
        self.initial_stub_cost_parameters = ['Connect_system_cost-Medium _', 'Constr_cost-Medium _',
                                             'Fixed_cost-Medium _',
                                             'Infra_cost-Medium _', 'Insurance_cost-Medium _',
                                             'Pre_dev_cost-Medium _',
                                             'Var_cost-Medium _']

    def _assert_parameter_names_for_year(self, simulation_year, expected_data_year):
        """Check that names built for simulation_year carry expected_data_year."""
        predictor = PredictModernPlantParameters("CCGT", 1200, simulation_year)
        cost_parameter_variables = predictor._create_parameter_names(self.initial_stub_cost_parameters)
        expected = ['{}{}'.format(stub, expected_data_year)
                    for stub in self.initial_stub_cost_parameters]
        assert cost_parameter_variables == expected

    def test_creation_of_parameter_names_2018(self):
        self._assert_parameter_names_for_year(2018, 2018)

    def test_creation_of_parameter_names_2019(self):
        # 2019 has no cost data of its own, so it falls back to 2018 prices.
        self._assert_parameter_names_for_year(2019, 2018)

    def test_creation_of_parameter_names_2020(self):
        self._assert_parameter_names_for_year(2020, 2020)

    def test_creation_of_parameter_names_2021(self):
        self._assert_parameter_names_for_year(2021, 2020)

    def test_creation_of_parameter_names_2022(self):
        self._assert_parameter_names_for_year(2022, 2020)

    def test_creation_of_parameter_names_2023(self):
        self._assert_parameter_names_for_year(2023, 2020)

    def test_creation_of_parameter_names_2024(self):
        self._assert_parameter_names_for_year(2024, 2020)

    def test_creation_of_parameter_names_2025(self):
        self._assert_parameter_names_for_year(2025, 2025)

    def test_creation_of_parameter_names_high_year(self):
        # Years beyond the newest data point clamp to the 2025 data.
        self._assert_parameter_names_for_year(200000, 2025)

    def test_creation_of_parameter_names_low_year(self):
        # Years before the oldest data point clamp to the 2018 data.
        self._assert_parameter_names_for_year(0, 2018)

    @staticmethod
    def _stub_parameter_dict(fill_value):
        """Build a complete parameter dict with every value set to fill_value."""
        parameter_names = ['connection_cost_per_mw', 'construction_cost_per_mw',
                           'fixed_o_and_m_per_mw', 'infrastructure',
                           'insurance_cost_per_mw', 'pre_dev_cost_per_mw',
                           'variable_o_and_m_per_mwh', 'pre_dev_period',
                           'operating_period', 'construction_period', 'efficiency',
                           'average_load_factor', 'construction_spend_years',
                           'pre_dev_spend_years']
        return {name: fill_value for name in parameter_names}

    def test_check_plant_exists_fails_with_no_data(self):
        # All-zero parameters mean no cost data was found for the plant type.
        with pytest.raises(ValueError) as excinfo:
            PredictModernPlantParameters("Fake_Plant", 1200, 2018).check_plant_exists(
                self._stub_parameter_dict(0))
        assert "No cost data for power plant of type: Fake_Plant" in str(excinfo.value)

    def test_check_plant_exists_with_data(self):
        # Non-zero parameters must pass the existence check without raising.
        PredictModernPlantParameters("Fake_Plant", 1200, 2018).check_plant_exists(
            self._stub_parameter_dict(100))

    def _assert_non_interpolatable_parameters(self, capacity_mw, expected_efficiency):
        """Check the fixed (non-interpolated) parameters for a 2018 CCGT plant."""
        predictor = PredictModernPlantParameters("CCGT", capacity_mw, 2018)
        assert predictor._estimate_non_interpolatable_parameters("Pre_Dur") == 3
        assert predictor._estimate_non_interpolatable_parameters("Operating_Period") == 25
        assert predictor._estimate_non_interpolatable_parameters("Constr_Dur") == 3
        assert predictor._estimate_non_interpolatable_parameters("Efficiency") == expected_efficiency
        assert predictor._estimate_non_interpolatable_parameters("Average_Load_Factor") == 0.93

    def test_estimate_non_interpolatable_parameters_for_ccgt_1200(self):
        self._assert_non_interpolatable_parameters(1200, expected_efficiency=0.54)

    def test_estimate_non_interpolatable_parameters_for_ccgt_1450(self):
        self._assert_non_interpolatable_parameters(1450, expected_efficiency=0.53)

    def _assert_payment_spreads(self, capacity_mw, expected_construction, expected_pre_development):
        """Check construction and pre-development payment spreads for a 2018 CCGT."""
        predictor = PredictModernPlantParameters("CCGT", capacity_mw, 2018)
        assert predictor._payment_spread_estimator("Constr") == expected_construction
        assert predictor._payment_spread_estimator("Pre") == expected_pre_development

    def test_payment_spread_estimator_for_ccgt_1200(self):
        self._assert_payment_spreads(1200, [0.4, 0.4, 0.2], [0.44, 0.44, 0.12])

    def test_payment_spread_estimator_for_ccgt_160(self):
        self._assert_payment_spreads(160, [0.4, 0.4, 0.2], [0.435, 0.435, 0.13])
| StarcoderdataPython |
3255558 | from sklearn.metrics import silhouette_samples, silhouette_score
from random import randint
import sys
sys.path.insert(0, 'src/genetic_algorithm/')
from individual import Individual
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances, polynomial_kernel, sigmoid_kernel, cosine_distances
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
class GeneticAlgorithm:
    """Genetic algorithm that places topic boundaries over a sequence of audio shots.

    An individual's dna is a binary list with one gene per shot: 1 marks that
    shot as the start of a new topic segment. Fitness rewards boundaries at
    shots with long pauses, long duration, high volume/pitch, many adverbs and
    deep drops in lexical similarity between neighbouring shots, while
    penalising the total number of boundaries.
    """

    def __init__(self, population_size, constructiveHeuristic_percent, mutation_rate,
                 cross_over_rate, shots, docSim, n_chunks, generations,
                 local_search_percent, video_length, stopwords, ocr_on):
        self.population_size = population_size
        self.constructiveHeuristic_percent = constructiveHeuristic_percent
        self.mutation_rate = mutation_rate
        self.cross_over_rate = cross_over_rate
        self.individuals = []
        self.shots = shots
        self.docSim = docSim
        self.samples_features = []
        self.dna_size = n_chunks
        self.ocr_on = ocr_on
        self.stopwords = stopwords
        self.empty_transcript_indexes = []
        self.window_size = 10
        self.sim_score_shot_window = []
        self.window_init_indexes = []
        # Pairwise cosine similarity between the shots' embedding vectors.
        self.getVectorRepresentation()
        self.sim = cosine_similarity(X=self.samples_features)
        self.sim_windows = None
        self.buildWindowSim()
        self.initializePopulation()
        self.generations = generations
        self.local_search_percent = local_search_percent
        self.video_length = video_length
        self.max_topic_duration_in_seconds = 300
        self.min_topic_duration_in_seconds = 300

    def buildWindowSim(self):
        """Precompute sim_windows[i] = similarity(shot i, shot i + 1).

        The last entry is padded with 0 because the final shot has no successor.
        """
        sim_win = [self.sim[i][i + 1] for i in range(len(self.shots) - 1)]
        sim_win.append(0)
        self.sim_windows = sim_win

    def calculate_fit_value(self, individual):
        """Score an individual in place (sets individual.fit_value).

        The score is a weighted sum of the utility of each chosen boundary shot
        minus a penalty proportional to the number of boundaries.
        """
        count = 0
        depths = []
        sum_utility_points = 0
        for i in range(len(individual.dna)):
            if individual.dna[i] == 1:
                # Similarity "depth": how much lower the similarity around shot
                # i is compared to its neighbours (boundaries sit in valleys).
                if i == 0:
                    depths.append(self.sim_windows[i + 1] - self.sim_windows[i])
                elif i == len(individual.dna) - 1:
                    depths.append(self.sim_windows[i - 1] - self.sim_windows[i])
                else:
                    depths.append(self.sim_windows[i - 1] - self.sim_windows[i]
                                  + self.sim_windows[i + 1] - self.sim_windows[i])
                # Utility of choosing shot i as a topic boundary.
                # BUG FIX: the original statement ended after the duration term,
                # leaving the volume/depth/pitch/adverb terms as a separate,
                # discarded expression. The full sum is now one statement,
                # matching the utility formula used in constructiveHeuristic().
                sum_utility_points += (
                    self.shots[i].pause_duration
                    + 0.02 * self.shots[i].duration
                    + (1 + (i / (len(self.shots) - 1)) * 0.1) * self.shots[i].volume
                    + 10 * depths[-1]
                    + self.shots[i].pitch * 0.01
                    + self.shots[i].adv_count
                )
                count += 1
        if count > 0:
            # Trade off total boundary utility against the segment count.
            individual.fit_value = 0.4 * sum_utility_points - 0.6 * count
        else:
            # No boundaries at all: strongly penalise the individual.
            individual.fit_value = -100000

    def getVectorRepresentation(self):
        """Collect each shot's word-embedding vector into samples_features."""
        self.samples_features = [shot.word2vec for shot in self.shots]

    def crossover(self, individual1, individual2):
        """Two-point crossover: middle segment from individual2, outer
        segments from individual1. Returns the new dna list."""
        point1 = randint(0, self.dna_size - 2)
        point2 = randint(0, self.dna_size - 2)
        while point1 == point2:
            # NOTE(review): the re-draw range differs from the initial draw
            # (it may return dna_size - 1); kept as-is since slicing copes.
            point2 = randint(0, len(individual1.dna) - 1)
        lo, hi = (point1, point2) if point1 < point2 else (point2, point1)
        return individual1.dna[:lo] + individual2.dna[lo:hi] + individual1.dna[hi:]

    def mutation(self, individual):
        """Flip one randomly chosen gene of the individual."""
        index = randint(0, self.dna_size - 1)
        individual.dna[index] = 0 if individual.dna[index] == 1 else 1

    def initializePopulation(self):
        """Create the initial population: part fully random, part heuristic."""
        num_random = int(self.population_size * (1 - self.constructiveHeuristic_percent))
        for _ in range(num_random):
            individual = Individual()
            individual.dna = [randint(0, 1) for _ in range(self.dna_size)]
            self.individuals.append(individual)
        num_heuristic = int(self.population_size * self.constructiveHeuristic_percent)
        for _ in range(num_heuristic):
            individual = Individual()
            individual.dna = self.constructiveHeuristic()
            self.individuals.append(individual)

    def run(self):
        """Evolve the population and return the sorted, de-duplicated topic
        start times (in seconds) of the best solution, always including 0."""
        generation = 0                       # renamed from `iter` (shadowed builtin)
        iterations_without_change = 0
        best_solution = None
        best_fit = -1000000
        # Top slice of the sorted population that survives each generation.
        elite_size = int(self.cross_over_rate * self.population_size)
        num_of_crossovers = self.population_size - elite_size
        while generation < self.generations:
            # Evaluate the whole population and sort best-first.
            for individual in self.individuals:
                self.calculate_fit_value(individual)
            self.individuals.sort(key=lambda x: x.fit_value, reverse=True)
            # Refine the best individuals with local search, then re-sort.
            for i in range(int(self.population_size * self.local_search_percent)):
                self.localsearch(self.individuals[i])
            self.individuals.sort(key=lambda x: x.fit_value, reverse=True)
            if self.individuals[0].fit_value > best_fit:
                print("Objective function value: " + str(self.individuals[0].fit_value))
                best_fit = self.individuals[0].fit_value
                best_solution = self.individuals[0].dna
                # BUG FIX: reset the stagnation counter on improvement so the
                # early stop below measures *consecutive* stale generations.
                iterations_without_change = 0
            else:
                iterations_without_change += 1
                if iterations_without_change > 150:
                    break
            # Replace the non-elite tail with offspring of elite parents.
            for i in range(num_of_crossovers):
                parent1 = randint(0, elite_size - 1)
                parent2 = randint(0, self.population_size - 1)
                while parent1 == parent2:
                    parent2 = randint(0, elite_size - 1)
                new_dna = self.crossover(self.individuals[parent1], self.individuals[parent2])
                self.individuals[elite_size + i].dna = new_dna
            # Mutate a random subset of the population.
            for _ in range(int(self.population_size * self.mutation_rate)):
                self.mutation(self.individuals[randint(0, self.population_size - 1)])
            generation += 1
        print(best_solution)
        boundary_times = [0]
        for i, gene in enumerate(best_solution):
            if gene == 1:
                boundary_times.append(self.shots[i].init_time)
        return sorted(set(boundary_times))

    def constructiveHeuristic(self):
        """Build a dna by randomly picking boundaries among the ~30% of shots
        with the highest utility (same formula as calculate_fit_value)."""
        utility_by_shot = {}
        boundary_indexes = [0]
        num_of_topics = randint(0, int((len(self.shots) - 1) / 2))
        for i in range(len(self.shots)):
            if i == 0:
                depth = self.sim_windows[i + 1] - self.sim_windows[i]
            elif i == self.dna_size - 1:
                depth = self.sim_windows[i - 1] - self.sim_windows[i]
            else:
                depth = (self.sim_windows[i - 1] - self.sim_windows[i]
                         + self.sim_windows[i + 1] - self.sim_windows[i])
            utility_by_shot[i] = (self.shots[i].pause_duration
                                  + 0.02 * self.shots[i].duration
                                  + (1 + (i / (len(self.shots) - 1)) * 0.1) * self.shots[i].volume
                                  + 10 * depth
                                  + self.shots[i].pitch * 0.01
                                  + self.shots[i].adv_count)
        ranked_shots = sorted(utility_by_shot.items(), key=lambda kv: kv[1], reverse=True)
        chosen_topics = 0
        while chosen_topics < num_of_topics:
            # Draw (possibly with repeats) among the best ~30% of shots.
            index = randint(0, int(0.3 * len(ranked_shots)))
            boundary_indexes.append(ranked_shots[index][0])
            chosen_topics += 1
        return [1 if i in boundary_indexes else 0 for i in range(self.dna_size)]

    def divideTopic(self, dna):
        """Add one boundary: pick a random gene that is 0 and set it to 1.

        BUG FIX: the original loop compared the *index* to 0
        (`while index_split != 0`), re-rolling until index 0 was drawn;
        it now inspects the gene value, matching its own guard below.
        """
        index_split = randint(0, self.dna_size - 1)
        max_attempts = 10
        attempts = 0
        while dna[index_split] != 0:
            index_split = randint(0, self.dna_size - 1)
            attempts += 1
            if attempts >= max_attempts:
                break
        if dna[index_split] == 0:
            dna[index_split] = 1
        return dna

    def mergeTopic(self, dna):
        """Remove one boundary: pick a random gene that is 1 and set it to 0.

        BUG FIX: mirrors divideTopic -- the original compared the *index*
        to 1 instead of the gene value checked by the guard below.
        """
        index_merge = randint(0, self.dna_size - 1)
        max_attempts = 10
        attempts = 0
        while dna[index_merge] != 1:
            index_merge = randint(0, self.dna_size - 1)
            attempts += 1
            if attempts >= max_attempts:
                break
        if dna[index_merge] == 1:
            dna[index_merge] = 0
        return dna

    def moveBound(self, dna):
        """Move a boundary: clear a random position and set one at/after it.

        NOTE(review): the source position is not checked to actually hold a
        boundary, so this can also act as a plain insertion; kept as-is.
        """
        index_init = randint(0, self.dna_size - 1)
        steps = randint(0, self.dna_size - 1 - index_init)
        dna[index_init] = 0
        dna[index_init + steps] = 1
        return dna

    def localsearch(self, individual):
        """Hill-climb around an individual using merge/divide/move edits.

        NOTE(review): worsening edits are not reverted and the reference
        fitness is never refreshed after an improvement; this stochastic
        behaviour is preserved from the original implementation.
        """
        movement_list = ['merge', 'divide', 'move']
        self.calculate_fit_value(individual)
        current_fit_value = individual.fit_value
        i = 0
        while True:
            if movement_list[i] == 'merge':
                individual.dna = self.mergeTopic(individual.dna)
            elif movement_list[i] == 'divide':
                individual.dna = self.divideTopic(individual.dna)
            elif movement_list[i] == 'move':
                individual.dna = self.moveBound(individual.dna)
            self.calculate_fit_value(individual)
            post_search_fit_value = individual.fit_value
            if post_search_fit_value > current_fit_value and movement_list[i] != 'move':
                i += 1
            elif post_search_fit_value <= current_fit_value:
                i -= 1
            if i == -1:
                break
| StarcoderdataPython |
3322302 | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QApplication, QWidget,
QHBoxLayout, QVBoxLayout,
QGroupBox, QRadioButton,
QPushButton, QLabel, QButtonGroup, QListWidget,
QTextEdit, QInputDialog, QMessageBox)
import json
# In-memory mapping of note title -> note text; loaded from info.json at startup.
notes = {}
'''
with open('info.json', 'w', encoding= 'utf-8') as file:
json.dump(notes, file, sort_keys = True, ensure_ascii = False)'''

# Main window: horizontal split with the editor on the left and the note
# list plus action buttons on the right.
app = QApplication([])
window = QWidget()
window.setWindowTitle('Умные заметки')
window.resize(900, 600)

line_g = QHBoxLayout()  # top-level layout
line_1 = QVBoxLayout()  # left column: text editor
line_2 = QVBoxLayout()  # right column: list and buttons

list_name = QLabel('Список заметок')
list_main = QListWidget()
text_in_list = QTextEdit()
button_create = QPushButton('Добавить заметку')
button_delete = QPushButton('Удалить заметку')
button_save = QPushButton('Сохранить заметку')

line_2.addWidget(list_name)
line_2.addWidget(list_main)
line_1.addWidget(text_in_list)
line_2.addWidget(button_create)
line_2.addWidget(button_save)
line_2.addWidget(button_delete)

line_g.addLayout(line_1)
line_g.addLayout(line_2)
window.setLayout(line_g)
def show_note():
    """Display the text of the clicked note in the editor pane."""
    selected = list_main.selectedItems()[0]
    text_in_list.setText(notes[selected.text()])


list_main.itemClicked.connect(show_note)
def add_note():
    """Prompt for a note title and create an empty note under it."""
    title, _ok = QInputDialog.getText(
        window, 'Добавить заметку', 'Название заметки:'
    )
    if title:
        notes[title] = ''
        list_main.addItem(title)


button_create.clicked.connect(add_note)
def save_note():
    """Persist the edited text of the selected note to info.json."""
    selection = list_main.selectedItems()
    if not selection:
        warning = QMessageBox()
        warning.setText('Заметка для сохранения не выбрана!')
        warning.exec()
        return
    notes[selection[0].text()] = text_in_list.toPlainText()
    with open('info.json', 'w', encoding='utf-8') as storage:
        json.dump(notes, storage, sort_keys=True, ensure_ascii=False)
def del_note():
    """Remove the selected note, refresh the UI and rewrite info.json."""
    selection = list_main.selectedItems()
    if not selection:
        warning = QMessageBox()
        warning.setText('Заметка для удаления не выбрана!')
        warning.exec()
        return
    del notes[selection[0].text()]
    list_main.clear()
    text_in_list.clear()
    list_main.addItems(notes)
    with open('info.json', 'w', encoding='utf-8') as storage:
        json.dump(notes, storage, sort_keys=True, ensure_ascii=False)
# Wire up the remaining buttons, show the UI, then load saved notes from disk
# and populate the list before entering the Qt event loop.
button_save.clicked.connect(save_note)
button_delete.clicked.connect(del_note)
window.show()
# NOTE(review): info.json must already exist, otherwise this raises
# FileNotFoundError before the event loop starts.
with open('info.json', 'r', encoding='utf-8') as file:
    notes = json.load(file)
list_main.addItems(notes)
app.exec()
3271185 | <reponame>opengovsg/ttt-scanner
from subprocess import check_output
from datetime import datetime
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from time import sleep
from time import strftime
from os import listdir, path, makedirs

# Date stamp used to bucket today's scan files, e.g. data/2017-01-01/.
today = strftime("%Y-%m-%d")

# Authenticate against Firebase with a service-account key file.
cred = credentials.Certificate("traintraintrain-bb07a.json")
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://traintraintrain-bb07a.firebaseio.com/'
})

filenames = []

# mkdir data/
if not path.exists('data/'):
    print "Making data dir"
    makedirs('data/')

# mkdir data/[date-today]
directory = 'data/' + str(today)
if not path.exists(directory):
    print "Making data/" + str(today) + " dir"
    makedirs(directory)

# Resume numbering from any existing output files ('outputNNNNN.json';
# characters 6-10 of the filename hold the zero-padded counter).
for f in listdir(directory):
    filenames.append(int(f[6:11]))
count = max(filenames)+1 if (len(filenames) > 0) else 1

# The session id groups this scan run under raw-data/<sessionId> in Firebase.
sessionId = ''
while sessionId == '':
    sessionId = raw_input("Enter the sessionId\n")
print "Using sessionId =", sessionId
ref = db.reference('raw-data/' + sessionId)

# Scan forever: every ~5 s list visible Wi-Fi networks via `airport -s`,
# record their identifiers with a timestamp, and save a numbered JSON file.
while True:
    try:
        print "Started scanning...",
        airportOutput = check_output(['airport', '-s'])
        print "complete!"
        result = dict()
        result2 = []
        time = datetime.now()
        result['time'] = str(time)
        result2.append(str(time))
        print ('Timestamp: ' + str(time))
        # NOTE(review): the header line of the `airport -s` output is not
        # skipped, so its second column is recorded too -- confirm intent.
        for line in airportOutput.split('\n'):
            if line != '':
                line = line.split()
                # Column 1 of the output is taken as the network address.
                address = line[1]
                result[address] = line
                result2.append(address)
        with open('data/' + str(today) + '/output%05d.json' % count, 'w') as file:
            print ("Saving to data/" + str(today) + "/output%05d.json" % count)
            json.dump(result2, file)
        count += 1
        # print "Pushing to firebase...",
        # ref.push({
        #     'local_time': str(time),
        #     'server_time': {'.sv': 'timestamp'},
        #     'data': result
        # })
        print "complete!"
    except Exception as error:
        print "Something crashed"
        print error
    finally:
        sleep(5)
| StarcoderdataPython |
193402 | <reponame>HiAwesome/dive-into-python3-practice<gh_stars>0
# Import the sibling module under an alias and print its dotted module name.
import c02.p044_humansize as humansize

print(humansize.__name__)
"""
c02.p044_humansize
"""
| StarcoderdataPython |
48404 | import logging
import pickle
from datetime import datetime
import munch
from rocketgram import Bot, Dispatcher, DefaultValuesMiddleware, ParseModeType
# Module-level logger and a single shared dispatcher that all bots use.
logger = logging.getLogger('mybot')
router = Dispatcher()


def get_bot(token: str) -> Bot:
    """Build a Bot wired to the shared router.

    Munch is used for globals/context data so they support attribute access,
    and HTML is installed as the default parse mode for outgoing messages.
    """
    bot = Bot(token, router=router, globals_class=munch.Munch, context_data_class=munch.Munch)
    bot.middleware(DefaultValuesMiddleware(parse_mode=ParseModeType.html))
    return bot
| StarcoderdataPython |
3367022 | <filename>spektral/layers/ops/modes.py<gh_stars>1-10
from tensorflow.keras import backend as K
# Operation-mode codes shared by the ops in this package; autodetect_mode
# below maps tensor-rank combinations onto these values.
SINGLE = 1  # Single (rank(a)=2, rank(b)=2)
MIXED = 2  # Mixed (rank(a)=2, rank(b)=3)
iMIXED = 3  # Inverted mixed (rank(a)=3, rank(b)=2)
BATCH = 4  # Batch (rank(a)=3, rank(b)=3)
UNKNOWN = -1  # Unknown
def autodetect_mode(a, b):
    """
    Return a code identifying the mode of operation (single, mixed, inverted mixed and
    batch), given a and b. See `ops.modes` for meaning of codes.
    :param a: Tensor or SparseTensor.
    :param b: Tensor or SparseTensor.
    :return: mode of operation as an integer code.
    """
    # Map each supported (rank(a), rank(b)) pair onto its mode code; any
    # other combination is reported as UNKNOWN.
    mode_by_ranks = {
        (2, 2): SINGLE,
        (2, 3): MIXED,
        (3, 2): iMIXED,
        (3, 3): BATCH,
    }
    return mode_by_ranks.get((K.ndim(a), K.ndim(b)), UNKNOWN)
# Packaging script for relay.runner (distutils + setuptools).
from distutils.core import setup

try:
    from setuptools import find_packages
except ImportError:
    # find_packages only ships with setuptools; abort with a clear message.
    print("Please install Distutils and setuptools"
          " before installing this package")
    raise

setup(
    name='relay.runner',
    version='0.1.10.dev0',
    description=(
        'A smart thermostat. Given a metric, or some timeseries that should'
        ' approach a given target, add heat or coolant as necessary'
        ' You can use Relay to auto-scale workers in large'
        ' distributed systems or do anything a thermostat might do.'
    ),
    long_description="Check the project homepage for details",
    keywords=[
        'relay', 'pid', 'pid controller', 'thermostat', 'tuning',
        'oscilloscope', 'auto-scale'],
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/sailthru/relay',
    packages=find_packages(),
    include_package_data=True,
    # Runtime requirements.
    install_requires=['argparse_tools>=1.0.6', 'colorlog', 'numpy'],
    # Optional extra enabling the web UI (ZeroMQ transport).
    extras_require={
        'webui': ['pyzmq'],
    },
    tests_require=['nose'],
    test_suite="nose.main",
    zip_safe=True,
    # Console entry point: the `relay` command invokes relay.__main__:go.
    entry_points = {
        'console_scripts': [
            'relay = relay.__main__:go',
        ],
        'setuptools.installation': [
            'eggsecutable = relay.__main__:go',
        ],
    },
)
| StarcoderdataPython |
4805089 | <filename>CODES/S7 - Functions-Methods - Working With Reusable Code/3-methodsdemo3.py
"""
Positional Parameters
They are like optional parameters
And can be assigned a default value, if no value is provided from outside
"""
def sum_nums(n1, n2=4):
    """Return the sum of two numbers.

    :param n1: first addend
    :param n2: second addend; defaults to 4 when omitted
    :return: n1 + n2
    """
    total = n1 + n2
    return total
# Demonstrate overriding the positional default with a keyword argument.
sum1 = sum_nums(4, n2=12)
print(sum1)
1602397 | import os
from base64 import b64encode, b64decode
from typing import AnyStr, List, Dict
from collections import Counter
import numpy as np
import cv2 as cv
import keras
import tensorflow as tf
from yolo4.model import yolo4_body
from decode_np import Decode
# Public API of this module.
__all__ = ("DetectJapan", "detect_japan_obj")

# One shared TF session registered as the Keras backend session (TF1-style
# API) so model loading and prediction below run in the same graph/session.
session = tf.Session()
keras.backend.set_session(session)
def get_class(classes_path):
    """Read one class name per line from *classes_path*.

    ``~`` in the path is expanded; each line is stripped of surrounding
    whitespace (empty lines become empty strings).
    """
    expanded_path = os.path.expanduser(classes_path)
    with open(expanded_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Parse the first line of *anchors_path* as comma-separated floats.

    ``~`` in the path is expanded. Returns the values reshaped into an
    (N, 2) numpy array of (width, height) anchor pairs.
    """
    with open(os.path.expanduser(anchors_path)) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(",")]
    return np.array(values).reshape(-1, 2)
class DetectJapan:
    """Detect and count Japanese yen coins in an image with a YOLOv4 model.

    The Keras model and its box decoder are built once in ``__init__``
    (weights are loaded from :attr:`model_path`), so prefer the module-level
    ``detect_japan_obj`` singleton over constructing new instances.
    """

    model_path = "JPY_weight.h5"  # Keras model or weights must be a .h5 file.
    anchors_path = "model_data/yolo4_anchors.txt"
    classes_path = "model_data/JPY_classes.txt"
    # Class-index -> coin label mapping used by count(); presumably matches
    # the label order in JPY_classes.txt -- TODO confirm.
    jpy_classes = ('JPY_500', 'JPY_100', 'JPY_50', 'JPY_10', 'JPY_1', 'JPY_5')

    def __init__(self, conf_thresh: float = 0.8, nms_thresh: float = 0.8):
        """Build the YOLOv4 body, load its weights and create the decoder.

        :param conf_thresh: minimum confidence score for a box to be kept
        :param nms_thresh: IoU threshold used for non-maximum suppression
        """
        class_names = get_class(self.classes_path)
        anchors = get_anchors(self.anchors_path)
        model_image_size = (416, 416)
        self._model: keras.Model = yolo4_body(
            inputs=keras.Input(shape=model_image_size + (3,)),
            num_anchors=len(anchors) // 3,
            num_classes=len(class_names),
        )
        self._model.load_weights(os.path.expanduser(self.model_path))
        self._decoder: Decode = Decode(
            obj_threshold=conf_thresh,
            nms_threshold=nms_thresh,
            input_shape=model_image_size,
            _yolo=self._model,
            all_classes=class_names,
        )

    @property
    def model(self) -> keras.Model:
        # Read-only access to the underlying Keras model.
        return self._model

    @property
    def decoder(self) -> Decode:
        # Read-only access to the box decoder wrapping the model.
        return self._decoder

    def detect(self, image_b64: AnyStr, *, fmt: str = ".png") -> Dict:
        """Run coin detection on a base64-encoded image.

        :param image_b64: base64-encoded image in any cv2-readable format
        :param fmt: cv2 encoding extension for the returned annotated image
        :return: dict with ``img`` (base64 of the annotated image re-encoded
            with *fmt*) and ``count`` (Counter of detected coin labels)
        """
        image_bin: bytes = b64decode(image_b64)
        image = cv.imdecode(np.frombuffer(image_bin, np.uint8), cv.IMREAD_COLOR)
        # Fixed working resolution before inference -- assumes 1080x1080 is
        # what the decoder pipeline expects; TODO confirm.
        image = cv.resize(image, dsize=(1080, 1080), interpolation=cv.INTER_AREA)
        # TF 1.x: run the decode inside the same session/graph the weights
        # were loaded into (see module-level `session`).
        with session.as_default():
            with session.graph.as_default():
                detect_image, *_, classes = self._decoder.detect_image(image, True)
        is_success, buffer = cv.imencode(fmt, detect_image)
        return {
            "img": b64encode(buffer.tobytes()).decode(),
            "count": self.count(classes)
        }

    def count(self, classes: List[int]):
        """Count detections per class index, relabelling keys to coin names."""
        counter = Counter(classes)
        for key in tuple(counter.keys()):  # rename the dictionary keys in place
            counter[self.jpy_classes[key]] = counter.pop(key)
        return counter
# Module-level singleton: construction loads the model weights, so do it once
# at import time and reuse.
detect_japan_obj = DetectJapan()
"""Compute per-channel mean and std (scaled to [0, 1]) over the external
training images listed in train_ext1.csv."""
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm

train = pd.read_csv('Christof/assets/train_ext1.csv')
# test = pd.read_csv('Christof/assets/sample_submission.csv')
path_to_train = 'Christof/assets/ext_tomomi/'
# path_to_test = 'Christof/assets/test_rgby_512/'

# Image filenames are derived from the Id column (extension swapped to .png).
fns = [path_to_train + f[:-4] + '.png' for f in train['Id']]

# Accumulate per-image channel statistics, then average over all images.
# NOTE(review): averaging per-image stds approximates the global std; it is
# not the exact pooled standard deviation.
channel_avg = np.zeros(3)
channel_std = np.zeros(3)
for i, fn in tqdm(enumerate(fns)):
    image = cv2.imread(fn, cv2.IMREAD_UNCHANGED)
    channel_avg += np.mean(np.reshape(image, (-1, 3)), axis=0)
    channel_std += np.std(np.reshape(image, (-1, 3)), axis=0)
channel_avg /= len(fns)
channel_std /= len(fns)

# Report in [0, 1] range (8-bit images).
print(channel_avg / 255)
print(channel_std / 255)
# repo: AaronFriel/pulumi-google-native
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['TestMatrixArgs', 'TestMatrix']
@pulumi.input_type
class TestMatrixArgs:
    # NOTE: generated by the Pulumi SDK Generator (see file header); keep
    # hand-edits to comments/docstrings only.
    def __init__(__self__, *,
                 environment_matrix: pulumi.Input['EnvironmentMatrixArgs'],
                 result_storage: pulumi.Input['ResultStorageArgs'],
                 test_specification: pulumi.Input['TestSpecificationArgs'],
                 client_info: Optional[pulumi.Input['ClientInfoArgs']] = None,
                 fail_fast: Optional[pulumi.Input[bool]] = None,
                 flaky_test_attempts: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a TestMatrix resource.

        :param pulumi.Input['EnvironmentMatrixArgs'] environment_matrix: The devices the tests are being executed on.
        :param pulumi.Input['ResultStorageArgs'] result_storage: Where the results for the matrix are written.
        :param pulumi.Input['TestSpecificationArgs'] test_specification: How to run the test.
        :param pulumi.Input['ClientInfoArgs'] client_info: Information about the client which invoked the test.
        :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
        :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
        :param pulumi.Input[str] project: The cloud project that owns the test matrix.
        :param pulumi.Input[str] request_id: Forwarded to the underlying API; presumably an idempotency token -- not documented upstream, TODO confirm.
        """
        pulumi.set(__self__, "environment_matrix", environment_matrix)
        pulumi.set(__self__, "result_storage", result_storage)
        pulumi.set(__self__, "test_specification", test_specification)
        # Optional arguments are only recorded when explicitly provided.
        if client_info is not None:
            pulumi.set(__self__, "client_info", client_info)
        if fail_fast is not None:
            pulumi.set(__self__, "fail_fast", fail_fast)
        if flaky_test_attempts is not None:
            pulumi.set(__self__, "flaky_test_attempts", flaky_test_attempts)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if request_id is not None:
            pulumi.set(__self__, "request_id", request_id)

    @property
    @pulumi.getter(name="environmentMatrix")
    def environment_matrix(self) -> pulumi.Input['EnvironmentMatrixArgs']:
        """
        The devices the tests are being executed on.
        """
        return pulumi.get(self, "environment_matrix")

    @environment_matrix.setter
    def environment_matrix(self, value: pulumi.Input['EnvironmentMatrixArgs']):
        pulumi.set(self, "environment_matrix", value)

    @property
    @pulumi.getter(name="resultStorage")
    def result_storage(self) -> pulumi.Input['ResultStorageArgs']:
        """
        Where the results for the matrix are written.
        """
        return pulumi.get(self, "result_storage")

    @result_storage.setter
    def result_storage(self, value: pulumi.Input['ResultStorageArgs']):
        pulumi.set(self, "result_storage", value)

    @property
    @pulumi.getter(name="testSpecification")
    def test_specification(self) -> pulumi.Input['TestSpecificationArgs']:
        """
        How to run the test.
        """
        return pulumi.get(self, "test_specification")

    @test_specification.setter
    def test_specification(self, value: pulumi.Input['TestSpecificationArgs']):
        pulumi.set(self, "test_specification", value)

    @property
    @pulumi.getter(name="clientInfo")
    def client_info(self) -> Optional[pulumi.Input['ClientInfoArgs']]:
        """
        Information about the client which invoked the test.
        """
        return pulumi.get(self, "client_info")

    @client_info.setter
    def client_info(self, value: Optional[pulumi.Input['ClientInfoArgs']]):
        pulumi.set(self, "client_info", value)

    @property
    @pulumi.getter(name="failFast")
    def fail_fast(self) -> Optional[pulumi.Input[bool]]:
        """
        If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
        """
        return pulumi.get(self, "fail_fast")

    @fail_fast.setter
    def fail_fast(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "fail_fast", value)

    @property
    @pulumi.getter(name="flakyTestAttempts")
    def flaky_test_attempts(self) -> Optional[pulumi.Input[int]]:
        """
        The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
        """
        return pulumi.get(self, "flaky_test_attempts")

    @flaky_test_attempts.setter
    def flaky_test_attempts(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "flaky_test_attempts", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The cloud project that owns the test matrix.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="requestId")
    def request_id(self) -> Optional[pulumi.Input[str]]:
        """
        Forwarded to the underlying API; presumably an idempotency token -- not documented upstream, TODO confirm.
        """
        return pulumi.get(self, "request_id")

    @request_id.setter
    def request_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "request_id", value)
class TestMatrix(pulumi.CustomResource):
    # NOTE: generated by the Pulumi SDK Generator (see file header); keep
    # hand-edits to comments/docstrings only.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
                 environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
                 fail_fast: Optional[pulumi.Input[bool]] = None,
                 flaky_test_attempts: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 request_id: Optional[pulumi.Input[str]] = None,
                 result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
                 test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
                 __props__=None):
        """
        Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
        Auto-naming is currently not supported for this resource.
        Note - this resource's API doesn't support deletion. When deleted, the resource will persist
        on Google Cloud even though it will be deleted from Pulumi state.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['ClientInfoArgs']] client_info: Information about the client which invoked the test.
        :param pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']] environment_matrix: The devices the tests are being executed on.
        :param pulumi.Input[bool] fail_fast: If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
        :param pulumi.Input[int] flaky_test_attempts: The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
        :param pulumi.Input[str] project: The cloud project that owns the test matrix.
        :param pulumi.Input[pulumi.InputType['ResultStorageArgs']] result_storage: Where the results for the matrix are written.
        :param pulumi.Input[pulumi.InputType['TestSpecificationArgs']] test_specification: How to run the test.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TestMatrixArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Creates and runs a matrix of tests according to the given specifications. Unsupported environments will be returned in the state UNSUPPORTED. A test matrix is limited to use at most 2000 devices in parallel. May return any of the following canonical error codes: - PERMISSION_DENIED - if the user is not authorized to write to project - INVALID_ARGUMENT - if the request is malformed or if the matrix tries to use too many simultaneous devices.
        Auto-naming is currently not supported for this resource.
        Note - this resource's API doesn't support deletion. When deleted, the resource will persist
        on Google Cloud even though it will be deleted from Pulumi state.

        :param str resource_name: The name of the resource.
        :param TestMatrixArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-args) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(TestMatrixArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       client_info: Optional[pulumi.Input[pulumi.InputType['ClientInfoArgs']]] = None,
                       environment_matrix: Optional[pulumi.Input[pulumi.InputType['EnvironmentMatrixArgs']]] = None,
                       fail_fast: Optional[pulumi.Input[bool]] = None,
                       flaky_test_attempts: Optional[pulumi.Input[int]] = None,
                       project: Optional[pulumi.Input[str]] = None,
                       request_id: Optional[pulumi.Input[str]] = None,
                       result_storage: Optional[pulumi.Input[pulumi.InputType['ResultStorageArgs']]] = None,
                       test_specification: Optional[pulumi.Input[pulumi.InputType['TestSpecificationArgs']]] = None,
                       __props__=None):
        # Shared constructor body: validates options, populates input
        # properties and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TestMatrixArgs.__new__(TestMatrixArgs)

            __props__.__dict__["client_info"] = client_info
            if environment_matrix is None and not opts.urn:
                raise TypeError("Missing required property 'environment_matrix'")
            __props__.__dict__["environment_matrix"] = environment_matrix
            __props__.__dict__["fail_fast"] = fail_fast
            __props__.__dict__["flaky_test_attempts"] = flaky_test_attempts
            __props__.__dict__["project"] = project
            __props__.__dict__["request_id"] = request_id
            if result_storage is None and not opts.urn:
                raise TypeError("Missing required property 'result_storage'")
            __props__.__dict__["result_storage"] = result_storage
            if test_specification is None and not opts.urn:
                raise TypeError("Missing required property 'test_specification'")
            __props__.__dict__["test_specification"] = test_specification
            # Output-only properties start as None; the provider fills them in.
            __props__.__dict__["invalid_matrix_details"] = None
            __props__.__dict__["outcome_summary"] = None
            __props__.__dict__["state"] = None
            __props__.__dict__["test_executions"] = None
            __props__.__dict__["test_matrix_id"] = None
            __props__.__dict__["timestamp"] = None
        super(TestMatrix, __self__).__init__(
            'google-native:testing/v1:TestMatrix',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'TestMatrix':
        """
        Get an existing TestMatrix resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = TestMatrixArgs.__new__(TestMatrixArgs)

        # All properties start as None; the engine resolves them from the
        # provider's recorded state for the given id.
        __props__.__dict__["client_info"] = None
        __props__.__dict__["environment_matrix"] = None
        __props__.__dict__["fail_fast"] = None
        __props__.__dict__["flaky_test_attempts"] = None
        __props__.__dict__["invalid_matrix_details"] = None
        __props__.__dict__["outcome_summary"] = None
        __props__.__dict__["project"] = None
        __props__.__dict__["result_storage"] = None
        __props__.__dict__["state"] = None
        __props__.__dict__["test_executions"] = None
        __props__.__dict__["test_matrix_id"] = None
        __props__.__dict__["test_specification"] = None
        __props__.__dict__["timestamp"] = None
        return TestMatrix(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="clientInfo")
    def client_info(self) -> pulumi.Output['outputs.ClientInfoResponse']:
        """
        Information about the client which invoked the test.
        """
        return pulumi.get(self, "client_info")

    @property
    @pulumi.getter(name="environmentMatrix")
    def environment_matrix(self) -> pulumi.Output['outputs.EnvironmentMatrixResponse']:
        """
        The devices the tests are being executed on.
        """
        return pulumi.get(self, "environment_matrix")

    @property
    @pulumi.getter(name="failFast")
    def fail_fast(self) -> pulumi.Output[bool]:
        """
        If true, only a single attempt at most will be made to run each execution/shard in the matrix. Flaky test attempts are not affected. Normally, 2 or more attempts are made if a potential infrastructure issue is detected. This feature is for latency sensitive workloads. The incidence of execution failures may be significantly greater for fail-fast matrices and support is more limited because of that expectation.
        """
        return pulumi.get(self, "fail_fast")

    @property
    @pulumi.getter(name="flakyTestAttempts")
    def flaky_test_attempts(self) -> pulumi.Output[int]:
        """
        The number of times a TestExecution should be re-attempted if one or more of its test cases fail for any reason. The maximum number of reruns allowed is 10. Default is 0, which implies no reruns.
        """
        return pulumi.get(self, "flaky_test_attempts")

    @property
    @pulumi.getter(name="invalidMatrixDetails")
    def invalid_matrix_details(self) -> pulumi.Output[str]:
        """
        Describes why the matrix is considered invalid. Only useful for matrices in the INVALID state.
        """
        return pulumi.get(self, "invalid_matrix_details")

    @property
    @pulumi.getter(name="outcomeSummary")
    def outcome_summary(self) -> pulumi.Output[str]:
        """
        Output Only. The overall outcome of the test. Only set when the test matrix state is FINISHED.
        """
        return pulumi.get(self, "outcome_summary")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The cloud project that owns the test matrix.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter(name="resultStorage")
    def result_storage(self) -> pulumi.Output['outputs.ResultStorageResponse']:
        """
        Where the results for the matrix are written.
        """
        return pulumi.get(self, "result_storage")

    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        Indicates the current progress of the test matrix.
        """
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="testExecutions")
    def test_executions(self) -> pulumi.Output[Sequence['outputs.TestExecutionResponse']]:
        """
        The list of test executions that the service creates for this matrix.
        """
        return pulumi.get(self, "test_executions")

    @property
    @pulumi.getter(name="testMatrixId")
    def test_matrix_id(self) -> pulumi.Output[str]:
        """
        Unique id set by the service.
        """
        return pulumi.get(self, "test_matrix_id")

    @property
    @pulumi.getter(name="testSpecification")
    def test_specification(self) -> pulumi.Output['outputs.TestSpecificationResponse']:
        """
        How to run the test.
        """
        return pulumi.get(self, "test_specification")

    @property
    @pulumi.getter
    def timestamp(self) -> pulumi.Output[str]:
        """
        The time this test matrix was initially created.
        """
        return pulumi.get(self, "timestamp")
# -*- coding: utf-8 -*-
from ddtrace.compat import PY2
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.flask.patch import flask_version
from ddtrace.ext import http
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID
from flask import abort
from . import BaseFlaskTestCase
from ...utils import assert_span_http_status_code
# Fully-qualified name of the base Exception class differs between
# Python 2 ('exceptions') and Python 3 ('builtins').
base_exception_name = 'exceptions.Exception' if PY2 else 'builtins.Exception'
class FlaskRequestTestCase(BaseFlaskTestCase):
def test_request(self):
"""
When making a request
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.index',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET /')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
assert_span_http_status_code(req_span, 200)
assert http.QUERY_STRING not in req_span.meta
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
self.assertEqual(handler_span.resource, '/')
self.assertEqual(req_span.error, 0)
def test_request_query_string_trace(self):
"""Make sure when making a request that we create the expected spans and capture the query string."""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_http_config('flask', dict(trace_query_string=True)):
self.client.get('/?foo=bar&baz=biz')
spans = self.get_spans()
# Request tags
assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz'
def test_analytics_global_on_integration_default(self):
"""
When making a request
When an integration trace search is not event sample rate is not set and globally trace search is enabled
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=True)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 1.0,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_on_integration_on(self):
"""
When making a request
When an integration trace search is enabled and sample rate is set and globally trace search is enabled
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=True)):
with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 0.5,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_off_integration_default(self):
"""
When making a request
When an integration trace search is not set and sample rate is set and globally trace search is disabled
We expect the root span to not include tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=False)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_off_integration_on(self):
"""
When making a request
When an integration trace search is enabled and sample rate is set and globally trace search is disabled
We expect the root span to have the appropriate tag
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
with self.override_global_config(dict(analytics_enabled=False)):
with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
root = self.get_root_span()
root.assert_matches(
name='flask.request',
metrics={
ANALYTICS_SAMPLE_RATE_KEY: 0.5,
},
)
for span in self.spans:
if span == root:
continue
self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_distributed_tracing(self):
"""
When making a request
When distributed tracing headers are present
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
# Default: distributed tracing enabled
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertEqual(span.trace_id, 678910)
self.assertEqual(span.parent_id, 12345)
# Explicitly enable distributed tracing
with self.override_config('flask', dict(distributed_tracing_enabled=True)):
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertEqual(span.trace_id, 678910)
self.assertEqual(span.parent_id, 12345)
# With distributed tracing disabled
with self.override_config('flask', dict(distributed_tracing_enabled=False)):
res = self.client.get('/', headers={
HTTP_HEADER_PARENT_ID: '12345',
HTTP_HEADER_TRACE_ID: '678910',
})
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
# Assert parent and trace id are properly set on the root span
span = self.find_span_by_name(self.get_spans(), 'flask.request')
self.assertNotEqual(span.trace_id, 678910)
self.assertIsNone(span.parent_id)
def test_request_query_string(self):
"""
When making a request
When the request contains a query string
We create the expected spans
"""
@self.app.route('/')
def index():
return 'Hello Flask', 200
res = self.client.get('/', query_string=dict(hello='flask'))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'Hello Flask')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.index',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
# Note: contains no query string
self.assertEqual(req_span.resource, 'GET /')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
# Note: contains no query string
self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
# Note: contains no query string
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
assert_span_http_status_code(req_span, 200)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
# Note: contains no query string
self.assertEqual(handler_span.resource, '/')
self.assertEqual(req_span.error, 0)
def test_request_unicode(self):
"""
When making a request
When the url contains unicode
We create the expected spans
"""
@self.app.route(u'/üŋïĉóđē')
def unicode():
return 'üŋïĉóđē', 200
res = self.client.get(u'/üŋïĉóđē')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.data, b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93')
spans = self.get_spans()
self.assertEqual(len(spans), 8)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'tests.contrib.flask.test_request.unicode',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode')
self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē')
assert_span_http_status_code(req_span, 200)
# Handler span
handler_span = spans[4]
self.assertEqual(handler_span.service, 'flask')
self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode')
self.assertEqual(handler_span.resource, u'/üŋïĉóđē')
self.assertEqual(req_span.error, 0)
def test_request_404(self):
"""
When making a request
When the requested endpoint was not found
We create the expected spans
"""
res = self.client.get('/not-found')
self.assertEqual(res.status_code, 404)
spans = self.get_spans()
self.assertEqual(len(spans), 9)
# Assert the order of the spans created
self.assertListEqual(
[
'flask.request',
'flask.try_trigger_before_first_request_functions',
'flask.preprocess_request',
'flask.dispatch_request',
'flask.handle_user_exception',
'flask.handle_http_exception',
'flask.process_response',
'flask.do_teardown_request',
'flask.do_teardown_appcontext',
],
[s.name for s in spans],
)
# Assert span services
for span in spans:
self.assertEqual(span.service, 'flask')
# Root request span
req_span = spans[0]
self.assertEqual(req_span.service, 'flask')
self.assertEqual(req_span.name, 'flask.request')
self.assertEqual(req_span.resource, 'GET 404')
self.assertEqual(req_span.span_type, 'web')
self.assertEqual(req_span.error, 0)
self.assertIsNone(req_span.parent_id)
# Request tags
self.assertEqual(req_span.get_tag('http.method'), 'GET')
self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
assert_span_http_status_code(req_span, 404)
# Dispatch span
dispatch_span = spans[3]
self.assertEqual(dispatch_span.service, 'flask')
self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
self.assertEqual(dispatch_span.error, 1)
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
def test_request_abort_404(self):
    """
    When making a request
        When the requested endpoint calls `abort(404)`
            We create the expected spans
    """
    @self.app.route('/not-found')
    def not_found():
        abort(404)

    res = self.client.get('/not-found')
    self.assertEqual(res.status_code, 404)

    spans = self.get_spans()
    # Unlike an unmatched route, the view function runs here, so a handler
    # span is present: 10 spans total.
    self.assertEqual(len(spans), 10)

    # Assert the order of the spans created
    self.assertListEqual(
        [
            'flask.request',
            'flask.try_trigger_before_first_request_functions',
            'flask.preprocess_request',
            'flask.dispatch_request',
            'tests.contrib.flask.test_request.not_found',
            'flask.handle_user_exception',
            'flask.handle_http_exception',
            'flask.process_response',
            'flask.do_teardown_request',
            'flask.do_teardown_appcontext',
        ],
        [s.name for s in spans],
    )

    # Assert span services
    for span in spans:
        self.assertEqual(span.service, 'flask')

    # Root request span
    req_span = spans[0]
    self.assertEqual(req_span.service, 'flask')
    self.assertEqual(req_span.name, 'flask.request')
    # The route matched, so the resource is the url rule rather than "404".
    self.assertEqual(req_span.resource, 'GET /not-found')
    self.assertEqual(req_span.span_type, 'web')
    self.assertEqual(req_span.error, 0)
    self.assertIsNone(req_span.parent_id)

    # Request tags
    self.assertEqual(req_span.get_tag('http.method'), 'GET')
    self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
    assert_span_http_status_code(req_span, 404)
    self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found')
    self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found')

    # Dispatch span
    dispatch_span = spans[3]
    self.assertEqual(dispatch_span.service, 'flask')
    self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.error, 1)
    self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
    self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')

    # Handler span
    handler_span = spans[4]
    self.assertEqual(handler_span.service, 'flask')
    self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found')
    self.assertEqual(handler_span.resource, '/not-found')
    # abort(404) propagates out of the view, so the handler span errors too.
    self.assertEqual(handler_span.error, 1)
    self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))
    self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
def test_request_500(self):
    """
    When making a request
        When the requested endpoint raises an exception
            We create the expected spans
    """
    @self.app.route('/500')
    def fivehundred():
        raise Exception('500 error')

    res = self.client.get('/500')
    self.assertEqual(res.status_code, 500)

    spans = self.get_spans()
    # An unhandled exception skips process_response: 9 spans total.
    self.assertEqual(len(spans), 9)

    # Assert the order of the spans created
    self.assertListEqual(
        [
            'flask.request',
            'flask.try_trigger_before_first_request_functions',
            'flask.preprocess_request',
            'flask.dispatch_request',
            'tests.contrib.flask.test_request.fivehundred',
            'flask.handle_user_exception',
            'flask.handle_exception',
            'flask.do_teardown_request',
            'flask.do_teardown_appcontext',
        ],
        [s.name for s in spans],
    )

    # Assert span services
    for span in spans:
        self.assertEqual(span.service, 'flask')

    # Root request span
    req_span = spans[0]
    self.assertEqual(req_span.service, 'flask')
    self.assertEqual(req_span.name, 'flask.request')
    self.assertEqual(req_span.resource, 'GET /500')
    self.assertEqual(req_span.span_type, 'web')
    # Unlike 4xx responses, a 500 marks the root span as errored.
    self.assertEqual(req_span.error, 1)
    self.assertIsNone(req_span.parent_id)

    # Request tags
    self.assertEqual(req_span.get_tag('http.method'), 'GET')
    self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
    assert_span_http_status_code(req_span, 500)
    self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
    self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')

    # Dispatch span
    dispatch_span = spans[3]
    self.assertEqual(dispatch_span.service, 'flask')
    self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.error, 1)
    self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)

    # Handler span
    handler_span = spans[4]
    self.assertEqual(handler_span.service, 'flask')
    self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
    self.assertEqual(handler_span.resource, '/500')
    self.assertEqual(handler_span.error, 1)
    self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)

    # User exception span
    user_ex_span = spans[5]
    self.assertEqual(user_ex_span.service, 'flask')
    self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
    self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
    # A non-HTTP exception is not handled, so this span errors as well.
    self.assertEqual(user_ex_span.error, 1)
    self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)
def test_request_501(self):
    """
    When making a request
        When the requested endpoint calls `abort(501)`
            We create the expected spans
    """
    @self.app.route('/501')
    def fivehundredone():
        abort(501)

    res = self.client.get('/501')
    self.assertEqual(res.status_code, 501)

    spans = self.get_spans()
    self.assertEqual(len(spans), 10)

    # Assert the order of the spans created
    self.assertListEqual(
        [
            'flask.request',
            'flask.try_trigger_before_first_request_functions',
            'flask.preprocess_request',
            'flask.dispatch_request',
            'tests.contrib.flask.test_request.fivehundredone',
            'flask.handle_user_exception',
            'flask.handle_http_exception',
            'flask.process_response',
            'flask.do_teardown_request',
            'flask.do_teardown_appcontext',
        ],
        [s.name for s in spans],
    )

    # Assert span services
    for span in spans:
        self.assertEqual(span.service, 'flask')

    # Root request span
    req_span = spans[0]
    self.assertEqual(req_span.service, 'flask')
    self.assertEqual(req_span.name, 'flask.request')
    self.assertEqual(req_span.resource, 'GET /501')
    self.assertEqual(req_span.span_type, 'web')
    # 5xx responses mark the root span as errored (contrast with 404).
    self.assertEqual(req_span.error, 1)
    self.assertIsNone(req_span.parent_id)

    # Request tags
    self.assertEqual(req_span.get_tag('http.method'), 'GET')
    self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501')
    assert_span_http_status_code(req_span, 501)
    self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone')
    self.assertEqual(req_span.get_tag('flask.url_rule'), '/501')

    # Dispatch span
    dispatch_span = spans[3]
    self.assertEqual(dispatch_span.service, 'flask')
    self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.error, 1)
    self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented'))
    self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')

    # Handler span
    handler_span = spans[4]
    self.assertEqual(handler_span.service, 'flask')
    self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone')
    self.assertEqual(handler_span.resource, '/501')
    self.assertEqual(handler_span.error, 1)
    self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented'))
    self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')

    # User exception span
    user_ex_span = spans[5]
    self.assertEqual(user_ex_span.service, 'flask')
    self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
    self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
    # An HTTPException counts as handled, so no error on this span.
    self.assertEqual(user_ex_span.error, 0)
def test_request_error_handler(self):
    """
    When making a request
        When the requested endpoint raises an exception
            We create the expected spans
    """
    @self.app.errorhandler(500)
    def error_handler(e):
        return 'Whoops', 500

    @self.app.route('/500')
    def fivehundred():
        raise Exception('500 error')

    res = self.client.get('/500')
    self.assertEqual(res.status_code, 500)
    self.assertEqual(res.data, b'Whoops')

    spans = self.get_spans()

    # Flask >= 0.12 runs process_response for error-handler responses,
    # producing one extra span.
    if flask_version >= (0, 12, 0):
        self.assertEqual(len(spans), 11)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.fivehundred',
                'flask.handle_user_exception',
                'flask.handle_exception',
                'tests.contrib.flask.test_request.error_handler',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )
    else:
        self.assertEqual(len(spans), 10)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.fivehundred',
                'flask.handle_user_exception',
                'flask.handle_exception',
                'tests.contrib.flask.test_request.error_handler',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

    # Assert span services
    for span in spans:
        self.assertEqual(span.service, 'flask')

    # Root request span
    req_span = spans[0]
    self.assertEqual(req_span.service, 'flask')
    self.assertEqual(req_span.name, 'flask.request')
    self.assertEqual(req_span.resource, 'GET /500')
    self.assertEqual(req_span.span_type, 'web')
    # The custom error handler does not clear the error flag on the root span.
    self.assertEqual(req_span.error, 1)
    self.assertIsNone(req_span.parent_id)

    # Request tags
    self.assertEqual(req_span.get_tag('http.method'), 'GET')
    self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
    assert_span_http_status_code(req_span, 500)
    self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
    self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')

    # Dispatch span
    dispatch_span = spans[3]
    self.assertEqual(dispatch_span.service, 'flask')
    self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
    self.assertEqual(dispatch_span.error, 1)
    self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)

    # Handler span
    handler_span = spans[4]
    self.assertEqual(handler_span.service, 'flask')
    self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
    self.assertEqual(handler_span.resource, '/500')
    self.assertEqual(handler_span.error, 1)
    self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)

    # User exception span
    user_ex_span = spans[5]
    self.assertEqual(user_ex_span.service, 'flask')
    self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
    self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
    self.assertEqual(user_ex_span.error, 1)
    self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error'))
    self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))
    self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)
| StarcoderdataPython |
3358134 | <gh_stars>0
class CommandScaffold:
    """Base scaffold shared by netnir CLI commands.

    Wires up the nornir inventory, registers connection plugins and exposes
    the argument-parser options common to all commands.
    """

    def __init__(self, args):
        """Store the parsed CLI arguments and initialize nornir state.

        :params args: type object
        """
        from netnir.constants import NR
        from netnir.core.connection import register_connections
        import logging

        self.args = args
        self.logging = logging.getLogger("nornir")
        self.nr = NR
        # Connection plugins must be registered before any task can run.
        register_connections()

    @staticmethod
    def parser(parser):
        """Attach the common CLI options to *parser*.

        :params parser: type object
        """
        from netnir.helpers.common.args import (
            filter_host,
            filter_hosts,
            filter_group,
            num_workers,
            make_changes,
            verbose,
        )

        # Register each shared option in the conventional order.
        for add_option in (
            filter_host,
            filter_hosts,
            filter_group,
            num_workers,
            make_changes,
            verbose,
        ):
            add_option(parser)

    def run(self):
        """Placeholder entry point; subclasses override with real work."""
        return "things to do"

    def _verbose(self):
        """Apply the requested log level and report the console settings."""
        self.logging.setLevel(self.args.verbose)
        # Only DEBUG-level runs mirror output to the console.
        to_console = self.args.verbose == "DEBUG"
        return {"level": self.logging.level, "to_console": to_console}

    def _inventory(self):
        """Filter the nornir inventory from the host/filter/group CLI options.

        :returns: filtered nornir inventory object
        """
        from netnir.helpers import inventory_filter, filter_type

        selection = filter_type(
            host=self.args.host, filter=self.args.filter, group=self.args.group
        )
        self.nr = inventory_filter(
            nr=self.nr,
            device_filter=selection["data"],
            type=selection["type"],
        )
        return self.nr
| StarcoderdataPython |
1637560 | import torch
import Corr2D_ext
def int_2_tensor(intList):
    """Build an int32 tensor from *intList* with autograd disabled."""
    converted = torch.tensor(intList, dtype=torch.int, requires_grad=False)
    return converted
def tensor_2_int(t):
    """Validate that *t* is a 1-D, length-5 int32 tensor and return it as a list."""
    assert t.dim() == 1
    assert t.shape[0] == 5
    assert t.dtype == torch.int
    return t.tolist()
class Corr2DF(torch.autograd.Function):
    """Autograd Function wrapping the native Corr2D correlation kernels.

    forward/backward delegate to the compiled Corr2D_ext extension.
    """
    @staticmethod
    def forward(ctx, x0, x1, maxDisplacement, \
        padding=1, kernelSize=3, strideK=1, strideD=1):
        # Stash the hyper-parameters on ctx so backward() can replay them.
        ctx.maxDisplacement = maxDisplacement
        ctx.padding = padding
        ctx.kernelSize = kernelSize
        ctx.strideK = strideK
        ctx.strideD = strideD
        out = Corr2D_ext.forward(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD)
        ctx.save_for_backward(x0, x1)
        # The extension returns a list; the correlation volume is the first entry.
        return out[0]
    @staticmethod
    def backward(ctx, grad):
        x0, x1 = ctx.saved_tensors
        output = Corr2D_ext.backward( grad, x0, x1,
            ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD )
        # Gradients for x0 and x1; the non-tensor arguments receive None.
        return output[0], output[1], None, None, None, None, None
class Corr2DM(torch.nn.Module):
    """Module wrapper around the plain 2-D correlation (see Corr2DF)."""

    def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1):
        super(Corr2DM, self).__init__()
        # All sizing hyper-parameters must be positive and the kernel odd.
        for value in (maxDisplacement, kernelSize, strideK, strideD):
            assert value > 0
        assert kernelSize % 2 == 1
        self.maxDisplacement = maxDisplacement
        self.padding = padding
        self.kernelSize = kernelSize
        self.strideK = strideK
        self.strideD = strideD

    def forward(self, x0, x1):
        return Corr2DF.apply(
            x0, x1, self.maxDisplacement,
            self.padding, self.kernelSize, self.strideK, self.strideD,
        )
class Corr2DZNF(torch.autograd.Function):
    """Autograd Function for the zero-normalized (ZN) correlation variant."""
    @staticmethod
    def forward(ctx, x0, x1, maxDisplacement, \
        padding=1, kernelSize=3, strideK=1, strideD=1):
        # Keep the hyper-parameters for backward().
        ctx.maxDisplacement = maxDisplacement
        ctx.padding = padding
        ctx.kernelSize = kernelSize
        ctx.strideK = strideK
        ctx.strideD = strideD
        out = Corr2D_ext.forward_zn(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD)
        # Besides the correlation volume, forward_zn returns two extra
        # tensors (presumably the per-patch normalization terms) which the
        # ZN backward pass needs, so all three are saved.
        ctx.save_for_backward(x0, x1, out[0], out[1], out[2])
        return out[0]
    @staticmethod
    def backward(ctx, grad):
        x0, x1, C, L0, L1 = ctx.saved_tensors
        output = Corr2D_ext.backward_zn( grad, x0, x1, C, L0, L1,
            ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD )
        # Gradients for x0 and x1; the non-tensor arguments receive None.
        return output[0], output[1], None, None, None, None, None
class Corr2DZNM(torch.nn.Module):
    """Module wrapper around the zero-normalized correlation (see Corr2DZNF)."""

    def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1):
        super(Corr2DZNM, self).__init__()
        # All sizing hyper-parameters must be positive and the kernel odd.
        for value in (maxDisplacement, kernelSize, strideK, strideD):
            assert value > 0
        assert kernelSize % 2 == 1
        self.maxDisplacement = maxDisplacement
        self.padding = padding
        self.kernelSize = kernelSize
        self.strideK = strideK
        self.strideD = strideD

    def forward(self, x0, x1):
        return Corr2DZNF.apply(
            x0, x1, self.maxDisplacement,
            self.padding, self.kernelSize, self.strideK, self.strideD,
        )
| StarcoderdataPython |
3359589 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 12:52:44 2019
@author: Excalibur
"""
import numpy as np
import numpy.linalg as LA
from numpy import random
import matplotlib.pyplot as plt
from parameterDefaults import defaults
from jacobianSalt import computeJac
from parameterRanges import ranges
from tqdm import tqdm
# Perturbation step and example parameter (used only by the commented-out
# finite-difference sensitivity block below).
dp = 0.01
param = 'betaA'

# Baseline: Jacobian and eigenvalues at the default parameter set.
J1 = computeJac(defaults)
w1, v = LA.eig(J1)

#perturb = defaults
#perturb[param] = defaults[param] + dp
#J2 = computeJac(perturb)
#w2, v = LA.eig(J2)
#dw2 = w2-w1
#fracPar = defaults[param]/dp
#eigSens = [fracPar*dwi for dwi in dw2]

# Parameter groups: mangrove-, peat- and salinity-related parameters.
mangs = {'propM', 'propS', 'growM','growS', 'drownHyd','drownM',
         'stressM', 'stressS', 'littM','propPrecip','growPrecip',
         'evaptM','precipEvapt'}
peats = {'accSed', 'sedHyd', 'accM','retLitt', 'retHyd', 'volGrow',
         'volP','volPrecip', 'eroM', 'subsMort', 'subsHyd', 'subsP',
         'hydP','volHyd'}
salts = {'concEvapt','concHyd', 'concS', 'decrS','decrPrecip','evaptS'}
elasPars = mangs.union(peats).union(salts)

# Monte-Carlo sweep: for each parameter, sweep its value over its range while
# all other parameters are drawn uniformly at random, recording the largest
# real part of the Jacobian eigenvalues for each draw.
nRuns = 1000
elasEigs = {par:np.zeros(nRuns) for par in elasPars}
for par in elasPars:
    r0 = ranges[par][0]
    r1 = ranges[par][1]
    parRange = np.linspace(r0,r1,nRuns)
    # NOTE(review): parSet aliases `defaults` (no copy), so `defaults` is
    # mutated by the inner loop — confirm this is intended.
    parSet = defaults
    for j in range(nRuns):
        parVal = parRange[j]
        for p in elasPars:
            parSet[p] = random.uniform(ranges[p][0], ranges[p][1])
        parSet[par] = parVal
        J = computeJac(parSet)
        w, v = LA.eig(J)
        maxW = np.max(np.real(w))
        elasEigs[par][j] = maxW

# One figure per parameter group.
p1 = plt.figure()
for par in mangs:
    plt.plot(range(nRuns), elasEigs[par], label=par, marker='+')
plt.legend(loc='best')

p2 = plt.figure()
for par in peats:
    plt.plot(range(nRuns), elasEigs[par], label=par, marker='+')
plt.legend(loc='best')

p3 = plt.figure()
for par in salts:
    plt.plot(range(nRuns), elasEigs[par], label=par, marker='+')
plt.legend(loc='best') | StarcoderdataPython |
3380107 | # SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from typing import Dict, Tuple, Any
from pandas import DataFrame, Series, isna
from edfi_google_classroom_extractor.mapping.constants import SOURCE_SYSTEM
# States returned by API
CREATED_STATE = "CREATED"
NEW_STATE = "NEW"
RECLAIMED_STATE = "RECLAIMED_BY_STUDENT"
TURNED_IN_STATE = "TURNED_IN"
RETURNED_STATE = "RETURNED"

# States derived from API "late" flag
LATE_STATE = "LATE"
MISSING_STATE = "MISSING"


def derive_state(submission_row: Series) -> str:
    """
    Takes a Pandas row of API assign submission data and returns the submission
    state for that row based on the API provided state and late flag.

    Parameters
    ----------
    submission_row: Series
        is a row of assignment submission data

    Returns
    -------
    str
        Submission state for the row of data
    """
    api_state: str = submission_row["state"]

    # No "late" flag at all means the API state stands as-is.
    if "late" not in submission_row:
        return api_state

    late = submission_row["late"]
    if isna(late):
        return api_state

    # Normalize the "late" flag. It may arrive as a native bool, a string
    # such as "true"/"True", or another truthy/falsy value (e.g. a
    # numpy.bool_, which is NOT an instance of ``bool``). The previous
    # isinstance(..., bool) check let falsy non-bool values fall through
    # and be misclassified as late.
    if isinstance(late, str):
        is_late = late.lower() == "true"
    else:
        is_late = bool(late)
    if not is_late:
        return api_state

    # A late turn-in becomes LATE; a late assignment that was never turned
    # in (created/new/reclaimed) is MISSING; anything else keeps its state.
    if api_state == TURNED_IN_STATE:
        return LATE_STATE
    if api_state in (CREATED_STATE, NEW_STATE, RECLAIMED_STATE):
        return MISSING_STATE
    return api_state
def submissions_to_assignment_submissions_dfs(
    submissions_df: DataFrame,
) -> Dict[Tuple[str, str], DataFrame]:
    """
    Convert a Submission API DataFrame to a Dict of AssignmentSubmission UDM DataFrames
    grouped by source system section id/assignment id tuple pairs

    Parameters
    ----------
    submissions_df: DataFrame
        is a Submission API DataFrame

    Returns
    -------
    Dict[Tuple[str, str], DataFrame]
        LMS UDM AssignmentSubmission DataFrames grouped by
        source system section id/assignment id tuple pairs

    Notes
    -----
    AssignmentSubmission DataFrame columns are:
        AssignmentSourceSystemIdentifier: A unique numeric identifier assigned to the assignment
        EarnedPoints: The points earned for the submission
        Grade: The grade received for the submission
        SourceSystem: The system code or name providing the AssignmentSubmission data
        SourceSystemIdentifier: A unique number or alphanumeric code assigned to
            an AssignmentSubmission by the source system
        SubmissionStatus: The status of the submission in relation to the late acceptance policy
        SubmissionDateTime: The date and time of the assignment submission
        LMSUserSourceSystemIdentifier: A unique numeric identifier assigned to the user
        SourceCreateDate: Date this record was created in the LMS
        SourceLastModifiedDate: Date this record was last updated in the LMS

    NOTE(review): this function mutates `submissions_df` in place (adds
    derived columns) — confirm callers do not reuse the input afterwards.
    """
    assert "courseId" in submissions_df.columns
    assert "courseWorkId" in submissions_df.columns
    assert "id" in submissions_df.columns
    assert "userId" in submissions_df.columns
    assert "creationTime" in submissions_df.columns
    assert "updateTime" in submissions_df.columns
    assert "state" in submissions_df.columns
    assert "assignedGrade" in submissions_df.columns

    # Synthesize globally-unique identifiers from the API's compound keys.
    submissions_df["SourceSystemIdentifier"] = submissions_df[
        ["courseId", "courseWorkId", "id"]
    ].agg("-".join, axis=1)

    submissions_df["AssignmentSourceSystemIdentifier"] = submissions_df[
        ["courseId", "courseWorkId"]
    ].agg("-".join, axis=1)

    submissions_df["Grade"] = submissions_df["assignedGrade"]
    # The API has no explicit submission timestamp; use the update time of
    # rows that reached the TURNED_IN state.
    submissions_df["SubmissionDateTime"] = submissions_df.apply(
        lambda row: row["updateTime"] if row["state"] == TURNED_IN_STATE else "",
        axis=1,
    )

    submissions_df["SubmissionStatus"] = submissions_df.apply(derive_state, axis=1)

    assignment_submissions_df: DataFrame = submissions_df[
        [
            "SourceSystemIdentifier",
            "AssignmentSourceSystemIdentifier",
            "Grade",
            "SubmissionDateTime",
            "assignedGrade",
            "userId",
            "courseId",
            "creationTime",
            "updateTime",
            "SubmissionStatus",
            "CreateDate",
            "LastModifiedDate",
        ]
    ]

    # Map API column names onto the UDM column names.
    assignment_submissions_df = assignment_submissions_df.rename(
        columns={
            "assignedGrade": "EarnedPoints",
            "userId": "LMSUserSourceSystemIdentifier",
            "courseId": "SourceSystemSectionIdentifier",
            "creationTime": "SourceCreateDate",
            "updateTime": "SourceLastModifiedDate",
        }
    )

    assignment_submissions_df["SourceSystem"] = SOURCE_SYSTEM

    # group by section id and assignment id as a Dict of DataFrames
    result: Dict[
        Any, DataFrame
    ] = dict(  # Any because Pylance doesn't believe Tuple[str, str]
        tuple(
            assignment_submissions_df.groupby(
                [
                    "SourceSystemSectionIdentifier",
                    "AssignmentSourceSystemIdentifier",
                ]
            )
        )
    )

    # no longer need group by column
    for grouped_df in result.values():
        grouped_df.drop(columns=["SourceSystemSectionIdentifier"], inplace=True)

    return result
| StarcoderdataPython |
16965 | import torch
import pytest
# NOTE: also registers the KL divergence
from chmp.torch_utils import NormalModule, WeightsHS, fixed
def test_kl_divergence__gamma__log_normal():
    """Smoke test: the (LogNormal, Gamma) KL registration must be resolvable."""
    lognormal = torch.distributions.LogNormal(torch.zeros(2), torch.ones(2))
    gamma = torch.distributions.Gamma(torch.ones(2), torch.ones(2))
    torch.distributions.kl_divergence(lognormal, gamma)
def test__module_parameters():
    """Tensors wrapped in fixed(...) must not be exposed as learnable parameters."""
    module = NormalModule(loc=torch.zeros(1), scale=fixed(torch.ones(1)))
    assert {k for k, _ in module.named_parameters()} == {"loc"}

    module = NormalModule(loc=torch.zeros(1), scale=torch.ones(1))
    assert {k for k, _ in module.named_parameters()} == {"loc", "scale"}

    # Same check with loc passed positionally.
    module = NormalModule(torch.zeros(1), scale=fixed(torch.ones(1)))
    assert {k for k, _ in module.named_parameters()} == {"loc"}
module = NormalModule(torch.zeros(1), fixed(torch.ones(1)))
optimizer = torch.optim.Adam(module.parameters(), lr=0.1)
for _ in range(100):
optimizer.zero_grad()
x = module.rsample((20,))
loss = torch.mean((x - 2.0) ** 2.0)
loss.backward()
optimizer.step()
assert float(module.loc) != pytest.approx(0.0)
assert float(module.scale) == pytest.approx(1.0)
def test_weight_hs_api():
    """Smoke test the horseshoe-weights API: sample shape and scalar KL."""
    w = WeightsHS([10, 20, 30], tau_0=1e-5)
    assert w().shape == (10, 20, 30)
    assert w.kl_divergence().shape == ()
| StarcoderdataPython |
1672558 | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import numpy as np
import pytest
import scipy.stats as st
from aesara import tensor as at
from numpy.testing import assert_allclose
from scipy.special import logsumexp
import pymc as pm
from pymc import (
Dirichlet,
Exponential,
Gamma,
LogNormal,
Metropolis,
Mixture,
Model,
MvNormal,
Normal,
NormalMixture,
Poisson,
sample,
)
from pymc.aesaraf import floatX
from pymc.distributions.shape_utils import to_tuple
from pymc.tests.helpers import SeededTest
pytestmark = pytest.mark.xfail(reason="Mixture not refactored.")
# Generate data
def generate_normal_mixture_data(w, mu, sd, size=1000):
    """Draw *size* samples from a normal mixture with weights *w*, means *mu*
    and standard deviations *sd* (broadcast against each other)."""
    chosen = np.random.choice(w.size, size=size, p=w)
    mu, sd = np.broadcast_arrays(mu, sd)
    sample_shape = to_tuple(size) + mu.shape[:-1]
    # Pick each sample's component parameters along the last axis.
    picked_mu = np.reshape(np.array([mu[..., c] for c in chosen.ravel()]), sample_shape)
    picked_sd = np.reshape(np.array([sd[..., c] for c in chosen.ravel()]), sample_shape)
    return np.random.normal(picked_mu, picked_sd, size=sample_shape)
def generate_poisson_mixture_data(w, mu, size=1000):
    """Draw *size* samples from a Poisson mixture with weights *w* and rates *mu*."""
    chosen = np.random.choice(w.size, size=size, p=w)
    mu = np.atleast_1d(mu)
    sample_shape = to_tuple(size) + mu.shape[:-1]
    # Pick each sample's rate from its chosen component along the last axis.
    rates = np.reshape(np.array([mu[..., c] for c in chosen.ravel()]), sample_shape)
    return np.random.poisson(rates, size=sample_shape)
class TestMixture(SeededTest):
@classmethod
def setup_class(cls):
    # Build fixed synthetic mixture datasets once for all tests in the class.
    super().setup_class()
    cls.norm_w = np.array([0.75, 0.25])
    cls.norm_mu = np.array([0.0, 5.0])
    cls.norm_sd = np.ones_like(cls.norm_mu)
    cls.norm_x = generate_normal_mixture_data(cls.norm_w, cls.norm_mu, cls.norm_sd, size=1000)

    cls.pois_w = np.array([0.4, 0.6])
    cls.pois_mu = np.array([5.0, 20.0])
    cls.pois_x = generate_poisson_mixture_data(cls.pois_w, cls.pois_mu, size=1000)
def test_dimensions(self):
    """Scalar values give 0-d mode/logp; vector values give elementwise 1-d logp."""
    a1 = Normal.dist(mu=0, sigma=1)
    a2 = Normal.dist(mu=10, sigma=1)
    mix = Mixture.dist(w=np.r_[0.5, 0.5], comp_dists=[a1, a2])

    assert mix.mode.ndim == 0
    assert mix.logp(0.0).ndim == 0

    value = np.r_[0.0, 1.0, 2.0]
    assert mix.logp(value).ndim == 1
def test_mixture_list_of_normals(self):
    """Recover the known mixture weights/means from a list-of-components Mixture."""
    with Model() as model:
        w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
        mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
        tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
        Mixture(
            "x_obs",
            w,
            [Normal.dist(mu[0], tau=tau[0]), Normal.dist(mu[1], tau=tau[1])],
            observed=self.norm_x,
        )
        step = Metropolis()
        trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

    # Mixture components are exchangeable, so compare sorted estimates.
    assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
    assert_allclose(
        np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
    )
def test_normal_mixture(self):
    """Recover the known mixture weights/means using NormalMixture directly."""
    with Model() as model:
        w = Dirichlet("w", floatX(np.ones_like(self.norm_w)), shape=self.norm_w.size)
        mu = Normal("mu", 0.0, 10.0, shape=self.norm_w.size)
        tau = Gamma("tau", 1.0, 1.0, shape=self.norm_w.size)
        NormalMixture("x_obs", w, mu, tau=tau, observed=self.norm_x)
        step = Metropolis()
        trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

    # Mixture components are exchangeable, so compare sorted estimates.
    assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.norm_w), rtol=0.1, atol=0.1)
    assert_allclose(
        np.sort(trace["mu"].mean(axis=0)), np.sort(self.norm_mu), rtol=0.1, atol=0.1
    )
@pytest.mark.parametrize(
    "nd,ncomp", [(tuple(), 5), (1, 5), (3, 5), ((3, 3), 5), (3, 3), ((3, 3), 3)], ids=str
)
def test_normal_mixture_nd(self, nd, ncomp):
    """Three equivalent parameterizations of an nd NormalMixture must agree on logp."""
    nd = to_tuple(nd)
    ncomp = int(ncomp)
    comp_shape = nd + (ncomp,)
    test_mus = np.random.randn(*comp_shape)
    test_taus = np.random.gamma(1, 1, size=comp_shape)
    observed = generate_normal_mixture_data(
        w=np.ones(ncomp) / ncomp, mu=test_mus, sd=1 / np.sqrt(test_taus), size=10
    )

    # Parameterization 1: NormalMixture with explicit comp_shape.
    with Model() as model0:
        mus = Normal("mus", shape=comp_shape)
        taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
        ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
        mixture0 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape)
        obs0 = NormalMixture(
            "obs", w=ws, mu=mus, tau=taus, shape=nd, comp_shape=comp_shape, observed=observed
        )

    # Parameterization 2: generic Mixture over a list of Normal components.
    with Model() as model1:
        mus = Normal("mus", shape=comp_shape)
        taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
        ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
        comp_dist = [
            Normal.dist(mu=mus[..., i], tau=taus[..., i], shape=nd) for i in range(ncomp)
        ]
        mixture1 = Mixture("m", w=ws, comp_dists=comp_dist, shape=nd)
        obs1 = Mixture("obs", w=ws, comp_dists=comp_dist, shape=nd, observed=observed)

    # Parameterization 3: NormalMixture WITHOUT comp_shape.
    with Model() as model2:
        # Expected to fail if comp_shape is not provided,
        # nd is multidim and it does not broadcast with ncomp. If by chance
        # it does broadcast, an error is raised if the mixture is given
        # observed data.
        # Furthermore, the Mixture will also raise errors when the observed
        # data is multidimensional but it does not broadcast well with
        # comp_dists.
        mus = Normal("mus", shape=comp_shape)
        taus = Gamma("taus", alpha=1, beta=1, shape=comp_shape)
        ws = Dirichlet("ws", np.ones(ncomp), shape=(ncomp,))
        if len(nd) > 1:
            if nd[-1] != ncomp:
                with pytest.raises(ValueError):
                    NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
                mixture2 = None
            else:
                mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
        else:
            mixture2 = NormalMixture("m", w=ws, mu=mus, tau=taus, shape=nd)
        observed_fails = False
        if len(nd) >= 1 and nd != (1,):
            try:
                np.broadcast(np.empty(comp_shape), observed)
            except Exception:
                observed_fails = True
        if observed_fails:
            with pytest.raises(ValueError):
                NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)
            obs2 = None
        else:
            obs2 = NormalMixture("obs", w=ws, mu=mus, tau=taus, shape=nd, observed=observed)

    testpoint = model0.initial_point
    testpoint["mus"] = test_mus
    testpoint["taus"] = test_taus
    # All surviving parameterizations must agree on their log-probabilities.
    assert_allclose(model0.logp(testpoint), model1.logp(testpoint))
    assert_allclose(mixture0.logp(testpoint), mixture1.logp(testpoint))
    assert_allclose(obs0.logp(testpoint), obs1.logp(testpoint))
    if mixture2 is not None and obs2 is not None:
        assert_allclose(model0.logp(testpoint), model2.logp(testpoint))
    if mixture2 is not None:
        assert_allclose(mixture0.logp(testpoint), mixture2.logp(testpoint))
    if obs2 is not None:
        assert_allclose(obs0.logp(testpoint), obs2.logp(testpoint))
def test_poisson_mixture(self):
    """Recover the known Poisson-mixture weights/rates from sampled data."""
    with Model() as model:
        w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
        mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
        Mixture("x_obs", w, Poisson.dist(mu), observed=self.pois_x)
        step = Metropolis()
        trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

    # Mixture components are exchangeable, so compare sorted estimates.
    assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
    assert_allclose(
        np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
    )
def test_mixture_list_of_poissons(self):
    """Same as test_poisson_mixture but with an explicit list of components."""
    with Model() as model:
        w = Dirichlet("w", floatX(np.ones_like(self.pois_w)), shape=self.pois_w.shape)
        mu = Gamma("mu", 1.0, 1.0, shape=self.pois_w.size)
        Mixture("x_obs", w, [Poisson.dist(mu[0]), Poisson.dist(mu[1])], observed=self.pois_x)
        step = Metropolis()
        trace = sample(5000, step, random_seed=self.random_seed, progressbar=False, chains=1)

    # Mixture components are exchangeable, so compare sorted estimates.
    assert_allclose(np.sort(trace["w"].mean(axis=0)), np.sort(self.pois_w), rtol=0.1, atol=0.1)
    assert_allclose(
        np.sort(trace["mu"].mean(axis=0)), np.sort(self.pois_mu), rtol=0.1, atol=0.1
    )
def test_mixture_of_mvn(self):
    """Mixture of two 2-d MvNormals: component, mixture, and model log-probabilities
    must match scipy reference computations."""
    mu1 = np.asarray([0.0, 1.0])
    cov1 = np.diag([1.5, 2.5])
    mu2 = np.asarray([1.0, 0.0])
    cov2 = np.diag([2.5, 3.5])
    # Three observations: one generic point plus each component mean.
    obs = np.asarray([[0.5, 0.5], mu1, mu2])
    with Model() as model:
        w = Dirichlet("w", floatX(np.ones(2)), transform=None, shape=(2,))
        mvncomp1 = MvNormal.dist(mu=mu1, cov=cov1)
        mvncomp2 = MvNormal.dist(mu=mu2, cov=cov2)
        y = Mixture("x_obs", w, [mvncomp1, mvncomp2], observed=obs)
    # check logp of each component
    complogp_st = np.vstack(
        (
            st.multivariate_normal.logpdf(obs, mu1, cov1),
            st.multivariate_normal.logpdf(obs, mu2, cov2),
        )
    ).T
    complogp = y.distribution._comp_logp(aesara.shared(obs)).eval()
    assert_allclose(complogp, complogp_st)
    # check logp of mixture
    testpoint = model.initial_point
    # Marginal mixture logp: logsumexp over components of log(w) + component logp.
    mixlogp_st = logsumexp(np.log(testpoint["w"]) + complogp_st, axis=-1, keepdims=False)
    assert_allclose(y.logp_elemwise(testpoint), mixlogp_st)
    # check logp of model
    priorlogp = st.dirichlet.logpdf(
        x=testpoint["w"],
        alpha=np.ones(2),
    )
    assert_allclose(model.logp(testpoint), mixlogp_st.sum() + priorlogp)
def test_mixture_of_mixture(self):
    """Two-level mixture (Normal mixture + LogNormal mixture) must match a
    hand-computed scipy log-probability, before and after mutating the point."""
    if aesara.config.floatX == "float32":
        rtol = 1e-4
    else:
        rtol = 1e-7
    nbr = 4
    with Model() as model:
        # mixtures components
        g_comp = Normal.dist(
            mu=Exponential("mu_g", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
        )
        l_comp = LogNormal.dist(
            mu=Exponential("mu_l", lam=1.0, shape=nbr, transform=None), sigma=1, shape=nbr
        )
        # weight vector for the mixtures
        g_w = Dirichlet("g_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
        l_w = Dirichlet("l_w", a=floatX(np.ones(nbr) * 0.0000001), transform=None, shape=(nbr,))
        # mixture components
        g_mix = Mixture.dist(w=g_w, comp_dists=g_comp)
        l_mix = Mixture.dist(w=l_w, comp_dists=l_comp)
        # mixture of mixtures
        mix_w = Dirichlet("mix_w", a=floatX(np.ones(2)), transform=None, shape=(2,))
        mix = Mixture("mix", w=mix_w, comp_dists=[g_mix, l_mix], observed=np.exp(self.norm_x))
    test_point = model.initial_point

    def mixmixlogp(value, point):
        # scipy reference implementation of the hierarchical mixture logp.
        floatX = aesara.config.floatX
        priorlogp = (
            st.dirichlet.logpdf(
                x=point["g_w"],
                alpha=np.ones(nbr) * 0.0000001,
            ).astype(floatX)
            + st.expon.logpdf(x=point["mu_g"]).sum(dtype=floatX)
            + st.dirichlet.logpdf(
                x=point["l_w"],
                alpha=np.ones(nbr) * 0.0000001,
            ).astype(floatX)
            + st.expon.logpdf(x=point["mu_l"]).sum(dtype=floatX)
            + st.dirichlet.logpdf(
                x=point["mix_w"],
                alpha=np.ones(2),
            ).astype(floatX)
        )
        complogp1 = st.norm.logpdf(x=value, loc=point["mu_g"]).astype(floatX)
        mixlogp1 = logsumexp(
            np.log(point["g_w"]).astype(floatX) + complogp1, axis=-1, keepdims=True
        )
        # lognorm parameterization: shape=1, loc=0, scale=exp(mu).
        complogp2 = st.lognorm.logpdf(value, 1.0, 0.0, np.exp(point["mu_l"])).astype(floatX)
        mixlogp2 = logsumexp(
            np.log(point["l_w"]).astype(floatX) + complogp2, axis=-1, keepdims=True
        )
        complogp_mix = np.concatenate((mixlogp1, mixlogp2), axis=1)
        mixmixlogpg = logsumexp(
            np.log(point["mix_w"]).astype(floatX) + complogp_mix, axis=-1, keepdims=False
        )
        return priorlogp, mixmixlogpg

    value = np.exp(self.norm_x)[:, None]
    priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
    # check logp of mixture
    assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
    # check model logp
    assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
    # check input and check logp again
    test_point["g_w"] = np.asarray([0.1, 0.1, 0.2, 0.6])
    test_point["mu_g"] = np.exp(np.random.randn(nbr))
    priorlogp, mixmixlogpg = mixmixlogp(value, test_point)
    assert_allclose(mixmixlogpg, mix.logp_elemwise(test_point), rtol=rtol)
    assert_allclose(priorlogp + mixmixlogpg.sum(), model.logp(test_point), rtol=rtol)
def test_sample_prior_and_posterior(self):
    """Smoke-test prior/posterior predictive sampling of an MvNormal mixture.

    Builds a K-component multivariate-normal mixture with LKJ Cholesky priors,
    draws a short trace, and checks the shapes of the prior and posterior
    predictive draws.
    """

    def build_toy_dataset(N, K):
        # Draw N points from a fixed 3-component, 3-d Gaussian mixture.
        pi = np.array([0.2, 0.5, 0.3])
        mus = [[1, 1, 1], [-1, -1, -1], [2, -2, 0]]
        stds = [[0.1, 0.1, 0.1], [0.1, 0.2, 0.2], [0.2, 0.3, 0.3]]
        x = np.zeros((N, 3), dtype=np.float32)
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the documented replacement.
        y = np.zeros((N,), dtype=int)
        for n in range(N):
            k = np.argmax(np.random.multinomial(1, pi))
            x[n, :] = np.random.multivariate_normal(mus[k], np.diag(stds[k]))
            y[n] = k
        return x, y

    N = 100  # number of data points
    K = 3  # number of mixture components
    D = 3  # dimensionality of the data
    X, y = build_toy_dataset(N, K)
    with pm.Model() as model:
        pi = pm.Dirichlet("pi", np.ones(K), shape=(K,))
        comp_dist = []
        mu = []
        packed_chol = []
        chol = []
        for i in range(K):
            mu.append(pm.Normal("mu%i" % i, 0, 10, shape=D))
            packed_chol.append(
                pm.LKJCholeskyCov(
                    "chol_cov_%i" % i, eta=2, n=D, sd_dist=pm.HalfNormal.dist(2.5)
                )
            )
            chol.append(pm.expand_packed_triangular(D, packed_chol[i], lower=True))
            comp_dist.append(pm.MvNormal.dist(mu=mu[i], chol=chol[i], shape=D))
        pm.Mixture("x_obs", pi, comp_dist, observed=X)
    with model:
        # Deliberately tiny trace: this only exercises the machinery.
        idata = pm.sample(30, tune=10, chains=1)

    n_samples = 20
    with model:
        ppc = pm.sample_posterior_predictive(idata, n_samples)
        prior = pm.sample_prior_predictive(samples=n_samples)
    assert ppc["x_obs"].shape == (n_samples,) + X.shape
    assert prior["x_obs"].shape == (n_samples,) + X.shape
    assert prior["mu0"].shape == (n_samples, D)
    # A packed Cholesky factor has D*(D+1)/2 free entries per component.
    assert prior["chol_cov_0"].shape == (n_samples, D * (D + 1) // 2)
class TestMixtureVsLatent(SeededTest):
    """Compare a marginalized NormalMixture against the equivalent model with an
    explicit latent component-index variable."""

    def setup_method(self, *args, **kwargs):
        super().setup_method(*args, **kwargs)
        # nd: dimensionality of each draw; npop: number of mixture components.
        self.nd = 3
        self.npop = 3
        # Component means: each of the nd rows is [0, 1, ..., npop-1].
        self.mus = at.as_tensor_variable(
            np.tile(
                np.reshape(
                    np.arange(self.npop),
                    (
                        1,
                        -1,
                    ),
                ),
                (
                    self.nd,
                    1,
                ),
            )
        )

    def test_1d_w(self):
        """With one shared weight vector, every dimension picks the same component."""
        nd = self.nd
        npop = self.npop
        mus = self.mus
        size = 100
        with pm.Model() as model:
            m = pm.NormalMixture(
                "m", w=np.ones(npop) / npop, mu=mus, sigma=1e-5, comp_shape=(nd, npop), shape=nd
            )
            z = pm.Categorical("z", p=np.ones(npop) / npop)
            latent_m = pm.Normal("latent_m", mu=mus[..., z], sigma=1e-5, shape=nd)
        m_val = m.random(size=size)
        latent_m_val = latent_m.random(size=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 comes from the same mixture
        # component
        assert all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
        self.samples_from_same_distribution(m_val, latent_m_val)
        self.logp_matches(m, latent_m, z, npop, model=model)

    def test_2d_w(self):
        """With per-dimension weights, dimensions may pick different components."""
        nd = self.nd
        npop = self.npop
        mus = self.mus
        size = 100
        with pm.Model() as model:
            m = pm.NormalMixture(
                "m",
                w=np.ones((nd, npop)) / npop,
                mu=mus,
                sigma=1e-5,
                comp_shape=(nd, npop),
                shape=nd,
            )
            z = pm.Categorical("z", p=np.ones(npop) / npop, shape=nd)
            mu = at.as_tensor_variable([mus[i, z[i]] for i in range(nd)])
            latent_m = pm.Normal("latent_m", mu=mu, sigma=1e-5, shape=nd)
        m_val = m.random(size=size)
        latent_m_val = latent_m.random(size=size)
        assert m_val.shape == latent_m_val.shape
        # Test that each element in axis = -1 can come from independent
        # components
        assert not all(np.all(np.diff(m_val) < 1e-3, axis=-1))
        assert not all(np.all(np.diff(latent_m_val) < 1e-3, axis=-1))
        self.samples_from_same_distribution(m_val, latent_m_val)
        self.logp_matches(m, latent_m, z, npop, model=model)

    def samples_from_same_distribution(self, *args):
        """Two-sample KS tests that the draws look like the same distribution."""
        # Test if flattened samples distributions match (marginals match)
        _, p_marginal = st.ks_2samp(*(s.flatten() for s in args))
        # Test if correlations within non independent draws match
        _, p_correlation = st.ks_2samp(
            *(np.array([np.corrcoef(ss) for ss in s]).flatten() for s in args)
        )
        assert p_marginal >= 0.05 and p_correlation >= 0.05

    def logp_matches(self, mixture, latent_mix, z, npop, model):
        """Marginal mixture logp must equal logsumexp over per-component latent logps."""
        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7
        test_point = model.initial_point
        test_point["latent_m"] = test_point["m"]
        mix_logp = mixture.logp(test_point)
        logps = []
        for component in range(npop):
            test_point["z"] = component * np.ones(z.distribution.shape)
            # Count the number of axes that should be broadcasted from z to
            # modify the logp
            sh1 = test_point["z"].shape
            sh2 = test_point["latent_m"].shape
            if len(sh1) > len(sh2):
                sh2 = (1,) * (len(sh1) - len(sh2)) + sh2
            elif len(sh2) > len(sh1):
                sh1 = (1,) * (len(sh2) - len(sh1)) + sh1
            reps = np.prod([s2 if s1 != s2 else 1 for s1, s2 in zip(sh1, sh2)])
            z_logp = z.logp(test_point) * reps
            logps.append(z_logp + latent_mix.logp(test_point))
        latent_mix_logp = logsumexp(np.array(logps), axis=0)
        assert_allclose(mix_logp, latent_mix_logp, rtol=rtol)
class TestMixtureSameFamily(SeededTest):
    """Tests for MixtureSameFamily, where all components come from one batched
    distribution mixed along a given axis."""

    @classmethod
    def setup_class(cls):
        super().setup_class()
        cls.size = 50  # draw size for `.random`
        cls.n_samples = 1000  # prior-predictive sample count
        cls.mixture_comps = 10  # number of mixture components

    @pytest.mark.parametrize("batch_shape", [(3, 4), (20,)], ids=str)
    def test_with_multinomial(self, batch_shape):
        """Mixture over batched Multinomials: check draw shapes and model logp."""
        p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
        n = 100 * np.ones((*batch_shape, 1))
        w = np.ones(self.mixture_comps) / self.mixture_comps
        mixture_axis = len(batch_shape)
        with pm.Model() as model:
            comp_dists = pm.Multinomial.dist(p=p, n=n, shape=(*batch_shape, self.mixture_comps, 3))
            mixture = pm.MixtureSameFamily(
                "mixture",
                w=w,
                comp_dists=comp_dists,
                mixture_axis=mixture_axis,
                shape=(*batch_shape, 3),
            )
            prior = pm.sample_prior_predictive(samples=self.n_samples)
        assert prior["mixture"].shape == (self.n_samples, *batch_shape, 3)
        assert mixture.random(size=self.size).shape == (self.size, *batch_shape, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        # Reference logp: logsumexp over the mixture axis of log(w) + component logp.
        comp_logp = comp_dists.logp(model.initial_point["mixture"].reshape(*batch_shape, 1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w)[..., None], axis=mixture_axis, keepdims=True
        ).sum()
        assert_allclose(
            model.logp(model.initial_point),
            log_sum_exp,
            rtol,
        )

    # TODO: Handle case when `batch_shape` == `sample_shape`.
    # See https://github.com/pymc-devs/pymc/issues/4185 for details.
    def test_with_mvnormal(self):
        """Mixture over batched MvNormals: check draw shapes and model logp."""
        # 10 batch, 3-variate Gaussian
        mu = np.random.randn(self.mixture_comps, 3)
        mat = np.random.randn(3, 3)
        cov = mat @ mat.T
        chol = np.linalg.cholesky(cov)
        w = np.ones(self.mixture_comps) / self.mixture_comps
        with pm.Model() as model:
            comp_dists = pm.MvNormal.dist(mu=mu, chol=chol, shape=(self.mixture_comps, 3))
            mixture = pm.MixtureSameFamily(
                "mixture", w=w, comp_dists=comp_dists, mixture_axis=0, shape=(3,)
            )
            prior = pm.sample_prior_predictive(samples=self.n_samples)
        assert prior["mixture"].shape == (self.n_samples, 3)
        assert mixture.random(size=self.size).shape == (self.size, 3)

        if aesara.config.floatX == "float32":
            rtol = 1e-4
        else:
            rtol = 1e-7

        comp_logp = comp_dists.logp(model.initial_point["mixture"].reshape(1, 3))
        log_sum_exp = logsumexp(
            comp_logp.eval() + np.log(w)[..., None], axis=0, keepdims=True
        ).sum()
        assert_allclose(
            model.logp(model.initial_point),
            log_sum_exp,
            rtol,
        )

    def test_broadcasting_in_shape(self):
        """A 2-component distribution must broadcast up to the requested shape."""
        with pm.Model() as model:
            mu = pm.Gamma("mu", 1.0, 1.0, shape=2)
            comp_dists = pm.Poisson.dist(mu, shape=2)
            mix = pm.MixtureSameFamily(
                "mix", w=np.ones(2) / 2, comp_dists=comp_dists, shape=(1000,)
            )
            prior = pm.sample_prior_predictive(samples=self.n_samples)
        assert prior["mix"].shape == (self.n_samples, 1000)
| StarcoderdataPython |
3206812 | import numpy as np
from houghvst.estimation import gat
def compare_variance_stabilization(img, img_noisy, sigma_gt, alpha_gt,
                                   sigma_est, alpha_est):
    """Print GAT-stabilized residual variance for ground-truth and estimated
    (sigma, alpha) noise parameters, side by side."""
    parameter_sets = (
        (sigma_gt, alpha_gt, 'Ground truth'),
        (sigma_est, alpha_est, 'Estimated'),
    )
    for sigma, alpha, heading in parameter_sets:
        assess_variance_stabilization(img, img_noisy, sigma, alpha,
                                      heading=heading)
def assess_variance_stabilization(img, img_noisy, sigma, alpha,
                                  correct_noiseless=True, verbose=True,
                                  heading='Estimated'):
    """Return the sample variance of the GAT residual between the clean and
    noisy images; a well-stabilized transform yields variance close to 1.

    When ``correct_noiseless`` is set, the clean image is first scaled by
    ``alpha`` so both images live on the same photon scale.
    """
    reference = alpha * img if correct_noiseless else img
    stabilized_reference = gat.compute_gat(reference, sigma, alpha=alpha)
    stabilized_noisy = gat.compute_gat(img_noisy, sigma, alpha=alpha)
    residual = stabilized_reference - stabilized_noisy
    # ddof=1 -> unbiased sample variance.
    residual_variance = np.var(residual, ddof=1)
    if verbose:
        print('--->', heading, 'variance', residual_variance)
    return residual_variance
def compute_temporal_mean_var(movie):
    """Per-pixel temporal statistics of a movie stacked along axis 0.

    Returns a ``(means, variances)`` pair computed over the frame axis,
    with the unbiased (ddof=1) sample variance.
    """
    frame_axis = 0
    stats = (
        np.mean(movie, axis=frame_axis),
        np.var(movie, axis=frame_axis, ddof=1),
    )
    return stats
| StarcoderdataPython |
3294403 | <reponame>amalinovskiy/Appraise
# Generated by Django 2.2 on 2019-05-17 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds ``TextPairWithContext``: a ``TextPair`` subclass carrying a document
    ID, a completeness flag, and left/right source/target context strings."""

    dependencies = [
        ('EvalData', '0033_auto_20190228_0826'),
    ]

    operations = [
        migrations.CreateModel(
            name='TextPairWithContext',
            fields=[
                # Multi-table-inheritance link to the parent TextPair row.
                ('textpair_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='EvalData.TextPair')),
                ('documentID', models.CharField(help_text='(max. 100 characters)', max_length=100, verbose_name='Document ID')),
                ('isCompleteDocument', models.BooleanField(blank=True, db_index=True, default=False, verbose_name='Complete document?')),
                ('sourceContextLeft', models.CharField(help_text='(max. 2000 characters)', max_length=2000, verbose_name='Source context (left)')),
                ('sourceContextRight', models.CharField(help_text='(max. 2000 characters)', max_length=2000, verbose_name='Source context (right)')),
                ('targetContextLeft', models.CharField(help_text='(max. 2000 characters)', max_length=2000, verbose_name='Target context (left)')),
                ('targetContextRight', models.CharField(help_text='(max. 2000 characters)', max_length=2000, verbose_name='Target context (right)')),
            ],
            options={
                'ordering': ['_str_name'],
                'abstract': False,
            },
            bases=('EvalData.textpair',),
        ),
    ]
| StarcoderdataPython |
113616 | <reponame>plaf2000/webspec
from django.urls import path
from . import views
# URL routes for this app; each path delegates to the matching view in views.py.
urlpatterns = [
    path('get/', views.get, name='get'),
    path('save/', views.save, name='save'),
    path('create/', views.create, name='create'),
    path('delete/', views.delete, name='delete'),
]
1668240 | <filename>tmy.py<gh_stars>0
"""Module to do processing of TMY3 files into Pandas dataframes and CSV files.
"""
from datetime import datetime
import csv
from pathlib import Path
import pandas as pd
import util as au # a utility library in this repo.
def process_tmy(raw_tmy_dir, output_dir):
    """Convert raw TMY3 CSV files into cleaned DataFrames and save them.

    Reads every ``*.csv`` in ``raw_tmy_dir`` plus the supplemental
    ``design_temps.xlsx``, converts units to US customary (deg F, mph, feet),
    re-stamps all records into the year 2018 at the middle of each hour, and
    writes one DataFrame per site plus a ``tmy3_meta`` metadata table to
    ``output_dir`` via ``au.save_df``.
    """
    print('Processing TMY files...\n')
    raw_path = Path(raw_tmy_dir)
    out_path = Path(output_dir)
    meta_list = []

    # Design heating temperatures, indexed by TMY site ID; merged into the
    # per-site metadata below.
    df_design = pd.read_excel(raw_path / 'design_temps.xlsx', index_col='tmy_id')

    for f_path in raw_path.glob('*.csv'):
        # The first row of a TMY3 file is a one-line site header; parse it
        # with a csv reader, then hand the rest of the stream to pandas.
        with open(f_path) as csvfile:
            tmyreader = csv.reader(csvfile)
            hdr = next(tmyreader)
            meta = dict(
                tmy_id=int(hdr[0]),
                city=hdr[1].strip(),
                state=hdr[2].strip(),
                utc_offset=float(hdr[3]),
                latitude=float(hdr[4]),
                longitude=float(hdr[5]),
                elevation=float(hdr[6]) * 3.28084  # meters -> feet
            )
            # read the rest of the lines into a DataFrame
            df = pd.read_csv(csvfile)

        # Unit conversions for the final columns.
        df['db_temp'] = df['Dry-bulb (C)'] * 1.8 + 32.0  # deg F
        df['rh'] = df['RHum (%)']  # 0 - 100
        df['wind_spd'] = df['Wspd (m/s)'] * 2.23694  # miles per hour
        df_final = df[['db_temp', 'rh', 'wind_spd']].copy()

        # Build timestamps occurring in the middle of the hour associated
        # with each record, all mapped to the year 2018 so the (multi-year)
        # TMY records form one continuous calendar year.
        ts = []
        for dt, tm in zip(df['Date (MM/DD/YYYY)'], df['Time (HH:MM)']):
            m, d, _ = dt.split('/')
            h, _ = tm.split(':')
            ts.append(datetime(2018, int(m), int(d), int(h) - 1, 30))
        df_final.index = ts
        df_final.index.name = 'timestamp'
        df_final['month'] = df_final.index.month

        meta['db_temp_avg'] = df_final.db_temp.mean()
        meta['rh_avg'] = df_final.rh.mean()
        meta['wind_spd_avg'] = df_final.wind_spd.mean()

        # If available, use the tabulated Design Heating Temperature;
        # otherwise fall back to the site's 1% dry-bulb temperature.
        # (Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        try:
            meta['heating_design_temp'] = df_design.loc[meta['tmy_id']].htg_design_temp
        except (KeyError, AttributeError):
            meta['heating_design_temp'] = df_final.db_temp.quantile(0.01)

        base_no_ext = f_path.stem
        meta_list.append(meta)

        # --- Store the site's DataFrame
        au.save_df(df_final, out_path / base_no_ext)

    df_meta = pd.DataFrame(meta_list)
    df_meta.set_index('tmy_id', inplace=True)
    au.save_df(df_meta, out_path / 'tmy3_meta')
| StarcoderdataPython |
3282211 | <filename>finalArtisticTransfer.py
# <NAME> and <NAME>
# W4731 Computer Vision Final Project - Artistic Style Transfer
# Keras implementation of Artistic Style Transfer as described by Gatys et al 2015/6
# NOTE: keras.image_data_format assumed to be channels last
import sys
import time
import numpy as np
from keras.applications import vgg19
from keras import backend as K
from keras.preprocessing.image import load_img, save_img, img_to_array
from scipy.optimize import fmin_l_bfgs_b
# Spatial resolution (rows, cols) every image is resized to for VGG-19.
OUT_SHAPE = (224, 224)
# N, M: image height and width, used when normalizing the style loss.
N, M = OUT_SHAPE
# VGG-19 layers whose Gram matrices define the style representation.
LAYERS = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
class NeuralStyleTransfer():
    """Keras/VGG-19 implementation of neural style transfer (Gatys et al.).

    Builds symbolic content/style/output tensors, a pretrained VGG-19 feature
    extractor, and one compiled function returning the combined loss and its
    gradient w.r.t. the output image, for use with L-BFGS.
    """

    def __init__(self, contentPath, stylePath, outPath):
        # Paths to the content image, the style image, and the output prefix.
        self.contentPath = contentPath
        self.stylePath = stylePath
        self.outPath = outPath
        # Cached loss/gradient pair shared between lossDescent/gradsDescent.
        self.loss_value = None
        self.grads_values = None
        self.outShape = OUT_SHAPE
        # process the input image to be keras tensor variable for vgg net
        self.contentTensor = K.variable(self.imgToTensor(contentPath))
        self.styleTensor = K.variable(self.imgToTensor(stylePath))
        self.finalTensor = K.placeholder((1, *OUT_SHAPE, 3))
        # Batch of three: [content, style, generated] fed through VGG once.
        self.mainTensor = K.concatenate(
            [self.contentTensor, self.styleTensor, self.finalTensor], axis=0
        )
        # alpha and beta for the total loss equation
        # totalLoss = alpha * contentLoss + beta * styleLoss
        self.alpha = 0.05
        self.beta = 5.0
        # building VGG 19 pretrained from imagenet and setting up a dictionary the layers
        self.VGG19 = vgg19.VGG19(input_tensor=self.mainTensor, weights='imagenet', include_top=False)
        self.layersDict = dict([(layer.name, layer.output) for layer in self.VGG19.layers])
        # combine these loss functions into a single scalar
        self.L_total = K.variable(0.0)
        self.block5_conv2_features = self.layersDict['block5_conv2']
        self.contentRepresentation = self.block5_conv2_features[0, :, :, :]
        self.outputRepresentation = self.block5_conv2_features[2, :, :, :]
        # initializing the content loss to be the SSR of the difference between content representation
        # and whitenoise image representation /out image just like the formula in the paper
        self.L_content = K.sum(K.square(self.contentRepresentation - self.outputRepresentation))
        self.L_total += self.alpha * self.L_content
        # iterate over all the layers of the vgg and add up the style loss across
        # the layers to the total loss
        for layer in LAYERS:
            # Batch indices: 1 = style image, 2 = generated image.
            self.blockFeatures = self.layersDict[layer]
            self.styleFeatures = self.blockFeatures[1, :, :, :]
            self.bothFeatures = self.blockFeatures[2, :, :, :]
            # calculating the gram matrixes
            self.gramStyle = self.G(self.styleFeatures)
            self.gramContent = self.G(self.bothFeatures)
            self.size = N**2
            # getting the SSR of between the G_style and G_content
            # just like formula 4 from the 2015 Gatys paper
            self.L_style = K.sum(K.square(self.gramStyle - self.gramContent)) / (4.0 * (3 ** 2) * (self.size ** 2))
            self.L_total += (self.beta / len(LAYERS)) * self.L_style
        # getting derivatives of the tensor with respective to the total Loss, L_total
        self.grads = K.gradients(self.L_total, self.finalTensor)
        # setting the output values for the total loss and adding the gradients
        self.outputs = [self.L_total]
        self.outputs += self.grads
        # self.features = K.function([finalTensor], self.outputs)
        self.features = K.function([self.finalTensor], self.outputs)

    # this function computes the loss and gradient values for the input Keras tensor x
    def L_dK(self, x):
        """Return (loss, flat float64 gradient) for a flattened image x."""
        x = x.reshape((1, *OUT_SHAPE, 3))
        outs = self.features([x])
        loss_value = outs[0]
        if len(outs[1:]) == 1:
            grad_values = outs[1].flatten().astype('float64')
        else:
            grad_values = np.array(outs[1:]).flatten().astype('float64')
        return loss_value, grad_values

    # takes and image path as input, and the output shape
    # outputs the Keras VGG tensor representation of the image
    def imgToTensor(self, imgPath, shape=OUT_SHAPE):
        """Load an image file and preprocess it into a VGG-ready numpy batch."""
        kerasImg = load_img(imgPath, target_size=shape)
        numpyImg = np.expand_dims(img_to_array(kerasImg), axis=0)
        tensor = vgg19.preprocess_input(numpyImg)
        return tensor

    # from keras tensor to img
    def tensorToImg(self, tensorX):
        """Undo VGG preprocessing and return a uint8 RGB image array."""
        tensorX = tensorX.reshape((*OUT_SHAPE, 3))
        # converting brg to rgb tensor for output
        # removing mean value to make the final image brighter as
        # was suggested by stack over flow
        tensorX[:, :, 0] += 100.0
        tensorX[:, :, 1] += 110.0
        tensorX[:, :, 2] += 120.0
        tensorX = tensorX[:, :, ::-1]
        tensorX = np.clip(tensorX, 0, 255).astype('uint8')
        return tensorX

    def lossDescent(self, x):
        """Loss callback for L-BFGS; caches the gradient for gradsDescent."""
        loss_value, grad_values = self.L_dK(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def gradsDescent(self, x):
        """Gradient callback for L-BFGS; returns the value cached by lossDescent."""
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

    # input: a three dimension tensor, out: gram matrix for tensor
    def G(self, x):
        """Gram matrix of a (H, W, C) feature tensor, flattened per channel."""
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        gram = K.dot(features, K.transpose(features))
        return gram
def gradientDescent(NeuralTransfer, epochsCount):
    """Run L-BFGS style-transfer optimisation, saving one image per epoch.

    Starts from the (preprocessed) content image and writes
    ``outPath + "<epoch>.png"`` after each optimisation round.
    """
    x = NeuralTransfer.imgToTensor(NeuralTransfer.contentPath)
    for epoch in range(epochsCount):
        x, _, _ = fmin_l_bfgs_b(
            NeuralTransfer.lossDescent,
            x.flatten(),
            fprime=NeuralTransfer.gradsDescent,
            maxfun=20,
        )
        # Convert the flat tensor back to an RGB image and write it out.
        snapshot = NeuralTransfer.tensorToImg(x.copy())
        save_img(NeuralTransfer.outPath + str(epoch) + ".png", snapshot)
if __name__ == "__main__":
    # Validate CLI arguments explicitly instead of the previous
    # try/assert/except AssertionError pattern: `assert` is stripped when
    # Python runs with -O, which would have disabled the check entirely.
    if len(sys.argv) != 4:
        print("Incorrect Usage")
        print("Usage:python neuralStylerTransfer.py contentPath stylePath outPath/out")
    else:
        contentPath = sys.argv[1]
        stylePath = sys.argv[2]
        outPath = sys.argv[3]
        transfer = NeuralStyleTransfer(contentPath, stylePath, outPath)
        gradientDescent(transfer, 10)
| StarcoderdataPython |
3383082 | <reponame>NathaliaBarreiros/nlp_api
from app.models.zeroshot_inference import ZeroShotInferenceBase
from app.models.user import UserBase
from pydantic import BaseModel
from typing import Optional
class ZeroShotInferenceCreate(ZeroShotInferenceBase):
    """Schema for creating a zero-shot inference record."""

    # Classifier output: candidate label -> predicted score.
    result: dict[str, float]
class ZeroShotInferenceRead(ZeroShotInferenceBase):
    """Schema returned when reading a stored zero-shot inference."""

    # Primary key of the inference record.
    id: int
    # Classifier output: candidate label -> predicted score.
    result: dict[str, float]
    # User who created the record.
    created_by: UserBase
class ZeroShotInferenceUpdate(BaseModel):
    """Schema for partial updates; fields left as None are not changed."""

    text: Optional[str] = None
    candidate_labels: Optional[list[str]] = None
| StarcoderdataPython |
1758260 | from autumn.projects.covid_19.mixing_optimisation.constants import PHASE_2_START_TIME
from autumn.models.covid_19.mixing_matrix import (
build_dynamic_mixing_matrix,
)
from autumn.tools.inputs.demography.queries import get_iso3_from_country_name
from .mixing_opti import build_params_for_phases_2_and_3
# FIXME this is broken
def get_mixing_matrices(
    output_dir, country, config=2, mode="by_age", objective="deaths", from_streamlit=False
):
    """Return (baseline Prem mixing matrix, optimised mixing matrix) for a country.

    NOTE(review): flagged broken upstream ("FIXME this is broken");
    `get_mle_params_and_vars` is not imported in this module — confirm
    before use.
    """
    # The ISO3 lookup expects title-case country names; the hyphenated
    # "united-kingdom" would not resolve, so it is special-cased to GBR.
    iso_3 = get_iso3_from_country_name(country.title()) if country != "united-kingdom" else "GBR"
    params, decision_vars = get_mle_params_and_vars(
        output_dir, country, config, mode, objective, from_streamlit
    )
    if mode == "by_location":
        # Repack the flat decision-variable list into a per-location mapping.
        new_decision_variables = {
            "other_locations": decision_vars[0],
            "school": decision_vars[1],
            "work": decision_vars[2],
        }
        decision_vars = new_decision_variables
    sc_1_params = build_params_for_phases_2_and_3(decision_vars, config, mode)
    if mode == "by_location":
        sc_1_params["mixing_age_adjust"] = {}
    # FIXME: this is probably broken!
    mixing_func = build_dynamic_mixing_matrix(
        iso_3,
        country,
        mixing=sc_1_params["mixing"],
        mixing_age_adjust=sc_1_params["mixing_age_adjust"],
        npi_effectiveness_params={},
        google_mobility_locations={
            "work": ["workplaces"],
            "other_locations": [
                "retail_and_recreation",
                "grocery_and_pharmacy",
                "transit_stations",
            ],
        },
        is_periodic_intervention=False,
        periodic_int_params={},
        periodic_end_time=0.0,
        microdistancing_params={},
        smooth_google_data=True,
    )
    # Evaluate far in the future (pre-intervention baseline) and shortly
    # after Phase 2 begins (optimised mixing).
    original_prem = mixing_func(10000.0)
    optimised = mixing_func(PHASE_2_START_TIME + 10.0)
    return original_prem, optimised
| StarcoderdataPython |
156455 | <reponame>anirudhakulkarni/codes
def count_mismatches(a, b, l, r):
    """Count integers j in [l, r] with (j % a) % b != (j % b) % a.

    When a == b the two expressions are always equal, so the answer is 0.
    """
    if a == b:
        return 0
    return sum(1 for j in range(l, r + 1) if (j % a) % b != (j % b) % a)


if __name__ == "__main__":
    # The dataset copy of this script lost its indentation, collapsing the
    # per-query loop so that only the last query's range was ever counted
    # (and with a leftover loop index). Reconstructed: one answer per query,
    # printed space-separated on a single line per test case.
    for _ in range(int(input())):
        a, b, q = map(int, input().split())
        queries = [tuple(map(int, input().split())) for _ in range(q)]
        print(*(count_mismatches(a, b, l, r) for l, r in queries))
4833854 | <reponame>JohanComparat/pyEmerge
import h5py # HDF5 support
import os
import glob
import numpy as n
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as p
# Output directory for the logN-logS figures.
plotDir = os.path.join(os.environ['HOME'], 'wwwDir', "eRoMok", "logNlogS")
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
# Flat Lambda-CDM cosmology matching the MultiDark simulation parameters.
cosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)
def get_lognlogs(path_to_lc, area, z_max=3., ra_max=10., dec_max=10.):
    """Compute the cumulative AGN logN-logS from one light-cone HDF5 file.

    :param path_to_lc: path to the light-cone HDF5 file
    :param area: sky area of the cone (deg2), used to normalize the counts
    :param z_max: maximum redshift (NOTE(review): in the original code these
        three cuts were only applied to an unused galaxy selection; the AGN
        selection below ignores them — confirm intent)
    :param ra_max: maximum |RA| (deg), see note above
    :param dec_max: maximum |DEC| (deg), see note above
    :return: (log flux bin centers, cumulative counts per deg2,
              symmetric error on log10 of the counts)
    """
    # Open read-only inside a context manager: the function only reads, and
    # the previous `'r+'` mode both required write access and leaked the
    # handle if an exception occurred before `close()`.
    with h5py.File(path_to_lc, 'r') as f:
        # Selected, active AGN with a positive 0.5-2 keV flux.
        is_agn = (
            (f['/sky_position/selection'].value)
            & (f['/agn_properties/agn_activity'].value == 1)
            & (f['/agn_properties/rxay_flux_05_20'].value > 0)
        )
        log_f_05_20 = n.log10(f['/agn_properties/rxay_flux_05_20'].value[is_agn])
    out = n.histogram(log_f_05_20, bins=n.arange(-18, -8., 0.2))
    # cumulative number density per square degrees
    x_out = 0.5 * (out[1][1:] + out[1][:-1])
    N_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0]))])
    c_out = N_out / area
    # Poisson (sqrt(N)) fractional errors, converted to a symmetric log error.
    c_out_up = (1 + N_out ** (-0.5)) * c_out
    c_out_low = (1 - N_out ** (-0.5)) * c_out
    c_err = (n.log10(c_out_up) - n.log10(c_out_low)) / 2.
    return x_out, c_out, c_err
# ---- Cumulative AGN logN-logS: mock light-cones vs. observed relations ----
p.figure(1, (6,6))

# L3 light-cone, z < 1.08 slice (~223 deg2).
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3.hdf5'
area = 6.7529257176359*2. * 2* 8.269819492449505
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 1.1, 6.7529257176359, 8.269819492449505)
#p.plot(x_out, n.log10(c_out), lw=2, rasterized = True, label = 'z<1.08' )
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 z<1.08, 223deg2' )
x_out_a, c_out_a, c_err_a = x_out, c_out, c_err
p.axhline(n.log10(300), ls='dashed')
#path_to_lc=='/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L3_z1.hdf5'
#area = 3.3764628588325674*2. * 2* 4.134909746242654
#x_out, c_out, c_err = get_lognlogs(path_to_lc, area, z_max=3.)
#p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 1.08<z<3.' )
#p.plot(x_out, n.log10(c_out+c_out_a), ls='dashed', label='total')

# L6 light-cone, z < 3 (~15 deg2).
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L6.hdf5'
area = 1.9766516114702513*2. * 2*2.0047373031569915
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 1.9766516114702513, 2.0047373031569915)
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L6 z<3., 15deg2' )
#p.plot(x_out-0.1, n.log10(c_out), 'k', lw=2, rasterized = True, label = 'L3 lc-0.1' )
#p.plot(x_out, n.log10(c_out*(1-frac_err_13deg2)), 'k--', lw=1, rasterized = True, label = 'v0.6, 13.3deg2 scatter' )
#p.plot(x_out, n.log10(c_out*(1+frac_err_13deg2)), 'k--', lw=1, rasterized = True)
#p.plot(x_out, n.log10(c_out*(1-frac_err_3deg2)), 'r--', lw=1, rasterized = True, label = 'v0.6, 3.5deg2 scatter' )
#p.plot(x_out, n.log10(c_out*(1+frac_err_3deg2)), 'r--', lw=1, rasterized = True)
#p.plot(x_out_0, n.log10(c_out_0), 'm--', rasterized = True, label = 'Planck mock v0.0' )

# L15 light-cone, z < 0.54 (~1160 deg2).
path_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L15.hdf5'
area = 14.323944878104827*2. * 2*20.257311381848154
x_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 14.323944878104827, 20.257311381848154)
p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L15 z<0.54 1160deg2' )

# Observed relation from Georgakakis et al. (2008), with uncertainty band.
path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Georgakakis_08_AGN.data')
x_data, y_data, yerr = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.fill_between(x_data, y1 = n.log10(y_data-yerr), y2=n.log10(y_data+yerr), color='b' , rasterized = True, alpha=0.5, label = 'Georgakakis 08' )
#p.plot(x_data, n.log10(y_data))

# Observed relation from Merloni et al. (2012).
path_2_logNlogS_data = os.path.join(os.environ["DARKSIM_DIR"], 'observations', 'logNlogS', 'logNlogS_Merloni_12_AGN.data')
x_data, y_data = n.loadtxt(path_2_logNlogS_data, unpack=True)
p.plot(x_data, n.log10(y_data), label = 'Merloni 12' )
p.axhline(7, ls='dashed')

p.xlabel('log(F[0.5-2 keV])')
p.ylabel('log(>F) [/deg2]')
p.legend(frameon=False, loc=0)
#p.yscale('log')
p.xlim((-17, -12))
p.ylim((-2, 4.))
#p.title('Mocks')
p.grid()
p.savefig(os.path.join(plotDir, "logN_logS_AGN.jpg"))
p.clf()
| StarcoderdataPython |
3370416 | <reponame>zkouba/advent-of-code
import unittest
from aoc2020.task11.task11 import load, _interlink_neighboring_seats, Seat
class LobbyTest(unittest.TestCase):
    """Unit tests for the day-11 seating simulation (visibility-based rules)."""

    def test_full_flow(self):
        """Step the lobby through the puzzle's known iteration sequence."""
        # A seat empties once `threshold` or more visible neighbors are occupied.
        threshold = 5
        # radius -1 -> unlimited line-of-sight neighbor linking.
        lobby = load("./test_input.txt", -1)
        self.assertEqual(10, len(lobby.plan))
        self.assertEqual(10, len(lobby.plan[0]))
        self.assertEqual(10, len(lobby.plan[-1]))
        self.assertEqual(
            """L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.##.##.##
#######.##
#.#.#..#..
####.##.##
#.##.##.##
#.#####.##
..#.#.....
##########
#.######.#
#.#####.##""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.LL.LL.L#
#LLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLL#
#.LLLLLL.L
#.LLLLL.L#""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.L#.##.L#
#L#####.LL
L.#.#..#..
##L#.##.##
#.##.#L.##
#.#####.#L
..#.#.....
LLL####LL#
#.L#####.L
#.L####.L#""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##LL.LL.L#
L.LL.LL.L#
#.LLLLL.LL
..L.L.....
LLLLLLLLL#
#.LLLLL#.L
#.L#LL#.L#""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##L#.#L.L#
L.L#.#L.L#
#.L####.LL
..#.#.....
LLL###LLL#
#.LLLLL#.L
#.L#LL#.L#""",
            str(lobby)
        )
        lobby._iteration(threshold)
        self.assertEqual(
            """#.L#.L#.L#
#LLLLLL.LL
L.L.L..#..
##L#.#L.L#
L.L#.LL.L#
#.LLLL#.LL
..#.L.....
LLL###LLL#
#.LLLLL#.L
#.L#LL#.L#""",
            str(lobby)
        )
        # Stable state from the puzzle example: 26 occupied seats.
        self.assertEqual(26, lobby.count_occupied())

    def test_linking_neighbors(self):
        """Line-of-sight linking (radius -1) must connect seats across empty floor."""
        s0 = Seat(Seat.FREE_SEAT)
        s1 = Seat(Seat.FREE_SEAT)
        s2 = Seat(Seat.FREE_SEAT)
        s3 = Seat(Seat.FREE_SEAT)
        s4 = Seat(Seat.FREE_SEAT)
        s5 = Seat(Seat.FREE_SEAT)
        seats = _interlink_neighboring_seats(
            plan=[
                [s0, s1, Seat(Seat.EMPTY_SPACE)],
                [s2, Seat(Seat.EMPTY_SPACE), Seat(Seat.EMPTY_SPACE)],
                [Seat(Seat.EMPTY_SPACE), s3, Seat(Seat.EMPTY_SPACE)],
                [s4, Seat(Seat.EMPTY_SPACE), s5]
            ],
            radius=-1
        )
        self.assertEqual(6, len(seats))
        # Each seat must see exactly the seats reachable in a straight line.
        self.assertEqual(2, len(seats[0].neighbors))
        self.assertTrue(s1 in s0.neighbors)
        self.assertTrue(s2 in s0.neighbors)
        self.assertEqual(3, len(seats[1].neighbors))
        self.assertTrue(s0 in s1.neighbors)
        self.assertTrue(s3 in s1.neighbors)
        self.assertTrue(s2 in s1.neighbors)
        self.assertEqual(4, len(seats[2].neighbors))
        self.assertTrue(s0 in s2.neighbors)
        self.assertTrue(s1 in s2.neighbors)
        self.assertTrue(s3 in s2.neighbors)
        self.assertTrue(s4 in s2.neighbors)
        self.assertEqual(4, len(seats[3].neighbors))
        self.assertTrue(s2 in s3.neighbors)
        self.assertTrue(s1 in s3.neighbors)
        self.assertTrue(s5 in s3.neighbors)
        self.assertTrue(s4 in s3.neighbors)
        self.assertEqual(3, len(seats[4].neighbors))
        self.assertTrue(s2 in s4.neighbors)
        self.assertTrue(s3 in s4.neighbors)
        self.assertTrue(s5 in s4.neighbors)
        self.assertEqual(2, len(seats[5].neighbors))
        self.assertTrue(s3 in s5.neighbors)
        self.assertTrue(s4 in s5.neighbors)
# Allow running this test module directly (e.g. `python test_task11.py`).
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1656150 | """ BiotSavart_CUDA module. """
# ISC License
#
# Copyright (c) 2020–2021, <NAME>, <NAME>. <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import math
import numpy as np
from numba import cuda
from PyQt5.QtCore import QThread
from magneticalc.Constants import Constants
from magneticalc.Debug import Debug
from magneticalc.Field_Types import A_FIELD, B_FIELD
from magneticalc.Theme import Theme
class BiotSavart_CUDA:
    """
    Implements the Biot-Savart law for calculating the magnetic flux density (B-field) and vector potential (A-field).

    CUDA backend: the integration over all current elements is performed by a
    numba.cuda kernel, one GPU thread per sampling volume point.  The common
    prefactor µ_0 · I / (4π) is applied on the host in get_result().
    """

    def __init__(
            self,
            field_type: int,
            distance_limit: float,
            length_scale: float,
            dc: float,
            current_elements,
            sampling_volume_points,
            sampling_volume_permeabilities,
            progress_callback
    ):
        """
        Initializes the class attributes.

        @param field_type: Field type (A_FIELD or B_FIELD)
        @param distance_limit: Distance limit (mitigating divisions by zero)
        @param length_scale: Length scale (m)
        @param dc: Wire current (A)
        @param current_elements: Ordered list of current elements (pairs: [element center, element direction])
        @param sampling_volume_points: Ordered list of sampling volume points
        @param sampling_volume_permeabilities: Ordered list of sampling volume's relative permeabilities µ_r
        @param progress_callback: Progress callback
        """
        self.field_type = field_type
        self._distance_limit = distance_limit
        self._length_scale = length_scale
        self._dc = dc
        self._current_elements = current_elements
        self._sampling_volume_points = sampling_volume_points
        self._sampling_volume_permeabilities = sampling_volume_permeabilities
        self._progress_callback = progress_callback

    @staticmethod
    def is_available():
        """
        Indicates the availability of this backend.

        @return: True if this backend is available, False otherwise
        """
        return cuda.is_available()

    @staticmethod
    @cuda.jit
    def worker(
            field_type,
            distance_limit,
            length_scale,
            element_centers,
            element_directions,
            sampling_volume_points,
            sampling_volume_permeabilities,
            field_vectors,
            total_calculations,
            total_skipped_calculations
    ):
        """
        Applies the Biot-Savart law for calculating the magnetic flux density (B-field) or vector potential (A-field)
        for all sampling volume points.

        @param field_type: Field type
        @param distance_limit: Distance limit (mitigating divisions by zero)
        @param length_scale: Length scale (m)
        @param element_centers: Ordered list of current elements centers
        @param element_directions: Ordered list of current elements directions
        @param sampling_volume_points: Sampling volume points
        @param sampling_volume_permeabilities: Ordered list of sampling volume's relative permeabilities µ_r
        @param field_vectors: Field vectors (output array)
        @param total_calculations: Total number of calculations (output array)
        @param total_skipped_calculations: Total number of skipped calculations (output array)
        """
        # One thread handles one sampling volume point.
        # noinspection PyUnresolvedReferences
        sampling_volume_index = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x

        # The launch grid is fixed (see get_result); surplus threads exit here.
        if sampling_volume_index >= sampling_volume_points.shape[0]:
            return

        total_calculations[sampling_volume_index] = 0
        total_skipped_calculations[sampling_volume_index] = 0

        # Accumulate the contribution of every current element to this point.
        vector_x = 0
        vector_y = 0
        vector_z = 0
        for current_element_index in range(element_centers.shape[0]):

            # Displacement from current element center to the sampling point,
            # converted to physical units via length_scale.
            vector_distance_x = (sampling_volume_points[sampling_volume_index][0] -
                                 element_centers[current_element_index][0]) * length_scale
            vector_distance_y = (sampling_volume_points[sampling_volume_index][1] -
                                 element_centers[current_element_index][1]) * length_scale
            vector_distance_z = (sampling_volume_points[sampling_volume_index][2] -
                                 element_centers[current_element_index][2]) * length_scale

            # Calculate distance (mitigating divisions by zero)
            scalar_distance = math.sqrt(vector_distance_x ** 2 + vector_distance_y ** 2 + vector_distance_z ** 2)
            if scalar_distance < distance_limit:
                # Clamp instead of skipping so the sum stays finite.
                scalar_distance = distance_limit
                total_skipped_calculations[sampling_volume_index] += 1

            total_calculations[sampling_volume_index] += 1

            if field_type == A_FIELD:
                # Calculate A-field (vector potential): A ∝ Σ dl / r
                vector_x += element_directions[current_element_index][0] * length_scale / scalar_distance
                vector_y += element_directions[current_element_index][1] * length_scale / scalar_distance
                vector_z += element_directions[current_element_index][2] * length_scale / scalar_distance
            elif field_type == B_FIELD:
                # Calculate B-field (flux density): B ∝ Σ dl × r / r³
                # (cross product expanded component-wise below)
                a_1 = element_directions[current_element_index][0] * length_scale
                a_2 = element_directions[current_element_index][1] * length_scale
                a_3 = element_directions[current_element_index][2] * length_scale
                vector_x += (a_2 * vector_distance_z - a_3 * vector_distance_y) / (scalar_distance ** 3)
                vector_y += (a_3 * vector_distance_x - a_1 * vector_distance_z) / (scalar_distance ** 3)
                vector_z += (a_1 * vector_distance_y - a_2 * vector_distance_x) / (scalar_distance ** 3)

        # Scale the accumulated vector by the local relative permeability µ_r.
        field_vectors[sampling_volume_index, 0] = vector_x * sampling_volume_permeabilities[sampling_volume_index]
        field_vectors[sampling_volume_index, 1] = vector_y * sampling_volume_permeabilities[sampling_volume_index]
        field_vectors[sampling_volume_index, 2] = vector_z * sampling_volume_permeabilities[sampling_volume_index]

    def get_result(self):
        """
        Calculates the field at every point of the sampling volume.

        @return: (Total # of calculations, total # of skipped calculations, field) if successful, None if interrupted
        """
        Debug(self, ".get_result()", color=Theme.PrimaryColor)

        # The wire geometry is constant across chunks: upload it to the GPU once.
        element_centers = [element[0] for element in self._current_elements]
        element_directions = [element[1] for element in self._current_elements]
        element_centers_global = cuda.to_device(element_centers)
        element_directions_global = cuda.to_device(element_directions)

        total_calculations = 0
        total_skipped_calculations = 0
        field_vectors = np.zeros(shape=(0, 3))

        # Split the calculation into chunks for progress update and interruption handling
        chunk_size_max = 1024 * 16
        chunk_start = 0
        remaining = len(self._sampling_volume_points)
        while remaining > 0:
            if remaining >= chunk_size_max:
                chunk_size = chunk_size_max
            else:
                chunk_size = remaining

            # Upload only the current slice of sampling points / permeabilities.
            sampling_volume_points_global = cuda.to_device(
                self._sampling_volume_points[chunk_start:chunk_start + chunk_size]
            )
            sampling_volume_permeabilities_global = cuda.to_device(
                self._sampling_volume_permeabilities[chunk_start:chunk_start + chunk_size]
            )

            # Signal progress update, handle interrupt
            self._progress_callback(100 * chunk_start / len(self._sampling_volume_points))
            if QThread.currentThread().isInterruptionRequested():
                Debug(self, ".get_result(): Interruption requested, exiting now", color=Theme.PrimaryColor)
                return None

            remaining -= chunk_size
            chunk_start += chunk_size

            # Per-chunk output buffers on the device.
            total_calculations_global = cuda.to_device(np.zeros(chunk_size))
            total_skipped_calculations_global = cuda.to_device(np.zeros(chunk_size))
            field_vectors_global = cuda.device_array((chunk_size, 3))

            TPB = 1024  # Maximum threads per block
            BPG = 65536  # Maximum blocks per grid

            # Fixed launch dimensions; threads beyond the chunk size exit
            # immediately via the guard at the top of worker().
            BiotSavart_CUDA.worker[BPG, TPB](
                self.field_type,
                self._distance_limit,
                self._length_scale,
                element_centers_global,
                element_directions_global,
                sampling_volume_points_global,
                sampling_volume_permeabilities_global,
                field_vectors_global,
                total_calculations_global,
                total_skipped_calculations_global
            )

            total_calculations_local = total_calculations_global.copy_to_host()
            total_skipped_calculations_local = total_skipped_calculations_global.copy_to_host()
            field_vectors_local = field_vectors_global.copy_to_host()

            if self.field_type == A_FIELD or self.field_type == B_FIELD:
                # Field is A-field or B-field: apply the Biot-Savart prefactor
                # µ_0 · I / (4π) on the host, once per chunk.
                field_vectors_local = field_vectors_local * self._dc * Constants.mu_0 / 4 / np.pi

            total_calculations += int(sum(total_calculations_local))
            total_skipped_calculations += int(sum(total_skipped_calculations_local))
            field_vectors = np.append(field_vectors, field_vectors_local, axis=0)

        self._progress_callback(100)

        return total_calculations, total_skipped_calculations, np.array(field_vectors)
| StarcoderdataPython |
3337684 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Code for training model for contradictory_claims."""
| StarcoderdataPython |
169535 | <filename>examples/account.py
from kauripay.processing import KauriPay

# API credentials and endpoint for the KauriPay service.
# Fill these in before running any of the example functions below.
api_key = ''
api_secret = ''
host = ''

# Shared client instance used by every helper in this module.
pay = KauriPay(api_key=api_key,
               api_secret=api_secret,
               host=host)
def get_total_balance(view_currency='BTC') -> float:
    """
    Sum the whole account balance across all held currencies, expressed in
    a single currency.

    :param view_currency: currency in which the combined balance is reported
    :return: total balance (available + reserved) for the account
    """
    balances = pay.get_balance().get('balance')
    total = 0
    for code in balances:
        converted = balances.get(code).get(view_currency)
        total += converted.get('total') + converted.get('reserved')
    return total
def get_balance_currency_converted_to_another_currency(base_currency='BTC',
                                                       view_currency='UAH') -> float:
    """
    Report the balance held in ``base_currency`` as seen in ``view_currency``.

    :param base_currency: currency whose balance is looked up
    :param view_currency: currency the amount is converted into
    :return: converted balance amount, or ``None`` when the API call fails
    """
    response = pay.get_balance()
    if response['status'] != 'success':
        return None
    return response.get('balance').get(base_currency).get(view_currency).get('total')
def get_processing_limits(order_type='WITHDRAWAL',
                          currency='ETH',
                          payment_method: str = None) -> tuple:
    """
    Shows limits for processing of chosen currency, depending on the order type

    :param order_type: choice from ('INTERNAL', 'WITHDRAWAL', 'INVOICE', 'DEPOSIT')
    :param currency: processing currency
    :param payment_method: you must specify this param if order_type == 'WITHDRAWAL' and
        if several blockchains are available for chosen currency.
        E.g. if currency == 'USDT' payment_method can be 'ERC20', 'TRC20, 'BEP20'
    :return: (min_limit, max_limit), or None when the API call is not successful
    :raises ValueError: if ``order_type`` is not one of the supported values
    """
    result = pay.get_account_info()
    if result['status'] == 'success':
        if order_type == 'INTERNAL':
            prefix = result['internal_movement_limits'][currency]
            min_limit = prefix.get('CROSS_ACCOUNT').get('min_amount')
            max_limit = prefix.get('CROSS_ACCOUNT').get('max_amount')
        elif order_type == 'WITHDRAWAL':
            prefix = result['withdrawal_order_limits'][currency]['GATEWAY']
            if payment_method:
                min_limit = prefix[payment_method].get('min_amount')
                max_limit = prefix[payment_method].get('max_amount')
            else:
                min_limit = prefix.get('min_amount')
                max_limit = prefix.get('max_amount')
        elif order_type == 'INVOICE':
            prefix = result['invoice_order_limits'][currency]
            min_limit = prefix['min_amount']
            max_limit = prefix['max_amount']
        elif order_type == 'DEPOSIT':
            prefix = result['deposit_order_limits'][currency]
            min_limit = prefix['GATEWAY']['P2P']['min_amount']
            max_limit = prefix['GATEWAY']['P2P']['max_amount']
        else:
            # Previously an unknown order_type fell through to an
            # UnboundLocalError on the return below; fail loudly instead.
            raise ValueError(
                "order_type must be one of 'INTERNAL', 'WITHDRAWAL', "
                "'INVOICE', 'DEPOSIT', got {!r}".format(order_type))
        return min_limit, max_limit
def get_exchange_limits(currency_to_get='ETH',
                        currency_to_spend='UAH') -> tuple:
    """
    Shows limits for exchange of chosen currency pair

    :param currency_to_get: currency to buy
    :param currency_to_spend: currency to sell
    :return: (min_limit, max_limit) for "currency_to_spend",
        or ``None`` when the API call fails
    """
    info = pay.get_account_info()
    if info['status'] != 'success':
        return None
    limits = info['exchange_order_limits'][currency_to_get + '_' + currency_to_spend]
    return limits['min_amount'], limits['max_amount']
def get_account_fees(order_type='withdrawal',
                     currency='UAH') -> tuple:
    """
    Shows fees for withdrawal or deposit of chosen currency

    :param order_type: choice from ("withdrawal", "deposit")
    :param currency: currency, for which fees will be shown
    :return: (static_fee, percent_fee), or ``None`` when the API call fails
    """
    info = pay.get_account_info()
    if info['status'] != 'success':
        return None
    if order_type == 'withdrawal':
        fees = info['withdrawal_order_fees'][currency]['GATEWAY']
    elif order_type == 'deposit':
        fees = info['deposit_order_fees'][currency]['GATEWAY']['P2P']
    return fees['static_fee'], fees['percent_fee']
def get_crypto_account_wallet(cryptocurrency='BTC',
                              payment_method: str = None) -> str:
    """
    Shows the user's main wallet's address for chosen cryptocurrency

    :param cryptocurrency: currency, for which wallet will be shown
    :param payment_method: required when several blockchains exist for the
        currency, e.g. 'ERC20', 'TRC20' or 'BEP20' for 'USDT'
    :return: wallet address, or ``None`` on API failure / unknown currency
    """
    response = pay.get_balance()
    if response['status'] != 'success':
        return None
    wallet = response.get('wallets').get(cryptocurrency, {})
    if payment_method:
        wallet = wallet.get(payment_method, {})
    return wallet.get('address')
| StarcoderdataPython |
76809 | <filename>analysis_scripts/check_direction.py<gh_stars>1-10
#! /usr/bin/env python
"""
Calculating the fraction of upgoing events
CAUTION: Assuming chan0 is the uppermost and
chan1 is below chan0
"""
import sys


def main():
    """Read the event file named on the command line and report the fraction
    of upgoing events (chan1 timestamp not later than chan0 timestamp)."""
    # Per-event time difference chan1 - chan0; positive means downgoing.
    directions = []
    # Use a context manager so the file is closed even on error
    # (the original left the handle open).
    with open(sys.argv[1]) as event_file:
        for line in event_file:
            fields = line.split()
            try:
                # Columns 1 and 3 carry the channel values, wrapped in two
                # leading and one trailing character which are stripped off.
                directions.append(float(fields[3][2:-1]) - float(fields[1][2:-1]))
            except (IndexError, ValueError):
                # Skip header / malformed lines instead of aborting
                # (narrowed from the original bare ``except``).
                pass

    up = sum(1 for delta in directions if delta <= 0)
    down = len(directions) - up

    # %-formatting keeps the output identical under both Python 2 and 3
    # (the original used Python-2-only print statements).
    print("Upgoing events: %d" % up)
    print("Downgoing events: %d" % down)
    print("Fraction of upgoing events: %s" % (float(up) / (up + down)))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
3331709 | <reponame>jperras/Flask-ApiExceptions<gh_stars>1-10
"""
Flask-APIExceptions
~~~~~~~~~~~~~~~~~~~
Providing HTTP error responses in the form of Python exceptions that can
be serialized as response objects.
"""
import ast
import os

from setuptools import setup

with open('README.rst') as file:
    LONG_DESCRIPTION = file.read()

MODULE_PATH = os.path.join(os.path.dirname(__file__), 'flask_apiexceptions.py')

# Extract the version tuple from the module source without importing it
# (importing would require Flask to be installed at build time).
version_info = None
with open(MODULE_PATH) as module:
    for line in module:
        if line.startswith('__version_info__'):
            # literal_eval only parses literals, unlike the previous eval()
            # which would execute arbitrary code found in the module.
            version_info = ast.literal_eval(line.split('__version_info__ = ')[-1])
            break

# Fail with a clear message instead of a NameError when the marker is absent.
if version_info is None:
    raise RuntimeError('__version_info__ not found in ' + MODULE_PATH)
__version__ = '.'.join(version_info)

URL_BASE = 'https://github.com/jperras/Flask-ApiExceptions'

setup(
    name='Flask-ApiExceptions',
    version=__version__,
    author='<NAME>',
    author_email='<EMAIL>',
    description='Python exceptions serializable to Flask HTTP responses.',
    url=URL_BASE,
    download_url='{}/archive/{}.tar.gz'.format(URL_BASE, __version__),
    long_description=LONG_DESCRIPTION,
    py_modules=['flask_apiexceptions'],
    license='MIT',
    platforms='any',
    install_requires=['Flask>=0.10'],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    keywords=['flask', 'json', 'exceptions', 'api'],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ]
)
| StarcoderdataPython |
1698006 | from .icp import *
# TODO: move contents from nonconformist.icp here
# -----------------------------------------------------------------------------
# TcpClassifier
# -----------------------------------------------------------------------------
class TcpClassifier(BaseEstimator, ClassifierMixin):
    """Transductive conformal classifier.

    Parameters
    ----------
    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity of
        calibration examples and test patterns. Should implement ``fit(x, y)``
        and ``calc_nc(x, y)``.

    smoothing : boolean
        Decides whether to use stochastic smoothing of p-values.

    Attributes
    ----------
    train_x : numpy array of shape [n_cal_examples, n_features]
        Inputs of training set.

    train_y : numpy array of shape [n_cal_examples]
        Outputs of calibration set.

    nc_function : BaseScorer
        Nonconformity scorer object used to calculate nonconformity scores.

    classes : numpy array of shape [n_classes]
        List of class labels, with indices corresponding to output columns
        of TcpClassifier.predict()

    See also
    --------
    IcpClassifier

    References
    ----------
    .. [1] <NAME>., <NAME>., & <NAME>. (2005). Algorithmic learning
        in a random world. Springer Science & Business Media.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.svm import SVC
    >>> from conformalgnn.base import ClassifierAdapter
    >>> from conformalgnn.cp import TcpClassifier
    >>> from conformalgnn.nc import ClassifierNc, MarginErrFunc
    >>> iris = load_iris()
    >>> idx = np.random.permutation(iris.target.size)
    >>> train = idx[:int(idx.size / 2)]
    >>> test = idx[int(idx.size / 2):]
    >>> model = ClassifierAdapter(SVC(probability=True))
    >>> nc = ClassifierNc(model, MarginErrFunc())
    >>> tcp = TcpClassifier(nc)
    >>> tcp.fit(iris.data[train, :], iris.target[train])
    >>> tcp.predict(iris.data[test, :], significance=0.10)
    ...             # doctest: +SKIP
    array([[ True, False, False],
            [False,  True, False],
            ...,
            [False,  True, False],
            [False,  True, False]], dtype=bool)
    """

    def __init__(self, nc_function, condition=None, smoothing=True):
        self.train_x, self.train_y = None, None
        self.nc_function = nc_function
        super(TcpClassifier, self).__init__()

        # Check if condition-parameter is the default function (i.e.,
        # lambda x: 0). This is so we can safely clone the object without
        # the clone accidentally having self.conditional = True.
        default_condition = lambda x: 0
        is_default = (callable(condition) and
                      (condition.__code__.co_code ==
                       default_condition.__code__.co_code))

        if is_default:
            self.condition = condition
            self.conditional = False
        elif callable(condition):
            self.condition = condition
            self.conditional = True
        else:
            self.condition = lambda x: 0
            self.conditional = False

        self.smoothing = smoothing

        # Inductive CP that predict() refits for every (test point, label).
        self.base_icp = IcpClassifier(
            self.nc_function,
            self.condition,
            self.smoothing
        )

        self.classes = None

    def fit(self, x, y):
        """Store the training examples and the set of class labels.

        A transductive classifier defers all real work to ``predict``,
        which refits the underlying ICP once per (test object, label) pair.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting.

        y : numpy array of shape [n_samples]
            Outputs of examples for fitting.

        Returns
        -------
        self : object
            Returns self, per the scikit-learn estimator convention
            (previously this method returned None).
        """
        self.train_x, self.train_y = x, y
        self.classes = np.unique(y)
        return self

    def predict(self, x, significance=None):
        """Predict the output values for a set of input patterns.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.

        significance : float or None
            Significance level (maximum allowed error rate) of predictions.
            Should be a float between 0 and 1. If ``None``, then the p-values
            are output rather than the predictions.

        Returns
        -------
        p : numpy array of shape [n_samples, n_classes]
            If significance is ``None``, then p contains the p-values for each
            sample-class pair; if significance is a float between 0 and 1, then
            p is a boolean array denoting which labels are included in the
            prediction sets.
        """
        n_test = x.shape[0]
        p = np.zeros((n_test, self.classes.size))
        for i in range(n_test):
            for j, y in enumerate(self.classes):
                # Transduction: augment the training set with the test point
                # tentatively labelled y, then refit and calibrate the ICP.
                train_x = np.vstack([self.train_x, x[i, :]])
                train_y = np.hstack([self.train_y, y])
                self.base_icp.fit(train_x, train_y)
                self.base_icp.calibrate(train_x, train_y)
                ncal_ngt_neq = self.base_icp._get_stats(x[i, :].reshape(1, x.shape[1]))
                ncal = ncal_ngt_neq[:, j, 0]
                ngt = ncal_ngt_neq[:, j, 1]
                neq = ncal_ngt_neq[:, j, 2]
                # Exclude the test point itself from the calibration counts.
                p[i, j] = calc_p(ncal - 1, ngt, neq - 1, self.smoothing)

        if significance is not None:
            return p > significance
        else:
            return p

    def predict_conf(self, x):
        """Predict the output values for a set of input patterns, using
        the confidence-and-credibility output scheme.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of patters for which to predict output values.

        Returns
        -------
        p : numpy array of shape [n_samples, 3]
            p contains three columns: the first column contains the most
            likely class for each test pattern; the second column contains
            the confidence in the predicted class label, and the third column
            contains the credibility of the prediction.
        """
        p = self.predict(x, significance=None)
        label = p.argmax(axis=1)
        credibility = p.max(axis=1)
        for i, idx in enumerate(label):
            # Mask out the winning p-value to find the runner-up.
            p[i, idx] = -np.inf
        confidence = 1 - p.max(axis=1)

        return np.array([label, confidence, credibility]).T
| StarcoderdataPython |
1661049 | <reponame>Zoomdata/err-stackstorm
import setuptools

setuptools.setup(
    name="err-stackstorm",
    version="2.1.4",
    author="Err-StackStorm Plugin contributors",
    author_email="<EMAIL>",
    description="An Errbot plugin for StackStorm ChatOps.",
    long_description="Not available",
    long_description_content_type="text/markdown",
    url="https://github.com/nzlosh/err-stackstorm",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        # Fixed: "License :: OSI Approved :: Apache 2.0 License" is not a
        # valid trove classifier; this is the canonical Apache entry.
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
64951 | <reponame>lycantropos/ground
"""Basis of computational geometry."""
__version__ = '7.1.1'
| StarcoderdataPython |
3372252 | """
Collection of utils for testing tree converters.
"""
# Maps a tree-implementation strategy name to the ``str(type(...))`` of the
# Hummingbird GBDT operator class expected for that strategy.  Used by tests
# to assert that the converter selected the requested implementation.
gbdt_implementation_map = {
    "tree_trav": "<class 'hummingbird.ml.operator_converters._tree_implementations.TreeTraversalGBDTImpl'>",
    "perf_tree_trav": "<class 'hummingbird.ml.operator_converters._tree_implementations.PerfectTreeTraversalGBDTImpl'>",
    "gemm": "<class 'hummingbird.ml.operator_converters._tree_implementations.GEMMGBDTImpl'>",
}

# Same mapping for single decision-tree models.
dt_implementation_map = {
    "tree_trav": "<class 'hummingbird.ml.operator_converters._tree_implementations.TreeTraversalDecisionTreeImpl'>",
    "perf_tree_trav": "<class 'hummingbird.ml.operator_converters._tree_implementations.PerfectTreeTraversalDecisionTreeImpl'>",
    "gemm": "<class 'hummingbird.ml.operator_converters._tree_implementations.GEMMDecisionTreeImpl'>",
}
| StarcoderdataPython |
3344346 | import numpy as np
from scipy.integrate import odeint
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['font.sans-serif'] = "Arial"
matplotlib.rcParams['font.family'] = "sans-serif"
# Solve the ODE of 2-node negative feedback loop model
def ode(y, t, ka1=0.8, Km1=1.0, kd1=0.06, ka2=0.95, Km2=1.0, kd2=0.7):
    """
    Right-hand side of the 2-node negative-feedback-loop model.

    y[0] is node A and y[1] is node B: B represses the production of A
    (Hill-type term with exponent 4) while A activates B; both decay
    linearly.  The rate constants were previously hard-coded; they are now
    keyword parameters with the same default values, so existing calls
    (including scipy.integrate.odeint) behave identically.

    :param y: state vector [A, B]
    :param t: time (unused, but required by odeint's callback signature)
    :param ka1, Km1, kd1: production / half-max / decay constants for A
    :param ka2, Km2, kd2: production / half-max / decay constants for B
    :return: dy/dt as a numpy array with the same shape as y
    """
    dydt = np.zeros(y.shape)
    dydt[0] = ka1 / (y[1] ** 4 + Km1 ** 4) - kd1 * y[0]
    dydt[1] = ka2 * y[1] * y[0] ** 2 / (y[0] ** 2 + Km2 ** 2) - kd2 * y[1]
    return dydt
# Integrate the model for 100 time units from the initial state A = B = 1.
t = np.arange(0, 100, 1)
y0 = np.array([1., 1.])
y = odeint(ode, y0, t)

# Plot both state variables against time.
fig = plt.figure(figsize=(8, 4))
plt.plot(t, y)
plt.xlabel('Time', fontsize=24, labelpad=10)
plt.ylabel('X(t)', fontsize=24, labelpad=10)
plt.legend(["A", "B"], fontsize=24)
plt.tight_layout()
# Save BEFORE show(): once the interactive window is closed, the figure may
# be torn down and a savefig() placed after show() can write an empty image.
plt.savefig("2nnfl-time-series.png", dpi=300)
plt.show()
4816070 | from modules import skeleton
from lib.core import utils
from lib.mode import speed
from lib.sender import execute
from lib.sender import polling
from lib.sender import report
from lib.sender import summary
class LinkFinding(skeleton.Skeleton):
    """Link-finding module: announces itself, prepares the workspace
    output directories, and post-processes the raw tool output files."""

    def banner(self):
        """Print the module banner and create the links output directories."""
        utils.print_banner("Starting Linkfinding")
        workspace = self.options['WORKSPACE']
        for subdir in ('/links', '/links/raw'):
            utils.make_directory(workspace + subdir)

    def clean_waybackurls(self, command):
        """Strip blank lines from the raw waybackurls output file."""
        utils.strip_blank_line(command.get('cleaned_output'),
                               command.get('output_path'))

    def clean_linkfinder(self, command):
        """Merge every raw linkfinder .txt file into one cleaned output."""
        merged_path = command.get('cleaned_output')
        # simple hack here: the raw files live one level up, in raw/.
        per_target_files = utils.list_files(merged_path + '/../raw/', '.txt')
        utils.join_files(per_target_files, merged_path)
        utils.check_output(merged_path)
        # update screenshot in summaries
3338537 | <gh_stars>0
import enum
from datetime import datetime
from app import db, login
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import Enum
@login.user_loader
def load_user(id):
    """Flask-Login callback: resolve a session's stored user id to a User row."""
    return User.query.get(int(id))
# Many-to-many association tables.  They carry no payload columns beyond the
# two foreign keys, so plain ``db.Table`` objects are used instead of models.

# Which study groups are enrolled in which courses.
group_x_course = db.Table('group_x_course',
    db.Column('group_id', db.Integer, db.ForeignKey('group.id'), primary_key=True),
    db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True)
)

# Which teachers run which courses.
teacher_x_course = db.Table('teacher_x_course',
    db.Column('teacher_id', db.Integer, db.ForeignKey('teacher.id'), primary_key=True),
    db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True)
)

# Which students are "monitors" of which courses (presumably course
# assistants — confirm the intended meaning with the course domain).
monitor_x_course = db.Table('monitor_x_course',
    db.Column('monitor_id', db.Integer, db.ForeignKey('student.id'), primary_key=True),
    db.Column('course_id', db.Integer, db.ForeignKey('course.id'), primary_key=True)
)
class role_enum(enum.Enum):
    """User roles; used as the polymorphic discriminator on User.role."""
    admin = "admin"
    teacher = "teacher"
    student = "student"
class User(UserMixin, db.Model):
    """Base account model.

    ``role`` discriminates the Student/Teacher/Admin subclasses below via
    joined-table inheritance (each subclass table is keyed on user.id).
    """
    id = db.Column(db.Integer, primary_key=True)
    # Polymorphic discriminator (see __mapper_args__ below).
    role = db.Column(db.Enum(role_enum))
    # Unique per-user code; presumably used for account activation /
    # registration — confirm against the registration flow.
    verification_code = db.Column(db.String(120), index=True, unique=True)
    last_name = db.Column(db.String(64), index=True)
    first_name = db.Column(db.String(64), index=True)
    middle_name = db.Column(db.String(64), index=True)
    email = db.Column(db.String(120), index=True, unique=True)
    phone_number = db.Column(db.String(12))
    city = db.Column(db.String(64))
    about_me = db.Column(db.String(140))
    # Social network profile links.
    vk_link = db.Column(db.String(64))
    facebook_link = db.Column(db.String(64))
    linkedin_link = db.Column(db.String(64))
    instagram_link = db.Column(db.String(64))
    # Werkzeug password hash; only set via set_password(), never raw.
    password_hash = db.Column(db.String(128))

    def __repr__(self):
        return '<User {}>'.format(str(self.last_name) + ' ' + str(self.first_name) + ' ' + str(self.middle_name) + ' ' +
                                  str(self.verification_code) + ' ' + str(self.email))

    def set_password(self, password):
        """Hash *password* and store the hash on this user."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    __mapper_args__ = {
        'polymorphic_on': role
    }
class degree_enum(enum.Enum):
    """Academic degree a student is pursuing."""
    bachelor = 'bachelor'
    specialist = 'specialist'
    master = 'master'
class form_enum(enum.Enum):
    """Form of study (attendance mode)."""
    fulltime = 'fulltime'
    distance = 'distance'
    evening = 'evening'
class basis_enum(enum.Enum):
    """Funding basis of a student's enrollment."""
    budget = 'budget'
    contract = 'contract'
class Student(User):
    """Student account: extends User with enrollment details
    (joined-table inheritance keyed on user.id)."""
    id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
    # Every student must belong to a study group.
    group_id = db.Column(db.Integer, db.ForeignKey('group.id'), nullable=False)
    # Year the student was admitted.
    year_admission = db.Column(db.Integer)
    degree = db.Column(db.Enum(degree_enum))
    form = db.Column(db.Enum(form_enum))
    basis = db.Column(db.Enum(basis_enum))
    # The monitored-courses relationship is declared from the Course side
    # (Course.monitors, backref 'courses'); this older in-class version is
    # kept commented out.
    # courses = db.relationship(
    #     'Course', secondary=monitor_x_course,
    #     backref=db.backref('monitors', lazy='dynamic'), lazy='dynamic')

    __mapper_args__ = {
        'polymorphic_identity': role_enum.student,
    }

    def __repr__(self):
        return '<Student: {}>'.format(str(self.id) + ' ' + self.verification_code + ' ' + str(self.email))
class Teacher(User):
    """Teacher account (joined-table inheritance keyed on user.id)."""
    id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)
    # The taught-courses relationship is declared from the Course side
    # (Course.teachers, backref 'courses'); older in-class version kept below.
    # courses = db.relationship('Course', secondary=teacher_x_course, lazy='dynamic',
    #                           backref=db.backref('teachers', lazy=True))

    __mapper_args__ = {
        'polymorphic_identity': role_enum.teacher,
    }

    def __repr__(self):
        return '<Teacher: {}>'.format(str(self.id) + ' ' + str(self.verification_code))
class Admin(User):
    """Administrator account; adds no columns beyond the base User."""
    id = db.Column(db.Integer, db.ForeignKey('user.id'), primary_key=True)

    __mapper_args__ = {
        'polymorphic_identity': role_enum.admin,
    }

    def __repr__(self):
        # NOTE(review): no separator between id and role in the output —
        # cosmetic only, but confirm this is intended.
        return '<Administrator: {}>'.format(str(self.id) + str(self.role))
class Group(db.Model):
    """A study group of students within a faculty."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    faculty = db.Column(db.String(64))
    # Presumably the year of study (1st, 2nd, ...), not a Course FK — confirm.
    course_number = db.Column(db.Integer)
    # NOTE(review): backref 'student' creates Student.student pointing back
    # to the group; the name 'group' was probably intended — confirm.
    students = db.relationship('Student', backref='student', lazy=True)
    # The enrolled-courses relationship is declared from the Course side
    # (Course.groups, backref 'courses'); older in-class version kept below.
    # courses = db.relationship('Course', secondary=group_x_course, lazy='dynamic',
    #                           backref=db.backref('groups', lazy=True))

    def __repr__(self):
        return '<Group: {}>'.format(self.id)
class Course(db.Model):
    """A taught course, linked many-to-many to groups, teachers and monitors."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
    description = db.Column(db.String(200))
    # Each relationship also exposes a 'courses' backref on the other model.
    groups = db.relationship('Group', secondary=group_x_course, lazy='dynamic',
                             backref=db.backref('courses', lazy=True))
    teachers = db.relationship('Teacher', secondary=teacher_x_course, lazy='dynamic',
                               backref=db.backref('courses', lazy=True))
    monitors = db.relationship('Student', secondary=monitor_x_course, lazy='dynamic',
                               backref=db.backref('courses', lazy=True))

    def __repr__(self):
        return '<Course: {}>'.format(self.id)
class Materials(db.Model):
    """Learning material attached to a course."""
    id = db.Column(db.Integer, primary_key=True)
    course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
    name = db.Column(db.String(64))
    description = db.Column(db.String(200))
    # Creation timestamp; defaults to the insert time (UTC).
    created_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)

    def __repr__(self):
        return '<Materials: {}>'.format(self.id)
class Homework(db.Model):
    """A homework assignment belonging to a course, with a hand-in window."""
    id = db.Column(db.Integer, primary_key=True)
    course_id = db.Column(db.Integer, db.ForeignKey('course.id'))
    name = db.Column(db.String(64))
    description = db.Column(db.String(200))
    # Hand-in window; both default to the creation time (UTC), so end_date
    # is expected to be set explicitly when the assignment is created.
    start_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    end_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)

    def __repr__(self):
        return '<Homework: {}>'.format(self.id)
class Homework_parcel(db.Model):
    """A student's submission ("parcel") for a homework assignment."""
    # NOTE(review): ``id`` is marked primary_key alongside the two foreign
    # keys below, producing a composite primary key (id, student_id,
    # homework_id).  If a surrogate key alone was intended, the FK columns
    # should not be primary_key — confirm the intent.
    id = db.Column(db.Integer, primary_key=True)
    student_id = db.Column(db.Integer, db.ForeignKey('student.id'), primary_key=True)
    homework_id = db.Column(db.Integer, db.ForeignKey('homework.id'), primary_key=True)
    # Submission timestamp (UTC) and free-text answer.
    send_date = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    text = db.Column(db.String(200))

    def __repr__(self):
        return '<Homework_parcel: {}>'.format(self.id)
| StarcoderdataPython |
3273852 | import os
import pudb
import shutil
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from scipy.io import loadmat, savemat
def patientwise_splitting(train, test, img_list):
    """
    Split image file names into train/test/val sets such that all files of
    one patient end up in the same split.

    @param train: fraction of patients assigned to the training set
    @param test: fraction of the *held-out remainder* assigned to the test set
    @param img_list: file names of the form ``<prefix>_<patient_id>_...``
    @return: (train_files, test_files, val_files)
    """
    patient_ids = [f.split('_')[1] for f in img_list]
    patient_ids = list(set(patient_ids))
    train_ids, test_ids = train_test_split(patient_ids, train_size=train)
    print('Train_ids=')
    print(train_ids)
    # BUG FIX: the second split must be taken from the held-out remainder
    # (test_ids), not from patient_ids again — re-splitting the full id list
    # let the same patient appear in both train and test/val.
    test_ids, val_ids = train_test_split(test_ids, train_size=test)
    print('Test_ids=')
    print(test_ids)
    print('Val_ids=')
    print(val_ids)
    x_train = []
    x_test = []
    x_val = []
    for fname in img_list:
        patient_id = fname.split('_')[1]
        if patient_id in train_ids:
            x_train.append(fname)
        elif patient_id in test_ids:
            x_test.append(fname)
        elif patient_id in val_ids:
            x_val.append(fname)
        else:
            raise ValueError(
                'file [{}] is not in train-test-split'.format(fname))
    return x_train, x_test, x_val
def instructionwise_splitting(split_instructions, img_list):
    """
    Partition *img_list* into train/test/val sets according to the patient
    ids listed in a MATLAB instruction file.

    The .mat file must hold row vectors under the keys ``train_pats``,
    ``test_pats`` and ``valid_pats``; file names must look like
    ``<prefix>_<patient_id>.<ext>``.

    @return: (train_files, test_files, val_files)
    @raise ValueError: when a file's patient id appears in none of the lists
    """
    instructions = loadmat(split_instructions)
    membership = (
        ('train', list(instructions['train_pats'][0])),
        ('test', list(instructions['test_pats'][0])),
        ('val', list(instructions['valid_pats'][0])),
    )
    buckets = {'train': [], 'test': [], 'val': []}
    for fname in img_list:
        patient_id = int(fname.split('_')[1].split('.')[0])
        for split_name, ids in membership:
            if patient_id in ids:
                buckets[split_name].append(fname)
                break
        else:
            raise ValueError(
                'file [{}] is not in train-test-split'.format(fname))
    return buckets['train'], buckets['test'], buckets['val']
def ttsplit_and_copy(aaron_dir,
                     data_dir,
                     train,
                     test,
                     split_by_patient=False,
                     split_instructions=None):
    """
    Split the files in *aaron_dir* into train/test/val sets and copy each
    file into the matching sub-directory of *data_dir*.

    @param train: training fraction for the first split
    @param test: test fraction of the remainder for the second split
    @param split_by_patient: keep all files of one patient in the same split
    @param split_instructions: path to a .mat file with explicit patient ids
        (used only when split_by_patient is False)
    """
    filenames = os.listdir(aaron_dir)
    if split_by_patient:
        x_train, x_test, x_val = patientwise_splitting(train, test, filenames)
    elif split_instructions:
        x_train, x_test, x_val = instructionwise_splitting(
            split_instructions, filenames)
    else:
        # Plain filewise split: train vs rest, then rest into test vs val.
        x_train, x_test = train_test_split(filenames, train_size=train)
        x_test, x_val = train_test_split(x_test, train_size=test)
        print('Train_ids=')
        print(x_train)
        print('Test_ids=')
        print(x_test)
        print('Val_ids=')
        print(x_val)
    # Copy each split into its destination sub-directory.
    for split_files, subdir in ((x_train, 'train'),
                                (x_test, 'test'),
                                (x_val, 'val')):
        for fname in tqdm(split_files):
            shutil.copy(
                os.path.join(aaron_dir, fname),
                os.path.join(data_dir, subdir, fname))
def move_to_cancerGAN(aaron_dir, data_dir, new_dir=None, train=0.6, test=0.5):
    """Renumber Aaron's jpegs into *new_dir* (as 1.jpg, 2.jpg, ...) when one
    is given, then train/test/val-split and copy them into *data_dir*."""
    source_files = os.listdir(aaron_dir)
    if new_dir is not None:
        for index, fname in enumerate(tqdm(source_files), start=1):
            shutil.copy(
                os.path.join(aaron_dir, fname),
                os.path.join(new_dir, '{}.jpg'.format(index)))
        aaron_dir = new_dir
    ttsplit_and_copy(aaron_dir, data_dir, train, test)
def collect_parse_mat_slices(aaron_dir,
                             data_dir,
                             new_dir=None,
                             train=0.6,
                             test=0.5,
                             split_by_patient=False,
                             with_copy=False):
    ''' Takes 2 folders, merges them appropriately, then ttsplit and resave.

    When *aaron_dir* is a (clinical_dir, ct_dir) pair and *new_dir* is
    given, each clinical 'dMs' matrix is merged with the matching CT
    'iMs' matrix into a single .mat file in *new_dir* before splitting.
    '''
    if new_dir is not None and len(aaron_dir) == 2:
        # BUG FIX: this line was `clin_dir == aaron_dir[0]` -- a no-op
        # comparison that left clin_dir undefined and crashed on first use.
        clin_dir = aaron_dir[0]
        ct_dir = aaron_dir[1]
        clin_list = os.listdir(clin_dir)
        for clinFile in tqdm(clin_list):
            if os.path.isfile(os.path.join(ct_dir, clinFile)):
                try:
                    clin = loadmat(os.path.join(clin_dir, clinFile))
                    ct = loadmat(os.path.join(ct_dir, clinFile))
                    mDict = {'dMs': clin['dMs'], 'iMs': ct['iMs']}
                    savemat(os.path.join(new_dir, clinFile), mDict)
                except Exception as exc:
                    # Best-effort merge: report corrupt/incomplete files
                    # instead of silently swallowing every error.
                    print('Failed to merge [{}]: {}'.format(clinFile, exc))
            else:
                print('File [{}] does not exist in CT directory'.format(
                    clinFile))
        aaron_dir = new_dir
    ttsplit_and_copy(aaron_dir, data_dir, train, test, split_by_patient)
def test_corruptions(aaron_dir, new_dir):
    """Copy every file in *aaron_dir* that scipy can load as a .mat
    into *new_dir*, reporting (and skipping) files that fail to parse."""
    img_list = os.listdir(aaron_dir)
    for img_file in tqdm(img_list):
        try:
            # Loading is the corruption check; the value is not needed.
            loadmat(os.path.join(aaron_dir, img_file))
            shutil.copy(
                os.path.join(aaron_dir, img_file),
                os.path.join(new_dir, img_file))
        except Exception as exc:
            # Was a bare `except:` which also swallowed KeyboardInterrupt;
            # report the actual error so bad files can be inspected.
            print('Failed on file [{}]: {}'.format(img_file, exc))
if __name__ == '__main__':
    # Entry point: choose between the 2-D slice pipeline and the 3-D
    # voxel pipeline by flipping `twoDee`.
    twoDee = False
    if twoDee:
        aaron_dir = os.path.join('Aaron', 'MedPhys_Gan_4mm_2D_noCT')
        new_dir = 'merged_2d_noct'
        data_dir = '/home/rm/Python/cancerGAN/cancerGAN/datasets/cancer_noct'
        split_by_patient = True
        # Filter out unreadable .mat files before merging/splitting.
        test_corruptions(aaron_dir, new_dir)
        # NOTE(review): collect_parse_mat_slices only performs its merge
        # step when given a 2-tuple of directories; passing the single
        # `new_dir` string skips the merge branch -- confirm intended.
        collect_parse_mat_slices(new_dir, data_dir, split_by_patient=True)
    else:
        # 3-D
        aaron_dir = os.path.join('Aaron', 'MedPhys_Gan_4mm_3D')
        data_dir = '/home/rm/Python/cancerGAN/cancerGAN/datasets/voxels_128'
        new_dir = 'merged_3d'
        split_instructions = os.path.join('Aaron', 'pat_cats.mat')
        # test_corruptions(aaron_dir, new_dir=new_dir)
        ttsplit_and_copy(
            new_dir,
            data_dir,
            train=0.6,
            test=0.4,
            split_by_patient=False,
            split_instructions=split_instructions)
| StarcoderdataPython |
1749369 | //this will start the dashboard, all interfaces, etc
| StarcoderdataPython |
4824059 | import sys
import os
import numpy as np
import pickle
from nltk.corpus import wordnet as wn
# CLI: <input .npz of synset embeddings> <working dir> <output name>
inpfile=sys.argv[1]
opdir=sys.argv[2]
opname=sys.argv[3]
d = np.load(inpfile)
embeddings = d['embeddings']
synsets = d['synsets']
print ('input', embeddings.shape)
emb_dim = embeddings.shape[1]
zeros = np.zeros(emb_dim)
# Map each synset name to its row index in the embedding matrix.
synset_to_idx = {v:i for i,v in enumerate(synsets)}
o_id_to_o_token = pickle.load(open(os.path.join(opdir, 'o_id_to_o_token.p'), 'rb'))
i_id_to_i_token = pickle.load(open(os.path.join(opdir, 'i_id_to_i_token.p'), 'rb'))
i_id_embedding = pickle.load(open(os.path.join(opdir, 'i_id_embedding_glove.p'), 'rb'))
o_id_remainingWordNet_to_o_token = pickle.load(open(os.path.join(opdir, 'o_id_remainingWordNet_to_o_token.p'), 'rb'))
# Output vocabulary layout: [input tokens | sense tokens | remaining WordNet].
v_s_start = len(i_id_to_i_token)
v_s_length = len(o_id_to_o_token)
v_r_length = len(o_id_remainingWordNet_to_o_token)
output_embeddings = []
# Input-token rows get zero vectors (no synset embedding applies to them).
for i in range(0,v_s_start):
	output_embeddings.append(zeros)
# Sense key -> WordNet synset name -> embedding row.
for i in range(0,v_s_length):
	synset = wn.lemma_from_key(o_id_to_o_token[i+v_s_start]).synset().name()
	output_embeddings.append(embeddings[synset_to_idx[synset]])
for i in range(0,v_r_length):
	synset = wn.lemma_from_key(o_id_remainingWordNet_to_o_token[i+v_s_start+v_s_length]).synset().name()
	output_embeddings.append(embeddings[synset_to_idx[synset]])
output_embeddings = np.stack(output_embeddings, 0)
print ('output', output_embeddings.shape)
np.savez_compressed(os.path.join(opdir, 'o_id_embedding_{}.npz'.format(opname)), embeddings=output_embeddings)
| StarcoderdataPython |
3304759 | <reponame>naegawa/pict_generator
#!/usr/bin/env python
"""Variational auto-encoder for MNIST data.
References
----------
http://edwardlib.org/tutorials/decoder
http://edwardlib.org/tutorials/inference-networks
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import os
import tensorflow as tf
from edward.models import Bernoulli, Normal
from edward.util import Progbar
from keras.layers import *
from keras import backend as K
from observations import mnist
from scipy.misc import imsave
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sys
K.set_learning_phase(0)
# Progress-bar logging toggle: running as `python script.py log`
# DISABLES the per-iteration progress bar.
log_enabled=True
if len(sys.argv)>=2:
	if sys.argv[1]=="log":
		log_enabled=False
		print("aaaa")  # leftover debug print
def plot(samples):
	"""Render up to 16 flattened samples on a tight 4x4 grid.

	Each sample is reshaped to (128, 128, nch) before display; the
	resulting matplotlib Figure is returned for saving by the caller.
	"""
	fig = plt.figure(figsize=(4, 4))
	gs = gridspec.GridSpec(4, 4)
	gs.update(wspace=0.05, hspace=0.05)
	for idx, img in enumerate(samples):
		axis = plt.subplot(gs[idx])
		plt.axis('off')
		axis.set_xticklabels([])
		axis.set_yticklabels([])
		axis.set_aspect('equal')
		plt.imshow(img.reshape(128, 128, nch))
	return fig
def generator(array, batch_size):
	"""Endlessly yield flattened, binarized mini-batches from *array*.

	Pixel intensities are scaled from [0, 255] to [0, 1] and then
	sampled as Bernoulli draws, so each yielded batch is a {0, 1}
	array of shape (batch_size, prod(image_shape)).  Iteration wraps
	around at the end of the first axis.
	"""
	cursor = 0  # position of the next batch within array's first axis
	n_items = array.shape[0]
	while True:
		end = cursor + batch_size
		overshoot = end - n_items
		if overshoot > 0:
			# wrap around: tail of the array followed by its head
			batch = np.concatenate((array[cursor:], array[:overshoot]))
			cursor = overshoot
		else:
			batch = array[cursor:end]
			cursor += batch_size
		normalized = batch.astype(np.float32) / 255.0
		binarized = np.random.binomial(1, normalized)
		yield binarized.reshape((batch_size, -1))
ed.set_seed(42)
# Output directories for sample grids and model checkpoints.
out_dir = "anime/out"
if not os.path.exists(out_dir):
	os.makedirs(out_dir)
out_model = "anime/model"
if not os.path.exists(out_model):
	os.makedirs(out_model)
M = 10  # batch size during training
d = 100  # latent dimension
nch=3  # image channels (RGB)
#data_dir = "anime/data"
# DATA. MNIST batches are fed at training time.
#(x_train, _), (x_test, _) = mnist(data_dir)
# Training images are loaded from a pre-built numpy archive instead.
x_data=np.load("./anime.npy")
x_train=x_data
print(x_train.shape)
x_train_generator = generator(x_train, M)
# MODEL
# Define a subgraph of the full model, corresponding to a minibatch of
# size M.
# Standard-normal prior over the latent code.
z = Normal(loc=tf.zeros([M, d]), scale=tf.ones([M, d]))
# Project the latent sample to a 4x4x128 feature map, then upsample via
# strided transposed convolutions up to a 128x128xnch image.
hidden = Dense(4*4*128, activation=None)(z.value())
hidden=Reshape([4,4,128])(hidden)
act=None
seq=[
	normalization.BatchNormalization(),
	convolutional.Conv2DTranspose(64,(2,2),strides=(1, 1), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(64,(2,2),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(32,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(16,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(8,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(4,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2DTranspose(nch,(1,1),strides=(1, 1), padding='same'),
	#convolutional.UpSampling2D(size=(1, 1)),
	Reshape([128*128*nch])
]
for layer in seq:
	hidden=layer(hidden)
print(hidden)
#quit()
# Pixel-wise Bernoulli likelihood over the flattened image.
x = Bernoulli(logits=hidden)
# INFERENCE
# Define a subgraph of the variational model, corresponding to a
# minibatch of size M.
x_ph = tf.placeholder(tf.int32, [M, 128 * 128*nch])
hidden = tf.reshape((tf.cast(x_ph, tf.float32)),[M,128,128,nch])
act=None
# Mirror of the decoder: strided convolutions down from 128x128 to 4x4.
seq=[
	convolutional.Conv2D(4,(1,1),strides=(1, 1), padding='same',activation=act),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2D(8,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	# 64x64
	convolutional.Conv2D(16,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	# 32x32
	convolutional.Conv2D(32,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	convolutional.Conv2D(64,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	# 8x8
	convolutional.Conv2D(128,(4,4),strides=(2, 2), padding='same',activation=act),
	normalization.BatchNormalization(),
	advanced_activations.LeakyReLU(alpha=0.3),
	# 4x4
	Flatten(),
]
for layer in seq:
	hidden=layer(hidden)
# Gaussian posterior q(z|x); softplus keeps the scale positive and the
# epsilon guards against an exactly-zero scale.
qz = Normal(loc=Dense(d)(hidden),
            scale=Dense(d, activation='softplus')(hidden)+1.0e-6)
# Bind p(x, z) and q(z | x) to the same TensorFlow placeholder for x.
inference = ed.KLqp({z: qz}, data={x: x_ph})
#optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
optimizer = tf.train.AdamOptimizer(0.001)
inference.initialize(optimizer=optimizer)
sess = ed.get_session()
saver = tf.train.Saver()
tf.global_variables_initializer().run()
n_epoch = 1000
n_iter_per_epoch = x_train.shape[0] // M
i=0  # running index used to number the saved sample grids
for epoch in range(1, n_epoch + 1):
	print("Epoch: {0}".format(epoch))
	avg_loss = 0.0
	if(log_enabled):
		pbar = Progbar(n_iter_per_epoch)
	for t in range(1, n_iter_per_epoch + 1):
		if(log_enabled):
			pbar.update(t)
		x_batch = next(x_train_generator)
		info_dict = inference.update(feed_dict={x_ph: x_batch})
		avg_loss += info_dict['loss']/d
	# Print a lower bound to the average marginal likelihood for an
	# image.
	avg_loss = avg_loss / n_iter_per_epoch
	avg_loss = avg_loss / M
	print("-log p(x) <= {:0.3f}".format(avg_loss))
	# Checkpoint after every epoch; abort if training diverged.
	saver.save(sess, out_model+"/model.%05d.ckpt"%(epoch))
	if np.isnan(avg_loss):
		print("[ERR0R]")
		break
	# Draw 16 fresh decoder samples and save them as an image grid.
	idx = np.random.randint(M, size=16)
	samples = x.eval()
	samples = samples[idx, ]
	fig = plot(samples)
	plt.savefig(os.path.join(out_dir, '{}.png').format(
		str(i).zfill(3)), bbox_inches='tight')
	plt.close(fig)
	i+=1
| StarcoderdataPython |
125888 | # Sciprt to calculate user location centroids with parallel processing
import multiprocessing
import psycopg2 # For connecting to PostgreSQL database
import pandas as pd # Data analysis toolkit with flexible data structures
import numpy as np # Fundamental toolkit for scientific computation with N-dimensional array support
from sklearn.mixture import GaussianMixture # Gaussian Mixture Model in scikit-learn
from sqlalchemy import create_engine
# Connect to database and collect business location for all reviews posted by users
conn = psycopg2.connect("dbname='yelp' host='' user='' password=''")
cur = conn.cursor()
cur.execute("select r.user_id, b.latitude, b.longitude, b.city from review as r join business as b on b.business_id = r.business_id order by user_id;")
data = cur.fetchall()
# Save the fetched data into a dataframe
df = pd.DataFrame(data)
df.rename(columns={df.columns[0]: 'user_id', df.columns[1]: 'latitude', df.columns[2]: 'longitude', df.columns[3]: 'city'}, inplace=True)
# Create a list of unique users from the dataframe
users = df.user_id.unique()
#frames = {}
# for user in users:
#     t = df.loc[df['user_id'] == user]
#     frames[user] = t.index
# Accumulator frame for the per-user centroid results (filled in __main__).
col = ['user_id', 'latitude', 'longitude', 'probability']
t = pd.DataFrame(columns=col)
cur.close()
conn.close()
# Function to calculate user location centroids
def user_location(user):
    """Fit a Gaussian mixture to one user's review coordinates and return
    a DataFrame of component centroids (latitude/longitude) tagged with
    the user id and each component's occupancy probability.

    Uses one mixture component per distinct city the user reviewed in.
    NOTE(review): reads the module-level DataFrame ``df``; assumes the
    user has at least one review row -- confirm against the query above.
    """
    columns = ['user_id']
    location = pd.DataFrame(columns=columns)
    #test = df.ix[frames[user]]
    test = df.loc[df['user_id'] == user]
    unique_city = test.city.unique()
    x = test
    # columns 1:3 are latitude and longitude
    r = test.columns[1:3]
    gmix = GaussianMixture(n_components=len(unique_city), covariance_type='full')
    gmix.fit(x[r].values)
    label = gmix.predict(x[r].values)
    # Fraction of the user's reviews assigned to each component.
    a = pd.DataFrame(label)
    b = a[0].groupby(a[0]).count()
    c = pd.DataFrame(b)
    reviews = len(a.index)
    p = c/reviews
    # One output row per mixture component, all tagged with this user id.
    for i in range(0, len(gmix.means_)):
        location.loc[i] = [user]
    columns2 = ['latitude', 'longitude']
    location2 = pd.DataFrame(data=gmix.means_, columns=columns2)
    location2 ['user_id'] = location
    location2 ['probability'] = p
    #loc_temp = loc_temp.append(location2, ignore_index=True)
    #location = location[0:0]
    #del location2
    return location2
if __name__ == '__main__':
    # Run the calculations in parallel
    pool = multiprocessing.Pool(processes=8)
    results = pool.map(user_location, users)
    #loc_temp.head()
    #print(type(results))
    # Serialize the results of GMM calculations into a dataframe
    for result in results:
        t = t.append(result)
    # Save dataframe into a datbase (credentials are placeholders)
    engine = create_engine('postgresql://user:pass)(@server-ip:5432/yelp')
    t.to_sql('user_location_parallel', engine)
| StarcoderdataPython |
3279036 | # Generated by Django 2.1 on 2018-08-26 10:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the `Community` model."""
    dependencies = [
        ('mainapp', '0021_auto_20180826_1541'),
    ]
    operations = [
        migrations.CreateModel(
            name='Community',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=250)),
                ('place', models.CharField(max_length=250)),
                ('rank', models.IntegerField(unique=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| StarcoderdataPython |
1688196 | <reponame>NIVANorge/s-enda-playground
from dataclasses import dataclass
from bindings.csw.cartesian_csref_type import CartesianCsrefType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CartesianCsref(CartesianCsrefType):
    """Binding element mapping the GML ``cartesianCSRef`` element onto
    CartesianCsrefType (xsdata-style generated binding)."""
    class Meta:
        name = "cartesianCSRef"
        namespace = "http://www.opengis.net/gml"
| StarcoderdataPython |
1673956 | from .clipboard import start_import
action_name = 'Clipboard' | StarcoderdataPython |
2903 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""recumpiler
Recompile text to be semi-readable memey garbage.
"""
# Package version as a (major, minor, patch) tuple.
__version__ = (0, 0, 0)
| StarcoderdataPython |
1776734 | <reponame>d--j/salt
'''
Module for configuring DNS Client on Windows systems
'''
def __virtual__():
    '''
    Load if the module win_dns_client is loaded
    '''
    # Only advertise these states when the matching execution module is
    # available (i.e. on minions where win_dns_client loaded).
    return 'win_dns_client' if 'win_dns_client.add_dns' in __salt__ else False
def dns_exists(name, servers=None, interface='Local Area Connection'):
    '''
    Configure the DNS server list in the specified interface

    Example::

        config_dns_servers:
          win_dns_client.dns_exists:
            - servers:
              - 8.8.8.8
              - 192.168.3.11
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Validate syntax: `servers` must be a list of DNS server addresses.
    # isinstance instead of `type(...) != list` so list subclasses work.
    if not isinstance(servers, list):
        ret['result'] = False
        ret['comment'] = 'servers entry is not a list !'
        return ret

    # Do nothing if the interface is already configured as requested.
    configured_list = __salt__['win_dns_client.get_dns_servers'](interface)
    if configured_list == servers:
        ret['comment'] = '{0} are already configured'.format(servers)
        return ret
    ret['changes'] = {'configure servers': servers}

    # Test mode: report the pending change without applying it.
    if __opts__['test']:
        return ret

    # Add the DNS servers in order; server position is 1-based.
    for i, server in enumerate(servers):
        if not __salt__['win_dns_client.add_dns'](server, interface, i + 1):
            ret['comment'] = (
                'Failed to add {0} as DNS server number {1}'
            ).format(server, i + 1)
            ret['result'] = False
            # Record only the servers that were successfully configured
            # before the failure.
            if i > 0:
                ret['changes'] = {'configure servers': servers[:i]}
            else:
                ret['changes'] = {}
            return ret

    return ret
def dns_dhcp(name, interface='Local Area Connection'):
    '''
    Configure the DNS server list from DHCP Server
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Already sourcing DNS from DHCP: nothing to change.
    current_config = __salt__['win_dns_client.get_dns_config'](interface)
    if current_config == 'dhcp':
        ret['comment'] = '{0} already configured with DNS from DHCP'.format(
            interface)
        return ret
    ret['changes'] = {'dns': 'configured from DHCP'}

    # Test mode: report the pending change without applying it.
    if __opts__['test']:
        return ret

    # Apply the change and roll back the reported changes on failure.
    ret['result'] = __salt__['win_dns_client.dns_dhcp'](interface)
    if not ret['result']:
        ret['changes'] = {}
        ret['comment'] = (
            'Could not configure "{0}" DNS servers from DHCP'
        ).format(interface)

    return ret
| StarcoderdataPython |
3373384 | import pandas as pd
# Patterns applied to the 'spectrum' column; each captures one facet of a
# stellar spectral code, e.g. 'G2V' -> class 'G', number '2', type 'V'.
REGEX_SEARCHES = {
    'class_matches': '^([OABFGKM])',
    'type_matches': '^.*([VI])+',
    'number_matches': '^[OABFGKM]([0-9])'
}
# Categories that must all match for a row to be kept.
USED_SEARCHES = ['class', 'type']
def run():
    """Load the raw star catalogue, flag regex matches, filter/extract
    the spectral facets, and write the cleaned table to stars.csv."""
    raw_df = load_csv_data('rawStars.csv')
    raw_df = determine_matches(raw_df)
    df = apply_regex(raw_df)
    df.to_csv('stars.csv')
def load_csv_data(filepath):
    """Read *filepath* as a DataFrame with lower-cased column names."""
    frame = pd.read_csv(filepath)
    frame.columns = [name.lower() for name in frame.columns]
    return frame
def determine_matches(df):
    """Add one boolean column per regex category marking rows whose
    upper-cased 'spectrum' value matches that category's pattern."""
    # Normalise the spectrum column: blanks for missing, upper case.
    df.loc[pd.isnull(df['spectrum']), 'spectrum'] = ''
    df['spectrum'] = df['spectrum'].str.upper()
    for category, regex in REGEX_SEARCHES.items():
        # The boolean test uses the pattern with its capture groups
        # stripped; extraction happens later in apply_regex.
        bare_pattern = regex.replace('(', '').replace(')', '')
        df[category] = df['spectrum'].str.match(bare_pattern)
    return df
def apply_regex(df):
    """Keep only rows matching every used category and extract each
    category's captured value into its own column."""
    filter_columns = ['%s_matches' % name for name in USED_SEARCHES]
    # A row is complete only when every used category matched
    # (boolean multiply acts as a logical AND across the flag columns).
    df['complete'] = True
    for column_name in filter_columns:
        df['complete'] *= df[column_name]
    df = df[df['complete']]
    # Pull out the captured group for each kept category.
    for column_name in USED_SEARCHES:
        regex_string = REGEX_SEARCHES.get('%s_matches' % column_name)
        df[column_name] = df['spectrum'].str.extract(regex_string)
    return df
# Execute the pipeline when the module is run.
run()
| StarcoderdataPython |
1749099 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Lab(pulumi.CustomResource):
    """Pulumi resource wrapping an Azure Dev Test Lab (generated by the
    Pulumi Terraform Bridge -- do not hand-edit the resource wiring)."""
    artifacts_storage_account_id: pulumi.Output[str]
    """
    The ID of the Storage Account used for Artifact Storage.
    """
    default_premium_storage_account_id: pulumi.Output[str]
    """
    The ID of the Default Premium Storage Account for this Dev Test Lab.
    """
    default_storage_account_id: pulumi.Output[str]
    """
    The ID of the Default Storage Account for this Dev Test Lab.
    """
    key_vault_id: pulumi.Output[str]
    """
    The ID of the Key used for this Dev Test Lab.
    """
    location: pulumi.Output[str]
    """
    Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
    """
    name: pulumi.Output[str]
    """
    Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
    """
    premium_data_disk_storage_account_id: pulumi.Output[str]
    """
    The ID of the Storage Account used for Storage of Premium Data Disk.
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
    """
    storage_type: pulumi.Output[str]
    """
    The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
    """
    tags: pulumi.Output[dict]
    """
    A mapping of tags to assign to the resource.
    """
    unique_identifier: pulumi.Output[str]
    """
    The unique immutable identifier of the Dev Test Lab.
    """
    def __init__(__self__, resource_name, opts=None, location=None, name=None, resource_group_name=None, storage_type=None, tags=None, __props__=None, __name__=None, __opts__=None):
        """
        Manages a Dev Test Lab.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/dev_test_lab.html.markdown.
        """
        # Legacy keyword-argument aliases kept for backwards compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['location'] = location
            __props__['name'] = name
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['storage_type'] = storage_type
            __props__['tags'] = tags
            # Output-only properties start unset and are filled by the engine.
            __props__['artifacts_storage_account_id'] = None
            __props__['default_premium_storage_account_id'] = None
            __props__['default_storage_account_id'] = None
            __props__['key_vault_id'] = None
            __props__['premium_data_disk_storage_account_id'] = None
            __props__['unique_identifier'] = None
        super(Lab, __self__).__init__(
            'azure:devtest/lab:Lab',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, artifacts_storage_account_id=None, default_premium_storage_account_id=None, default_storage_account_id=None, key_vault_id=None, location=None, name=None, premium_data_disk_storage_account_id=None, resource_group_name=None, storage_type=None, tags=None, unique_identifier=None):
        """
        Get an existing Lab resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] artifacts_storage_account_id: The ID of the Storage Account used for Artifact Storage.
        :param pulumi.Input[str] default_premium_storage_account_id: The ID of the Default Premium Storage Account for this Dev Test Lab.
        :param pulumi.Input[str] default_storage_account_id: The ID of the Default Storage Account for this Dev Test Lab.
        :param pulumi.Input[str] key_vault_id: The ID of the Key used for this Dev Test Lab.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the Dev Test Lab should exist. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Dev Test Lab. Changing this forces a new resource to be created.
        :param pulumi.Input[str] premium_data_disk_storage_account_id: The ID of the Storage Account used for Storage of Premium Data Disk.
        :param pulumi.Input[str] resource_group_name: The name of the resource group under which the Dev Test Lab resource has to be created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] storage_type: The type of storage used by the Dev Test Lab. Possible values are `Standard` and `Premium`. Defaults to `Premium`. Changing this forces a new resource to be created.
        :param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
        :param pulumi.Input[str] unique_identifier: The unique immutable identifier of the Dev Test Lab.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/dev_test_lab.html.markdown.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["artifacts_storage_account_id"] = artifacts_storage_account_id
        __props__["default_premium_storage_account_id"] = default_premium_storage_account_id
        __props__["default_storage_account_id"] = default_storage_account_id
        __props__["key_vault_id"] = key_vault_id
        __props__["location"] = location
        __props__["name"] = name
        __props__["premium_data_disk_storage_account_id"] = premium_data_disk_storage_account_id
        __props__["resource_group_name"] = resource_group_name
        __props__["storage_type"] = storage_type
        __props__["tags"] = tags
        __props__["unique_identifier"] = unique_identifier
        return Lab(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        # Map provider camelCase property names back to Python snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case property names to provider camelCase.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| StarcoderdataPython |
1738867 | # -*- coding: utf-8 -*-
"""
@author: <NAME>, Ph.D. (2020)
Single-Molecule TIRF Viewer App
"""
from PyQt5.QtWidgets import QApplication, QSizePolicy
from PyQt5 import QtWidgets, QtCore, QtGui
import sys
from collections import OrderedDict
from smtirf import gui
# ==============================================================================
# MAIN APPLICATION
# ==============================================================================
class SMTirfViewerApp(gui.SMTirfMainWindow):
    """Top-level application window: hosts the main toolbar and swaps
    between the trace-viewer and results sub-applications."""
    def __init__(self, **kwargs):
        super().__init__(title="smTIRF Analysis", **kwargs)
        self.setup_toolbar()
        self.switch_app("viewer")

    def setup_toolbar(self):
        # Buttons that switch between the two sub-applications.
        toolbar = self.addToolBar("Main")
        gui.add_toolbar_button(toolbar, "microscope", "Viewer", lambda: self.switch_app("viewer"))
        gui.add_toolbar_button(toolbar, "polyline", "Results", lambda: self.switch_app("results"))
        gui.add_toolbar_button(toolbar, "settings", "Settings", None)
        gui.format_toolbar(toolbar)

    def set_title(self, path):
        # Show the current file path in the title bar; '*' when none.
        if path is None:
            path = "*"
        self.setWindowTitle(f"smTIRF Analysis ({path})")

    def switch_app(self, appType):
        # Detach signal bindings of the outgoing panel, if one exists yet.
        try:
            self.pnl.unbind()
        except AttributeError:
            pass
        if appType == "viewer":
            self.pnl = TraceViewerSubApp(toolbarName="Experiment", parent=self)
            # Re-emit state so the fresh panel reflects the loaded experiment.
            if self.controller.expt is not None:
                self.controller.experimentLoaded.emit(self.controller.expt)
                self.controller.update_index(self.controller.index)
        elif appType == "results":
            self.pnl = ExperimentResultsSubApp(toolbarName="Results", parent=self)
        self.setCentralWidget(self.pnl)
# ==============================================================================
# TRACE VIEWER
# ==============================================================================
class TraceViewerSubApp(gui.SMTirfPanel):
    """Sub-application for browsing, sorting and training individual traces."""
    def setup_toolbar(self):
        # File handling
        gui.add_toolbar_button(self.toolbar, "download", "Import", self.controller.import_experiment_from_pma)
        gui.add_toolbar_button(self.toolbar, "merge", "Merge", self.controller.merge_experiments)
        gui.add_toolbar_button(self.toolbar, "open", "Open", self.controller.open_experiment, shortcut="Ctrl+O")
        gui.add_toolbar_button(self.toolbar, "save", "Save", self.controller.save_experiment, shortcut="Ctrl+S")
        self.toolbar.addSeparator()
        # ======================================================================
        # Processing
        gui.add_toolbar_button(self.toolbar, "ecg", "Baseline", self.controller.detect_baseline)
        gui.add_toolbar_button(self.toolbar, "process", "Train All", self.controller.train_all_traces)
        self.toolbar.addSeparator()
        # ======================================================================
        # Sorting / selection drop-down menus
        actions = OrderedDict([("Index", self.controller.sort_by_index),
                               ("Selected", self.controller.sort_by_selected),
                               ("Cluster", self.controller.sort_by_cluster),
                               ("Correlation", self.controller.sort_by_correlation)])
        gui.add_toolbar_menu(self.toolbar, "sort_alpha", "Sort", actions)
        actions = OrderedDict([("Select All", self.controller.select_all),
                               ("Select None", self.controller.select_none)])
        gui.add_toolbar_menu(self.toolbar, "check_all", "Select", actions)
        self.toolbar.addSeparator()
        # ======================================================================
        # NOTE(review): these menu actions are placeholders (None callbacks).
        actions = OrderedDict([("Reset Offsets", None),
                               ("Reset Limits", None)])
        gui.add_toolbar_menu(self.toolbar, "ruler", "Attributes", actions)
        gui.format_toolbar(self.toolbar)
        self.parent().addToolBar(self.toolbar)
        self.toolbar.addAction(gui.widgets.ToggleSelectionAction(self.toolbar))

    def layout(self):
        # Overall layout: info row on top, trace plot in the middle,
        # navigation bar at the bottom.
        mainBox = QtWidgets.QVBoxLayout()
        hboxTop = QtWidgets.QHBoxLayout()
        hboxTrace = QtWidgets.QHBoxLayout()
        hboxModel = QtWidgets.QHBoxLayout()
        hboxNav = QtWidgets.QHBoxLayout()

        hboxTrace.addWidget(gui.widgets.ExportTraceButton(self.controller))
        hboxTrace.addWidget(gui.widgets.TraceIdLabel(self.controller))
        hboxTrace.addWidget(gui.widgets.CorrelationLabel(self.controller))
        hboxTrace.addItem(QtWidgets.QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))
        hboxTrace.addWidget(gui.widgets.CoordinateLabel(self.controller))
        hboxTrace.setContentsMargins(10,0,10,0)
        grpTrace = QtWidgets.QGroupBox("Trace")
        grpTrace.setLayout(hboxTrace)

        hboxModel.addWidget(gui.widgets.TrainModelButton(self.controller))
        hboxModel.setContentsMargins(0,0,0,0)
        grpModel = QtWidgets.QGroupBox("Model")
        grpModel.setLayout(hboxModel)

        hboxTop.addWidget(grpTrace)
        hboxTop.addWidget(grpModel)
        hboxNav.addWidget(gui.widgets.NavBar(self.controller), stretch=1)
        hboxNav.addWidget(gui.widgets.SelectedItemsCounter(self.controller))

        mainBox.addLayout(hboxTop)
        mainBox.addWidget(gui.plots.TraceViewerPlot(self.controller), stretch=1)
        mainBox.addLayout(hboxNav)
        self.setLayout(mainBox)
# ==============================================================================
# EXPERIMENT RESULTS
# ==============================================================================
class ExperimentResultsSubApp(gui.SMTirfPanel):
    """Sub-application for aggregate results: histograms, TDPs, kinetics."""
    def setup_toolbar(self):
        gui.add_toolbar_button(self.toolbar, "histogram", "State Populations",
                               lambda: self.change_view("splithist"))
        gui.add_toolbar_button(self.toolbar, "tdp", "TDP",
                               lambda: self.change_view("tdp"))
        gui.add_toolbar_button(self.toolbar, "kinetics", "Kinetics",
                               lambda: self.change_view("kinetics"))
        self.toolbar.addSeparator()
        gui.format_toolbar(self.toolbar)
        self.parent().addToolBar(self.toolbar)

    def layout(self):
        # Export buttons on top, result plot below.
        mainBox = QtWidgets.QVBoxLayout()
        grpResults = QtWidgets.QGroupBox("Results")
        hbox = QtWidgets.QHBoxLayout()
        hbox.addWidget(gui.widgets.ExportHistogramButton(self.controller))
        # hbox.addWidget(gui.widgets.SaveHistogramImageButton(self.controller))
        hbox.addWidget(gui.widgets.ExportTdpButton(self.controller))
        # hbox.addWidget(gui.widgets.SaveTdpImageButton(self.controller))
        hbox.addItem(QtWidgets.QSpacerItem(10, 10, QSizePolicy.Expanding, QSizePolicy.Fixed))
        hbox.addWidget(gui.widgets.UpdateResultsButton(self.controller))
        grpResults.setLayout(hbox)
        mainBox.addWidget(grpResults)
        mainBox.addWidget(gui.plots.ResultViewerPlot(self.controller), stretch=1)
        self.setLayout(mainBox)

    def change_view(self, view):
        # Broadcast the selected result view to listening plot widgets.
        self.controller.currentResultViewChanged.emit(view)
# ==============================================================================
if __name__ == "__main__":
    # Reuse an existing QApplication if one is already running (e.g. in an
    # interactive session); otherwise create one.
    app = QApplication(sys.argv) if not QApplication.instance() else QApplication.instance()
    win = SMTirfViewerApp()
    win.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
3251007 | <filename>build/lib/sbmltopyode/python3ClassGenerator.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:50:45 2018
@author: Steve
"""
import re
import numpy as np
import sys
from sbmltopyode.ModelDataClasses import *
def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):
    """
    This function takes model data, either from ParseSBMLFIle() or imported from a .json file,
    and generates a Python file containing a class that implements the SBML model.

    Parameters
    ----------
    modelData : ModelData
        An object containing all of the model components and values.
    outputFilePath : str
        The desired file path of the resulting python file.
    objectName : str
        The name of the class defined in the resulting python file.

    Returns
    -------
    None
    """
    # mathFuncs serves both to restrict math to functions supported by
    # SBML/user defined functions and to map them to their python equivalents.
    np.set_printoptions(threshold=sys.maxsize)
    outputFile = open(outputFilePath, "w")

    parameters = modelData.parameters
    compartments = modelData.compartments
    species = modelData.species
    reactions = modelData.reactions
    functions = modelData.functions

    assignmentRules = modelData.assignmentRules
    rateRules = modelData.rateRules
    initialAssignments = modelData.initialAssignments

    mathFuncs = {'abs' : 'abs',
                 'max' : 'max',
                 'min' : 'min',
                 'pow' : 'pow',
                 'exp' : 'math.exp',
                 'floor' : 'np.floor',
                 'ceiling' : 'math.ceil',
                 'ln' : 'math.log',
                 'log' : 'math.log10',
                 'factorial' : 'math.factorial',
                 'sqrt' : 'math.sqrt',

                 'eq' : 'operator.eq',
                 'neq' : 'operator.ne',
                 'gt' : 'operator.gt',
                 'lt' : 'operator.lt',
                 'geq' : 'operator.ge',
                 'leq' : 'operator.le',

                 'and' : 'operator.and_',
                 'or' : 'operator.or_',
                 # BUG FIX: the operator module exposes 'xor', not 'xor_'; the
                 # trailing underscore exists only for python keywords (and/or/not).
                 'xor' : 'operator.xor',
                 'not' : 'operator.not_',

                 'sin' : 'np.sin',
                 'cos' : 'np.cos',
                 'tan' : 'np.tan',
                 'sec' : '1/np.cos',
                 'csc' : '1/np.sin',
                 'cot' : '1/np.tan',
                 'sinh' : 'np.sinh',
                 'cosh' : 'np.cosh',
                 'tanh' : 'np.tanh',
                 'sech' : '1/np.cosh',
                 'csch' : '1/np.sinh',
                 'coth' : '1/np.tanh',
                 'arcsin' : 'np.arcsin',
                 'arccos' : 'np.arccos',
                 'arctan' : 'np.arctan',
                 'arcsinh' : 'np.arcsinh',
                 'arccosh' : 'np.arccosh',
                 'arctanh' : 'np.arctanh',

                 'true' : 'True',
                 'false' : 'False',
                 'notanumber' : 'np.nan',
                 'pi' : 'np.pi',
                 'infinity' : 'np.inf',
                 'exponentiale' : 'np.e',
                 'piecewise' : 'Piecewise'
                 }

    # Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving
    # the reactions.  Species come first; parameters/compartments driven by rate
    # rules are appended after them so they can be integrated in the same vector.
    reactantCounter = 0
    reactantIndex = {}
    reactionCounter = 0
    reactionIndex = {}

    rateRuleVars = []
    rateParams = 0  # number of parameters/compartments integrated alongside species
    for specie in species:
        reactantIndex[specie] = reactantCounter
        reactantCounter += 1
    for key, rateRule in rateRules.items():
        if rateRule.variable in parameters or rateRule.variable in compartments:
            rateParams += 1
            reactantIndex[rateRule.variable] = reactantCounter
            reactantCounter += 1
            rateRuleVars.append(rateRule.variable)
        elif rateRule.variable in species:
            pass
        else:
            raise Exception("Rate Rule adjusting something other than specie amount, parameter value, or compartment size.")

    stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions), 1)])
    for rxnId in reactions:
        reactionIndex[rxnId] = reactionCounter
        reactionCounter += 1
        reaction = reactions[rxnId]
        for reactant in reaction.reactants:
            if reactant[1] not in reactantIndex:
                reactantIndex[reactant[1]] = reactantCounter
                reactantCounter += 1
            # NOTE(review): isBoundarySpecies appears to be stored as the string
            # "True"/"False" -- confirm against ModelDataClasses.
            if not (species[reactant[1]].isBoundarySpecies == "True"):
                stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]

    # --- emit imports and class skeleton of the generated model file ---
    outputFile.write("from sbmltopyode.SBMLModelClasses import *\n")
    outputFile.write("from scipy.integrate import odeint\n")
    outputFile.write("import numpy as np\n")
    outputFile.write("import operator\n")
    outputFile.write("import math\n\n")

    outputFile.write("class " + objectName +":\n\n")
    outputFile.write("\tdef __init__(self):\n\n")
    outputFile.write("\t\tself.p = {} #Dictionary of model parameters\n")
    for paramId in parameters:
        outputFile.write("\t\tself.p[\'" + paramId + "\'] = Parameter(" + str(parameters[paramId].value)+ ", \'"+ paramId + "\', " + str(parameters[paramId].isConstant) +")\n")

    outputFile.write("\n\t\tself.c = {} #Dictionary of compartments\n")
    for compartmentId in compartments:
        outputFile.write("\t\tself.c[\'" + compartmentId + "\'] = Compartment(" + str(compartments[compartmentId].size) + ", " + str(compartments[compartmentId].dimensionality)+ ", " + str(compartments[compartmentId].isConstant) + ")\n")

    outputFile.write("\n\t\tself.s = {} #Dictionary of chemical species\n")
    for speciesId in species:
        outputFile.write("\t\tspeciesMetadata = SBMLMetadata('" + species[speciesId].name +"')\n")
        outputFile.write("\t\tself.s[\'" + speciesId + "\'] = Species(" + str(species[speciesId].value) + ", '" + species[speciesId].valueType + "', self.c['" + species[speciesId].compartment + "'], " + str(species[speciesId].hasOnlySubstanceUnits) + ", constant = " + str(species[speciesId].isConstant) + ")\n")
        for key, rule in assignmentRules.items():
            if rule.variable == speciesId:
                outputFile.write("\t\tself.s[\'" + speciesId + "\']._modifiedBy = " + rule.Id + "\n")
        for key, rule in rateRules.items():
            if rule.variable == speciesId:
                outputFile.write("\t\tself.s[\'" + speciesId + "\']._modifiedBy = " + rule.Id + "\n")

    outputFile.write("\n\t\tself.r = {} #Dictionary of reactiions\n")
    for reactionId in reactions:
        outputFile.write("\t\tself.r[\'" + reactionId + "\'] = " + reactionId + "(self, SBMLMetadata('" + reactions[reactionId].name + "'))\n")

    outputFile.write("\t\tself.time = 0\n\n")
    outputFile.write("\t\tself.reactionMetadata = {")
    commaFlag = 0
    for reactionId in reactions:
        if commaFlag == 0:
            commaFlag = 1
            outputFile.write("\n\t\t")
        else:
            outputFile.write(",\n\t\t")
        outputFile.write("self.Reaction" + reactionId + ": SBMLMetadata('" + reactions[reactionId].name + "')")
    outputFile.write("\n\t\t}\n")
    outputFile.write('\t\tself.AssignmentRules()\n\n')
    outputFile.write("\n\n")
    outputFile.write("\tdef AssignmentRules(self):\n\n")

    # These functions are defined here because they read variables from this
    # function's namespace; they are not intended to be used elsewhere.
    def ParseLHS(rawLHS):
        """Translate a rule target id into the generated assignment target text."""
        returnLHS = ''
        if rawLHS in parameters:
            returnLHS = "self.p[\'" + rawLHS + "\'].value = "
        elif rawLHS in species:
            if not species[rawLHS].hasOnlySubstanceUnits:
                returnLHS = 'self.s[\'' + rawLHS + '\'].concentration = '
            else:
                returnLHS = 'self.s[\'' + rawLHS + '\'].amount = '
        elif rawLHS in compartments:
            returnLHS = 'self.c[\'' + rawLHS + '\'].size = '
        else:
            raise(Exception("New case: rule LHS not in p: " + rawLHS))

        return returnLHS

    def ParseRHS(rawRHS, extendedParams = [], objectText = "self"):
        """Translate an SBML math string into python code for the generated class.

        objectText is not "self" when parsing reaction math.  The identifiers
        found in the math string are rewritten to the proper member accesses
        (e.g. k_1*C_A -> self.p['k_1'].value*self.s['C_A'].concentration).
        """
        rawRHS = rawRHS.replace("^", "**") #Replaces carrot notation for exponentiation with ** operator

        variables = []
        for match in re.finditer(r'\b[a-zA-Z_]\w*', rawRHS): #look for variable names
            #ToDo: check for function calls
            variables.append([rawRHS[match.start():match.end()], match.span()])

        returnRHS = ''
        oldSpan = None
        if variables != []:
            # Stitch the rewritten identifiers back together with the original
            # operator/whitespace text between the matched spans.
            for variable in variables:
                if oldSpan == None and variable[1][0] != 0:
                    returnRHS += rawRHS[0:variable[1][0]]
                elif oldSpan != None:
                    returnRHS += rawRHS[oldSpan[1]:variable[1][0]]
                oldSpan = variable[1]
                if variable[0] in parameters:
                    returnRHS += objectText + '.p[\'' + variable[0] + '\'].value'
                elif variable[0] in species:
                    # NOTE(review): hasOnlySubstanceUnits is compared to the string
                    # "True" here but treated as a boolean in ParseLHS -- confirm
                    # which representation ModelDataClasses actually stores.
                    if not species[variable[0]].hasOnlySubstanceUnits == "True":
                        returnRHS += objectText + '.s[\'' + variable[0] + '\'].concentration'
                    else:
                        returnRHS += objectText + '.s[\'' + variable[0] + '\'].amount'
                elif variable[0] in compartments:
                    returnRHS += objectText + '.c[\'' + variable[0] + '\'].size'
                elif variable[0] in mathFuncs:
                    returnRHS += mathFuncs[variable[0]]
                elif variable[0] in functions:
                    returnRHS += objectText + '.' + variable[0]
                elif variable[0] in extendedParams:
                    # Reaction-local parameters: plain locals inside the model
                    # class, instance parameters inside generated reaction classes.
                    if objectText == "self":
                        returnRHS += variable[0]
                    else:
                        returnRHS += "self.p[\'" + variable[0] + "\'].value"
                elif variable[0] == "time":
                    returnRHS += objectText + '.time'
                elif variable[0] == "pi":
                    returnRHS += "np.pi"
                else:
                    raise(Exception('New case: unkown RHS variable: ' + variable[0]))
            returnRHS += rawRHS[variable[1][1]:len(rawRHS)]
        else:
            returnRHS = rawRHS

        return returnRHS

    # Collect the variables defined by rules/initial assignments so each rule's
    # dependency list only keeps identifiers that must be emitted first.
    ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]
    for key, assignment in initialAssignments.items():
        ruleDefinedVars.append(assignment.variable)

    for key, rule in assignmentRules.items():
        rule.dependents = []
        for match in re.finditer(r'\b[a-zA-Z_]\w*', rule.math): #look for variable names
            rule.dependents.append(rule.math[match.start():match.end()])
        originalLen = len(rule.dependents)
        for i in range(originalLen):
            if rule.dependents[originalLen - i -1] not in ruleDefinedVars:
                rule.dependents.pop(originalLen- i-1)

    for key, assignment in initialAssignments.items():
        assignment.dependents = []
        for match in re.finditer(r'\b[a-zA-Z_]\w*', assignment.math): #look for variable names
            assignment.dependents.append(assignment.math[match.start():match.end()])
        originalLen = len(assignment.dependents)
        for i in range(originalLen):
            if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :
                assignment.dependents.pop(originalLen- i-1)

    # Topological emission: each pass writes one rule/assignment whose remaining
    # dependency list is empty, then removes that variable from every other
    # dependency list.  dependents == None marks an already-emitted rule.
    while True:
        continueVar = False
        breakVar = True
        varDefinedThisLoop = None
        for key, rule in assignmentRules.items():
            if rule.dependents == []:
                ruleLHS = ParseLHS(rule.variable)
                ruleRHS = ParseRHS(rule.math)
                outputFile.write("\t\t" + ruleLHS + ruleRHS + '\n\n')
                varDefinedThisLoop = rule.variable
                rule.dependents = None
                continueVar = True
                breakVar = False
                break
            elif not rule.dependents == None:
                breakVar = False

        if not continueVar:
            for key, assignment in initialAssignments.items():
                if assignment.dependents == []:
                    assignmentLHS = ParseLHS(assignment.variable)
                    assignmentRHS = ParseRHS(assignment.math)
                    # Initial assignments only apply at t <= 0; temporarily lift
                    # the target's constant flag so the write is allowed.
                    outputFile.write("\t\tif self.time <= 0 :\n")
                    if assignment.variable in parameters:
                        outputFile.write("\t\t\tisConstantValue = self.p['" + assignment.variable + "']._constant\n")
                        outputFile.write("\t\t\tself.p['" + assignment.variable + "']._constant = False\n")
                        outputFile.write("\t\t\t" + assignmentLHS + assignmentRHS + '\n')
                        outputFile.write("\t\t\tself.p['" + assignment.variable + "']._constant = isConstantValue\n\n")
                    elif assignment.variable in species:
                        outputFile.write("\t\t\tisConstantValue = self.s['" + assignment.variable + "']._constant\n")
                        outputFile.write("\t\t\tself.s['" + assignment.variable + "']._constant = False\n")
                        outputFile.write("\t\t\t" + assignmentLHS + assignmentRHS + '\n')
                        outputFile.write("\t\t\tself.s['" + assignment.variable + "']._constant = isConstantValue\n\n")
                    # BUG FIX: was 'in compartment' (undefined name).
                    elif assignment.variable in compartments:
                        outputFile.write("\t\t\tisConstantValue = self.c['" + assignment.variable + "']._constant\n")
                        outputFile.write("\t\t\tself.c['" + assignment.variable + "']._constant = False\n")
                        outputFile.write("\t\t\t" + assignmentLHS + assignmentRHS + '\n')
                        outputFile.write("\t\t\tself.c['" + assignment.variable + "']._constant = isConstantValue\n\n")
                    varDefinedThisLoop = assignment.variable
                    assignment.dependents = None
                    continueVar = True
                    breakVar = False
                    break
                # BUG FIX: was 'rule.dependents', i.e. the leftover variable from
                # the previous loop, so pending initial assignments never kept the
                # while-loop alive (and could raise NameError with no rules).
                elif not assignment.dependents == None:
                    breakVar = False

        for rule in assignmentRules.values():
            if not rule.dependents == None:
                originalLen = len(rule.dependents)
                for i in range(originalLen):
                    if rule.dependents[originalLen - i -1] == varDefinedThisLoop:
                        rule.dependents.pop(originalLen - i -1)
        for assignment in initialAssignments.values():
            if not assignment.dependents == None:
                originalLen = len(assignment.dependents)
                for i in range(originalLen):
                    if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:
                        assignment.dependents.pop(originalLen - i - 1)

        if continueVar:
            continue
        elif breakVar:
            break
        else:
            raise Exception('Algebraic Loop in AssignmentRules')

    outputFile.write("\t\treturn\n\n")

    # User defined functions become methods of the model class.
    for functionId in functions:
        arguments = functions[functionId].arguments
        argumentString = ""
        for i in range(len(arguments)):
            argumentString += arguments[i]
            if i != len(arguments) - 1:
                argumentString += ", "
        outputFile.write("\tdef " + functionId + "(self, " + argumentString + "):\n")
        outputFile.write("\t\treturn " + functions[functionId].mathString.replace("^", "**") + "\n")

    # One Reaction<id> method per reaction, with reaction-local parameters
    # emitted as locals before the rate law.
    for reactionId in reactions:
        outputFile.write("\tdef Reaction" + str(reactionId) + "(self):\n\n")
        rxnParameters = []
        for param in reactions[reactionId].rxnParameters:
            outputFile.write("\t\t" + param[0] + " = " + str(param[1]) + "\n")
            rxnParameters.append(param[0])
        rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)
        outputFile.write('\t\treturn ' + rateLaw + '\n\n')

    rateRuleLHSVars = []
    for key, rateRule in rateRules.items():
        rateRuleLHSVars.append(rateRule.variable)
        outputFile.write("\tdef Rate" + rateRule.variable + "(self):\n\n")
        rateLaw = ParseRHS(rateRule.math)
        outputFile.write('\t\treturn ' + rateLaw + '\n\n')

    # yArray is the comma separated list of state accessors, in reactantIndex
    # order, used to pack/unpack the odeint state vector.
    yArray = ''
    i = 0
    yArrayVars = [0 for x in range(len(species) + rateParams)]
    for variable, index in reactantIndex.items():
        yArrayVars[index] = variable

    for index in range(len(yArrayVars)):
        if index != 0:
            yArray += ', '
        if yArrayVars[index] in species:
            yArray += 'self.s[\'' + yArrayVars[index] + '\'].amount'
            continue
        if yArrayVars[index] in parameters:
            yArray += 'self.p[\'' + yArrayVars[index] + '\'].value'
            continue
        if yArrayVars[index] in compartments:
            # BUG FIX: the opening bracket was missing, generating the invalid
            # expression self.c'id'].size.
            yArray += 'self.c[\'' + yArrayVars[index] + '\'].size'
            continue

    outputFile.write('\tdef _SolveReactions(self, y, t):\n\n')
    outputFile.write('\t\tself.time = t\n')
    outputFile.write('\t\t' + yArray + ' = y\n')
    outputFile.write('\t\tself.AssignmentRules()\n\n')

    rateArray = '[ '
    i = 0
    rateArrayVars = [0 for x in range(len(species) + rateParams)]
    for variable, index in reactantIndex.items():
        if variable in rateRuleLHSVars:
            rateArrayVars[index] = variable

    for variable in rateArrayVars:
        if i != 0:
            rateArray += ', '
        i += 1
        if variable == 0:
            rateArray += '0'
        else:
            rateArray += 'self.Rate' + variable + '()'

    rateArray += ']'
    outputFile.write('\t\trateRuleVector = np.array(' + str(rateArray) + ', dtype = np.float64)\n\n')
    outputFile.write('\t\tstoichiometricMatrix = np.array(' + re.sub('\n,', ',\n\t\t\t\t\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\n\n')
    outputFile.write('\t\treactionVelocities = np.array([')
    reactionElements = ''
    if reactions:
        for reactionId in reactions:
            if reactionElements == '':
                reactionElements += ('self.r[\'' + str(reactionId) + '\']()')
            else:
                reactionElements += (', self.r[\'' + str(reactionId) + '\']()')
    else:
        reactionElements = '0'
    outputFile.write(reactionElements + '], dtype = np.float64)\n\n')
    outputFile.write('\t\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\n\n')
    outputFile.write('\t\treturn rateOfSpeciesChange\n\n')

    outputFile.write('\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\n\n')
    outputFile.write('\t\tfinalTime = self.time + deltaT\n')
    outputFile.write('\t\ty0 = np.array([' + yArray + '], dtype = np.float64)\n')
    outputFile.write('\t\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\n')
    outputFile.write('\t\tself.time = finalTime\n')
    outputFile.write('\t\tself.AssignmentRules()\n')
    outputFile.write('\n')

    # Each reaction also becomes a small callable class holding its local
    # parameters; the model instance is passed in as 'parent'.
    for key in reactions.keys():
        outputFile.write('class ' + key + ':\n\n')
        outputFile.write('\tdef __init__(self, parent, metadata = None):\n\n')
        outputFile.write('\t\tself.parent = parent\n')
        outputFile.write('\t\tself.p = {}\n')
        outputFile.write('\t\tself.metadata = metadata\n\n')
        for param in reactions[key].rxnParameters:
            outputFile.write("\t\tself.p[\'" + param[0] + "\'] = Parameter(" + str(param[1]) + ", '" + param[0] + "')\n")
        outputFile.write('\n\tdef __call__(self):\n')
        rxnParamNames = [param[0] for param in reactions[key].rxnParameters]
        rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, "self.parent")
        outputFile.write('\t\treturn ' + rateLaw + '\n\n')

    for key in functions.keys():
        outputFile.write('class ' + key + ':\n\n')
        outputFile.write('\tdef __init__(self, parent, metadata = None):\n\n')
        outputFile.write('\t\tself.parent = parent\n')
        outputFile.write('\t\tself.metadata = metadata\n\n')
        arguments = functions[key].arguments
        argumentString = ""
        for i in range(len(arguments)):
            argumentString += arguments[i]
            if i != len(arguments) - 1:
                argumentString += ", "
        outputFile.write('\tdef __call__(self, ' + argumentString + '):\n')
        outputFile.write("\t\treturn " + functions[key].mathString.replace("^", "**") + "\n\n")

    outputFile.close()
#GenerateModel("Waugh2006_Diabetic_Wound_Healing_TGF_B_Dynamics.txt") | StarcoderdataPython |
3290357 | <reponame>johanesmikhael/pyinn
from .ncrelu import ncrelu
from .dgmm import dgmm
from .cdgmm import cdgmm
from .im2col import im2col, col2im
from .conv2d_depthwise import conv2d_depthwise
from .modules import Conv2dDepthwise
| StarcoderdataPython |
3277480 | <filename>utils/utils_statistics.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Utils to perform to compute MAR coefficients and perform statistical tests
Written by H.Turbé, March 2022.
"""
import copy
import multiprocessing as mp
import os
import random
import re
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from statsmodels.tsa.stattools import adfuller
from tqdm import tqdm
def autoregressive_model(signal):
    """
    Returns the first order autoregressive model of the signal.

    Fits A in x[t+1] = A @ x[t] by least squares over all consecutive time
    steps of the raw (unscaled) signal.

    Input:
        signal: numpy array - Signal with shape (timestep, nb_channels)
    Output:
        A: numpy array - MAR Coefficients of the model with shape (nb_channels, nb_channels)
        residual: numpy array - Residual between initial and reconstructed signal
                  using MAR coefficients, shape (nb_channels, timestep - 1)
    """
    # NOTE(review): the original code fitted a StandardScaler and then
    # immediately discarded the transform, so the fit always used the raw
    # signal; the dead scaler code has been removed.
    X = signal.T          # (nb_channels, timestep)
    Z = X[:, 0:-1]        # regressors x[t]
    Y = X[:, 1:]          # targets x[t+1]
    # Normal-equation solution; requires Z @ Z.T to be invertible.
    A = Y @ Z.T @ np.linalg.inv(Z @ Z.T)
    residual = Y - A @ Z  # Residuals of the model
    return A, residual
def test_stationarity(signal):
    """Run an augmented Dickey-Fuller test and print whether the signal is stationary."""
    p_value = adfuller(signal)[1]
    # ADF null hypothesis is non-stationarity; reject it at the 5% level.
    if p_value <= 0.05:
        print("Series is stationary")
    else:
        print("Series is not stationary")
def permutation_test(data_1, data_2, n_permutation=1000, name_fig=None):
    """
    Two-sided permutation test on the difference of the means of two samples.

    Input:
        data_1: numpy array (or array-like)
        data_2: numpy array (or array-like)
        n_permutation: int - number of random permutations of the pooled data
        name_fig: unused; kept for backward compatibility with existing callers
    Output:
        p_value: float - fraction of permutations whose absolute difference of
                 half-means is at least the observed one
    """
    # Observed test statistic: absolute difference of the sample means.
    observed = np.abs(np.average(data_1) - np.average(data_2))
    pooled = np.append(data_1, data_2)
    half = int(len(pooled) / 2)

    # Build the null distribution explicitly as a numpy array (the original
    # relied on numpy scalar reflection to compare a python list to a scalar).
    null_distribution = np.empty(n_permutation)
    for i in range(n_permutation):
        random.shuffle(pooled)
        null_distribution[i] = np.abs(
            np.average(pooled[0:half]) - np.average(pooled[half:])
        )

    p_val = np.count_nonzero(null_distribution >= observed) / n_permutation
    return p_val
def permutation_test_matrix(np_coeff1, np_coeff2, n_permutation=1000):
    """
    Element-wise permutation test between two stacks of coefficient matrices.

    Input:
        np_coeff1: numpy array, shape (n_samples_1, rows, cols)
        np_coeff2: numpy array, shape (n_samples_2, rows, cols)
        n_permutation: int
    Output:
        p_value: numpy array of p-values, shape (rows, cols)
    """
    assert (
        np_coeff1.shape[1:] == np_coeff2.shape[1:]
    ), "The two coefficients matrices must have the same nb of coefficients per sample"

    n_rows, n_cols = np_coeff1.shape[1], np_coeff1.shape[2]
    p_val = np.empty(np_coeff1.shape[1:])
    for row in tqdm(
        range(n_rows),
        desc="Computing significance for each index of the matrix",
    ):
        for col in range(n_cols):
            # Compare the two populations of this single coefficient.
            p_val[row, col] = permutation_test(
                np_coeff1[:, row, col], np_coeff2[:, row, col], n_permutation
            )
    return p_val
def matrix_corr_coeff(path_array, path_results):
    """
    Compute and save the MAR coefficient matrices for every sample of an array.

    Input:
        path_array: str - path to a .npy array of signals, expected shape
                    (n_samples, timestep, nb_channels)
        path_results: str - results folder; matrices are saved under
                      <path_results>/coeff_matrices/coeff_<name>.npy
    Output:
        True on completion.
    """
    name_array = os.path.split(path_array)[-1]
    # Extract the part between the first '_' and the '.npy' extension.
    # BUG FIX: the '.' in the lookahead is now escaped so it matches the
    # literal extension rather than any character followed by 'npy'.
    pat = r"(?<=_).+?(?=\.npy)"
    name_save = re.search(pat, name_array).group(0)
    np_signal = np.load(path_array)

    # One (nb_channels x nb_channels) matrix per sample; the channel count is
    # taken from the data instead of being hard-coded to 12.
    nb_channels = np_signal.shape[2]
    np_coeff = np.empty([np_signal.shape[0], nb_channels, nb_channels])
    for idx in tqdm(
        range(np_signal.shape[0]),
        desc=f"Computing MAR coefficients matrix for {name_array} array",
    ):
        try:
            np_coeff[idx, :, :], _ = autoregressive_model(np_signal[idx, :, :])
        # Narrowed from a bare except: degenerate samples (e.g. singular
        # Z @ Z.T) are flagged with NaN and dropped below.
        except Exception:
            np_coeff[idx, :, :] = np.nan

    np_coeff_normal = np_coeff[~np.isnan(np_coeff).any(axis=1).any(axis=1), :, :]

    path_save = os.path.join(path_results, "coeff_matrices")
    if not os.path.exists(path_save):
        os.makedirs(path_save)
    np.save(os.path.join(path_save, f"coeff_{name_save}.npy"), np_coeff_normal)

    return True
| StarcoderdataPython |
1738433 | <gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (c) 2017, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# All rights reserved.
#
# The Astrobee platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import rospy
import rosgraph
import os
import pwd
import time
import getpass
import socket
import thread
from ff_msgs.msg import AckStamped, GuestScienceState, GuestScienceConfig, \
GuestScienceData, AccessControlStateStamped, CommandStamped, CommandArg, \
GuestScienceApk, GuestScienceCommand, AckCompletedStatus, AckStatus
from std_msgs.msg import Header
from os import system, name
class Queue:
    """Minimal FIFO queue backed by a list.

    New items are inserted at the front and popped from the back, so the
    oldest item is dequeued first.  ``items`` is read directly elsewhere in
    this module, so it stays a plain public list attribute.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return not self.items

    def enqueue(self, item):
        self.items.insert(0, item)

    def dequeue(self):
        return self.items.pop()

    def size(self):
        return len(self.items)
# ---- module state shared between the ROS callbacks and the console UI ----
pub = rospy.Publisher('comm/dds/command', CommandStamped, queue_size=10)
base_id = 'LocalParticipant'  # suffix combined with 'count' to build command ids
count = 0                     # monotonically increasing command counter
requesting = False            # True while waiting for the requestControl cookie
state = None                  # last GuestScienceState message received
config = None                 # last GuestScienceConfig message received
current_controller = None     # operator currently controlling the robot
user = None                   # this operator, as user@hostname (set in main)
ack_response = None           # last ack matching the last command sent
data_response = Queue()       # queue of incoming GuestScienceData messages
last_command_id = None        # id of the most recently published command
fault_state = False           # set when a command completes with a non-OK status
apps = None                   # list of guest science apks advertised by the manager
current_app = None            # apk currently selected in the menu
new_ack = False               # set by ack_callback when ack_response is refreshed
# Return codes of execute_action, driving the menu loop in main().
ACTION_CONTINUE = 0
ACTION_GO_BACK = 1
ACTION_EXIT = 2
def get_user():
    """Return the current operator id as ``user@hostname``."""
    # TODO Check portability
    return getpass.getuser() + '@' + socket.gethostname()
def clear():
    """Clear the terminal and re-print the simulator banner."""
    # for windows
    if name == 'nt':
        _ = system('cls')
    # for mac and linux(here, os.name is 'posix')
    else:
        _ = system('clear')
    # Small pause so the redraw is visible before new output appears.
    time.sleep(0.5)
    print "\n ------- Ground Data System Local Simulator -------\n\n"
def request_control():
    """Ask the robot for control; access_control_callback finishes the grab."""
    global requesting
    requesting = True
    send_command('requestControl')
def grab_control(msg):
    """Grab control using the cookie from an access-control state message."""
    global requesting
    if msg.cookie != "":
        arg = CommandArg()
        arg.data_type = 5  # argument carries a string (stored in .s)
        arg.s = msg.cookie
        send_command('grabControl', [arg])
    # Clear the flag even when no cookie was provided, so the request times out
    # cleanly instead of looping forever.
    requesting = False
def send_command(name, args = []):
    """Publish a CommandStamped with the given name/args and remember its id.

    NOTE(review): the mutable default ``args=[]`` is shared between calls; it
    is never mutated here, so it is only a latent hazard.
    """
    global count, last_command_id, new_ack, data_response
    new_ack = False
    #data_response = None
    cmd = CommandStamped()
    cmd.header = Header()
    cmd.header.stamp = rospy.Time.now()
    cmd.header.frame_id = 'world'
    cmd.cmd_name = name
    cmd.args = args
    # Remember the id so ack_callback can match the response to this command.
    last_command_id = str(count) + base_id
    cmd.cmd_id = last_command_id
    cmd.cmd_src = user
    cmd.cmd_origin = 'ground'
    cmd.subsys_name = ''
    pub.publish(cmd)
    count = count + 1
def get_manager_config():
    """Placeholder -- not implemented."""
    pass
def access_control_callback(data):
    """Track the current controller; finish a pending control grab."""
    global current_controller
    if requesting:
        # We just asked for control: the message carries the grab cookie.
        grab_control(data)
    else:
        current_controller = data.controller
def ack_callback(data):
    """Record the ack for the last sent command and flag failed completions."""
    global fault_state, new_ack, ack_response
    # Only acks matching the last command we published are of interest.
    if data.cmd_id == last_command_id:
        if (data.status.status == AckStatus.COMPLETED and
                data.completed_status.status != AckCompletedStatus.OK):
            fault_state = True
        ack_response = data
        new_ack = True
def gs_state_callback(data):
    """Cache the latest Guest Science manager state message."""
    global state
    state = data
def gs_config_callback(data):
    """Cache the latest Guest Science manager config message."""
    global config
    config = data
def gs_data_callback(data):
    """Queue incoming Guest Science data for print_gs_feedback to consume."""
    global data_response
    data_response.enqueue(data)
    #data_response = data
def start_subscribers():
    """Initialize the ROS node and subscribe to the topics this console needs."""
    rospy.init_node('gds_gs_simulator')
    rospy.Subscriber("gs/gs_manager/state", GuestScienceState, gs_state_callback)
    rospy.Subscriber("gs/gs_manager/config", GuestScienceConfig, gs_config_callback)
    rospy.Subscriber("gs/data", GuestScienceData, gs_data_callback)
    rospy.Subscriber("mgt/ack", AckStamped, ack_callback)
    rospy.Subscriber("mgt/access_control/state", AccessControlStateStamped, access_control_callback)
    # Wait for master to register subs and pubs
    rospy.sleep(2.)
def gain_control():
    """Interactively request and grab control of the robot.

    Returns True when this operator ends up as the controller, False on
    fault or timeout (each wait polls at 0.5 s for up to 20 iterations).
    """
    timer = 0
    if current_controller == None:
        print "Astrobee's current controller is undetermined. We cannot proceed"
        return False
    elif current_controller == user:
        print ("Astrobee's controller is: " + current_controller + "\n"
               + "You are the current controller")
        raw_input("Press any key to continue")
        return True
    else:
        print "Astrobee's controller is: " + current_controller + "\n"
        raw_input("Press any key to grab control of the robot")
    # Request and grab control
    print ' > Requesting control'
    request_control()
    # access_control_callback clears 'requesting' once the cookie arrives.
    while requesting and timer < 20:
        time.sleep(0.5)
        timer += 1
    if fault_state:
        print " > Request failed with message: " + ack_response.message
        return False
    elif timer >= 20:
        print ' > Timeout'
        return False
    timer = 0
    print ' > Grabbing control'
    # Wait until the access-control state reports us as the controller.
    while current_controller != user:
        time.sleep(0.5)
        timer += 1
        if fault_state:
            print " > Request failed with message: " + ack_response.message
            return False
        elif timer >= 20:
            print ' > Timeout'
            return False
    return True
def get_apk_info():
    """Wait for Guest Science Manager state/config; True once both arrived and match."""
    # Wait until the GS_manager shows up
    timer = 0
    print (" > Waiting for Guest Science Manager communication."
           " Make sure the app is running in the android device and that you "
           "can ping it from this computer")
    # Poll at 0.5 s for up to 30 iterations (~15 s).
    while (state == None or config == None) and timer < 30:
        time.sleep(0.5)
        timer += 1
    if timer >= 30:
        print ' > Timeout'
        return False
    # State and config describe the same manager snapshot only if serials match.
    if state.serial != config.serial:
        print ' > Guest Science state and config do not match'
        return False
    print ' > Guest Science Manager found!'
    return True
def select_app():
    """Show the apk menu and read a selection.

    Returns the zero-based apk index, -1 for "Exit", or None on invalid input
    (the caller loops until a valid choice is made).
    """
    global apps, current_app
    # Show available apps and states
    apps = config.apks
    print '\nAvailable Guest Science applications in HLP'
    for i, app in enumerate(apps):
        app_state = 'Running' if state.runningApks[i] else 'Stopped'
        print str(i + 1) + ') ' + app.short_name + ' ' + app_state
    print str(len(apps) + 1) + ') ' + 'Exit'
    # Choose an app
    try:
        # NOTE(review): py2 input() evaluates the entry; non-numeric text raises.
        selection = input("\nType the number of app you want to select: ")
    except:
        print ' > Invalid entry'
        time.sleep(1)
        return None
    if selection == len(apps) + 1:
        return -1
    if selection < 1 or selection > len(apps):
        print ' > Invalid entry'
        time.sleep(1)
        return None
    current_app = apps[selection - 1]
    return (selection - 1)
def select_action():
    """Print the per-app action menu and return the raw option letter typed."""
    print ("a) See available commands\n"
           "b) Start application\n"
           "c) Stop application\n"
           "d) Send Custom Guest Science command\n"
           "e) Go back to apps menu\n"
           "f) Exit")
    option = raw_input("\nType an option: ")
    return option
def input_thread(a_list):
    """Block on ENTER, then flag the shared list so the feedback loop stops."""
    raw_input()
    a_list.append(True)
def print_gs_feedback():
    """Print the ack of the last command, then stream app data until ENTER."""
    global data_response
    print 'Waiting for feedback (command execution).\n'
    # Print ACK -- ack_callback sets new_ack when the matching ack arrives.
    while new_ack == False:
        time.sleep(0.5)
    print '> Execution response'
    if (ack_response.status.status == AckStatus.COMPLETED and
            ack_response.completed_status.status == AckCompletedStatus.OK):
        print " Execution was successful!\n"
    else:
        print " Something went wrong\n"
        print ack_response.message
    # Print GS Data
    print 'Waiting for feedback (app response).\n'
    print ("Please note that some apps may send a confirmation when receiving"
           " a new command and then data feedback. Since we cannot know when"
           " the app will send feedback, we will listen until you manually"
           "stop it.\n You can stop listening by pressing ENTER")
    # Variable and thread used to stop the loop
    a_list = []
    thread.start_new_thread(input_thread, (a_list,))
    while not a_list:
        if not data_response.isEmpty():
            # items[size-1] is the oldest entry (enqueue inserts at index 0).
            qsize = data_response.size()
            data = data_response.items[qsize - 1]
            if data.apk_name == current_app.apk_name:
                print ('\n> Data response\n Topic: ' + data.topic +
                       '\n Data: ' + str(data.data))
                # NOTE(review): data from other apks is never dequeued, so a
                # non-matching message at the head busy-waits here -- confirm
                # whether it should be discarded instead.
                data_response.dequeue()
def execute_action(selection):
    """Run one action (list/start/stop/custom command) for the selected apk.

    Returns ACTION_CONTINUE to stay in this app's menu, ACTION_GO_BACK to
    return to the app list, or ACTION_EXIT to quit the program.
    """
    final_act = None
    print '\nYou selected ' + apps[selection].short_name + '. Choose an option\n'
    option = select_action()
    # First command argument is always the target apk name.
    arg = CommandArg()
    arg.data_type = 5
    arg.s = apps[selection].apk_name
    if option == 'a':
        # List commands
        clear()
        print_app_cmd(selection)
        final_act = ACTION_CONTINUE
    elif option == 'b':
        # Start app
        clear()
        send_command('startGuestScience', [arg])
        print_gs_feedback()
        final_act = ACTION_CONTINUE
    elif option == 'c':
        # Stop app
        clear()
        if state.runningApks[selection] == False:
            print '\n > App already stopped'
        else:
            send_command('stopGuestScience', [arg])
            print_gs_feedback()
        final_act = ACTION_CONTINUE
    elif option == 'd':
        # Execute command
        command = None
        # Loop until a valid command number (or the exit entry) is typed.
        while True:
            clear()
            num_cmds = len(apps[selection].commands)
            print_app_cmd(selection)
            print str(num_cmds + 1) + ') Exit program'
            try:
                command = input('\nType the number of the command you wish to send: ')
            except:
                print ' > Invalid entry'
                time.sleep(1)
                continue
            if command == num_cmds + 1:
                return ACTION_EXIT
            if command < 1 or command > len(apps[selection].commands):
                print ' > Invalid entry'
                time.sleep(1)
            else:
                command -= 1
                break
        # Second argument carries the command string itself.
        arg2 = CommandArg()
        arg2.data_type = 5
        arg2.s = apps[selection].commands[command].command
        clear()
        send_command('customGuestScience', [arg, arg2])
        print_gs_feedback()
        final_act = ACTION_CONTINUE
    elif option == 'e':
        # Go back
        final_act = ACTION_GO_BACK
    elif option == 'f':
        # Exit
        final_act = ACTION_EXIT
    else:
        print ' > Invalid entry'
        final_act = ACTION_CONTINUE
    if final_act != ACTION_GO_BACK and final_act != ACTION_EXIT:
        raw_input("\nPress any key to continue")
    return final_act
def print_app_cmd(selection):
    """Print the numbered command list of the selected apk."""
    print '\nAvailable commands'
    for i, cmd in enumerate(apps[selection].commands):
        print str(i + 1) + ') ' + cmd.name + '\n\t' + cmd.command
def is_ros_running():
    """Return True when a ROS master answers, False on connection failure."""
    try:
        rosgraph.Master('/rostopic').getPid()
        return True
    except socket.error:
        return False
def main():
    """Console entry point: connect to ROS, grab control, then run the app menu."""
    global user
    timer = 0
    clear()
    # Check ROS master presence
    print ' > Waiting for ROS communications...\n'
    # Poll once per second; give up after ~30 s.
    while not is_ros_running():
        if timer == 0:
            print ' > Are you running Astrobee Robot Software?\n'
        elif timer == 30:
            print ' > Timeout. Shutting down...'
            time.sleep(1)
            exit()
        timer += 1
        time.sleep(1)
    print ' > ROS Master has been found!\n'
    # Get the user
    user = get_user()
    # Start ROS communications
    start_subscribers()
    # Grab control
    if gain_control():
        print '\nCongrats! You are now the Astrobee controller\n'
    else:
        print '\nUnable to grab control of Astrobee. Shutting down...'
        exit()
    # Get info from Guest Science Manager
    if not get_apk_info():
        print '\nUnable to communicate with the Guest Science Manager. Shutting down...'
        exit()
    time.sleep(3)
    # Main menu loop: pick an app, then run actions until back/exit.
    while True:
        # Clear the screen
        clear()
        selection = None
        return_val = None
        # Select and app
        while selection == None:
            selection = select_app()
        if selection == -1:
            exit()
        clear()
        # Choose an action
        while return_val == None or return_val == ACTION_CONTINUE:
            return_val = execute_action(selection)
            if return_val == ACTION_CONTINUE:
                clear()
        if return_val == ACTION_EXIT:
            break
# Run the interactive console when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1631157 | # Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
import pytest
from f5.bigip import ManagementRoot
from f5.bigip.tm.auth.radius_server import Radius_Server
from f5.sdk_exception import MissingRequiredCreationParameter
@pytest.fixture
def FakeRadiusServer():
    """Build a Radius_Server resource around a mocked container object."""
    mocked_container = mock.MagicMock()
    return Radius_Server(mocked_container)
class TestCreate(object):
    """Creation behaviour of the radius_server endpoint."""

    def test_create_two(self, fakeicontrolsession):
        """Two accesses of the collection attribute yield distinct objects."""
        mgmt = ManagementRoot('localhost', 'admin', 'admin')
        first = mgmt.tm.auth.radius_servers.radius_server
        second = mgmt.tm.auth.radius_servers.radius_server
        assert first is not second

    def test_create_no_args(self, FakeRadiusServer):
        """create() without required parameters must raise."""
        with pytest.raises(MissingRequiredCreationParameter):
            FakeRadiusServer.create()
| StarcoderdataPython |
43796 | <filename>tests/test_automechanic/test_mol_graph.py
""" test the automechanc.mol.graph module
"""
import numpy
from automechanic.mol import graph
# Molecular graphs in the automechanic format: a pair (atom_dct, bond_dct)
#   atom_dct: key -> (atomic symbol, implicit hydrogen count, stereo parity)
#   bond_dct: frozenset({key1, key2}) -> (bond order, stereo parity)
# Naming: _CGR = connectivity graph, _RGR = resonance graph (bond orders set),
# _SGR = stereo graph (parities set), _ICH = corresponding InChI string.
C8H13O_CGR = (
    {0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 1, None),
     3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
     6: ('C', 2, None), 7: ('C', 1, None), 8: ('O', 0, None)},
    {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
     frozenset({2, 4}): (1, None), frozenset({3, 5}): (1, None),
     frozenset({4, 6}): (1, None), frozenset({5, 7}): (1, None),
     frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None)})
# same skeleton with double bonds at {2,4} and {3,5}
C8H13O_RGR = (
    {0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 1, None),
     3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
     6: ('C', 2, None), 7: ('C', 1, None), 8: ('O', 0, None)},
    {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
     frozenset({2, 4}): (2, None), frozenset({3, 5}): (2, None),
     frozenset({4, 6}): (1, None), frozenset({5, 7}): (1, None),
     frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None)})
# same skeleton with stereo parities on atom 7 and bonds {2,4}, {3,5}
C8H13O_SGR = (
    {0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 1, None),
     3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None),
     6: ('C', 2, None), 7: ('C', 1, False), 8: ('O', 0, None)},
    {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
     frozenset({2, 4}): (1, False), frozenset({3, 5}): (1, False),
     frozenset({4, 6}): (1, None), frozenset({5, 7}): (1, None),
     frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None)})
C8H13O_ICH = ('InChI=1S/C8H13O/c1-3-5-7-8(9)6-4-2/h3-6,8H,7H2,1-2H3'
              '/b5-3-,6-4-/t8-/m0/s1')
# CH2F + H2 + H system: implicit- and explicit-hydrogen forms
CH2FH2H_CGR_IMP = (
    {1: ('F', 0, None), 3: ('C', 2, None), 4: ('H', 1, None),
     6: ('H', 0, None)},
    {frozenset({1, 3}): (1, None)})
CH2FH2H_CGR_EXP = (
    {0: ('H', 0, None), 1: ('F', 0, None), 2: ('H', 0, None),
     3: ('C', 0, None), 4: ('H', 0, None), 5: ('H', 0, None),
     6: ('H', 0, None)},
    {frozenset({1, 3}): (1, None), frozenset({2, 3}): (1, None),
     frozenset({0, 3}): (1, None), frozenset({4, 5}): (1, None)})
# 1,2-dichloro-1,2-difluoroethane stereoisomers: MM (chiral) and MP (meso)
C2H2CL2F2_MM_ICH = 'InChI=1S/C2H2Cl2F2/c3-1(5)2(4)6/h1-2H/t1-,2-/m0/s1'
C2H2CL2F2_MM_SGR = (
    {0: ('C', 1, False), 1: ('C', 1, False),
     2: ('F', 0, None), 3: ('Cl', 0, None),
     4: ('F', 0, None), 5: ('Cl', 0, None)},
    {frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
     frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
     frozenset({1, 5}): (1, None)})
C2H2CL2F2_MP_ICH = 'InChI=1S/C2H2Cl2F2/c3-1(5)2(4)6/h1-2H/t1-,2+'
C2H2CL2F2_MP_SGR = (
    {0: ('C', 1, False), 1: ('C', 1, True),
     2: ('F', 0, None), 3: ('Cl', 0, None),
     4: ('F', 0, None), 5: ('Cl', 0, None)},
    {frozenset({0, 1}): (1, None), frozenset({0, 2}): (1, None),
     frozenset({0, 3}): (1, None), frozenset({1, 4}): (1, None),
     frozenset({1, 5}): (1, None)})
# cis/trans (bond-stereo) examples
C2H2F2_P_ICH = 'InChI=1S/C2H2F2/c3-1-2-4/h1-2H/b2-1+'
C4H8O_M_ICH = 'InChI=1S/C4H8O/c1-3-4(2)5/h3,5H,1-2H3/b4-3-'
C2H2F2_P_SGR = ({0: ('C', 0, None), 1: ('C', 0, None), 2: ('F', 0, None),
                 3: ('F', 0, None), 4: ('H', 0, None), 5: ('H', 0, None)},
                {frozenset({0, 1}): (1, True), frozenset({0, 2}): (1, None),
                 frozenset({1, 3}): (1, None), frozenset({0, 4}): (1, None),
                 frozenset({1, 5}): (1, None)})
C4H8O_M_SGR = ({0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 0, None),
                3: ('C', 0, None), 4: ('O', 1, None), 5: ('H', 0, None)},
               {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
                frozenset({2, 3}): (1, False), frozenset({3, 4}): (1, None),
                frozenset({2, 5}): (1, None)})
# test constructors and value getters
def test__from_data():
    """ test graph.from_data

    round-trips connectivity, resonance and stereo graphs through the
    constructor, which also exercises the corresponding accessors
    """
    # connectivity only (atom symbols + bond keys)
    assert graph.from_data(
        graph.atom_symbols(CH2FH2H_CGR_EXP),
        graph.bond_keys(CH2FH2H_CGR_EXP)
    ) == CH2FH2H_CGR_EXP
    # plus implicit hydrogen counts
    assert graph.from_data(
        graph.atom_symbols(C8H13O_CGR),
        graph.bond_keys(C8H13O_CGR),
        atm_imp_hyd_vlc_dct=graph.atom_implicit_hydrogen_valences(C8H13O_CGR)
    ) == C8H13O_CGR
    # plus bond orders (resonance graph)
    assert graph.from_data(
        graph.atom_symbols(C8H13O_RGR),
        graph.bond_keys(C8H13O_RGR),
        atm_imp_hyd_vlc_dct=graph.atom_implicit_hydrogen_valences(C8H13O_CGR),
        bnd_ord_dct=graph.bond_orders(C8H13O_RGR)
    ) == C8H13O_RGR
    # plus atom and bond stereo parities (stereo graph)
    assert graph.from_data(
        graph.atom_symbols(C8H13O_SGR),
        graph.bond_keys(C8H13O_SGR),
        atm_imp_hyd_vlc_dct=graph.atom_implicit_hydrogen_valences(C8H13O_SGR),
        atm_ste_par_dct=graph.atom_stereo_parities(C8H13O_SGR),
        bnd_ste_par_dct=graph.bond_stereo_parities(C8H13O_SGR)
    ) == C8H13O_SGR
def test__atom_stereo_keys():
    """ test graph.atom_stereo_keys

    the stereo graph has exactly one stereo atom, key 7
    """
    ste_atm_keys = graph.atom_stereo_keys(C8H13O_SGR)
    assert ste_atm_keys == (7,)
def test__bond_stereo_keys():
    """ test graph.bond_stereo_keys
    """
    ste_bnd_keys = graph.bond_stereo_keys(C8H13O_SGR)
    assert ste_bnd_keys == (frozenset({2, 4}), frozenset({3, 5}))
# test value setters
def test__set_atom_implicit_hydrogen_valences():
    """ test graph.set_atom_implicit_hydrogen_valences

    only the listed atoms' hydrogen counts change; bonds are untouched
    """
    assert graph.set_atom_implicit_hydrogen_valences(
        CH2FH2H_CGR_IMP, {3: 1, 4: 0, 6: 1}
    ) == ({1: ('F', 0, None), 3: ('C', 1, None), 4: ('H', 0, None),
           6: ('H', 1, None)},
          {frozenset({1, 3}): (1, None)})
def test__set_atom_stereo_parities():
    """ test graph.set_atom_stereo_parities
    """
    # setting atom 7's parity reproduces the stereo graph's parities
    sgr = graph.set_atom_stereo_parities(C8H13O_CGR, {7: False})
    assert (graph.atom_stereo_parities(sgr)
            == graph.atom_stereo_parities(C8H13O_SGR))
def test__set_bond_orders():
    """ test graph.set_bond_orders
    """
    # promoting two bonds to order 2 yields the resonance graph
    rgr = graph.set_bond_orders(
        C8H13O_CGR, {frozenset({2, 4}): 2, frozenset({3, 5}): 2})
    assert rgr == C8H13O_RGR
def test__set_bond_stereo_parities():
    """ test graph.set_bond_stereo_parities

    setting the two double-bond parities reproduces the stereo graph's
    bond parities
    """
    assert graph.bond_stereo_parities(
        graph.set_bond_stereo_parities(
            C8H13O_CGR, {frozenset({2, 4}): False, frozenset({3, 5}): False},
        )
    ) == graph.bond_stereo_parities(C8H13O_SGR)
def test__increment_bond_orders():
    """ test graph.increment_bond_orders
    """
    # incrementing two single bonds by one yields the resonance graph
    rgr = graph.increment_bond_orders(
        C8H13O_CGR, {frozenset({2, 4}): 1, frozenset({3, 5}): 1})
    assert rgr == C8H13O_RGR
# test derived values
def test__is_chiral():
    """ test graph.is_chiral
    """
    # single enantiomers are chiral
    assert graph.is_chiral(C8H13O_SGR) is True
    assert graph.is_chiral(C2H2CL2F2_MM_SGR) is True
    # the meso (+-) diastereomer is achiral
    assert graph.is_chiral(C2H2CL2F2_MP_SGR) is False
def test__maximum_spin_multiplicity():
    """ test graph.maximum_spin_multiplicity

    spot-checks the high-spin multiplicity for an atom and small
    molecules with varying numbers of unpaired electrons
    """
    catm_cgr = ({0: ('C', 0, None)}, {})
    ch0f1_cgr = ({0: ('C', 0, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch1f1_cgr = ({0: ('C', 1, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch2f1_cgr = ({0: ('C', 2, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch2f2_cgr = ({0: ('C', 2, None), 1: ('F', 0, None), 2: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None), frozenset([0, 2]): (1, None)})
    o2_cgr = ({0: ('O', 0, None), 1: ('O', 0, None)},
              {frozenset([0, 1]): (1, None)})
    assert graph.maximum_spin_multiplicity(catm_cgr) == 5
    assert graph.maximum_spin_multiplicity(ch0f1_cgr) == 4
    assert graph.maximum_spin_multiplicity(ch1f1_cgr) == 3
    assert graph.maximum_spin_multiplicity(ch2f1_cgr) == 2
    assert graph.maximum_spin_multiplicity(ch2f2_cgr) == 1
    assert graph.maximum_spin_multiplicity(o2_cgr) == 3
def test__possible_spin_multiplicities():
    """ test graph.possible_spin_multiplicities

    uses the same fixtures as test__maximum_spin_multiplicity; the
    possible multiplicities step down from the maximum in twos
    """
    catm_cgr = ({0: ('C', 0, None)}, {})
    ch0f1_cgr = ({0: ('C', 0, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch1f1_cgr = ({0: ('C', 1, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch2f1_cgr = ({0: ('C', 2, None), 1: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None)})
    ch2f2_cgr = ({0: ('C', 2, None), 1: ('F', 0, None), 2: ('F', 0, None)},
                 {frozenset([0, 1]): (1, None), frozenset([0, 2]): (1, None)})
    o2_cgr = ({0: ('O', 0, None), 1: ('O', 0, None)},
              {frozenset([0, 1]): (1, None)})
    assert graph.possible_spin_multiplicities(catm_cgr) == (1, 3, 5)
    assert graph.possible_spin_multiplicities(ch0f1_cgr) == (2, 4)
    assert graph.possible_spin_multiplicities(ch1f1_cgr) == (1, 3)
    assert graph.possible_spin_multiplicities(ch2f1_cgr) == (2,)
    assert graph.possible_spin_multiplicities(ch2f2_cgr) == (1,)
    assert graph.possible_spin_multiplicities(o2_cgr) == (1, 3)
def test__ring_keys_list():
    """ test graph.ring_keys_list

    fused 5- and 6-membered rings sharing the 1-3 bond
    """
    cgr = ({0: ('C', 1, None), 1: ('C', 0, None), 2: ('C', 0, None),
            3: ('C', 0, None), 4: ('C', 0, None), 5: ('N', 2, None),
            6: ('N', 0, None), 7: ('N', 0, None), 8: ('N', 0, None),
            9: ('N', 1, None), 10: ('O', 1, None)},
           {frozenset({10, 4}): (1, None), frozenset({8, 2}): (1, None),
            frozenset({0, 6}): (1, None), frozenset({9, 3}): (1, None),
            frozenset({1, 2}): (1, None), frozenset({3, 7}): (1, None),
            frozenset({2, 5}): (1, None), frozenset({1, 6}): (1, None),
            frozenset({0, 7}): (1, None), frozenset({9, 4}): (1, None),
            frozenset({1, 3}): (1, None), frozenset({8, 4}): (1, None)})
    assert graph.ring_keys_list(cgr) == ((0, 1, 3, 6, 7), (1, 2, 3, 4, 8, 9))
def test__backbone_keys():
    """ test graph.backbone_keys
    """
    bbn_keys = graph.backbone_keys(CH2FH2H_CGR_EXP)
    assert bbn_keys == (1, 3, 4, 6)
def test__explicit_hydrogen_keys():
    """ test graph.explicit_hydrogen_keys
    """
    exp_hyd_keys = graph.explicit_hydrogen_keys(CH2FH2H_CGR_EXP)
    assert exp_hyd_keys == (0, 2, 5)
def test__atom_nuclear_charges():
    """ test graph.atom_nuclear_charges
    """
    # eight carbons (Z=6) and one oxygen (Z=8)
    ref = {0: 6, 1: 6, 2: 6, 3: 6, 4: 6, 5: 6, 6: 6, 7: 6, 8: 8}
    assert graph.atom_nuclear_charges(C8H13O_CGR) == ref
def test__atom_total_valences():
    """ test graph.atom_total_valences
    """
    ref = {0: 4, 1: 4, 2: 4, 3: 4, 4: 4, 5: 4, 6: 4, 7: 4, 8: 2}
    assert graph.atom_total_valences(C8H13O_CGR) == ref
def test__atom_bond_valences():
    """ test graph.atom_bond_valences
    """
    ref = {0: 4, 1: 4, 2: 3, 3: 3, 4: 3, 5: 3, 6: 4, 7: 4, 8: 1}
    assert graph.atom_bond_valences(C8H13O_CGR) == ref
def test__atom_radical_valences():
    """ test graph.atom_radical_valences
    """
    ref = {0: 0, 1: 0, 2: 1, 3: 1, 4: 1, 5: 1, 6: 0, 7: 0, 8: 1}
    assert graph.atom_radical_valences(C8H13O_CGR) == ref
def test__atom_neighbor_keys():
    """ test graph.atom_neighbor_keys

    each atom maps to the sorted keys of its bonded neighbors
    """
    assert (graph.atom_neighbor_keys(C8H13O_CGR) ==
            {0: (2,), 1: (3,), 2: (0, 4), 3: (1, 5), 4: (2, 6), 5: (3, 7),
             6: (4, 7), 7: (5, 6, 8), 8: (7,)})
def test__atom_explicit_hydrogen_keys():
    """ test graph.atom_explicit_hydrogen_keys
    """
    # only the backbone C (3) and backbone H (4) carry explicit hydrogens
    ref = {0: (), 1: (), 2: (), 3: (0, 2), 4: (5,), 5: (), 6: ()}
    assert graph.atom_explicit_hydrogen_keys(CH2FH2H_CGR_EXP) == ref
def test__atom_bond_keys():
    """ test graph.atom_bond_keys

    each atom maps to the tuple of bond keys it participates in
    """
    assert (graph.atom_bond_keys(C8H13O_CGR) ==
            {0: (frozenset({0, 2}),),
             1: (frozenset({1, 3}),),
             2: (frozenset({0, 2}), frozenset({2, 4})),
             3: (frozenset({1, 3}), frozenset({3, 5})),
             4: (frozenset({2, 4}), frozenset({4, 6})),
             5: (frozenset({3, 5}), frozenset({5, 7})),
             6: (frozenset({4, 6}), frozenset({6, 7})),
             7: (frozenset({5, 7}), frozenset({6, 7}), frozenset({8, 7})),
             8: (frozenset({8, 7}),)})
def test__atom_neighborhoods():
    """ test graph.atom_neighborhoods

    each atom maps to the subgraph of itself, its neighbors and the
    connecting bonds
    """
    assert (graph.atom_neighborhoods(C8H13O_CGR) == {
        0: ({0: ('C', 3, None), 2: ('C', 1, None)},
            {frozenset({0, 2}): (1, None)}),
        1: ({1: ('C', 3, None), 3: ('C', 1, None)},
            {frozenset({1, 3}): (1, None)}),
        2: ({0: ('C', 3, None), 2: ('C', 1, None), 4: ('C', 1, None)},
            {frozenset({0, 2}): (1, None), frozenset({2, 4}): (1, None)}),
        3: ({1: ('C', 3, None), 3: ('C', 1, None), 5: ('C', 1, None)},
            {frozenset({1, 3}): (1, None), frozenset({3, 5}): (1, None)}),
        4: ({2: ('C', 1, None), 4: ('C', 1, None), 6: ('C', 2, None)},
            {frozenset({2, 4}): (1, None), frozenset({4, 6}): (1, None)}),
        5: ({3: ('C', 1, None), 5: ('C', 1, None), 7: ('C', 1, None)},
            {frozenset({3, 5}): (1, None), frozenset({5, 7}): (1, None)}),
        6: ({4: ('C', 1, None), 6: ('C', 2, None), 7: ('C', 1, None)},
            {frozenset({4, 6}): (1, None), frozenset({6, 7}): (1, None)}),
        7: ({8: ('O', 0, None), 5: ('C', 1, None), 6: ('C', 2, None),
             7: ('C', 1, None)},
            {frozenset({5, 7}): (1, None), frozenset({6, 7}): (1, None),
             frozenset({8, 7}): (1, None)}),
        8: ({8: ('O', 0, None), 7: ('C', 1, None)},
            {frozenset({8, 7}): (1, None)})})
def test__atom_inchi_numbers():
    """ test graph.atom_inchi_numbers

    the InChI numbering must invert any relabeling of the graph;
    explicit hydrogens get negative numbers
    """
    cgr = C8H13O_CGR
    natms = len(graph.atoms(cgr))
    for _ in range(10):
        pmt_dct = dict(enumerate(numpy.random.permutation(natms)))
        cgr_pmt = graph.relabel(cgr, pmt_dct)
        inv_pmt_dct = dict(map(reversed, pmt_dct.items()))
        assert graph.atom_inchi_numbers(cgr_pmt) == inv_pmt_dct
    ch_cgr = ({5: ('C', 1, None)}, {})
    assert graph.atom_inchi_numbers(ch_cgr) == {5: 0}
    ch_cgr = ({5: ('C', 0, None), 2: ('H', 0, None)},
              {frozenset({5, 2}): (1, None)})
    assert graph.atom_inchi_numbers(ch_cgr) == {5: 0, 2: -1}
    cf_cgr = ({5: ('C', 0, None), 2: ('F', 0, None)},
              {frozenset({5, 2}): (1, None)})
    assert graph.atom_inchi_numbers(cf_cgr) == {5: 0, 2: 1}
    # bug fix: this case previously repeated the C-F graph verbatim; use Cl
    # so the chloride analogue is actually covered (cf. test__inchi)
    ccl_cgr = ({5: ('C', 0, None), 2: ('Cl', 0, None)},
               {frozenset({5, 2}): (1, None)})
    assert graph.atom_inchi_numbers(ccl_cgr) == {5: 0, 2: 1}
def test__inchi():
    """ test graph.inchi

    covers diatomics, lone atoms, and implicit vs. explicit hydrogens
    (which must produce identical InChIs)
    """
    co_cgr = ({0: ('C', 0, None), 1: ('O', 0, None)},
              {frozenset({0, 1}): (1, None)})
    assert graph.inchi(co_cgr) == 'InChI=1S/CO/c1-2'
    # stereo layers are dropped by the non-stereo writer
    assert graph.inchi(C8H13O_SGR) == (
        'InChI=1S/C8H13O/c1-3-5-7-8(9)6-4-2/h3-6,8H,7H2,1-2H3')
    c_cgr = ({5: ('C', 0, None)}, {})
    assert graph.inchi(c_cgr) == 'InChI=1S/C'
    n_cgr = ({5: ('N', 0, None)}, {})
    assert graph.inchi(n_cgr) == 'InChI=1S/N'
    ch_cgr = ({5: ('C', 1, None)}, {})
    assert graph.inchi(ch_cgr) == 'InChI=1S/CH/h1H'
    # explicit-hydrogen form of the same species gives the same InChI
    ch_cgr = ({5: ('C', 0, None), 2: ('H', 0, None)},
              {frozenset({5, 2}): (1, None)})
    assert graph.inchi(ch_cgr) == 'InChI=1S/CH/h1H'
    cf_cgr = ({5: ('C', 0, None), 2: ('F', 0, None)},
              {frozenset({5, 2}): (1, None)})
    assert graph.inchi(cf_cgr) == 'InChI=1S/CF/c1-2'
    ccl_cgr = ({5: ('C', 0, None), 2: ('Cl', 0, None)},
               {frozenset({5, 2}): (1, None)})
    assert graph.inchi(ccl_cgr) == 'InChI=1S/CCl/c1-2'
    nh_cgr = ({5: ('N', 1, None)}, {})
    assert graph.inchi(nh_cgr) == 'InChI=1S/HN/h1H'
    ch2_cgr = ({5: ('C', 2, None)}, {})
    assert graph.inchi(ch2_cgr) == 'InChI=1S/CH2/h1H2'
def test__stereo_inchi():
    """ test graph.stereo_inchi
    """
    # each stereo graph must map to its reference stereo InChI
    cases = (
        (C8H13O_SGR, C8H13O_ICH),
        (C2H2CL2F2_MM_SGR, C2H2CL2F2_MM_ICH),
        (C2H2CL2F2_MP_SGR, C2H2CL2F2_MP_ICH),
        (C2H2F2_P_SGR, C2H2F2_P_ICH),
        (C4H8O_M_SGR, C4H8O_M_ICH),
    )
    for sgr, ich in cases:
        assert graph.stereo_inchi(sgr) == ich
# test transformations
def test__implicit():
    """ test graph.implicit
    """
    # with and without passing the backbone keys explicitly
    imp_cgr = graph.implicit(CH2FH2H_CGR_EXP)
    assert imp_cgr == CH2FH2H_CGR_IMP
    imp_cgr = graph.implicit(CH2FH2H_CGR_EXP, (1, 3, 4, 6))
    assert imp_cgr == CH2FH2H_CGR_IMP
def test__explicit():
    """ test graph.explicit

    the explicit form is only checked up to isomorphism, since new
    hydrogen keys are generated
    """
    ch2fh2h_cgr_exp = graph.explicit(CH2FH2H_CGR_IMP)
    assert graph.backbone_isomorphic(ch2fh2h_cgr_exp, CH2FH2H_CGR_EXP)
    assert (graph.atom_explicit_hydrogen_keys(ch2fh2h_cgr_exp) ==
            {1: (), 3: (7, 8), 4: (9,), 6: (), 7: (), 8: (), 9: ()})
def test__explicit_stereo_sites():
    """ test graph.explicit_stereo_sites

    a graph without stereo assignments is returned unchanged; stereo
    atoms/bonds get their hydrogens made explicit
    """
    assert graph.explicit_stereo_sites(C8H13O_CGR) == C8H13O_CGR
    assert (graph.explicit_stereo_sites(C8H13O_SGR)
            == ({0: ('C', 3, None), 1: ('C', 3, None), 2: ('C', 0, None),
                 3: ('C', 0, None), 4: ('C', 0, None), 5: ('C', 0, None),
                 6: ('C', 2, None), 7: ('C', 0, False), 8: ('O', 0, None),
                 9: ('H', 0, None), 10: ('H', 0, None), 11: ('H', 0, None),
                 12: ('H', 0, None), 13: ('H', 0, None)},
                {frozenset({0, 2}): (1, None), frozenset({1, 3}): (1, None),
                 frozenset({2, 4}): (1, False), frozenset({3, 5}): (1, False),
                 frozenset({4, 6}): (1, None), frozenset({5, 7}): (1, None),
                 frozenset({6, 7}): (1, None), frozenset({8, 7}): (1, None),
                 frozenset({9, 7}): (1, None), frozenset({2, 10}): (1, None),
                 frozenset({3, 11}): (1, None), frozenset({4, 12}): (1, None),
                 frozenset({5, 13}): (1, None)}))
def test__delete_atoms():
    """ test graph.delete_atoms

    deleting the explicit hydrogens leaves the backbone with their
    bonds removed (implicit counts are NOT incremented)
    """
    assert (graph.delete_atoms(CH2FH2H_CGR_EXP, (0, 2, 5)) ==
            ({1: ('F', 0, None), 3: ('C', 0, None), 4: ('H', 0, None),
              6: ('H', 0, None)},
             {frozenset({1, 3}): (1, None)}))
def test__add_explicit_hydrogens():
    """ test graph.add_explicit_hydrogens

    new hydrogen atoms get fresh keys (7, 8, 9) bonded to the
    requested backbone atoms
    """
    assert graph.add_explicit_hydrogens(
        CH2FH2H_CGR_IMP, {3: 2, 4: 1}
    ) == ({1: ('F', 0, None), 3: ('C', 2, None), 4: ('H', 1, None),
           6: ('H', 0, None), 7: ('H', 0, None), 8: ('H', 0, None),
           9: ('H', 0, None)},
          {frozenset({1, 3}): (1, None), frozenset({3, 7}): (1, None),
           frozenset({8, 3}): (1, None), frozenset({9, 4}): (1, None)})
def test__subgraph():
    """ test graph.subgraph

    keeps the given atoms and only the bonds among them
    """
    assert (graph.subgraph(CH2FH2H_CGR_EXP, (1, 3, 4, 6)) ==
            ({1: ('F', 0, None), 3: ('C', 0, None), 4: ('H', 0, None),
              6: ('H', 0, None)},
             {frozenset({1, 3}): (1, None)}))
def test__subgraph_by_bonds():
    """ test graph.subgraph_by_bonds

    keeps the given bonds and the atoms they touch
    """
    assert (graph.subgraph_by_bonds(C8H13O_CGR,
                                    {frozenset({1, 3}), frozenset({3, 5}),
                                     frozenset({5, 7}), frozenset({8, 7})}) ==
            ({1: ('C', 3, None), 3: ('C', 1, None), 5: ('C', 1, None),
              7: ('C', 1, None), 8: ('O', 0, None)},
             {frozenset({1, 3}): (1, None), frozenset({3, 5}): (1, None),
              frozenset({5, 7}): (1, None), frozenset({8, 7}): (1, None)}))
def test__relabel():
    """ test graph.relabel

    renames atom keys via the mapping, updating bond keys to match
    """
    assert graph.relabel(
        CH2FH2H_CGR_IMP, {1: 0, 3: 1, 4: 2, 6: 3}
    ) == ({0: ('F', 0, None), 1: ('C', 2, None), 2: ('H', 1, None),
           3: ('H', 0, None)},
          {frozenset({0, 1}): (1, None)})
def test__subresonances():
    """ test graph.subresonances

    enumerates all bond-order assignments consistent with the radical
    valences (C2 up to a quadruple bond; cyclopropenyl radical placements)
    """
    c2_cgr = ({0: ('C', 0, None), 1: ('C', 0, None)},
              {frozenset({0, 1}): (1, None)})
    assert graph.subresonances(c2_cgr) == (
        ({0: ('C', 0, None), 1: ('C', 0, None)},
         {frozenset({0, 1}): (1, None)}),
        ({0: ('C', 0, None), 1: ('C', 0, None)},
         {frozenset({0, 1}): (2, None)}),
        ({0: ('C', 0, None), 1: ('C', 0, None)},
         {frozenset({0, 1}): (3, None)}),
        ({0: ('C', 0, None), 1: ('C', 0, None)},
         {frozenset({0, 1}): (4, None)}),
    )
    c3h3_cgr = ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
                {frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
                 frozenset({2, 0}): (1, None)})
    # the double bond can sit on any of the three ring bonds (or none)
    assert graph.subresonances(c3h3_cgr) == (
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
         {frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
          frozenset({0, 2}): (1, None)}),
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
         {frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
          frozenset({0, 2}): (1, None)}),
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
         {frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
          frozenset({0, 2}): (2, None)}),
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None)},
         {frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
          frozenset({0, 2}): (1, None)}),
    )
def test__lowspin_resonance():
    """ test graph.lowspin_resonance

    benzene must resolve to one of its two Kekule structures
    """
    c6h6_cgr = ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
                 3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
                {frozenset({0, 1}): (1, None), frozenset({1, 2}): (1, None),
                 frozenset({2, 3}): (1, None), frozenset({3, 4}): (1, None),
                 frozenset({4, 5}): (1, None), frozenset({5, 0}): (1, None)})
    assert graph.lowspin_resonance(c6h6_cgr) in [
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
          3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
         {frozenset({0, 1}): (2, None), frozenset({1, 2}): (1, None),
          frozenset({2, 3}): (2, None), frozenset({3, 4}): (1, None),
          frozenset({4, 5}): (2, None), frozenset({5, 0}): (1, None)}),
        ({0: ('C', 1, None), 1: ('C', 1, None), 2: ('C', 1, None),
          3: ('C', 1, None), 4: ('C', 1, None), 5: ('C', 1, None)},
         {frozenset({0, 1}): (1, None), frozenset({1, 2}): (2, None),
          frozenset({2, 3}): (1, None), frozenset({3, 4}): (2, None),
          frozenset({4, 5}): (1, None), frozenset({5, 0}): (2, None)})
    ]
def test__reflection():
    """ test graph.reflection
    """
    # reflecting the molecule flips the parity of the single stereo atom
    ref_sgr = graph.set_atom_stereo_parities(C8H13O_SGR, {7: True})
    assert graph.reflection(C8H13O_SGR) == ref_sgr
# test comparisons
def test__backbone_isomorphic():
    """ test graph.backbone_isomorphic
    """
    # implicit/explicit hydrogen forms of the same species match
    assert graph.backbone_isomorphic(CH2FH2H_CGR_EXP, CH2FH2H_CGR_IMP)
    # any relabeling of a graph is isomorphic to the original
    cgr = C8H13O_CGR
    natms = len(graph.atoms(cgr))
    for _ in range(10):
        perm = dict(enumerate(numpy.random.permutation(natms)))
        relabeled = graph.relabel(cgr, perm)
        assert graph.backbone_isomorphic(cgr, relabeled)
def test__backbone_isomorphism():
    """ test graph.backbone_isomorphism
    """
    assert (graph.backbone_isomorphism(CH2FH2H_CGR_EXP, CH2FH2H_CGR_IMP) ==
            {1: 1, 3: 3, 4: 4, 6: 6})
    # the isomorphism onto a relabeled copy recovers the permutation
    cgr = C8H13O_CGR
    natms = len(graph.atoms(cgr))
    for _ in range(10):
        perm = dict(enumerate(numpy.random.permutation(natms)))
        relabeled = graph.relabel(cgr, perm)
        assert graph.backbone_isomorphism(cgr, relabeled) == perm
if __name__ == '__main__':
    # Discover and run every test in definition order instead of keeping a
    # manually-maintained call list that can fall out of sync when tests
    # are added or removed.
    for _name, _func in list(globals().items()):
        if _name.startswith('test__') and callable(_func):
            _func()
| StarcoderdataPython |
3228490 | <gh_stars>0
from importlib import import_module
import KratosMultiphysics as Kratos
import KratosMultiphysics.FluidDynamicsApplication as KratosCFD
import KratosMultiphysics.RANSApplication as KratosRANS
from KratosMultiphysics import IsDistributedRun
from KratosMultiphysics import VariableUtils
from KratosMultiphysics.kratos_utilities import CheckIfApplicationsAvailable
from KratosMultiphysics.RANSApplication import RansVariableUtilities
# Bind the serial or Trilinos (MPI) builder-and-solver names up front so the
# rest of the module can reference one consistent set of classes.
if (IsDistributedRun() and CheckIfApplicationsAvailable("TrilinosApplication")):
    from KratosMultiphysics.TrilinosApplication import TrilinosBlockBuilderAndSolverPeriodic
    from KratosMultiphysics.TrilinosApplication import TrilinosBlockBuilderAndSolver
elif (not IsDistributedRun()):
    from KratosMultiphysics import ResidualBasedBlockBuilderAndSolver
    from KratosMultiphysics.FluidDynamicsApplication import ResidualBasedBlockBuilderAndSolverPeriodic
else:
    # distributed runs are only supported through Trilinos
    raise Exception("Distributed run requires TrilinosApplication")
def GetKratosObjectPrototype(type_name):
    """Resolve *type_name* to the serial or MPI (Trilinos) Kratos class.

    Args:
        type_name (str): one of the keys of the lookup table below.

    Returns:
        The class/factory imported from the module matching the current
        run mode (serial vs. ``IsDistributedRun()``).

    Raises:
        Exception: if *type_name* is not a known key.
    """
    # Each entry maps a logical name to [serial path, MPI path]; the pair is
    # indexed with the boolean returned by IsDistributedRun().
    type_dict = {
        "LinearSolverFactory": [
            "KratosMultiphysics.python_linear_solver_factory.ConstructSolver",
            "KratosMultiphysics.TrilinosApplication.trilinos_linear_solver_factory.ConstructSolver"
        ],
        "ResidualBasedNewtonRaphsonStrategy": [
            "KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy",
            "KratosMultiphysics.TrilinosApplication.TrilinosNewtonRaphsonStrategy"
        ],
        "MixedGenericCriteria": [
            "KratosMultiphysics.MixedGenericCriteria",
            "KratosMultiphysics.TrilinosApplication.TrilinosMixedGenericCriteria"
        ],
        "ResidualBasedIncrementalUpdateStaticScheme": [
            "KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme",
            "KratosMultiphysics.TrilinosApplication.TrilinosResidualBasedIncrementalUpdateStaticScheme"
        ],
        "SteadyScalarScheme": [
            "KratosMultiphysics.RANSApplication.SteadyScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPISteadyScalarScheme"
        ],
        "AlgebraicFluxCorrectedSteadyScalarScheme": [
            "KratosMultiphysics.RANSApplication.AlgebraicFluxCorrectedSteadyScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPIAlgebraicFluxCorrectedSteadyScalarScheme"
        ],
        "BossakRelaxationScalarScheme": [
            "KratosMultiphysics.RANSApplication.BossakRelaxationScalarScheme",
            "KratosMultiphysics.RANSApplication.TrilinosExtension.MPIBossakRelaxationScalarScheme"
        ],
        "ResidualBasedSimpleSteadyScheme": [
            "KratosMultiphysics.FluidDynamicsApplication.ResidualBasedSimpleSteadyScheme",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosResidualBasedSimpleSteadyScheme"
        ],
        "ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent":[
            "KratosMultiphysics.FluidDynamicsApplication.ResidualBasedPredictorCorrectorVelocityBossakSchemeTurbulent",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosPredictorCorrectorVelocityBossakSchemeTurbulent"
        ],
        "FractionalStepSettingsPeriodic":[
            "KratosMultiphysics.FluidDynamicsApplication.FractionalStepSettingsPeriodic",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosFractionalStepSettingsPeriodic"
        ],
        "FractionalStepSettings":[
            "KratosMultiphysics.FluidDynamicsApplication.FractionalStepSettings",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosFractionalStepSettings"
        ],
        "FractionalStepStrategy":[
            "KratosMultiphysics.FluidDynamicsApplication.FractionalStepStrategy",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosFractionalStepStrategy"
        ],
        "StrategyLabel":[
            "KratosMultiphysics.FluidDynamicsApplication.StrategyLabel",
            "KratosMultiphysics.FluidDynamicsApplication.TrilinosExtension.TrilinosStrategyLabel"
        ]
    }
    # idiom: dict membership needs no .keys()
    if (type_name not in type_dict):
        raise Exception(type_name + " not found in type_dict. Followings are allowed type_names:\n\t" + "\n\t".join(sorted(type_dict.keys())))
    module_info = type_dict[type_name][IsDistributedRun()]
    # split "package.module.Attribute" at the last dot
    module_name, _, attribute_name = module_info.rpartition(".")
    module = import_module(module_name)
    return getattr(module, attribute_name)
def CreateDuplicateModelPart(
    origin_modelpart,
    destination_modelpart_name,
    element_name,
    condition_name):
    """Create (once) and return a model part mirroring *origin_modelpart*'s
    connectivity, with its elements/conditions replaced by the named types.

    If the destination already exists in the model it is returned as-is.
    """
    model = origin_modelpart.GetModel()
    connectivity_preserve_modeler = Kratos.ConnectivityPreserveModeler()
    if not model.HasModelPart(destination_modelpart_name):
        model_part = model.CreateModelPart(destination_modelpart_name)
        # TODO: Remove this line once the warnings from connectivity preserve modeller is gone, otherwise,
        #       output will be cluttered with lots of missing variable warnings
        RansVariableUtilities.CopyNodalSolutionStepVariablesList(
            origin_modelpart, model_part)
        # TODO: [PeriodicCondition]
        #       Currently, all the conditions will be replaced with the given new condition. This is an issue
        #       in the case of periodic cases in mpi, there we have to put PeriodicConditions in the mdpa file,
        #       where MetisParitioner will use that condition list to properly partition it. Therefore, "original_condition_name"
        #       is not used in this method at the moment.
        #       Following is one of the proposals to make PeriodicConditions to work with connectivity_preserve_modeller.
        # connectivity_preserve_modeler.GenerateModelPart(
        #     origin_modelpart, model_part, element_name, condition_name,
        #     original_condition_name + str(domain_size) + "D" + str(domain_size)
        #     + "N")
        if (condition_name != ""):
            connectivity_preserve_modeler.GenerateModelPart(
                origin_modelpart, model_part, element_name, condition_name)
        else:
            # no condition replacement requested; copy elements only
            connectivity_preserve_modeler.GenerateModelPart(
                origin_modelpart, model_part, element_name)
    Kratos.Logger.PrintInfo("RANSModelPartFactory",
                            "Created " + destination_modelpart_name)
    return model.GetModelPart(destination_modelpart_name)
def CreateRansFormulationModelPart(
    original_model_part,
    model_part_name_suffix,
    domain_size,
    element_name,
    condition_name = ""):
    """Duplicate *original_model_part* under a name derived from the element
    (and optional condition) base names plus their <dim>D<nodes>N suffixes."""
    full_element_name = "{0}{1}D{2}N".format(
        element_name, domain_size, domain_size + 1)
    part_name = model_part_name_suffix + "_" + full_element_name
    full_condition_name = condition_name
    if condition_name != "":
        # conditions have one node fewer than elements: <dim>D<dim>N
        full_condition_name = "{0}{1}D{1}N".format(condition_name, domain_size)
        part_name += "_" + full_condition_name
    return CreateDuplicateModelPart(
        original_model_part, part_name, full_element_name,
        full_condition_name)
def CalculateNormalsOnConditions(model_part):
    """Compute condition NORMALs once per model part and spread them to the
    SLIP-flagged nodes, guarding the work with an analysis-step marker."""
    domain_size = model_part.ProcessInfo[Kratos.DOMAIN_SIZE]
    if (not RansVariableUtilities.IsAnalysisStepCompleted(
            model_part, "CONDITION_NORMAL_CALCULATION")):
        # this calculates normals on whole model part, and assigns
        # NORMAL variable in NodalSolutionStepDataValue container.
        # NORMAL on conditions is required by some formulations such as inlet condition for
        # incompressible potential flow velocity formulation, and all boundaries for incompressible
        # potential flow pressure formulation.
        Kratos.NormalCalculationUtils().CalculateOnSimplex(
            model_part.Conditions, domain_size)
        RansVariableUtilities.AddAnalysisStep(model_part,
                                              "CONDITION_NORMAL_CALCULATION")
    # This reverts incorrectly calculated nodal NORMALS from previous method
    # since it spreads condition NORMAL to all nodes of model part, but from this
    # method, it again spreads condition NORMALs to nodes where condition is applied
    # with SLIP flag.
    # NOTE(review): runs on every call, even when the calculation above was
    # skipped -- presumably intentional to re-sync nodal values; confirm.
    RansVariableUtilities.AssignConditionVariableValuesToNodes(
        model_part, Kratos.NORMAL, Kratos.SLIP)
    Kratos.Logger.PrintInfo("NormalCalculationUtils",
                            "Condition normals calculated.")
def InitializeYPlusVariablesInConditions(model_part):
    """Zero-initialize RANS_Y_PLUS and FRICTION_VELOCITY on the conditions,
    once per model part (guarded by an analysis-step marker)."""
    if (not RansVariableUtilities.IsAnalysisStepCompleted(
            model_part, "CONDITION_TURBULENCE_VARIABLE_INITIALIZATION")):
        VariableUtils().SetNonHistoricalVariableToZero(KratosRANS.RANS_Y_PLUS, model_part.Conditions)
        VariableUtils().SetNonHistoricalVariableToZero(KratosRANS.FRICTION_VELOCITY, model_part.Conditions)
        RansVariableUtilities.AddAnalysisStep(model_part,
                                              "CONDITION_TURBULENCE_VARIABLE_INITIALIZATION")
    # NOTE(review): this log line runs even when initialization was skipped --
    # possibly intentional; confirm.
    Kratos.Logger.PrintInfo("Initialization",
                            "Initialized condition variables.")
def InitializePeriodicConditions(
    base_model_part,
    model_part,
    variables_list,
    periodic_condition_name = "PeriodicCondition"):
    """Recreate the PERIODIC-flagged conditions of *base_model_part* in
    *model_part*, registering *variables_list* as the periodic variables.

    New conditions get a fresh properties object and ids continuing after
    the destination's existing conditions.
    """
    properties = model_part.CreateNewProperties(
        model_part.NumberOfProperties() + 1)
    pcu = KratosCFD.PeriodicConditionUtilities(
        model_part, model_part.ProcessInfo[Kratos.DOMAIN_SIZE])
    for variable in variables_list:
        pcu.AddPeriodicVariable(properties, variable)
    index = model_part.NumberOfConditions()
    for condition in base_model_part.Conditions:
        if condition.Is(Kratos.PERIODIC):
            index += 1
            node_id_list = [node.Id for node in condition.GetNodes()]
            periodic_condition = model_part.CreateNewCondition(
                periodic_condition_name, index, node_id_list, properties)
            # carry the PERIODIC flag over to the new condition
            periodic_condition.Set(Kratos.PERIODIC)
def GetBoundaryFlags(boundary_flags_parameters):
    """OR-combine the Kratos flags named in *boundary_flags_parameters*.

    Args:
        boundary_flags_parameters: Kratos Parameters array of flag names.

    Returns:
        The combined Kratos flags object.

    Raises:
        Exception: if the parameter array is empty.
    """
    # bug fix: ``size`` is a method on Kratos Parameters (see the call with
    # parentheses below); the original compared the bound method itself
    # against 0, so the empty-input guard could never fire.
    if (boundary_flags_parameters.size() == 0):
        raise Exception("No boundary flags were found")
    flags = Kratos.KratosGlobals.GetFlag(
        boundary_flags_parameters[0].GetString())
    for i in range(1, boundary_flags_parameters.size()):
        flags |= Kratos.KratosGlobals.GetFlag(
            boundary_flags_parameters[i].GetString())
    return flags
def GetConvergenceInfo(
    variable,
    relative_error,
    relative_tolerance,
    absolute_error=-1.0,
    absolute_tolerance=-1.0):
    """Format a one-line convergence report for *variable*.

    The absolute-norm section is included only when both absolute values
    are non-negative (negative defaults mean "not provided").
    """
    parts = ["[ Obtained ratio: {0:6e}; Expected ratio: {1:6e}".format(
        relative_error, relative_tolerance)]
    if absolute_error >= 0.0 and absolute_tolerance >= 0.0:
        parts.append("; Absolute norm: {0:6e}; Expected norm: {1:6e}".format(
            absolute_error, absolute_tolerance))
    parts.append(" ] - " + str(variable.Name()))
    return "".join(parts)
def CreateBlockBuilderAndSolver(
    linear_solver,
    is_periodic,
    communicator):
    """Return a block builder-and-solver matching the run mode.

    Distributed runs get the Trilinos variants; periodic runs get the
    PATCH_INDEX-aware periodic builder.
    """
    if IsDistributedRun():
        if is_periodic:
            return TrilinosBlockBuilderAndSolverPeriodic(
                communicator, 30, linear_solver, KratosCFD.PATCH_INDEX)
        return TrilinosBlockBuilderAndSolver(communicator, 30, linear_solver)
    if is_periodic:
        return ResidualBasedBlockBuilderAndSolverPeriodic(
            linear_solver, KratosCFD.PATCH_INDEX)
    return ResidualBasedBlockBuilderAndSolver(linear_solver)
| StarcoderdataPython |
56412 | <reponame>Sourav692/FAANG-Interview-Preparation<gh_stars>1000+
# Time: O(n)
# Space: O(1)
import operator
from functools import reduce
class Solution(object):
    """Solution container for the single-number problem."""

    def singleNumber(self, A):
        """Return the element of *A* that appears exactly once.

        Every other element appears exactly twice, so XOR-ing all the
        elements cancels the pairs and leaves the singleton.

        :type A: List[int]
        :rtype: int
        """
        # Time: O(n), Space: O(1)
        return reduce(operator.xor, A)
| StarcoderdataPython |
3213153 | <filename>v6.0.6/ips/test_fortios_ips_global.py
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_ips_global
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    # Automatically patch the httpapi Connection class for every test so no
    # real FortiGate connection is ever attempted.
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_ips_global.Connection')
    return connection_class_mock

# Handler shared by all tests below.  NOTE(review): this is constructed at
# import time from the fixture *function* object, not from a patched
# connection instance; the tests only exercise FortiOSHandler.set via mocks,
# so the connection argument is apparently never used directly -- confirm.
fos_instance = FortiOSHandler(connection_mock)
def test_ips_global_creation(mocker):
    """A successful POST reports changed=True and echoes the device reply."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    ips_global_params = {
        'anomaly_mode': 'periodical',
        'database': 'regular',
        'deep_app_insp_db_limit': '5',
        'deep_app_insp_timeout': '6',
        'engine_count': '7',
        'exclude_signatures': 'none',
        'fail_open': 'enable',
        'intelligent_mode': 'enable',
        'session_limit_mode': 'accurate',
        'skype_client_public_ipaddr': 'test_value_12',
        'socket_size': '13',
        'sync_session_ttl': 'enable',
        'traffic_submit': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'ips_global': ips_global_params,
        'vdom': 'root'}

    is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)

    # The module translates the Ansible snake_case option names into the
    # FortiOS API's kebab-case attribute names.
    expected_data = {key.replace('_', '-'): value
                     for key, value in ips_global_params.items()}

    set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_ips_global_creation_fails(mocker):
    """A 500 reply from the device reports is_error=True and changed=False."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'POST', 'http_status': 500})

    ips_global_params = {
        'anomaly_mode': 'periodical',
        'database': 'regular',
        'deep_app_insp_db_limit': '5',
        'deep_app_insp_timeout': '6',
        'engine_count': '7',
        'exclude_signatures': 'none',
        'fail_open': 'enable',
        'intelligent_mode': 'enable',
        'session_limit_mode': 'accurate',
        'skype_client_public_ipaddr': 'test_value_12',
        'socket_size': '13',
        'sync_session_ttl': 'enable',
        'traffic_submit': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'ips_global': ips_global_params,
        'vdom': 'root'}

    is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)

    # snake_case Ansible options map onto the API's kebab-case attributes.
    expected_data = {key.replace('_', '-'): value
                     for key, value in ips_global_params.items()}

    set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_ips_global_idempotent(mocker):
    """A 404 'error' reply means nothing needed changing: no error, no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'error', 'http_method': 'DELETE', 'http_status': 404})

    ips_global_params = {
        'anomaly_mode': 'periodical',
        'database': 'regular',
        'deep_app_insp_db_limit': '5',
        'deep_app_insp_timeout': '6',
        'engine_count': '7',
        'exclude_signatures': 'none',
        'fail_open': 'enable',
        'intelligent_mode': 'enable',
        'session_limit_mode': 'accurate',
        'skype_client_public_ipaddr': 'test_value_12',
        'socket_size': '13',
        'sync_session_ttl': 'enable',
        'traffic_submit': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        'ips_global': ips_global_params,
        'vdom': 'root'}

    is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)

    # snake_case Ansible options map onto the API's kebab-case attributes.
    expected_data = {key.replace('_', '-'): value
                     for key, value in ips_global_params.items()}

    set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_ips_global_filter_foreign_attributes(mocker):
    """Attributes outside the module schema are stripped before the API call."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_mock = mocker.patch(
        'ansible.module_utils.network.fortios.fortios.FortiOSHandler.set',
        return_value={'status': 'success', 'http_method': 'POST', 'http_status': 200})

    ips_global_params = {
        'anomaly_mode': 'periodical',
        'database': 'regular',
        'deep_app_insp_db_limit': '5',
        'deep_app_insp_timeout': '6',
        'engine_count': '7',
        'exclude_signatures': 'none',
        'fail_open': 'enable',
        'intelligent_mode': 'enable',
        'session_limit_mode': 'accurate',
        'skype_client_public_ipaddr': 'test_value_12',
        'socket_size': '13',
        'sync_session_ttl': 'enable',
        'traffic_submit': 'enable'
    }
    input_data = {
        'username': 'admin',
        'state': 'present',
        # The foreign attribute must NOT reach the API payload below.
        'ips_global': {'random_attribute_not_valid': 'tag', **ips_global_params},
        'vdom': 'root'}

    is_error, changed, response = fortios_ips_global.fortios_ips(input_data, fos_instance)

    # Only the schema-valid options, mapped to kebab-case, are expected.
    expected_data = {key.replace('_', '-'): value
                     for key, value in ips_global_params.items()}

    set_method_mock.assert_called_with('ips', 'global', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| StarcoderdataPython |
1634495 | #!/usr/bin/env python
#
# A simple example showcasing the basics of systest.
#
import logging
import systest
LOGGER = logging.getLogger(__name__)
# Define a testcase.
class MyTestCase(systest.TestCase):
    """Example test case exercising every systest assertion helper."""

    def __init__(self, name):
        super(MyTestCase, self).__init__()
        self.name = "my_testcase_{}".format(name)

    def run(self):
        LOGGER.info("Hello!")

        # Equality and truthiness helpers.
        self.assert_equal(1, 1)
        self.assert_not_equal(1, 2)
        self.assert_true(1 == 1)
        self.assert_false(1 == 2)

        # Membership helpers.
        self.assert_in(1, [1, 2])
        self.assert_not_in(1, [0, 2])

        # None-identity helpers.
        self.assert_is_none(None)
        self.assert_is_not_none(1)

        # Ordering helpers.
        self.assert_greater(2, 1)
        self.assert_greater_equal(2, 2)
        self.assert_less(1, 2)
        self.assert_less_equal(2, 2)

        # Exception capture helper.
        with self.assert_raises(RuntimeError) as caught:
            raise RuntimeError('foo')

        self.assert_equal(str(caught.exception), 'foo')
sequencer = systest.setup("my_sequence")

# Build the test-case graph up front (creation order matches the original:
# 1, 2, 3, 4, 5) and hand the nested structure to the sequencer.
first = MyTestCase("1")
grouped = (
    MyTestCase("2"),
    [
        MyTestCase("3"),
        MyTestCase("4")
    ]
)
last = MyTestCase("5")

sequencer.run(first, grouped, last)

sequencer.report_and_exit()
| StarcoderdataPython |
3376203 | <filename>source-code/multiexp/ed25519.py<gh_stars>1-10
import random
# curve parameters
b = 256  # encoding bit-length parameter of Ed25519
q = 2**255 - 19  # prime field modulus
l = 2**252 + 27742317777372353535851937790883648493  # order of the base-point subgroup
# op counts: running totals of group operations performed.
# Initialised here so that add()/multiply() work even before the first
# reset() call (previously the dict started empty and they raised KeyError).
counts = {'add': 0, 'multiply': 0}
def reset():
    """Zero the add/multiply operation counters."""
    counts['add'] = 0
    counts['multiply'] = 0
# compute b^e mod m
# (A hand-rolled recursive square-and-multiply used to live here, commented
# out; removed in favour of the builtin, which does the same thing in C.)
def exponent(b,e,m):
    """Return b**e mod m via the three-argument builtin pow.

    NOTE: the parameter ``b`` shadows the module-level constant ``b`` (256);
    kept for backward compatibility with positional/keyword callers.
    """
    return pow(b,e,m)
# compute x^(-1) mod m
def invert(x):
    """Modular inverse of x mod q, via Fermat's little theorem: x**(q-2) mod q."""
    return pow(x, q - 2, q)
# useful constants
d = -121665 * invert(121666)  # Edwards curve coefficient -121665/121666 (not reduced mod q)
# I = sqrt(-1) mod q (q = 1 mod 4).  Floor division: (q-1) is divisible by 4,
# and plain '/' would produce an inexact float under Python 3 and break pow().
I = exponent(2, (q - 1) // 4, q)
# given a y value, recover the x value on the curve
def xfromy(y):
    """Recover the even x-coordinate with (x, y) on the curve.

    Uses the square-root trick for primes q = 5 (mod 8): a candidate root
    of t = (y^2-1)/(d*y^2+1) is t**((q+3)/8); if it squares to -t instead,
    multiply by I = sqrt(-1).  Finally pick the even representative.
    """
    temp = (y*y-1) * invert(d*y*y+1)
    # Floor division: (q+3) is exactly divisible by 8, and '/' would yield
    # an (inexact) float exponent under Python 3.
    x = exponent(temp, (q+3)//8, q)
    if (x*x - temp) % q != 0:
        x = (x*I) % q
    if x % 2 != 0:
        x = q-x
    return x
# common basepoint (requires earlier function)
Gy = 4*invert(5)  # standard Ed25519 base-point y-coordinate: 4/5 mod q
Gx = xfromy(Gy)   # the matching even x-coordinate
G = [Gx % q, Gy % q]
# zero point: the identity element of the Edwards group
Z = [0,1]
# add P+Q
def _add(P,Q):
    """Affine Edwards-curve point addition (no op counting).

    x3 = (x1*y2 + x2*y1) / (1 + d*x1*x2*y1*y2)
    y3 = (y1*y2 + x1*x2) / (1 - d*x1*x2*y1*y2), both reduced mod q.
    """
    x1 = P[0]
    y1 = P[1]
    x2 = Q[0]
    y2 = Q[1]
    x3 = (x1*y2+x2*y1) * invert(1+d*x1*x2*y1*y2)
    y3 = (y1*y2+x1*x2) * invert(1-d*x1*x2*y1*y2)
    return [x3 % q, y3 % q]
def add(P,Q):
    """Point addition, recorded in counts['add']."""
    counts['add'] += 1
    return _add(P,Q)
# scalar multiply a*P
def _multiply(a,P):
    """Compute the scalar multiple a*P by recursive double-and-add.

    Returns the identity [0, 1] for a == 0.
    """
    if a == 0:
        return [0,1]
    # Floor division keeps 'a' an integer under Python 3; the original '/'
    # would produce a float there and break the bitwise test below.
    Q = _multiply(a//2,P)
    Q = _add(Q,Q)
    if a & 1:
        Q = _add(Q,P)
    return Q
def multiply(a,P):
    """Scalar multiplication, recorded in counts['multiply'].

    Only the top-level call is counted; the internal double-and-add steps
    go through _add/_multiply directly.
    """
    counts['multiply'] += 1
    return _multiply(a,P)
# generate a random scalar
def random_scalar():
    """Return a uniform scalar in [0, l).

    NOTE: the 'random' module is not cryptographically secure -- fine for
    these operation-counting experiments, unsuitable for real key material.
    """
    return random.randrange(0,l)
# generate a random multiple of the basepoint
def random_point():
    """Return random_scalar() * G (bypasses the op counters)."""
    return _multiply(random_scalar(),G)
| StarcoderdataPython |
4818780 | import requests,sys,time,json
from bs4 import BeautifulSoup
import argparse
banner = """\033[0;34m=========================================================
🇵 🇭 🇫 🇫 🇹 🇪 🇦 🇲
\033[0;34m=========================================================
\033[1;32mScript edit By \033[1;31m :\033[1;0m คเкђєภ
\033[1;32mPHFF\033[1;31m : \033[1;0mTeam
"""
print (banner)
# Command-line interface: credentials are mandatory, sleep interval optional.
parser = argparse.ArgumentParser(description='Script Visit Website CowDollar')
parser.add_argument(
    '-u','--email',
    help='<Enter Your Email>',required=True
)
parser.add_argument(
    '-p','--password',
    help='<Enter Your Password>',required=True
)
parser.add_argument(
    '-s','--sleep',
    default=30,
    help='Sleep (default: 30)'
)
# NOTE: --sleep arrives as a str when given on the CLI (no type= set);
# it is converted with int() at the call site in the mining loop.
my_namespace = parser.parse_args()
def tunggu(x):
    """Sleep for ``x`` seconds while showing an in-place countdown.

    Rewrites the current terminal line once per second; returns immediately
    (after clearing the line) when ``x`` <= 0.
    """
    sys.stdout.write("\r")
    sys.stdout.write("          ")
    for remaining in range(x, 0, -1):
        sys.stdout.write("\r")
        sys.stdout.write("\033[1;30m#\033[1;0m{:2d} \033[1;32mseconds remaining".format(remaining))
        sys.stdout.flush()
        time.sleep(1)
    sys.stdout.write("          ")
    # FIX: the trailing line-clear was never flushed, so it could remain
    # invisible until some later write happened to flush stdout.
    sys.stdout.flush()
# Browser-mimicking HTTP headers sent with the regular page requests.
ua = {
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Linux; Android 5.1; A1603 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/72.0.3626.121 Mobile Safari/537.36",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    "accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"
}
c = requests.session()

# Fetch the login page and scrape the CSRF ("authenticity") token, which is
# the value of the second <input> element on the page.
r = c.get("https://gymx.me/en/login", headers=ua)
soup = BeautifulSoup(r.text, "html.parser")
a = 0
for auth in soup.findAll("input"):
    a += 1
    autho = auth.get("value")
    if a == 2:
        break

# Submit the credentials.  FIX: the password comes from the parsed CLI
# arguments; the original line contained a "<PASSWORD>" anonymisation
# placeholder, which is not valid Python.
r = c.post("https://gymx.me/en/login", headers=ua,
           data={"utf8": "✓",
                 "authenticity_token": autho,
                 "user[email]": my_namespace.email,
                 "user[password]": my_namespace.password,
                 "commit": "Login"})
soup = BeautifulSoup(r.text, "html.parser")
print("\033[1;37m" + soup.title.text, "\n")

a = 0
if soup.title.text == "Cowdollars":
    # Still on the public landing page: the credentials were rejected.
    # (Message typo "Filed" fixed to "Failed".)
    print("\033[1;31mFailed To Login\nPlease Check Your Email Or Your Password")
    sys.exit()
else:
    # Logged in: print the balance counters shown on the dashboard.
    for ball in soup.findAll("span", class_="counter"):
        a += 1
        if a == 1:
            print("\033[1;32mToday Balance \033[1;31m :\033[1;0m", ball.text, "BTC")
        if a == 2:
            print("\033[1;32mYesterday Balance \033[1;31m:\033[1;0m", ball.text, "BTC")
        if a == 3:
            print("\033[1;32mTotal Balance\033[1;31m :\033[1;0m", ball.text, "BTC")
        if a == 5:
            print("\033[1;32mConvert To USD\033[1;31m :\033[1;0m", ball.text, "USD")

# The CSRF token for the AJAX endpoints is the content of the fifth <meta> tag.
a = 0
for csr in soup.findAll("meta"):
    a += 1
    token = csr.get("content")
    if a == 5:
        break

print("\033[0;34m\n\n=========================================================")
print("\033[1;37m\n\nLet's Start Mining......!")
try:
r = c.get("https://gymx.me/en/mining/mine",headers=ua,cookies=r.cookies,timeout=15)
r1 = c.post("https://gymx.me/mining/toggle_miner_state/",headers={"accept": "application/json, text/javascript, */*; q=0.01","x-csrf-token": token,"x-requested-with": "XMLHttpRequest","user-agent": "Mozilla/5.0 (Linux; Android 5.1; A1603 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/72.0.3626.121 Mobile Safari/537.36","content-type": "application/x-www-form-urlencoded; charset=UTF-8","accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"} ,data={"state[mining_state]": "actived"},cookies=r.cookies,timeout=15)
j = json.loads(r1.text)
sys.stdout.write("\r\033[1;30m# \033[1;32m"+j["message"]["title"])
tunggu(int(my_namespace.sleep))
r2 = c.post("https://gymx.me/earnings",headers={"accept": "application/json, text/javascript, */*; q=0.01","x-csrf-token": token,"x-requested-with": "XMLHttpRequest","user-agent": "Mozilla/5.0 (Linux; Android 5.1; A1603 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/72.0.3626.121 Mobile Safari/537.36","content-type": "application/x-www-form-urlencoded; charset=UTF-8","accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"},data={"earning[bitcoin]": "0.0000005"},cookies=r.cookies,timeout=15)
js = json.loads(r2.text)
sys.stdout.write("\r\033[1;30m#\033[0;32m "+js["message"]["title"]+" "+js["message"]["msg"])
r3 = c.post("https://gymx.me/mining/toggle_miner_state/",headers={"accept": "application/json, text/javascript, */*; q=0.01","x-csrf-token": token,"x-requested-with": "XMLHttpRequest","user-agent": "Mozilla/5.0 (Linux; Android 5.1; A1603 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/72.0.3626.121 Mobile Safari/537.36","content-type": "application/x-www-form-urlencoded; charset=UTF-8","accept-language": "id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"},data={"state[mining_state]": "passived"},cookies=r.cookies,timeout=15)
r4 = c.get("https://gymx.me/en/mining/dashboard",headers=ua,cookies=r.cookies,timeout=15)
soup = BeautifulSoup(r4.text,"html.parser")
sys.stdout.write("\r\033[1;30m# \033[1;32mYour BTC Balance\033[1;31m :\033[1;32m "+soup.find("span", class_="counter") .text+"\n")
except:
time.sleep(3)
pass
| StarcoderdataPython |
1765142 | # A recursive solution
# How would you solve this iteratively?
def checkBalanced(rootNode):
    """Return True iff every root-to-None path in the tree has the same length.

    An empty tree is balanced by default.  Equivalent to the original
    max-depth - min-depth == 0 test, but computes both depths in a single
    traversal instead of two, and uses 'is None' rather than '== None'.
    """
    if rootNode is None:
        return True

    def _depth_range(node):
        # Return (min_depth, max_depth) of the subtree rooted at node.
        if node is None:
            return 0, 0
        left_min, left_max = _depth_range(node.left)
        right_min, right_max = _depth_range(node.right)
        return 1 + min(left_min, right_min), 1 + max(left_max, right_max)

    shallowest, deepest = _depth_range(rootNode)
    return deepest - shallowest == 0
# Some console.log tests
class BinaryTreeNode:
    """Node in a binary tree, with convenience child-insertion helpers."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def insertLeft(self, value):
        """Create a node holding value, attach it as the left child, return it."""
        child = BinaryTreeNode(value)
        self.left = child
        return child

    def insertRight(self, value):
        """Create a node holding value, attach it as the right child, return it."""
        child = BinaryTreeNode(value)
        self.right = child
        return child
# Quick smoke tests, results printed to stdout.
root = BinaryTreeNode(5)
print(checkBalanced(root)) # should print True
root.insertLeft(10)
print(checkBalanced(root)) # should print False
root.insertRight(11)
print(checkBalanced(root)) # should print True
| StarcoderdataPython |
1789252 | from rpython.rlib.rarithmetic import ovfcheck
from rpython.rlib.rbigint import rbigint, _divrem
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from som.vmobjects.abstract_object import AbstractObject
from som.vm.globals import trueObject, falseObject
class Integer(AbstractObject):
    """Boxed fixed-width integer for the SOM interpreter (RPython).

    Arithmetic primitives promote to BigInteger when a machine-word
    operation overflows, and coerce to Double when the right-hand operand
    is a Double.
    """

    _immutable_fields_ = ["_embedded_integer"]

    def __init__(self, value):
        AbstractObject.__init__(self)
        assert isinstance(value, int)
        self._embedded_integer = value

    def get_embedded_integer(self):
        """Return the raw host-level int held by this box."""
        return self._embedded_integer

    def __str__(self):
        return str(self._embedded_integer)

    def get_class(self, universe):
        return universe.integerClass

    def quick_add(self, from_method, frame, interpreter, bytecode_index):
        """Bytecode fast path for '+': pop both operands, push the sum."""
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_add(right))

    def quick_multiply(self, from_method, frame, interpreter, bytecode_index):
        """Bytecode fast path for '*': pop both operands, push the product."""
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_multiply(right))

    def quick_subtract(self, from_method, frame, interpreter, bytecode_index):
        """Bytecode fast path for '-': pop both operands, push the difference."""
        right = frame.top()
        frame.pop()
        frame.pop()
        frame.push(self.prim_subtract(right))

    def _to_double(self):
        # Coercion helper used whenever the other operand is a Double.
        from .double import Double
        return Double(float(self._embedded_integer))

    def prim_less_than(self, right):
        """SOM true/false for self < right (Integer, BigInteger or Double)."""
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).lt(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_less_than(right)
        else:
            result = self._embedded_integer < right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    def prim_less_than_or_equal(self, right):
        """SOM true/false for self <= right (Integer, BigInteger or Double)."""
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).le(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_less_than_or_equal(right)
        else:
            result = self._embedded_integer <= right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    def prim_greater_than(self, right):
        """SOM true/false for self > right (Integer, BigInteger or Double)."""
        from .double import Double
        from .biginteger import BigInteger
        # Check second parameter type:
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).gt(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            return self._to_double().prim_greater_than(right)
        else:
            result = self._embedded_integer > right.get_embedded_integer()
        if result:
            return trueObject
        else:
            return falseObject

    def prim_as_string(self):
        """Return the decimal representation as a SOM String."""
        from .string import String
        return String(str(self._embedded_integer))

    def prim_abs(self):
        """Return the absolute value as a new Integer."""
        return Integer(abs(self._embedded_integer))

    def prim_as_32_bit_signed_value(self):
        """Truncate to 32 bits and sign-extend back to the native word size."""
        val = rffi.cast(lltype.Signed, rffi.cast(rffi.INT, self._embedded_integer))
        return Integer(val)

    def prim_max(self, right):
        """Return the larger of self and right (right may be a BigInteger)."""
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            left = rbigint.fromint(self._embedded_integer)
            if right.get_embedded_biginteger().gt(left):
                return right
            return self
        assert isinstance(right, Integer)
        if right.get_embedded_integer() > self._embedded_integer:
            return right
        return self

    def prim_add(self, right):
        """self + right; overflow promotes to BigInteger, Double coerces."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            return BigInteger(
                right.get_embedded_biginteger().add(
                    rbigint.fromint(self._embedded_integer)))
        elif isinstance(right, Double):
            return self._to_double().prim_add(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l + r)
                return Integer(result)
            except OverflowError:
                # Result does not fit a machine word: redo in arbitrary precision.
                return BigInteger(
                    rbigint.fromint(l).add(rbigint.fromint(r)))

    def prim_subtract(self, right):
        """self - right; overflow promotes to BigInteger, Double coerces."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).sub(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_subtract(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l - r)
                return Integer(result)
            except OverflowError:
                # Result does not fit a machine word: redo in arbitrary precision.
                return BigInteger(
                    rbigint.fromint(l).sub(rbigint.fromint(r)))

    def prim_multiply(self, right):
        """self * right; overflow promotes to BigInteger, Double coerces."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).mul(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_multiply(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            try:
                result = ovfcheck(l * r)
                return Integer(result)
            except OverflowError:
                # Result does not fit a machine word: redo in arbitrary precision.
                return BigInteger(
                    rbigint.fromint(l).mul(rbigint.fromint(r)))

    def prim_double_div(self, right):
        """SOM '//' message: true (floating-point) division, always a Double."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).truediv(
                right.get_embedded_biginteger())
            return Double(r)
        elif isinstance(right, Double):
            return self._to_double().prim_double_div(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Double(l / float(r))

    def prim_int_div(self, right):
        """Integer division ('/' on two ints is integer division in RPython/Py2)."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).floordiv(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_int_div(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l / r)

    def prim_modulo(self, right):
        """Python-style (floored) modulo: result takes the sign of the divisor."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).mod(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_modulo(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l % r)

    def prim_remainder(self, right):
        """Remainder via the low-level int_mod op.

        NOTE(review): llop.int_mod is the C-style remainder, which differs
        from Python's floored '%' for negative operands -- presumably
        intentional for SOM's rem: primitive; confirm against the spec.
        """
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            d, r = _divrem(rbigint.fromint(self._embedded_integer),
                           right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_remainder(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(llop.int_mod(lltype.Signed, l, r))

    def prim_and(self, right):
        """Bitwise AND; Double operands are delegated like the other ops."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            r = rbigint.fromint(self._embedded_integer).and_(
                right.get_embedded_biginteger())
            return BigInteger(r)
        elif isinstance(right, Double):
            return self._to_double().prim_and(right)
        else:
            l = self._embedded_integer
            r = right.get_embedded_integer()
            return Integer(l & r)

    def prim_equals(self, right):
        """SOM true/false for numeric equality; non-numeric right is false."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).eq(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            result = self._embedded_integer == right.get_embedded_double()
        elif isinstance(right, Integer):
            l = self._embedded_integer
            r = right.get_embedded_integer()
            result = l == r
        else:
            return falseObject
        if result:
            return trueObject
        else:
            return falseObject

    def prim_unequals(self, right):
        """SOM true/false for numeric inequality; non-numeric right is true."""
        from .double import Double
        from .biginteger import BigInteger
        if isinstance(right, BigInteger):
            result = rbigint.fromint(self._embedded_integer).ne(
                right.get_embedded_biginteger())
        elif isinstance(right, Double):
            result = self._embedded_integer != right.get_embedded_double()
        elif isinstance(right, Integer):
            l = self._embedded_integer
            r = right.get_embedded_integer()
            result = l != r
        else:
            return trueObject
        if result:
            return trueObject
        else:
            return falseObject
| StarcoderdataPython |
29897 | from typing import List
from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from assignment.models import Assignment
from course.models import Course
class CourseListView(ListView):
    """List all Course objects using course/course_list.html."""
    template_name = 'course/course_list.html'
    model = Course
    context_object_name = 'course'
class CourseDetailView(DetailView):
    """Show a single course together with the assignments that belong to it."""
    template_name = 'course/course_detail.html'
    model = Course
    context_object_name = 'course'

    def get(self, request, *args, **kwargs):
        # Remember the requested course pk so get_context_data can filter
        # the related assignments.
        self.pk = kwargs["pk"]
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        # Inject the course's assignments into the template context.
        kwargs["assignment"] = Assignment.objects.filter(course__id=self.pk)
        return super().get_context_data(**kwargs)
| StarcoderdataPython |
3227038 | <gh_stars>1-10
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
import argparse
import os
import os.path as osp
from darknet import Darknet
import pickle as pkl
import pandas as pd
import random
def arg_parse():
    """
    Parse command-line arguments for the detect module.

    Returns an argparse.Namespace with: images, det, bs, confidence,
    nms_thresh, cfgfile, weightsfile and reso.
    """
    parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')
    parser.add_argument("--images", dest = 'images', help =
                        "Image / Directory containing images to perform detection upon",
                        default = "imgs", type = str)
    parser.add_argument("--det", dest = 'det', help =
                        "Image / Directory to store detections to",
                        default = "det", type = str)
    parser.add_argument("--bs", dest = "bs", help = "Batch size", default = 1)
    parser.add_argument("--confidence", dest = "confidence", help = "Object Confidence to filter predictions", default = 0.5)
    parser.add_argument("--nms_thresh", dest = "nms_thresh", help = "NMS Threshhold", default = 0.4)
    parser.add_argument("--cfg", dest = 'cfgfile', help =
                        "Config file",
                        default = "cfg/yolov3.cfg", type = str)
    parser.add_argument("--weights", dest = 'weightsfile', help =
                        "weightsfile",
                        default = "yolov3.weights", type = str)
    parser.add_argument("--reso", dest = 'reso', help =
                        "Input resolution of the network. Increase to increase accuracy. Decrease to increase speed",
                        default = "416", type = str)
    return parser.parse_args()
# Parse CLI options and coerce the numeric thresholds.
args = arg_parse()
images = args.images
batch_size = int(args.bs)
confidence = float(args.confidence)
nms_thesh = float(args.nms_thresh)
start = 0
CUDA = torch.cuda.is_available()

# COCO dataset: 80 object classes.
num_classes = 80
classes = load_classes("data/coco.names")

#Set up the neural network
print("Loading network.....")
model = Darknet(args.cfgfile)
model.load_weights(args.weightsfile)
print("Network successfully loaded")

model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
# Input resolution must be a positive multiple of the network stride (32).
assert inp_dim % 32 == 0
assert inp_dim > 32

#If there's a GPU available, put the model on GPU
if CUDA:
    model.cuda()

#Set the model in evaluation mode (disables dropout/batch-norm updates)
model.eval()
read_dir = time.time()
#Detection phase
# Build the list of absolute image paths: accept either a directory of
# images or a single image file path.
try:
    imlist = [osp.join(osp.realpath('.'), images, img) for img in os.listdir(images)]
except NotADirectoryError:
    imlist = []
    imlist.append(osp.join(osp.realpath('.'), images))
except FileNotFoundError:
    print ("No file or directory with the name {}".format(images))
    exit()

# Make sure the output directory for annotated images exists.
if not os.path.exists(args.det):
    os.makedirs(args.det)
loaded_ims = [cv2.imread(x) for x in imlist]
im_batches = list(map(prep_image, loaded_ims, [inp_dim for x in range(len(imlist))]))
im_dim_list = [(x.shape[1], x.shape[0]) for x in loaded_ims]
im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
leftover = 0
if (len(im_dim_list) % batch_size):
leftover = 1
if batch_size != 1:
num_batches = len(imlist) // batch_size + leftover
im_batches = [torch.cat((im_batches[i*batch_size : min((i + 1)*batch_size,
len(im_batches))])) for i in range(num_batches)]
write = 0
if CUDA:
im_dim_list = im_dim_list.cuda()
start_det_loop = time.time()
for i, batch in enumerate(im_batches):
#load the image
start = time.time()
if CUDA:
batch = batch.cuda()
with torch.no_grad():
prediction = model(Variable(batch), CUDA)
prediction = write_results(prediction, confidence, num_classes, nms_conf = nms_thesh)
end = time.time()
if type(prediction) == int:
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", ""))
print("----------------------------------------------------------")
continue
prediction[:,0] += i*batch_size #transform the atribute from index in batch to index in imlist
if not write: #If we have't initialised output
output = prediction
write = 1
else:
output = torch.cat((output,prediction))
for im_num, image in enumerate(imlist[i*batch_size: min((i + 1)*batch_size, len(imlist))]):
im_id = i*batch_size + im_num
objs = [classes[int(x[-1])] for x in output if int(x[0]) == im_id]
print("{0:20s} predicted in {1:6.3f} seconds".format(image.split("/")[-1], (end - start)/batch_size))
print("{0:20s} {1:s}".format("Objects Detected:", " ".join(objs)))
print("----------------------------------------------------------")
if CUDA:
torch.cuda.synchronize()
# Bail out when no image produced any detection at all ('output' was
# never bound inside the loop above).
try:
    output
except NameError:
    print ("No detections were made")
    exit()

# Map box corners from the letterboxed network frame back to original
# image coordinates, then clamp them to the image borders.
im_dim_list = torch.index_select(im_dim_list, 0, output[:,0].long())

# FIX: use the configured input resolution instead of the literal 416 --
# every surrounding line already uses inp_dim, and with --reso != 416 the
# hard-coded constant produced wrong box coordinates.
scaling_factor = torch.min(inp_dim/im_dim_list,1)[0].view(-1,1)

output[:,[1,3]] -= (inp_dim - scaling_factor*im_dim_list[:,0].view(-1,1))/2
output[:,[2,4]] -= (inp_dim - scaling_factor*im_dim_list[:,1].view(-1,1))/2

output[:,1:5] /= scaling_factor

for i in range(output.shape[0]):
    output[i, [1,3]] = torch.clamp(output[i, [1,3]], 0.0, im_dim_list[i,0])
    output[i, [2,4]] = torch.clamp(output[i, [2,4]], 0.0, im_dim_list[i,1])

output_recast = time.time()
class_load = time.time()
# FIX: close the palette file deterministically instead of leaking the handle.
with open("pallete", "rb") as palette_file:
    colors = pkl.load(palette_file)
draw = time.time()
def write(x, results):
    """Draw one detection (box + class label) onto its source image in place.

    x is one row of the output tensor: [image index, x1, y1, x2, y2, ...,
    class index] (the middle columns are confidence scores -- confirm against
    write_results).  results is the list of loaded images; returns the
    modified image.

    NOTE(review): this rebinds the module-level name 'write' that was used
    as a flag inside the detection loop above; harmless since the loop has
    finished, but confusing.
    """
    c1 = tuple(x[1:3].int())
    c2 = tuple(x[3:5].int())
    img = results[int(x[0])]
    cls = int(x[-1])
    color = random.choice(colors)
    label = "{0}".format(classes[cls])
    cv2.rectangle(img, c1, c2,color, 1)
    # Draw a filled background sized to the label text, then the label itself.
    t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
    c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
    cv2.rectangle(img, c1, c2,color, -1)
    cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1);
    return img
# Draw every detection, then save each annotated image as det_<name>.
list(map(lambda x: write(x, loaded_ims), output))

det_names = pd.Series(imlist).apply(lambda x: "{}/det_{}".format(args.det,x.split("/")[-1]))

list(map(cv2.imwrite, det_names, loaded_ims))

end = time.time()

# Per-phase timing summary for the whole run.
print("SUMMARY")
print("----------------------------------------------------------")
print("{:25s}: {}".format("Task", "Time Taken (in seconds)"))
print()
print("{:25s}: {:2.3f}".format("Reading addresses", load_batch - read_dir))
print("{:25s}: {:2.3f}".format("Loading batch", start_det_loop - load_batch))
print("{:25s}: {:2.3f}".format("Detection (" + str(len(imlist)) + " images)", output_recast - start_det_loop))
print("{:25s}: {:2.3f}".format("Output Processing", class_load - output_recast))
print("{:25s}: {:2.3f}".format("Drawing Boxes", end - draw))
print("{:25s}: {:2.3f}".format("Average time_per_img", (end - load_batch)/len(imlist)))
print("----------------------------------------------------------")

torch.cuda.empty_cache()
| StarcoderdataPython |
3347133 | <gh_stars>0
import keys
@keys.key("test")
def test_func():
print("Before exception")
raise Exception("Test Exception")
print("After exception")
| StarcoderdataPython |
3370235 | """
.. conftest.py:
Most of the tests are currently doctests. Have patience.
"""
import sys
from contextlib import contextmanager
import pytest
import sqlalchemy as sa
from flask import Flask, appcontext_pushed, g
from oso import Oso
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy_oso import register_models
from sqlalchemy_authorize import OsoPermissionsMixin, BasePermissionsMixin
# Shared in-memory SQLite engine and session used by the test models and
# fixtures below.
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
sess = Session(engine)
# -- Models -------------------------------------------------------------------
class BaseUser(BasePermissionsMixin, Base):
    """Test model guarded by the plain ``BasePermissionsMixin``.

    Mirrors ``User`` below (which uses ``OsoPermissionsMixin``) so the two
    mixins can be exercised against an identical schema.
    NOTE(review): the permissions are built with
    ``OsoPermissionsMixin.load_permissions`` even though this model uses
    ``BasePermissionsMixin`` -- presumably ``load_permissions`` is shared;
    confirm.
    """
    __tablename__ = 'baseuser'
    __repr_attrs__ = ['name']
    __permissions__ = OsoPermissionsMixin.load_permissions(
        # Public permissions
        read=["id", "username"],
        # Role-based permissions
        self=[
            # The user can provide ``username`` and ``fullname``
            # to ``__init__`` (as keyword args) and to ``__setattr__``.
            (["create", "update"], ["username", "fullname"]),
            # The user can read/delete the entire model.
            "read",
            "delete"
        ],
        admin="*"  # i.e., all actions on all fields
    )
    id = sa.Column(sa.String(128), primary_key=True)
    username = sa.Column(sa.String(128), nullable=False)
    fullname = sa.Column(sa.String(128), nullable=False)
    # Deliberately absent from every permission list above, so no role can
    # read or write it through the mixin.
    ssn = sa.Column(sa.String(10), nullable=True)
    is_admin = sa.Column(sa.Boolean, default=False)
    def __repr__(self):
        return f"<BaseUser {self.id}>"
class User(OsoPermissionsMixin, Base):
    """Test model guarded by ``OsoPermissionsMixin``.

    Schema and permission table are intentionally identical to ``BaseUser``
    above; only the mixin differs.
    """
    __tablename__ = 'user'
    __repr_attrs__ = ['name']
    __permissions__ = OsoPermissionsMixin.load_permissions(
        # Public permissions
        read=["id", "username"],
        # Role-based permissions
        self=[
            # The user can provide ``username`` and ``fullname``
            # to ``__init__`` (as keyword args) and to ``__setattr__``.
            (["create", "update"], ["username", "fullname"]),
            # The user can read/delete the entire model.
            "read",
            "delete"
        ],
        admin="*"  # i.e., all actions on all fields
    )
    id = sa.Column(sa.String(128), primary_key=True)
    username = sa.Column(sa.String(128), nullable=False)
    fullname = sa.Column(sa.String(128), nullable=False)
    # Deliberately absent from every permission list above, so no role can
    # read or write it through the mixin.
    ssn = sa.Column(sa.String(10), nullable=True)
    is_admin = sa.Column(sa.Boolean, default=False)
    def __repr__(self):
        return f"<User {self.id}>"
# -- Fixtures -----------------------------------------------------------------
@pytest.fixture(scope="session")
def session():
    """Rebuild the in-memory SQLite schema and return the shared session."""
    # Abort any transaction a previous test left open.
    sess.rollback()
    # Detach the session while the schema is rebuilt, then re-attach it.
    # NOTE(review): ``Base.__class__`` is the declarative metaclass, so
    # ``_session`` is presumably read by the permission mixins through the
    # metaclass -- confirm against the mixin implementation.
    Base.__class__._session = None
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    Base.__class__._session = sess
    return sess
@pytest.fixture(scope="session")
def app(oso):
    """Yield a Flask *test client* (note: not the app itself, despite the
    fixture name) with the Oso instance attached and an app context pushed.
    """
    app = Flask(__name__, instance_relative_config=True)
    # Expose the policy engine the way the extension expects to find it.
    app.oso = oso
    with app.test_client() as client:
        with app.app_context():
            yield client
@pytest.fixture(scope="session")
def oso():
    """Build an Oso instance with the test models and RBAC policy loaded."""
    oso = Oso()
    register_models(oso, User)
    # UserMock stands in for the acting user inside the polar policy.
    from sqlalchemy_authorize.oso.oso_permissions_mixin import UserMock
    oso.register_class(UserMock)
    # Path is relative to the process working directory (the repo root).
    oso.load_files(["./sqlalchemy_authorize/oso/rbac.polar"])
    return oso
@contextmanager
def user_set(app, user):
    """Temporarily set the acting user on ``flask.g``.

    Args:
        app: The Flask app (kept for API compatibility; not used directly).
        user: The object to expose as ``g.user`` inside the block.

    The previous implementation left ``g.user`` set after the block exited,
    leaking the user into later tests sharing the same app context; the
    ``finally`` below removes it even if the block raises.
    """
    g.user = user
    try:
        yield
    finally:
        g.pop("user", None)
# -- Doctest Namespace --------------------------------------------------------
@pytest.fixture(scope="session", autouse=True)
def add_app(doctest_namespace, app):
    """Expose the ``app`` fixture's value (a test client) to doctests.

    The previous version assigned the module-level fixture *function*
    (``doctest_namespace["app"] = app`` referred to the fixture object,
    which pytest forbids calling directly); requesting ``app`` as a
    parameter injects the fixture's yielded value instead.
    """
    doctest_namespace["app"] = app
@pytest.fixture(scope="session", autouse=True)
def add_BaseUser(doctest_namespace):
    """Expose the ``BaseUser`` model class to doctests."""
    doctest_namespace["BaseUser"] = BaseUser
@pytest.fixture(scope="session", autouse=True)
def add_User(doctest_namespace):
    """Expose the ``User`` model class to doctests."""
    doctest_namespace["User"] = User
@pytest.fixture(scope="session", autouse=True)
def add_oso(doctest_namespace, oso):
    """Expose the configured ``Oso`` instance to doctests.

    The previous version assigned the module-level fixture *function*
    object (pytest forbids calling fixture functions directly, so doctests
    could not use it); requesting ``oso`` as a parameter injects the
    fixture's return value -- the configured Oso instance -- instead.
    """
    doctest_namespace["oso"] = oso
@pytest.fixture(scope="session", autouse=True)
def add_user_set(doctest_namespace):
    """Expose the ``user_set`` context manager to doctests."""
    doctest_namespace["user_set"] = user_set
| StarcoderdataPython |
41440 | import numpy as np
import matplotlib.pyplot as plt
from utils import get_state_vowel
class HopfieldNetwork:
    """
    Hopfield network with binary (0/1) units and Hebbian weights.

    The weight matrix is built once from the bipolar (+1/-1) version of the
    stored patterns; units are then updated asynchronously until a full
    pass produces no change.
    """
    def __init__(self, patterns):
        """
        Initializes the network.
        Args:
            patterns (np.array): Group of states (rows of 0/1 values) to be
                memorized by the network.
        """
        self.num_units = patterns.shape[1]
        self.passes = 0
        # Random initial state: each unit independently 0 or 1.
        self.state_units = np.array([1 if 2 * np.random.random() - 1 >= 0 else 0 for _ in range(self.num_units)])
        # Hebbian rule on bipolar patterns: W = sum_p (2p-1)^T (2p-1).
        # The previous code added the full outer-product sum once per
        # pattern, scaling W by the number of patterns; the scale does not
        # change the sign-based update dynamics, but it inflated the
        # recorded energies.
        bipolar = 2 * patterns - 1
        self.W = np.dot(bipolar.T, bipolar).astype(float)
        # No self-connections.
        np.fill_diagonal(self.W, 0)
        self.energy = [-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units)]
    def _generate_sequence_units(self):
        """Return a random permutation giving the update order for one pass."""
        # The previous np.random.choice(n, n) sampled WITH replacement, so a
        # pass could skip units entirely and the run could terminate before
        # reaching a true fixed point; a permutation visits each unit once.
        return np.random.permutation(self.num_units)
    def run(self):
        """Run asynchronous updates until a full pass produces no change."""
        no_update = True
        while True:
            for unit in self._generate_sequence_units():
                unit_activation = np.dot(self.W[unit, :], self.state_units)
                if unit_activation >= 0 and self.state_units[unit] == 0:
                    self.state_units[unit] = 1
                    no_update = False
                elif unit_activation < 0 and self.state_units[unit] == 1:
                    self.state_units[unit] = 0
                    no_update = False
            # Record the network energy after each full pass.
            self.energy.append(-0.5 * np.dot(np.dot(self.state_units.T, self.W), self.state_units))
            self.passes += 1
            if no_update:
                break
            else:
                no_update = True
def main():
    """Memorize the five vowel bitmaps and display the network's recall."""
    np.random.seed(1234)
    vowels = ['A', 'E', 'I', 'O', 'U']
    patterns = np.array([get_state_vowel(v) for v in vowels])
    net = HopfieldNetwork(patterns)
    net.run()
    # Show each stored pattern followed by the state the network settled into.
    plt.figure(figsize=(6, 3), tight_layout=True)
    for idx, vowel in enumerate(vowels):
        plt.subplot(2, 3, idx + 1)
        plt.imshow(np.reshape(patterns[idx, :], (5, 5)), cmap="Greys_r")
        plt.title(vowel)
    plt.subplot(2, 3, 6)
    plt.imshow(np.reshape(net.state_units, (5, 5)), cmap="Greys_r")
    plt.title("Output")
    # Energy trajectory recorded after each update pass.
    plt.figure(figsize=(4, 2))
    plt.plot(net.energy)
    plt.title("Energy")
    plt.show()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1657529 | import matplotlib.pyplot as plt
import sklearn.datasets as skdata
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import sklearn
# Load the sklearn handwritten-digit set and flatten each 8x8 image into a
# 64-component feature row.
numeros = skdata.load_digits()
target = numeros['target']
imagenes = numeros['images']
n_imagenes = len(target)
data = imagenes.reshape((n_imagenes, -1))
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
scaler = StandardScaler()
x_train, x_test, y_train, y_test = train_test_split(data, target, train_size=0.5)
# Binarize the problem: 1 vs. everything-else (relabelled 0).
y_train[y_train!=1] = 0
y_test[y_test!=1]=0
# Standardize using statistics fitted on the training split only.
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
def _pca_lda_pr_curve(reference_rows, n_components=10):
    """Fit a PCA basis on ``reference_rows`` and evaluate an LDA on top of it.

    The principal axes are the eigenvectors of the covariance matrix of
    ``reference_rows`` (sorted by decreasing eigenvalue); an LDA is trained
    on the first ``n_components`` projections of ``x_train`` and evaluated
    on ``x_test``.

    Returns:
        (precision, recall, thresholds, f1): the arrays from
        ``sklearn.metrics.precision_recall_curve`` plus the pointwise F1.
    """
    cov = np.cov(reference_rows.T)
    eigvals, eigvecs = np.linalg.eig(cov)
    eigvals = np.real(eigvals)
    eigvecs = np.real(eigvecs)
    # Sort the eigenvectors by decreasing eigenvalue.
    order = np.argsort(-eigvals)
    eigvecs = eigvecs[:, order]
    clf = LinearDiscriminantAnalysis()
    proyeccion_train = np.dot(x_train, eigvecs)
    proyeccion_test = np.dot(x_test, eigvecs)
    clf.fit(proyeccion_train[:, :n_components], y_train.T)
    probabilidades = clf.predict_proba(proyeccion_test[:, :n_components])
    precision, recall, thresholds = sklearn.metrics.precision_recall_curve(y_test, probabilidades[:, 1])
    # NOTE: yields NaN wherever precision + recall == 0, exactly as the
    # previous inline computation did.
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, thresholds, f1

# The same PCA+LDA evaluation was previously triplicated inline; the three
# runs differ only in which rows the PCA basis is fitted on.
# PCA basis from the "1" digits only.
precision1, recall1, treshold1, f1_score1 = _pca_lda_pr_curve(x_train[y_train == 1])
# PCA basis from the whole training set.
precision_todos, recall_todos, treshold_todos, f1_score_todos = _pca_lda_pr_curve(x_train)
# PCA basis from the non-"1" digits (relabelled 0 above).
precision0, recall0, treshold0, f1_score0 = _pca_lda_pr_curve(x_train[y_train == 0])
# Two panels: F1-vs-threshold (left) and the precision/recall curve (right),
# overlaying the three PCA variants.  The red dot marks each curve's F1 peak.
plt.figure(figsize = (10,5))
# --- PCA fitted on the "1" digits only ---
plt.subplot(1,2,1)
plt.plot(treshold1,f1_score1[:-1], label = 'Solo 1')
# Index of the maximum F1 (NOTE(review): np.max propagates NaN, so this can
# come back empty if any precision+recall pair was zero -- confirm).
indice = np.where(f1_score1[:-1] == np.max(f1_score1[:-1]))
print(indice)
plt.scatter(treshold1[indice], f1_score1[:-1][indice], color = 'r')
plt.legend()
plt.xlabel('Probabilidad')
plt.ylabel('F1')
plt.subplot(1,2,2)
plt.plot(recall1,precision1, label = 'solo1')
plt.legend()
plt.scatter(recall1[indice], precision1[indice], color = 'r')
plt.xlabel('Recall')
plt.ylabel('Precisión')
# --- PCA fitted on the full training set ---
plt.subplot(1,2,1)
plt.plot(treshold_todos,f1_score_todos[:-1], label = 'Todos')
plt.legend()
indice = np.where(f1_score_todos[:-1] == np.max(f1_score_todos[:-1]))
print(indice)
plt.scatter(treshold_todos[indice], f1_score_todos[:-1][indice], color = 'r')
plt.xlabel('Probabilidad')
plt.ylabel('F1')
plt.subplot(1,2,2)
plt.plot(recall_todos,precision_todos, label = 'Todos')
plt.scatter(recall_todos[indice], precision_todos[indice], color = 'r')
plt.xlabel('Recall')
plt.ylabel('Precisión')
plt.legend()
# --- PCA fitted on the non-"1" digits ---
plt.subplot(1,2,1)
plt.plot(treshold0,f1_score0[:-1], label = 'Solo 0')
plt.legend()
indice = np.where(f1_score0[:-1] == np.max(f1_score0[:-1]))
plt.scatter(treshold0[indice], f1_score0[:-1][indice], color = 'r')
print(indice)
plt.xlabel('Probabilidad')
plt.ylabel('F1')
plt.subplot(1,2,2)
plt.plot(recall0,precision0, label = 'Solo 0')
plt.scatter(recall0[indice], precision0[indice], color = 'r')
plt.xlabel('Recall')
plt.ylabel('Precisión')
plt.legend()
plt.savefig('F1_prec_recall.png') | StarcoderdataPython |
2658 | <reponame>zengrx/S.M.A.R.T<filename>src/advanceoperate/malimgthread.py<gh_stars>1-10
#coding=utf-8
from PyQt4 import QtCore
import os, glob, numpy, sys
from PIL import Image
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import BallTree
from sklearn import cross_validation
from sklearn.utils import shuffle
import sklearn
import leargist
import cPickle
import random
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
class ValidationResult(QtCore.QThread):
    """Background thread (Python 2 / PyQt4) that 10-fold cross-validates the
    malware-image KNN classifier and emits the summed confusion matrix."""
    # Emitted on completion with [confusion_matrix, per-family image counts,
    # family names].
    finishSignal = QtCore.pyqtSignal(list)
    def __init__(self, parent=None):
        super(ValidationResult, self).__init__(parent)
    def getClassifyLabel(self):
        """Load the precomputed training artifacts from ./datafiles."""
        X = numpy.load("./datafiles/img_features.npy") # features
        y = numpy.load("./datafiles/img_labels.npy") # labels
        n = cPickle.load(open("./datafiles/img.p","rb")) # family names
        l = cPickle.load(open("./datafiles/imglabel.p", "rb")) # [family id, index within family, file name, overall index]
        return X, y, n ,l
    '''
    Prepare the data needed to draw the confusion matrix.
    @X: feature matrix
    @y: labels
    @n: family names of all samples
    @l: per-family sample records
    '''
    def prepareData2Matrix(self, X, y, n, l):
        n_samples, useless = X.shape
        p = range(n_samples)
        random.seed(random.random())
        random.shuffle(p)
        X, y = X[p], y[p] # shuffle features and labels with a shared permutation
        kfold = 10 # 10-fold
        skf = StratifiedKFold(y,kfold)
        skfind = [None] * len(skf)
        cnt = 0
        for train_index in skf:
            skfind[cnt] = train_index
            cnt += 1
        list_fams = n
        cache = []
        no_imgs = []
        # Recover the number of images per family from the index records:
        # a record with within-family index 0 marks a family start, so the
        # previous record's within-family index + 1 is the size of the
        # family that just ended.
        for l_list in l:
            if 0 == l_list[1]:
                # print l[l_list[3] - 1]
                # print l_list
                cache.append(l[l_list[3] - 1][1] + 1)
        no_imgs = cache[1:len(cache)]
        no_imgs.append(cache[0])
        # print no_imgs # per-family file counts
        conf_mat = numpy.zeros((len(no_imgs), len(no_imgs))) # initialize the confusion matrix
        n_neighbors = 5
        # 10-fold Cross Validation
        for i in range(kfold):
            train_indices = skfind[i][0]
            test_indices = skfind[i][1]
            clf = []
            clf = KNeighborsClassifier(n_neighbors, weights='distance')
            X_train = X[train_indices]
            y_train = y[train_indices]
            X_test = X[test_indices]
            y_test = y[test_indices]
            # Training
            import time
            tic = time.time()
            clf.fit(X_train,y_train)
            toc = time.time()
            print "training time= ", toc-tic # roughly 2.5 secs
            # Testing
            y_predict = []
            tic = time.time()
            y_predict = clf.predict(X_test) # output is labels and not indices
            toc = time.time()
            print "testing time = ", toc-tic # roughly 0.3 secs
            # Compute confusion matrix
            cm = []
            cm = confusion_matrix(y_test,y_predict)
            conf_mat = conf_mat + cm
        return conf_mat, no_imgs, list_fams
    def run(self):
        """QThread entry point: run the validation and emit the result."""
        print "start draw"
        X, y, n, l = self.getClassifyLabel()
        cm, nimg, listf = self.prepareData2Matrix(X, y, n, l)
        msg = [cm, nimg, listf]
        self.finishSignal.emit(msg)
class MalwareImageClass(QtCore.QThread):
    """Background thread (Python 2 / PyQt4) that classifies one malware image
    with KNN and reports its nearest neighbours in the training set."""
    # Emitted with [predicted class index, class name, class probabilities].
    malwarSignal = QtCore.pyqtSignal(int, list)
    # Emitted with the file names of the nearest training samples.
    concluSignal = QtCore.pyqtSignal(int, list)
    def __init__(self, filename, parent=None):
        super(MalwareImageClass, self).__init__(parent)
        self.filename = str(filename)#.encode('cp936')
        self.feature = ''
    '''
    Load the training artifacts:
    features, labels, file names and their corresponding indices.
    '''
    def getClassifyLabel(self):
        X = numpy.load("./datafiles/img_features.npy") # features
        y = numpy.load("./datafiles/img_labels.npy") # labels
        n = cPickle.load(open("./datafiles/img.p","rb")) # family names
        l = cPickle.load(open("./datafiles/imglabel.p", "rb")) # [family id, index within family, file name, overall index]
        return X, y, n ,l
    '''
    Classify the image.
    feature_X@training-set features
    label_y@training-set labels
    number@family names indexed by predicted label
    '''
    def classifyImage(self, feature_X, label_y, number):
        im = Image.open(self.filename)
        im1 = im.resize((64,64), Image.ANTIALIAS); # resize to 64x64
        des = leargist.color_gist(im1); # 960 values
        feature = des[0:320]; # greyscale image: only the first 320 values are needed
        query_feature = feature.reshape(1, -1)
        self.feature = query_feature
        # fetch the features and labels
        X = feature_X
        y = label_y
        n = number
        n_neighbors = 5; # better to have this at the start of the code
        knn = KNeighborsClassifier(n_neighbors, weights='distance')
        knn.fit(X, y)
        num = int(knn.predict(query_feature))
        classname = n[num]
        proba = knn.predict_proba(query_feature)
        msg = [num, classname, proba]
        self.malwarSignal.emit(1, msg)
    '''
    Use a BallTree to find the most similar samples in the data set.
    Emits the file names of the nearest samples.
    '''
    def findMostSimilarImg(self, feature_X, serial):
        X = feature_X
        b = BallTree(X)
        # the k=3 nearest samples (the original comment said 5, but k is 3)
        dist, ind = b.query(self.feature, k=3)
        print dist, ind
        ind = ind[0]
        # print ind
        l = serial
        imgs = []
        # Map each neighbour's overall index back to its file name.
        for rank in ind:
            # print rank
            for name in l:
                if rank == name[3]:
                    # print name
                    imgs.append(name[2])
        self.concluSignal.emit(2, imgs)
    def run(self):
        """QThread entry point: classify the image, then report neighbours."""
        X, y, n ,l = self.getClassifyLabel()
        self.classifyImage(X, y, n)
        self.findMostSimilarImg(X, l)
| StarcoderdataPython |
48241 | <filename>retrieverdash/dashboard_script/status_dashboard_tools.py
import json
import os
from difflib import HtmlDiff
from shutil import rmtree, move, copytree
from tempfile import mkdtemp
from retriever import reload_scripts
from retriever.engines import engine_list, postgres
from retriever.lib.defaults import HOME_DIR
from retriever.lib.engine_tools import getmd5
# The SQLite engine from retriever's registry; used to build the CSV dumps.
sqlite_engine = [eng for eng in engine_list if eng.name == 'SQLite'][0]
# Directory containing this script; all working directories live under it.
file_location = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
temp_file_location = os.path.normpath(
    os.path.join(file_location, 'temp_files'))
# Datasets currently tracked by the dashboard.
example_datasets = ['bird-size', 'mammal-masses', 'airports', 'portal']
def get_dataset_md5(dataset, use_cache=False, debug=True, location=temp_file_location):
    """
    Parameters
    ----------
    dataset : dataset script object
    use_cache : True to use cached data or False to download again
    debug: True to raise error or False to fail silently
    location: path where temporary files are to be created for finding md5

    Returns
    -------
    str : The md5 value of a particular dataset.

    Example
    -------
    >>> for dataset in reload_scripts():
    ...     if dataset.name=='aquatic-animal-excretion':
    ...         print(get_dataset_md5(dataset))
    ...
    683c8adfe780607ac31f58926cf1d326
    """
    try:
        db_name = '{}_sqlite.db'.format(dataset.name.replace('-', '_'))
        # Isolated working directory so concurrent runs cannot collide.
        # NOTE(review): if mkdtemp() raises, the ``finally`` below hits a
        # NameError on ``workdir`` -- consider assigning before the try.
        workdir = mkdtemp(dir=location)
        # The engine writes relative to the CWD, hence the chdir (undone in
        # the finally block).
        os.chdir(workdir)
        engine = sqlite_engine.__new__(sqlite_engine.__class__)
        engine.script_table_registry = {}
        args = {
            'command': 'install',
            'dataset': dataset,
            'file': os.path.join(workdir, db_name),
            'table_name': '{db}_{table}',
            'data_dir': '.'
        }
        engine.opts = args
        engine.use_cache = use_cache
        # Install into SQLite, dump to CSV, then drop the database file so
        # only the CSVs contribute to the md5.
        dataset.download(engine=engine, debug=debug)
        engine.to_csv(sort=False)
        engine.final_cleanup()
        os.remove(os.path.join(workdir, db_name))
        current_md5 = getmd5(os.path.join(file_location, workdir),
                             data_type='dir',
                             encoding=dataset.encoding)
        # Archive the fresh CSVs under current/<dataset> for diffing.
        if not os.path.exists(os.path.join(file_location, 'current', dataset.name)):
            os.makedirs(os.path.join(file_location, 'current', dataset.name))
        for file in os.listdir(workdir):
            move(os.path.join(workdir, file),
                 os.path.join(file_location, 'current', dataset.name))
    finally:
        # Best-effort cleanup: stray db file, retriever's raw downloads,
        # the temporary working directory, and the CWD change.
        if os.path.isfile(db_name):
            os.remove(db_name)
        if os.path.exists(os.path.join(HOME_DIR, 'raw_data', dataset.name)):
            rmtree(os.path.join(HOME_DIR, 'raw_data', dataset.name))
        os.chdir(os.path.dirname(file_location))
        rmtree(workdir)
    return current_md5
def create_diff(csv1, csv2, diff_file, context=False, numlines=5):
    """
    Write an HTML diff of two CSV files.

    Parameters
    ----------
    csv1 : The first csv file.
    csv2 : The second csv file.
    diff_file : The diff file (HTML) that is to be generated.
    context : set to True for contextual differences. Defaults to False,
              which shows the full files (lines with changes and also
              those without any changes).  The previous signature had no
              default even though the docstring claimed one.
    numlines : number of context lines (default 5, matching
               ``difflib.HtmlDiff.make_file``). When context is True,
               controls how many extra lines are shown before and after
               each change. When context is False, controls how many lines
               before the next change the "next" link anchors are placed
               (so clicking "next" jumps to just before the change).

    Returns
    -------
    bool : True if the diff file was written, False if any of the files
           could not be read or written.

    Example
    -------
    >>> create_diff('file1.csv', 'file2.csv', 'differ.html')  # doctest: +SKIP
    """
    html_diff = HtmlDiff()
    try:
        with open(csv1, 'r', encoding="ISO-8859-1") as file1, \
                open(csv2, 'r', encoding="ISO-8859-1") as file2, \
                open(diff_file, 'w') as file3:
            diff_lines = html_diff.make_file(file1, file2,
                                             context=context,
                                             numlines=numlines)
            file3.writelines(diff_lines)
        return True
    except IOError:
        return False
def create_dirs(location=file_location):
    """
    Create the working directories used for building dataset diffs.

    Ensures ``temp_files``, ``old``, ``current`` and ``diffs`` exist under
    ``location``; directories that already exist are left untouched.
    """
    required_dirs = ['temp_files', 'old', 'current', 'diffs']
    for dir_name in required_dirs:
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists() guard.
        os.makedirs(os.path.join(location, dir_name), exist_ok=True)
def diff_generator(dataset, location=file_location):
    """Create per-table HTML diffs for ``dataset`` and archive its CSVs.

    For each table, diffs old/<dataset>/<name>.csv against
    current/<dataset>/<name>.csv into diffs/<name>.html, then moves the
    current CSV into old/. Returns a mapping of table name -> diff file
    for the tables whose diff was written.
    """
    tables = {}
    old_dir = os.path.join(location, 'old', dataset.name)
    current_dir = os.path.join(location, 'current', dataset.name)
    for table in dataset.tables:
        base = '{}_{}'.format(dataset.name.replace('-', '_'), table)
        csv_file_name = '{}.csv'.format(base)
        html_file_name = '{}.html'.format(base)
        wrote_diff = create_diff(os.path.join(old_dir, csv_file_name),
                                 os.path.join(current_dir, csv_file_name),
                                 os.path.join(location, 'diffs', html_file_name),
                                 context=True, numlines=1)
        if wrote_diff:
            tables[table] = html_file_name
        try:
            if not os.path.exists(old_dir):
                os.makedirs(old_dir)
            move(os.path.join(current_dir, csv_file_name),
                 os.path.join(old_dir, csv_file_name))
        except IOError:
            # Missing current CSV (e.g. first run) -- nothing to archive.
            pass
    return tables
def create_json(path="dataset_details.json"):
    """Write a JSON file mapping each tracked dataset's name to its md5.

    Only datasets listed in ``example_datasets`` are included.
    """
    details = {
        script.name: {"md5": get_dataset_md5(script)}
        for script in reload_scripts()
        if script.name in example_datasets
    }
    with open(path, 'w') as json_file:
        json.dump(details, json_file, sort_keys=True, indent=4)
def dataset_type(dataset):
    """Return the kind of a dataset script: "spatial" or "tabular".

    A dataset is spatial when any of its tables declares a
    ``dataset_type`` of ``RasterDataset`` or ``VectorDataset``;
    everything else (including an empty table map) is tabular.
    """
    spatial_kinds = ("RasterDataset", "VectorDataset")
    has_spatial = any(
        getattr(table, 'dataset_type', None) in spatial_kinds
        for table in dataset.tables.values()
    )
    return "spatial" if has_spatial else "tabular"
def install_postgres(dataset):
    """
    Install dataset into local instance of the postgres
    and dump each table to current/<dataset> as CSV for diffing.

    required_opts = [
        ("user", "Enter your PostgreSQL username", "postgres"),
        ("password", "<PASSWORD>", ""),
        ("host", "Enter your PostgreSQL host", "localhost"),
        ("port", "Enter your PostgreSQL port", 5432),
        ("database", "Enter your PostgreSQL database name", "postgres"),
        ("database_name", "Format of schema name", "{db}"),
        ("table_name", "Format of table name", "{db}.{table}"),
    ]
    """
    # SECURITY NOTE(review): credentials are hardcoded here; move them to
    # environment variables or a config file outside version control.
    args = {
        "user": 'retrieverdash',
        "password": "<PASSWORD>!",
        "host": "localhost",
        "port": 5432,
        "command": 'install',
        "database": "retrieverdash",
        "dataset": dataset,
        "database_name": "{db}",
        "table_name": "{db}.{table}",
    }
    test_engine = postgres.engine()
    test_engine.opts = args
    dataset.download(engine=test_engine, debug=True)
    # Dump the installed tables to CSV under current/<dataset>.
    folder_save_location = os.path.normpath(
        os.path.join(file_location, 'current', dataset.name))
    if not os.path.exists(folder_save_location):
        os.makedirs(folder_save_location)
    test_engine.to_csv(path=folder_save_location)
    test_engine.final_cleanup()
    # Remove retriever's raw download cache for this dataset.
    if os.path.exists(os.path.join(HOME_DIR, 'raw_data', dataset.name)):
        rmtree(os.path.join(HOME_DIR, 'raw_data', dataset.name))
def diff_generator_spatial(dataset, location=file_location):
    """Create per-table HTML diffs for a spatial dataset and archive CSVs.

    Same flow as ``diff_generator``, but files are named
    ``<db>.<table>.csv`` because PostgreSQL has special rules for table
    naming. Returns a mapping of table name -> diff file for the tables
    whose diff was written.
    """
    tables = {}
    old_dir = os.path.join(location, 'old', dataset.name)
    current_dir = os.path.join(location, 'current', dataset.name)
    for table in dataset.tables:
        base = '{}.{}'.format(dataset.name.replace('-', '_'), table)
        csv_file_name = '{}.csv'.format(base)
        html_file_name = '{}.html'.format(base)
        wrote_diff = create_diff(os.path.join(old_dir, csv_file_name),
                                 os.path.join(current_dir, csv_file_name),
                                 os.path.join(location, 'diffs', html_file_name),
                                 context=True, numlines=1)
        if wrote_diff:
            tables[table] = html_file_name
        try:
            if not os.path.exists(old_dir):
                os.makedirs(old_dir)
            move(os.path.join(current_dir, csv_file_name),
                 os.path.join(old_dir, csv_file_name))
        except IOError:
            # Missing current CSV (e.g. first run) -- nothing to archive.
            pass
    return tables
def data_shift(dataset, is_spatial=False):
    """Move each table's CSV from current/<dataset> to old/<dataset>.

    Spatial datasets use a ``<db>.<table>.csv`` naming scheme; tabular
    ones use ``<db>_<table>.csv``. Missing files are skipped silently.
    """
    base = dataset.name.replace('-', '_')
    separator = '.' if is_spatial else '_'
    old_dir = os.path.join(file_location, 'old', dataset.name)
    current_dir = os.path.join(file_location, 'current', dataset.name)
    for table in dataset.tables:
        csv_file_name = '{}{}{}.csv'.format(base, separator, table)
        try:
            if not os.path.exists(old_dir):
                os.makedirs(old_dir)
            move(os.path.join(current_dir, csv_file_name),
                 os.path.join(old_dir, csv_file_name))
        except IOError:
            # Nothing to move for this table.
            pass
13379 | # Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.LoadModel.EnergyArea import EnergyArea
class SubLoadArea(EnergyArea):
    """The class is the second level in a hierarchical structure for grouping of loads for the purpose of load flow load scaling.

    NOTE: this file follows the machine-generated CIM pattern used across
    the package: each association is a private backing attribute plus a
    property whose setter keeps both ends of the association in sync.
    """
    def __init__(self, LoadGroups=None, LoadArea=None, *args, **kw_args):
        """Initialises a new 'SubLoadArea' instance.
        @param LoadGroups: The Loadgroups in the SubLoadArea.
        @param LoadArea: The LoadArea where the SubLoadArea belongs.
        """
        # Assign through the properties (not the backing fields) so the
        # reverse references on the related objects are established.
        self._LoadGroups = []
        self.LoadGroups = [] if LoadGroups is None else LoadGroups
        self._LoadArea = None
        self.LoadArea = LoadArea
        super(SubLoadArea, self).__init__(*args, **kw_args)
    # Metadata consumed by the CIM serialisation machinery.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["LoadGroups", "LoadArea"]
    _many_refs = ["LoadGroups"]
    def getLoadGroups(self):
        """The Loadgroups in the SubLoadArea.
        """
        return self._LoadGroups
    def setLoadGroups(self, value):
        # Detach every current group, then attach the new ones; each group's
        # SubLoadArea back-reference is kept consistent.
        for x in self._LoadGroups:
            x.SubLoadArea = None
        for y in value:
            y._SubLoadArea = self
        self._LoadGroups = value
    LoadGroups = property(getLoadGroups, setLoadGroups)
    def addLoadGroups(self, *LoadGroups):
        """Attach one or more load groups to this area."""
        for obj in LoadGroups:
            obj.SubLoadArea = self
    def removeLoadGroups(self, *LoadGroups):
        """Detach one or more load groups from this area."""
        for obj in LoadGroups:
            obj.SubLoadArea = None
    def getLoadArea(self):
        """The LoadArea where the SubLoadArea belongs.
        """
        return self._LoadArea
    def setLoadArea(self, value):
        # Remove self from the previous LoadArea's collection before
        # registering with the new one.
        if self._LoadArea is not None:
            filtered = [x for x in self.LoadArea.SubLoadAreas if x != self]
            self._LoadArea._SubLoadAreas = filtered
        self._LoadArea = value
        if self._LoadArea is not None:
            if self not in self._LoadArea._SubLoadAreas:
                self._LoadArea._SubLoadAreas.append(self)
    LoadArea = property(getLoadArea, setLoadArea)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.