| content (string, 0–1.05M chars) | origin (2 classes) | type (2 classes) |
|---|---|---|
import numpy as np
from scipy.special import loggamma
from scipy.spatial import KDTree
from matplotlib import pyplot as plt
from scipy.optimize import minimize
from mpl_toolkits import mplot3d
from math import frexp
from mpmath import mp, hyper, nstr, hyperu
from exactlearning import BFGS_search, analyse
mp.dps = 16; mp.pretty = True
np.seterr(divide = 'raise')
#twopi = 2*np.pi
#twopi_rec = 1/twopi
#pi_rec = 1/np.pi
## Set the tag here
tag = "Linear_0"
print("*** Loading Data ***")
N_d = 10
logmoments = np.load("logmoments_{}.npy".format(tag))[:N_d]
moments = np.load("moments_{}.npy".format(tag))[:N_d]
s_values = np.load("s_values_{}.npy".format(tag))[:N_d]
real_error = np.load("real_error_{}.npy".format(tag))[:N_d]
imag_error = np.load("imag_error_{}.npy".format(tag))[:N_d]
## Chop up
real_s = np.real(s_values)
imag_s = np.imag(s_values)
real_logm = np.real(logmoments)
imag_logm = np.imag(logmoments)
real_m = np.real(moments)
imag_m = np.imag(moments)
real_log_upper = np.real(np.log(moments + real_error))
real_log_lower = np.real(np.log(moments - real_error))
imag_log_upper = np.imag(np.log(moments + imag_error))
imag_log_lower = np.imag(np.log(moments - imag_error))
## The bounds to work with
real_log_diff = real_log_upper - real_log_lower
imag_log_diff = imag_log_upper - imag_log_lower
### Define a rational/algebraic etc. solution space.
PT = np.array([-1,-2,-1/2,-3/4,3/4,3/2,5/2,2/3,np.sqrt(np.sqrt(2)),1/np.sqrt(np.sqrt(2)),1e-6,1,2,3,4,1/2,1/3,1/4,1/5,np.sqrt(2),np.sqrt(3),1/np.sqrt(2),1/np.sqrt(3),np.pi,1.0/np.pi])
#PT = np.reshape([PT,-PT],2*len(PT))
if(False):
values = []
index_to_drop = []
A = len(constants_dict.keys())
count = 0
for i in constants_dict.keys():
if(constants_dict[i] not in values): values.append(constants_dict[i])
else: index_to_drop.append(i)
count +=1
print(count/A)
for i in index_to_drop:
del constants_dict[i]
PT = np.array(list(constants_dict.values()))
Pkeys = np.array(list(constants_dict.keys()))
np.save("clean_values",PT)
np.save("clean_keys",Pkeys)
PT = np.load("clean_values.npy")
PTkeys = np.load("clean_keys.npy")
reverse_dict = { i:j for i,j in zip(PT,PTkeys)}
PT[0]=1e-7
#from scipy.spatial import KDTree
### Define a point cloud
#points = [[[[[a,b,c,d] for d in PT] for c in PT] for b in PT] for a in PT]
#points = np.array(points)
#points = np.reshape(points,(len(PT)**4,4))
#points_tree = KDTree(points)
N_terms = 13
## Scaled
def fingerprint(p):
ret = np.log(p[0]**2) ## A constant factor
ret += s_values*np.log(p[1]**2) ## C^s for some C, together with previous cover most prefactors
ret += loggamma(p[2]+ p[3]*s_values) ## A flexible gamma
ret += loggamma(p[4] + p[5]*s_values) ## A flexible gamma
hyp = [complex(hyper([p[6]*s+p[7],p[8]+p[9]*s],[p[10]+p[11]*s],p[12])) for s in s_values] ## slow generalised_hypergeom
ret += np.log(hyp)
    # s_values**2 * np.log(p[6]**2) #+ s**3 * np.log(p[3]**2) + s**4 * np.log(p[4]**2) ## Strange series terms
#ret += np.log(1 + p[5]*s_values + p[6]*s_values**2 + p[7]*s_values**3 + p[8]*s_values**4) ## Log of polynomial
return ret
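
# Illustrative sanity check (disabled, mirroring the if(False) blocks elsewhere in this
# script): evaluate the fingerprint at an arbitrary parameter vector and compare it
# element-wise against the numerical log-moments loaded above.
if(False):
    p_test = np.random.random(N_terms)
    print(np.abs(fingerprint(p_test) - logmoments))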
#p0 = np.ones(N_terms) + (0.5- np.random.rand(N_terms))
observations = []
losses = []
def categorical_solve(nits, L_in=None, P_in=None):
C_size = len(PT)
#static = np.array(range(N_terms))
if(L_in == None): L = 0.001*np.ones((N_terms,C_size))
C = 0.001*np.ones((N_terms,C_size))
    K = np.random.choice(range(C_size), size=N_terms, replace=True)
    p = PT[K]
    l = complex_diff(p)
Q = [[ np.exp(-np.abs(K[i]-PT[j]))/l for j in range(C_size)] for i in range(N_terms)]
N = [[ np.exp(-np.abs(K[i]-PT[j])) for j in range(C_size)] for i in range(N_terms)]
L += Q
C += N
## Probability distribution over elements
if(P_in == None):
P = L/C
N = np.sum(P,axis =1)
P = P / N[:,None]
N = np.sum(P,axis =1)
    # I.e. an array of differences and a sorted list...
## Add in an additional parameter choice which isn't in the list? (Some kind of solver?)
## Add in a routine that sets certain elements of P to zero after they drop below a threshold (number of observations)?
losses = []
for i in range(nits):
power = 1 + i/1000
K = np.array([np.random.choice(range(C_size),replace=True, p = pp) for pp in P])
p = PT[K]
try:
l = complex_diff(p)
except:
l = 100
if(l>100): l = 100
#l = 0.01+np.random.random()
print(l)
if(l<1e-6): return L, P
Q = [[ np.exp(-np.abs(K[i]-PT[j]))/l for j in range(C_size)] for i in range(N_terms)]
N = [[ np.exp(-np.abs(K[i]-PT[j])) for j in range(C_size)] for i in range(N_terms)]
L += Q
C += N
P = L/C
N = np.sum(P,axis =1)
P = (P / N[:,None])**power
N = np.sum(P,axis =1)
P = (P / N[:,None])
#if(i%100==0):
# i = np.transpose(np.argwhere(P<1e-3))
# L[i[0],i[1]] = 0
# P = L/C
# N = np.sum(P,axis =1)
# P = P / N[:,None]
return L, P
if(False):
L, P = categorical_solve(1000)
for i in range(N_terms):
q = np.quantile(P[i],0.75)
m = np.argmax(P[i])
indices = np.where(P[i] > q)
terms = PT[indices]
print("p[{}] ~ ".format(i),terms)
print("Hypothesis: p[{}] ~ {}".format(i,PT[m]))
for i in range(len(PT)):
print(i,PT[i])
for i in P:
plt.bar(range(len(i)),i)
plt.show()
exit()
from scipy.stats import norm

def weighted_avg_and_std(values, weights):
    average = np.average(values, weights=weights)
    variance = np.average((values-average)**2, weights=weights)
    return (average, np.sqrt(variance))

## The import and helper above are defined at module level (rather than inside the
## disabled experiment below) because the Gaussian-weighted exploration block further
## down also relies on them.
if(False):
    pass
## First carry out a random set of experiments
for i in range(1000):
p0 = np.random.uniform(low=np.amin(PT),high=np.amax(PT),size=N_terms)
score = complex_diff(p0)
observations.append(p0)
losses.append(score)
for k in range(1):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
print(MS)
for i in range(100):
p0 = [ np.random.normal(loc=m,scale = 2*s) for m,s in MS]
score = complex_diff(p0)
observations.append(p0)
losses.append(score)
for k in range(100):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
print(MS)
## Consider the list of solutions weighted by the normals distributions
PT_weights = [ [norm(loc=m,scale=s).pdf(k) for k in PT] for m,s in MS]
PT_weights = [ a/np.sum(a) for a in PT_weights ]
Ps = np.transpose(np.array([ np.random.choice(PT,size=10,p=p) for p in PT_weights ]))
for p in Ps:
score = complex_diff(p)
observations.append(p)
losses.append(score)
print("Best Score:",np.amin(losses))
print("Best Params:",observations[np.argmin(losses)])
print(losses)
print(observations)
#p_test = [1/np.sqrt(2),0.5,0.5,0.25]
## Loop here
#res = points_tree.query(p_test,k=20)
#new_indices = res[1]
#vecs = points[new_indices]
#scores = [complex_diff(p) for p in vecs]
#print(scores)
p0= np.random.random(N_terms)
#p0 = [ 0.51238944,0.97451466,-0.01,0.4491124,0.12458327,0.82568312,0.20801154,0.27429931,0.73933532,0.16679021,0.5342653,0.90349894,0.31334464, 0.68688119]
p0 = [ 0.51238944,0.97451466,0.4491124,0.12458327,0.82568312,0.20801154,0.27429931,0.73933532,0.16679021,0.5342653,0.90349894,0.31334464, 0.68688119]
if(True):
if(True):
popt = BFGS_search(p0)
else:
print("BFGS Disabled")
popt = p0
print("** Searching for Algebraic Identity ***")
## Rational searcher
## Loop here
#res = points_tree.query(popt,k=10)
#new_indices = res[1]
#vecs = points[new_indices]
#scores = [complex_diff(p) for p in vecs]
#best_score = np.argmin(scores)
#print(vecs[best_score],scores[best_score])
analyse(popt)
## Add these "best" solutions to the mix
## This gives us a chance at partially optimising the original solution
PT = np.concatenate((PT,popt))
PTkeys = np.concatenate((PTkeys,["BFGS_param_{}".format(i) for i in range(len(popt))]))
reverse_dict = { i:j for i,j in zip(PT,PTkeys)}
##
## IMPORTANT IDEA
## CONSIDER FIRST ITERATING EACH PARAMETER IN TERMS OF NEARBY SOLUTIONS WHILE KEEPING THE OTHER TERMS CONSTANT
## IF WE GET ANY HITS THIS IS PROMISING
PT2 = [[k] for k in PT]
value_tree = KDTree(PT2)
CHOICES = []
for i in range(len(popt)):
k_query = 5
nearest_k = value_tree.query([popt[i]],k=k_query)
## Get all the values which are within 0.1
dists = nearest_k[0]
inds = np.argwhere(dists <= 0.1)
elements = nearest_k[1][inds]
choice = [k[0] for k in PT[elements]]
CHOICES.append(choice)
print("p[{}] choose from {}".format(i,choice))
## Set up a score system for the choices
P = np.zeros((len(popt),k_query))
for i in range(len(CHOICES)):
for j in range(len(CHOICES[i])):
P[i,j]+=1
N = np.sum(P,axis =1)
P = (P / N[:,None])
## A probabilistic scoring approach
if(False):
## Assemble all parameter combinations
nits = 10*np.prod([len(ii) for ii in CHOICES])
print("N iterations = {}".format(nits))
## Or run the weighted combinations analysis
print("*** Running Enumeration ***")
l_best =100
for i in range(nits):
K = np.array([np.random.choice(range(k_query),replace=True, p = pp) for pp in P])
p = [ CHOICES[ch][K[ch]] for ch in range(len(CHOICES))]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
exit()
## A Gaussian weighted exploration algorithm
if(True):
l_best =100
observations = []
losses = []
## First carry out a random set of experiments
for i in range(100):
K = np.array([np.random.choice(range(k_query),replace=True, p = pp) for pp in P])
p = [ CHOICES[ch][K[ch]] for ch in range(N_terms)]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
observations.append(p)
losses.append(l)
for k in range(1):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
for i in range(100):
p = [ np.random.normal(loc=m,scale = 2*s) for m,s in MS]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
observations.append(p)
losses.append(l)
for k in range(1000):
O = np.array(observations)
L = np.array(losses)
## Now for each parameter, derive a normal distribution
MS = np.array([ weighted_avg_and_std(np.transpose(O)[i], 1/L) for i in range(N_terms)])
## Consider the list of solutions weighted by the normals distributions
PT_weights = [ [norm(loc=MS[qq][0],scale=MS[qq][1]).pdf(k) for k in CHOICES[qq]] for qq in range(len(MS))]
PT_weights = [ a/np.sum(a) for a in PT_weights ]
p = [ np.random.choice(CHOICES[i],p=PT_weights[i]) for i in range(len(CHOICES)) ]
try:
l = complex_diff(p)
except:
l = 100
if(l<l_best):
l_best=l
print("Best score yet: {} with {}".format(l,p))
print("Translates to: {} with {}".format(l,[reverse_dict[i] for i in p]))
observations.append(p)
losses.append(l)
print("Best Params:",observations[np.argmin(losses)])
##
#WITH THE BEST SCORE, search through the tree of values
#for each parameter get say 5 values?
#Check the lists by eye? I.e.
#"Basic Constant in [0,1,2,3]", then we can see if 0 is a bad suggestion?
#"Blah Blah in ... ",
#From here we can run the above methods of filtering or a direct enumeration if the number of combinations is less than 2 million or so..
#Run on a single data point, collect the best combinations and check for multiple data points.
#Consider a method to design splits i.e. as before on the filtering method
#If we have two very high peaks, then reenumerate using those two values only!
ax = plt.axes(projection='3d')
# Data for three-dimensional scattered points
ax.scatter3D(real_s, imag_s, real_logm, c=real_logm, cmap='Reds', label = "Numeric")
ax.scatter3D(real_s, imag_s, np.real(fit), c=np.real(fit), cmap='Greens', label = "Theoretical")
ax.set_xlabel('Re(s)')
ax.set_ylabel('Im(s)')
ax.set_zlabel(r'$\log Re(E[x^{s-1}])$')
plt.legend()
plt.show()
ax = plt.axes(projection='3d')
# Data for three-dimensional scattered points
ax.scatter3D(real_s, imag_s, imag_logm, c=imag_logm, cmap='Reds', label = "Numeric")
ax.scatter3D(real_s, imag_s, np.imag(fit), c=np.imag(fit), cmap='Greens', label = "Theoretical")
ax.set_xlabel('Re(s)')
ax.set_ylabel('Im(s)')
ax.set_zlabel(r'$\log Im(E[x^{s-1}])$')
plt.legend()
plt.show()
p_best = popt
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# coding: utf-8
"""
@author: Ping Qiu qiuping1@genomics.cn
@last modified by: Ping Qiu
@file:test_find_markers.py
@time:2021/03/16
"""
import sys
sys.path.append('/data/workspace/st/stereopy-release')
from stereo.tools.spatial_pattern_score import *
import pandas as pd
from anndata import AnnData
import numpy as np
np.random.seed(9)
def init(genes=50, cells=20, dtype='dataframe'):
gname = [f'g{i}' for i in range(genes)]
cname = [f'c{i}' for i in range(cells)]
x = np.random.randint(0, 100, (cells, genes))
if dtype == 'anndata':
var = pd.DataFrame(index=gname)
obs = pd.DataFrame(index=cname)
groups = np.random.choice(['1', '2', '3'], cells)
obs['cluster'] = groups
andata = AnnData(x, obs=obs, var=var)
return andata
else:
return pd.DataFrame(x, index=cname, columns=gname)
def test():
andata = init(30, 100, 'anndata')
tmp = SpatialPatternScore(data=andata)
tmp.fit()
print(andata.var)
test()
|
nilq/baby-python
|
python
|
import timeit
import functools
import numpy as np
def timefunc(number=10000):
def _timefunc(func):
@functools.wraps(func)
def time_func_wrapper(*args, **kwargs):
t0 = timeit.default_timer()
for _ in range(number):
value = func(*args, **kwargs)
t1 = timeit.default_timer()
print("func: {}(args={}, kwargs={}) time: {}".format(func.__name__, str(args), str(kwargs), t1-t0))
return value
return time_func_wrapper
return _timefunc
volumes= [6, 7, 8]
@timefunc(number=100000)
def modify_np(x, i, j):
y = x.copy()
free = volumes[j] - x[j]
spill = min(free, x[i])
y[i] -= spill
y[j] += spill
#y = x
h = hash(y.tostring())
return h
x = np.array([1,2,3])
modify_np(x, 1, 2)
@timefunc(number=100000)
def modify_np(x, i, j):
y = x.copy()
free = volumes[j] - x[j]
spill = min(free, x[i])
y[i] -= spill
y[j] += spill
hash(tuple(y))
x = [1,2,3]
modify_np(x, 1, 2)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
This module provides File.GetID data access object.
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class GetID(DBFormatter):
"""
File GetID DAO class.
"""
def __init__(self, logger, dbi, owner):
"""
Add schema owner and sql.
"""
DBFormatter.__init__(self, logger, dbi)
self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else ""
self.sql = \
"""
SELECT F.FILE_ID
FROM %sFILES F
""" % ( self.owner )
def execute(self, conn, name, transaction = False):
"""
returns id for a given lfn
"""
sql = self.sql
sql += "WHERE F.LOGICAL_FILE_NAME = :lfn"
binds = {"lfn":name}
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["file_id"]
|
nilq/baby-python
|
python
|
from typing import List

class Solution:
def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:
cnt = 0
m = {}
for num1 in nums1:
for num2 in nums2:
m[num1 + num2] = m.get(num1 + num2, 0) + 1
for num3 in nums3:
for num4 in nums4:
cnt += m.get(-(num3 + num4), 0)
return cnt
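
# Quick illustrative check (hypothetical inputs, not part of the original file):
# the only zero-sum tuples are (1, -2, -1, 2) and (2, -1, -1, 0), so the count is 2.
if __name__ == "__main__":
    print(Solution().fourSumCount([1, 2], [-2, -1], [-1, 2], [0, 2]))  # expected: 2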
|
nilq/baby-python
|
python
|
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SECRET_KEY = 'na2Tei0FoChe3ooloh5Yaec0ji7Aipho'
INSTALLED_APPS=(
'mailrobot',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': False,
'OPTIONS': {
'debug': DEBUG,
},
},
]
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
|
nilq/baby-python
|
python
|
VERSION = "2.0.113"
VERSION_APP = "1265"
API_KEY = "270072d0fb4811ebacd96f6726fbdbb1"
API_SECRET = "2d0288d0fb4811ebabfbd57e57c6ae64"
ENDPOINT = "https://api.myxplora.com/api"
|
nilq/baby-python
|
python
|
from .dataset_evaluator import DatasetEvaluator
__all__ = [
'DatasetEvaluator'
]
|
nilq/baby-python
|
python
|
cities = [
'Tallinn',
'Tartu',
'Narva',
'Kohtla-Jaerve',
'Paernu',
'Viljandi',
'Rakvere',
'Sillamaee',
'Maardu',
'Kuressaare',
'Voru',
'Valga',
'Haapsalu',
'Johvi',
'Paide',
'Keila',
'Kivioli',
'Tapa',
'Polva',
'Jogeva',
'Tueri',
'Elva',
'Rapla',
'Saue',
'Kaerdla'
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
"""
* Example demonstrating the Position closed-loop servo.
* Tested with Logitech F350 USB Gamepad inserted into Driver Station.
*
* Be sure to select the correct feedback sensor using configSelectedFeedbackSensor() below.
*
* After deploying/debugging this to your RIO, first use the left Y-stick
* to throttle the Talon manually. This will confirm your hardware setup.
* Be sure to confirm that when the Talon is driving forward (green) the
* position sensor is moving in a positive direction. If this is not the case,
* flip the boolean input to the setSensorPhase() call below.
*
* Once you've ensured your feedback device is in-phase with the motor,
* use the button shortcuts to servo to target position.
*
* Tweak the PID gains accordingly.
"""
from ctre import WPI_TalonSRX
import wpilib
class Robot(wpilib.IterativeRobot):
#: Which PID slot to pull gains from. Starting 2018, you can choose from
#: 0,1,2 or 3. Only the first two (0,1) are visible in web-based
#: configuration.
kSlotIdx = 0
    #: Talon SRX/Victor SPX will support multiple (cascaded) PID loops. For
#: now we just want the primary one.
kPIDLoopIdx = 0
#: set to zero to skip waiting for confirmation, set to nonzero to wait and
#: report to DS if action fails.
kTimeoutMs = 10
def robotInit(self):
self.talon = WPI_TalonSRX(3)
self.joy = wpilib.Joystick(0)
self.loops = 0
self.lastButton1 = False
self.targetPos = 0
# choose the sensor and sensor direction
self.talon.configSelectedFeedbackSensor(
WPI_TalonSRX.FeedbackDevice.CTRE_MagEncoder_Relative,
self.kPIDLoopIdx,
self.kTimeoutMs,
)
# choose to ensure sensor is positive when output is positive
self.talon.setSensorPhase(True)
# choose based on what direction you want forward/positive to be.
# This does not affect sensor phase.
self.talon.setInverted(False)
# set the peak and nominal outputs, 12V means full
self.talon.configNominalOutputForward(0, self.kTimeoutMs)
self.talon.configNominalOutputReverse(0, self.kTimeoutMs)
self.talon.configPeakOutputForward(1, self.kTimeoutMs)
self.talon.configPeakOutputReverse(-1, self.kTimeoutMs)
# Set the allowable closed-loop error, Closed-Loop output will be
# neutral within this range. See Table in Section 17.2.1 for native
# units per rotation.
self.talon.configAllowableClosedloopError(0, self.kPIDLoopIdx, self.kTimeoutMs)
# set closed loop gains in slot0, typically kF stays zero - see documentation */
self.talon.selectProfileSlot(self.kSlotIdx, self.kPIDLoopIdx)
self.talon.config_kF(0, 0, self.kTimeoutMs)
self.talon.config_kP(0, 0.1, self.kTimeoutMs)
self.talon.config_kI(0, 0, self.kTimeoutMs)
self.talon.config_kD(0, 0, self.kTimeoutMs)
# zero the sensor
self.talon.setSelectedSensorPosition(0, self.kPIDLoopIdx, self.kTimeoutMs)
def teleopPeriodic(self):
"""
This function is called periodically during operator control
"""
# get gamepad axis - forward stick is positive
leftYstick = self.joy.getY()
# calculate the percent motor output
motorOutput = self.talon.getMotorOutputPercent()
button1 = self.joy.getRawButton(1)
button2 = self.joy.getRawButton(2)
# deadband gamepad
if abs(leftYstick) < 0.1:
leftYstick = 0
# prepare line to print
sb = []
sb.append("\tOut%%: %.3f" % motorOutput)
sb.append(
"\tPos: %.3fu" % self.talon.getSelectedSensorPosition(self.kPIDLoopIdx)
)
        if not self.lastButton1 and button1:
# Position mode - button just pressed
# 10 Rotations * 4096 u/rev in either direction
self.targetPos = leftYstick * 4096 * 10.0
self.talon.set(WPI_TalonSRX.ControlMode.Position, self.targetPos)
# on button2 just straight drive
if button2:
# Percent voltage mode
self.talon.set(WPI_TalonSRX.ControlMode.PercentOutput, leftYstick)
if self.talon.getControlMode() == WPI_TalonSRX.ControlMode.Position:
# append more signals to print when in speed mode.
sb.append("\terr: %s" % self.talon.getClosedLoopError(self.kPIDLoopIdx))
sb.append("\ttrg: %.3f" % self.targetPos)
# periodically print to console
self.loops += 1
if self.loops >= 10:
self.loops = 0
print(" ".join(sb))
# save button state for on press detect
self.lastButton1 = button1
if __name__ == "__main__":
wpilib.run(Robot)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
from copy import deepcopy
from queue import Queue
from pickle import dump, load
from colorama import Fore, Style
class GoBoard(object):
black = 1
space = 0
white = -1
featureCount = 22
printDic = {space : '.', black : 'B', white : 'W'}
colorDic = {space : Fore.WHITE + Style.BRIGHT, black : Fore.RED + Style.BRIGHT, white : Fore.WHITE + Style.BRIGHT, 'last' : Fore.CYAN + Style.BRIGHT, 'reset' : Style.RESET_ALL}
dxdy = [(1, 0), (-1, 0), (0, 1), (0, -1)]
def __init__(self, size = 19):
if not isinstance(size, int) or size <= 0:
raise Exception('GoBoard: __init__: error: invalid size')
self.__size = size
self.__twoHistory = [None] * 2
self.__lastMove = None
self.__nextColor = GoBoard.black
self.__boardList = self.getEmptyBoardList()
def save(self, filename):
if not isinstance(filename, str):
raise Exception('GoBoard: save: error: invalid filename')
with open(filename, 'wb') as f:
dump(self.__dict__, f, 2)
def load(self, filename):
if not isinstance(filename, str):
raise Exception('GoBoard: load: error: invalid filename')
with open(filename, 'rb') as f:
self.__dict__.update(load(f))
def getNextColor(self):
return self.__nextColor
def skip(self):
self.__nextColor = - self.__nextColor
def getSize(self):
return self.__size
def setBoardList(self, boardList):
if not self.isValidBoardList(boardList):
raise Exception('GoBoard: setBoardList: error: invalid boardList')
self.__boardList = deepcopy(boardList)
def getBoardList(self):
return deepcopy(self.__boardList)
def setSpot(self, x, y, value):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: setSpot: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: setSpot: error: invalid y coordinate')
if not isinstance(value, int) or not GoBoard.white <= value <= GoBoard.black:
raise Exception('GoBoard: setSpot: error: invalid value')
self.__boardList[x][y] = value
def getSpot(self, x, y):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: getSpot: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: getSpot: error: invalid y coordinate')
return self.__boardList[x][y]
def printBoard(self):
print(GoBoard.colorDic[GoBoard.space] + '+' + '-' * (self.__size * 2 + 1) + '+')
for i in range(self.__size):
print(GoBoard.colorDic[GoBoard.space] + '|', end = ' ')
for j in range(self.__size):
if self.__lastMove == (i, j):
print(GoBoard.colorDic['last'] + GoBoard.printDic[self.__boardList[i][j]], end = ' ')
else:
print(GoBoard.colorDic[self.__boardList[i][j]] + GoBoard.printDic[self.__boardList[i][j]], end = ' ')
print(GoBoard.colorDic[GoBoard.space] + '|')
print(GoBoard.colorDic[GoBoard.space] + '+' + '-' * (self.__size * 2 + 1) + '+' + Style.RESET_ALL)
def hash(self):
s = ''
for row in self.__boardList:
for spot in row:
s += str(spot + 1)
return int(s, 3)
def setBoardListFromHash(self, h):
if not isinstance(h, int):
raise Exception('GoBoard: setBoardListFromHash: error: invalid hash')
s = ''
while h > 0:
s = str(h % 3) + s
            h //= 3
if len(s) < self.__size ** 2:
s = '0' * (self.__size ** 2 - len(s)) + s
elif len(s) > self.__size ** 2:
raise Exception('GoBoard: setBoardListFromHash: error: invalid hash')
for i in range(self.__size):
for j in range(self.__size):
self.__boardList[i][j] = int(s[i * self.__size + j]) - 1
def bfsFloodFill(self, x, y):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: bfsFloodFill: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: bfsFloodFill: error: invalid y coordinate')
color = self.__boardList[x][y]
if color == GoBoard.space:
return ([], [])
stonespot = []
libertyspot = []
vis = self.getEmptyBoardList()
que = Queue()
que.put((x, y))
while not que.empty():
cur = que.get()
if not 0 <= cur[0] < self.__size or not 0 <= cur[1] < self.__size or self.__boardList[cur[0]][cur[1]] == - color or vis[cur[0]][cur[1]] == 1:
continue
vis[cur[0]][cur[1]] = 1
if self.__boardList[cur[0]][cur[1]] == GoBoard.space:
libertyspot.append((cur[0], cur[1]))
else:
stonespot.append((cur[0], cur[1]))
for d in GoBoard.dxdy:
que.put((cur[0] + d[0], cur[1] + d[1]))
return (stonespot, libertyspot)
def countLiberty(self):
ret = [[-1] * self.__size for _ in range(self.__size)]
for i in range(self.__size):
for j in range(self.__size):
if ret[i][j] == -1 and self.__boardList[i][j] != GoBoard.space:
bfs = self.bfsFloodFill(i, j)
liberty = len(bfs[1])
for spot in bfs[0]:
ret[spot[0]][spot[1]] = liberty
elif self.__boardList[i][j] == GoBoard.space:
ret[i][j] = 0
return ret
def captureSpot(self, exception = None):
ret = []
mat = self.getEmptyBoardList()
liberty = self.countLiberty()
for i in range(self.__size):
for j in range(self.__size):
if liberty[i][j] == 0 and self.__boardList[i][j] != GoBoard.space:
mat[i][j] = 1
if isinstance(exception, tuple) and len(exception) == 2:
god = self.bfsFloodFill(exception[0], exception[1])
for spot in god[0]:
mat[spot[0]][spot[1]] = 0
elif exception != None:
raise Exception('GoBoard: captureSpot: error: invalid exception')
for i in range(self.__size):
for j in range(self.__size):
if mat[i][j] == 1:
ret.append((i, j))
return ret
def capture(self, exception = None):
spots = self.captureSpot(exception)
for spot in spots:
self.__boardList[spot[0]][spot[1]] = GoBoard.space
def isValidMove(self, x, y, color):
if not isinstance(x, int) or not 0 <= x < self.__size or not isinstance(y, int) or not 0 <= y < self.__size or not isinstance(color, int) or color != GoBoard.white and color != GoBoard.black or self.__boardList[x][y] != GoBoard.space:
return False
for k in GoBoard.dxdy:
i = x + k[0]
j = y + k[1]
if 0 <= i < self.__size and 0 <= j < self.__size and self.__boardList[i][j] == GoBoard.space:
return True
tempBoard = GoBoard(self.__size)
tempBoard.setBoardList(self.__boardList)
tempBoard.setSpot(x, y, color)
tempBoard.capture((x, y))
if len(tempBoard.bfsFloodFill(x, y)[1]) == 0:
return False
if self.__twoHistory[0] == tempBoard.hash():
return False
return True
def move(self, x, y, color):
if not isinstance(x, int) or not 0 <= x < self.__size:
raise Exception('GoBoard: move: error: invalid x coordinate')
if not isinstance(y, int) or not 0 <= y < self.__size:
raise Exception('GoBoard: move: error: invalid y coordinate')
if not isinstance(color, int) or color != GoBoard.white and color != GoBoard.black:
raise Exception('GoBoard: move: error: invalid color')
if self.__boardList[x][y] != GoBoard.space:
raise Exception('GoBoard: move: error: occupied spot')
for k in GoBoard.dxdy:
i = x + k[0]
j = y + k[1]
if 0 <= i < self.__size and 0 <= j < self.__size and self.__boardList[i][j] == GoBoard.space:
self.__boardList[x][y] = color
self.capture()
self.__twoHistory[0], self.__twoHistory[1] = self.__twoHistory[1], self.hash()
self.__nextColor = - color
self.__lastMove = (x, y)
return
tempBoard = GoBoard(self.__size)
tempBoard.setBoardList(self.__boardList)
tempBoard.setSpot(x, y, color)
tempBoard.capture((x, y))
if len(tempBoard.bfsFloodFill(x, y)[1]) == 0:
raise Exception('GoBoard: move: error: invalid move')
if self.__twoHistory[0] == tempBoard.hash():
raise Exception('GoBoard: move: error: reappeared state')
self.__boardList = tempBoard.getBoardList()
self.__twoHistory[0], self.__twoHistory[1] = self.__twoHistory[1], self.hash()
self.__nextColor = - color
self.__lastMove = (x, y)
def isValidBoardList(self, boardList):
if not isinstance(boardList, list) or len(boardList) != self.__size:
return False
for row in boardList:
if not isinstance(row, list) or len(row) != self.__size:
return False
for spot in row:
if not isinstance(spot, int) or not GoBoard.white <= spot <= GoBoard.black:
return False
return True
def getEmptyBoardList(self):
return [[GoBoard.space] * self.__size for _ in range(self.__size)]
def featureColor(self, color):
if not isinstance(color, int) or not GoBoard.white <= color <= GoBoard.black:
raise Exception('GoBoard: featureColor: error: invalid color')
ret = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if self.__boardList[i][j] == color:
ret[i][j] = 1
return ret
def featureCurrent(self):
return self.featureColor(self.__nextColor)
def featureOpponent(self):
return self.featureColor(- self.__nextColor)
def featureEmpty(self):
return self.featureColor(GoBoard.space)
def featureAllZeros(self):
return self.getEmptyBoardList()
def featureAllOnes(self):
return [[1] * self.__size for _ in range(self.__size)]
def featureFourLiberty(self):
ret = [self.getEmptyBoardList() for _ in range(8)]
liberty = self.countLiberty()
for i in range(self.__size):
for j in range(self.__size):
if self.__boardList[i][j] == self.__nextColor:
if liberty[i][j] == 1:
ret[0][i][j] = 1
elif liberty[i][j] == 2:
ret[1][i][j] = 1
elif liberty[i][j] == 3:
ret[2][i][j] = 1
elif liberty[i][j] >= 4:
ret[3][i][j] = 1
elif self.__boardList[i][j] == - self.__nextColor:
if liberty[i][j] == 1:
ret[4][i][j] = 1
elif liberty[i][j] == 2:
ret[5][i][j] = 1
elif liberty[i][j] == 3:
ret[6][i][j] = 1
elif liberty[i][j] >= 4:
ret[7][i][j] = 1
return ret
def featureIllegal(self):
ret = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if not self.isValidMove(i, j, self.__nextColor):
ret[i][j] = 1
return ret
def featureFourCapture(self):
ret = [self.getEmptyBoardList() for _ in range(8)]
vis = self.getEmptyBoardList()
for i in range(self.__size):
for j in range(self.__size):
if vis[i][j] == 0 and self.__boardList[i][j] != GoBoard.space:
bfs = self.bfsFloodFill(i, j)
for spot in bfs[0]:
vis[spot[0]][spot[1]] = 1
if len(bfs[1]) == 1:
x = bfs[1][0][0]
y = bfs[1][0][1]
self.__boardList[x][y] = - self.__boardList[i][j]
count = len(self.captureSpot((x, y)))
self.__boardList[x][y] = GoBoard.space
if self.__boardList[i][j] == - self.__nextColor:
if not self.isValidMove(x, y, self.__nextColor):
continue
if count == 1:
ret[0][x][y] = 1
elif count == 2:
ret[1][x][y] = 1
elif count == 3:
ret[2][x][y] = 1
elif count >= 4:
ret[3][x][y] = 1
else:
if count == 1:
ret[4][x][y] = 1
elif count == 2:
ret[5][x][y] = 1
elif count == 3:
ret[6][x][y] = 1
elif count >= 4:
ret[7][x][y] = 1
return ret
def allFeatures(self):
ret = [[[0] * GoBoard.featureCount for _ in range(self.__size)] for _ in range(self.__size)]
tmp = []
tmp.append(self.featureCurrent())
tmp.append(self.featureOpponent())
tmp.append(self.featureEmpty())
tmp.append(self.featureAllZeros())
tmp.append(self.featureAllOnes())
tmp += self.featureFourLiberty()
tmp.append(self.featureIllegal())
tmp += self.featureFourCapture()
for i in range(self.__size):
for j in range(self.__size):
for k in range(GoBoard.featureCount):
ret[i][j][k] = tmp[k][i][j]
return ret
def rPrint(arg):
if isinstance(arg, list):
for item in arg:
rPrint(item)
print()
else:
print(arg, end = ' ')
def test():
board = GoBoard(int(input('Board size: ')))
while True:
color = board.getNextColor()
board.printBoard()
if color == GoBoard.black:
print('Black\'s turn')
else:
print('White\'s turn')
x = input('x: ')
y = input('y: ')
if x == '' and y == '':
board.skip()
else:
board.move(int(x), int(y), color)
board.printBoard()
while True:
feature = input('Feature: ')
if feature == '':
break
if hasattr(board, 'feature' + feature):
rPrint(getattr(board, 'feature' + feature)())
else:
print('Feature not found!')
if __name__ == '__main__':
test()
|
nilq/baby-python
|
python
|
from flask_login.utils import confirm_login
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, EqualTo
from wtforms import ValidationError
from myproject.models import User
class loginForm(FlaskForm):
email = StringField('Enter Email', validators=[DataRequired()])
password = PasswordField('Enter Password', validators=[DataRequired()])
submit = SubmitField('Login')
class RegForm(FlaskForm):
email = StringField('Enter Email', validators=[DataRequired(), Email()])
username = StringField('Enter Username', validators=[DataRequired()])
password = PasswordField('Enter Password', validators=[DataRequired(), EqualTo('confirm_password', message='Passwords Must Match')])
confirm_password = PasswordField('Enter Password Again', validators=[DataRequired()])
submit = SubmitField('Sign-Up')
    # WTForms only auto-invokes inline validators named validate_<fieldname>,
    # so these custom checks are named accordingly.
    def validate_email(self, field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email Already Exists')

    def validate_username(self, field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username Taken')
|
nilq/baby-python
|
python
|
import unittest
from function.piecewise_linear_function import PiecewiseLinearFunction
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_basic_plf(self):
array = [(5, 1), (7, 3), (10, 6), (12, 8)]
f = PiecewiseLinearFunction(array)
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 6
assert f.eval(11) == 7
assert f.eval(11.5) == 7.5
assert f.eval(12) == 8
assert f.eval(13) == 8
def test_one_tp(self):
array = [(5, 1)]
f = PiecewiseLinearFunction(array)
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 1
assert f.eval(7) == 1
def test_add_tp(self):
array = [(5, 1), (7, 3), (10, 6), (12, 8)]
f = PiecewiseLinearFunction(array)
f.add((15, 5))
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 6
assert f.eval(11) == 7
assert f.eval(11.5) == 7.5
assert f.eval(12) == 8
assert f.eval(13) == 7
assert f.eval(14) == 6
assert f.eval(15) == 5
f.add_and_clear_forward((9, 5))
assert f.eval(0) == 1
assert f.eval(2) == 1
assert f.eval(5) == 1
assert f.eval(6) == 2
assert f.eval(7) == 3
assert f.eval(8) == 4
assert f.eval(9) == 5
assert f.eval(10) == 5
assert f.eval(11) == 5
assert f.eval(11.5) == 5
assert f.eval(12) == 5
assert f.eval(13) == 5
assert f.eval(14) == 5
assert f.eval(15) == 5
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
from collections.abc import Iterable
from collections.abc import Mapping
from xml.dom import minidom
from xml.dom.minidom import Element
import pandas as pd
from trainerroad.Utils.Str import *
class Workout:
def __init__(self):
pass
def add_workout_to_document(self, workouts: Iterable, document: minidom.Document, section, parent_section):
"""
:param workouts:
:param document:
:param section:
:param parent_section:
:return:
"""
workouts_ = workouts[1:]
workouts_shifted = pd.Series(workouts_).shift(1).fillna(-1).tolist()
for index, (current_interval, previous_interval) in enumerate(zip(workouts_, workouts_shifted)):
cooldown = index == len(workouts_) - 1
warmup = index == 0
self.build_workout(document=document, section=section, interval=current_interval,
previous_interval=previous_interval, warmup=warmup,
cooldown=cooldown)
parent_section.appendChild(section)
def build_workout(self, document, section, interval: dict, previous_interval: dict, cooldown=False, warmup=False):
"""
:param previous_interval:
:param document:
:param section:
:param interval:
:param cooldown:
:param warmup:
:return:
"""
end = int(interval.get("End"))
start = int(interval.get("Start"))
power = str(float(interval.get("StartTargetPowerPercent")) / 100)
duration = str(end - start)
is_current_fake = bool(interval.get("IsFake"))
# is_previous_fake = None
previous_power = None
if previous_interval != -1:
# is_previous_fake = bool(previous_interval.get("IsFake"))
previous_power = str(float(previous_interval.get("StartTargetPowerPercent")) / 100)
if cooldown is False and warmup is False:
steady_interval = document.createElement(STEADY_STATE)
steady_interval.setAttribute(DURATION, duration)
steady_interval.setAttribute(POWER, power)
new_interval = steady_interval
# print(f"Power: {power}, Start: {start}, End: {end}, Duration {duration}")
elif cooldown and warmup is False:
cooldown_interval = document.createElement(RAMP) if is_current_fake else document.createElement(
STEADY_STATE)
cooldown_interval.setAttribute(DURATION, duration)
# cooldown_interval.setAttribute(POWER_HIGH, power)
# print(f"is_current_fake: {is_current_fake}")
cooldown_interval.setAttribute(POWER_LOW, power)
if is_current_fake:
cooldown_interval.setAttribute(POWER_HIGH, str(round(float(power) - 0.1, 3)))
else:
cooldown_interval.setAttribute(POWER_HIGH, power)
# print(
# f"Cooldown: Previous Power {previous_power}, Power: {power}, Start: {start}, End: {end}, Duration {duration}")
new_interval = cooldown_interval
elif cooldown is False and warmup:
warmup_interval = document.createElement(WARMUP)
warmup_interval.setAttribute(DURATION, duration)
warmup_interval.setAttribute(POWER_HIGH, power)
warmup_interval.setAttribute(POWER_LOW, power)
new_interval = warmup_interval
# print(f"Warmup Power: {power}, Start: {start}, End: {end}, Duration {duration}")
else:
steady_interval = document.createElement(STEADY_STATE)
steady_interval.setAttribute(DURATION, duration)
steady_interval.setAttribute(POWER, power)
new_interval = steady_interval
# print(f"Power: {power}, Start: {start}, End: {end}, Duration {duration}")
section.appendChild(new_interval)
return section
def add_workout_details(self, details, section: Element, document: minidom.Document):
"""
:param details:
:param section:
:param document:
:return:
"""
workout_name = details.get(WORKOUT_NAME)
description = details.get(WORKOUT_DESC)
author_section = document.createElement(AUTHOR)
author_section.appendChild(document.createTextNode(TRAINER_ROAD))
description_section = document.createElement(DESCRIPTION)
description_section.appendChild(document.createTextNode(description))
name_section = document.createElement(NAME)
name_section.appendChild(document.createTextNode(workout_name))
section.appendChild(author_section)
section.appendChild(description_section)
section.appendChild(name_section)
def convert_workout(self, interval: Iterable, workout_details: Mapping) -> minidom.Document:
"""
:param interval:
:param workout_details:
:return:
"""
document = minidom.Document()
workout_file = document.createElement(WORKOUT_FILE)
workout_section = document.createElement(WORKOUT_STR)
self.add_workout_details(workout_details, document=document, section=workout_file)
self.add_workout_to_document(interval, document=document, section=workout_section, parent_section=workout_file)
document.appendChild(workout_file)
return document
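
# Minimal usage sketch (illustrative): the interval dictionaries below are hypothetical
# and only carry the keys build_workout reads, and the string constants come from the
# trainerroad.Utils.Str star-import above. Note that add_workout_to_document drops the
# first interval (workouts[1:]).
if __name__ == "__main__":
    intervals = [
        {"Start": 0, "End": 0, "StartTargetPowerPercent": 40, "IsFake": False},
        {"Start": 0, "End": 300, "StartTargetPowerPercent": 50, "IsFake": False},
        {"Start": 300, "End": 900, "StartTargetPowerPercent": 90, "IsFake": False},
        {"Start": 900, "End": 1200, "StartTargetPowerPercent": 40, "IsFake": True},
    ]
    details = {WORKOUT_NAME: "Example Workout", WORKOUT_DESC: "Warmup, steady block, ramped cooldown"}
    print(Workout().convert_workout(intervals, details).toprettyxml(indent="  "))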
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torch.quantization.observer import MinMaxObserver, PerChannelMinMaxObserver
import warnings
class _InputEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of input columns, and
computing the quantization parameters for the overall min/max input values.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
output_obs: For the user to specify what kind of output observer they
would like to use
The running minimum/maximum :math:`x_\text{min/max}` are computed in the
same way as :class:`~torch.quantization.observer.PerChannelMinMaxObserver`,
with the difference that the running min/max values are stored per column.
The qparams are calculated by multiplying the min/max input column values
with the equalization scale, reducing to find the global min/max input
values, and then calculating in the same way as in
:class:`~torch.quantization.observer.MinMaxObserver`
.. note:: If the running minimum equals to the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(self, dtype=torch.quint8, qscheme=torch.per_tensor_affine,
quant_min=None, quant_max=None, output_obs=None,
factory_kwargs=None) -> None:
super(_InputEqualizationObserver, self).__init__()
if qscheme not in {torch.per_tensor_affine, torch.per_tensor_symmetric}:
raise TypeError("Input qscheme must be per-tensor")
self.input_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
if output_obs is None:
self.output_obs = MinMaxObserver(dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
else:
self.output_obs = output_obs
self.equalization_scale = torch.empty(0)
def forward(self, x_orig):
        # TODO: Allow for convolutional layers
if not (x_orig.ndim == 2):
raise ValueError("InputEqualizationObserver only supports Linear layers")
return self.input_obs(x_orig)
def get_input_minmax(self):
return (self.input_obs.min_vals, self.input_obs.max_vals)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
def calculate_qparams(self):
r"""
Returns the scale/zero_point for the input and weight rows
"""
if self.equalization_scale.nelement() == 0:
            warnings.warn(
                "Must call calculate_equalization_scale before calling "
                "calculate_qparams. Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
# Calculate qparams for the scaled min/max inputs
# Scale the input by the equalization scale located at the same column
# index
(min_inputs, max_inputs) = self.get_input_minmax()
min_input_scaled = torch.min(torch.mul(min_inputs, self.equalization_scale))
max_input_scaled = torch.max(torch.mul(max_inputs, self.equalization_scale))
(scale_input, zero_point_input) = self.input_obs._calculate_qparams(min_input_scaled, max_input_scaled)
return scale_input, zero_point_input
class _WeightEqualizationObserver(nn.Module):
r"""Observer for tracking the running min/max values of weight columns and
rows, and computing the quantization parameters for the weight rows.
Args:
dtype: Quantized data type
qscheme: Quantization scheme
quant_min: Minimum quantization value. If unspecified, it will
follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will
follow the 8-bit setup.
This observer is made up of 2 PerChannelMinMaxObservers
- weight_col_obs: Used to record the running minimum and maximum of
columns of incoming weight tensors
- weight_row_obs: Used to record the running minimum and maximum of
rows of incoming weight tensors
The running minimum/maximum :math:`w_\text{min/max}` are computed in the
same way as :class:`~torch.quantization.observer.PerChannelMinMaxObserver`.
The qparams are calculated by multiplying the min/max weight row values
with the inverse of the equalization scale, and then calculating in the same
way as in :class:`~torch.quantization.observer.PerChannelMinMaxObserver`
.. note:: If the running minimum equals to the running maximum, the scales
and zero_points are set to 1.0 and 0.
"""
def __init__(self, dtype=torch.qint8, qscheme=torch.per_tensor_affine, quant_min=None,
quant_max=None, factory_kwargs=None) -> None:
super(_WeightEqualizationObserver, self).__init__()
self.weight_col_obs = PerChannelMinMaxObserver(ch_axis=1, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
self.weight_row_obs = PerChannelMinMaxObserver(ch_axis=0, dtype=dtype,
qscheme=qscheme,
quant_min=quant_min,
quant_max=quant_max,
factory_kwargs=factory_kwargs)
        self.equalization_scale = torch.empty(0)
        self.min_weights_ind = None
        self.max_weights_ind = None
def forward(self, w_orig):
        # TODO: Allow for convolutional layers
if not (w_orig.ndim == 2):
raise ValueError("WeightEqualizationObserver only supports Linear layers")
return self._forward(w_orig)
def _forward(self, w_orig):
r"""
Calculates the min/max values of each weight column and weight row.
"""
w_orig = self.weight_col_obs(w_orig)
w_orig = self.weight_row_obs(w_orig)
# Calculate the column indices of the min/max weight in each row
num_row, _ = w_orig.shape
min_weights_ind = []
max_weights_ind = []
for i in range(num_row):
min_weights_ind.append(torch.nonzero(w_orig[i] == self.weight_row_obs.min_vals[i])[0][0])
max_weights_ind.append(torch.nonzero(w_orig[i] == self.weight_row_obs.max_vals[i])[0][0])
self.min_weights_ind = torch.tensor(min_weights_ind)
self.max_weights_ind = torch.tensor(max_weights_ind)
return w_orig
def get_weight_col_minmax(self):
return (self.weight_col_obs.min_vals, self.weight_col_obs.max_vals)
def get_weight_row_minmax(self):
return (self.weight_row_obs.min_vals, self.weight_row_obs.max_vals)
def set_equalization_scale(self, equalization_scale):
self.equalization_scale = equalization_scale
def calculate_qparams(self):
r"""
Returns the scale/zero_point for the input and weight rows
"""
if self.equalization_scale.nelement() == 0:
            warnings.warn(
                "Must call calculate_equalization_scale before calling "
                "calculate_qparams. Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
if self.min_weights_ind is None or self.max_weights_ind is None:
            warnings.warn(
                "Must find the column indices of the min/max of each weight row "
                "before calculating the qparams. Returning default scale and zero point."
            )
return torch.tensor([1.0]), torch.tensor([0]), torch.tensor([1.0]), torch.tensor([0])
# Calculate the qparams for weights by using the rows
# Scale the weight rows by the reciprocal of the equalization scale
# located at the same column index
(min_weights, max_weights) = self.get_weight_row_minmax()
min_weights_scaled = torch.mul(min_weights, torch.reciprocal(self.equalization_scale[self.min_weights_ind]))
max_weights_scaled = torch.mul(max_weights, torch.reciprocal(self.equalization_scale[self.max_weights_ind]))
(scale_weight, zero_point_weight) = self.weight_row_obs._calculate_qparams(min_weights_scaled, max_weights_scaled)
return scale_weight, zero_point_weight
def calculate_equalization_scale(input_obs: _InputEqualizationObserver,
weight_obs: _WeightEqualizationObserver) -> torch.Tensor:
r""" Calculates the equalization scale and sets the equalization_scale value
in the observers.
Args:
input_obs: Observer that tracks the ranges for the input columns
weight_obs: Observer that tracks the ranges for the weight columns
"""
(min_inputs, max_inputs) = input_obs.get_input_minmax()
(min_weights, max_weights) = weight_obs.get_weight_col_minmax()
if not (min_inputs.shape == min_weights.shape):
raise ValueError(
"Input and Weight must have the same column dimension. " +
f"Found {min_inputs.shape} and {max_inputs.shape} instead."
)
equalization_scale = torch.sqrt((max_weights - min_weights) / (max_inputs - min_inputs))
return equalization_scale
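
# Minimal usage sketch (illustrative; assumes the torch release this module was written
# against, where the observers imported above accept these default arguments):
if __name__ == "__main__":
    x = torch.randn(8, 4)   # 2-D activations: rows are samples, columns are features
    w = torch.randn(3, 4)   # Linear weight whose column count matches the activations
    input_obs = _InputEqualizationObserver()
    weight_obs = _WeightEqualizationObserver()
    input_obs(x)
    weight_obs(w)
    scale = calculate_equalization_scale(input_obs, weight_obs)
    input_obs.set_equalization_scale(scale)
    weight_obs.set_equalization_scale(scale)
    print("equalization scale:", scale)
    print("input qparams:", input_obs.calculate_qparams())
    print("weight qparams:", weight_obs.calculate_qparams())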
|
nilq/baby-python
|
python
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from torch.distributions import constraints
from torch.distributions.utils import lazy_property
from pyro.distributions.torch import Chi2
from pyro.distributions.torch_distribution import TorchDistribution
from pyro.distributions.util import broadcast_shape
class MultivariateStudentT(TorchDistribution):
"""
Creates a multivariate Student's t-distribution parameterized by degree of
freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale_tril`.
:param ~torch.Tensor df: degrees of freedom
:param ~torch.Tensor loc: mean of the distribution
:param ~torch.Tensor scale_tril: scale of the distribution, which is
a lower triangular matrix with positive diagonal entries
"""
arg_constraints = {
"df": constraints.positive,
"loc": constraints.real_vector,
"scale_tril": constraints.lower_cholesky,
}
support = constraints.real_vector
has_rsample = True
def __init__(self, df, loc, scale_tril, validate_args=None):
dim = loc.size(-1)
assert scale_tril.shape[-2:] == (dim, dim)
if not isinstance(df, torch.Tensor):
df = loc.new_tensor(df)
batch_shape = broadcast_shape(df.shape, loc.shape[:-1], scale_tril.shape[:-2])
event_shape = torch.Size((dim,))
self.df = df.expand(batch_shape)
self.loc = loc.expand(batch_shape + event_shape)
self._unbroadcasted_scale_tril = scale_tril
self._chi2 = Chi2(self.df)
super().__init__(batch_shape, event_shape, validate_args=validate_args)
@lazy_property
def scale_tril(self):
return self._unbroadcasted_scale_tril.expand(
self._batch_shape + self._event_shape + self._event_shape
)
@lazy_property
def covariance_matrix(self):
# NB: this is not covariance of this distribution;
# the actual covariance is df / (df - 2) * covariance_matrix
return torch.matmul(
self._unbroadcasted_scale_tril,
self._unbroadcasted_scale_tril.transpose(-1, -2),
).expand(self._batch_shape + self._event_shape + self._event_shape)
@lazy_property
def precision_matrix(self):
identity = torch.eye(
self.loc.size(-1), device=self.loc.device, dtype=self.loc.dtype
)
return torch.cholesky_solve(identity, self._unbroadcasted_scale_tril).expand(
self._batch_shape + self._event_shape + self._event_shape
)
@staticmethod
def infer_shapes(df, loc, scale_tril):
event_shape = loc[-1:]
batch_shape = broadcast_shape(df, loc[:-1], scale_tril[:-2])
return batch_shape, event_shape
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(MultivariateStudentT, _instance)
batch_shape = torch.Size(batch_shape)
loc_shape = batch_shape + self.event_shape
scale_shape = loc_shape + self.event_shape
new.df = self.df.expand(batch_shape)
new.loc = self.loc.expand(loc_shape)
new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril
if "scale_tril" in self.__dict__:
new.scale_tril = self.scale_tril.expand(scale_shape)
if "covariance_matrix" in self.__dict__:
new.covariance_matrix = self.covariance_matrix.expand(scale_shape)
if "precision_matrix" in self.__dict__:
new.precision_matrix = self.precision_matrix.expand(scale_shape)
new._chi2 = self._chi2.expand(batch_shape)
super(MultivariateStudentT, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
X = torch.empty(shape, dtype=self.df.dtype, device=self.df.device).normal_()
Z = self._chi2.rsample(sample_shape)
Y = X * torch.rsqrt(Z / self.df).unsqueeze(-1)
return self.loc + self.scale_tril.matmul(Y.unsqueeze(-1)).squeeze(-1)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
n = self.loc.size(-1)
y = torch.linalg.solve_triangular(
self.scale_tril, (value - self.loc).unsqueeze(-1), upper=False
).squeeze(-1)
Z = (
self.scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+ 0.5 * n * self.df.log()
+ 0.5 * n * math.log(math.pi)
+ torch.lgamma(0.5 * self.df)
- torch.lgamma(0.5 * (self.df + n))
)
return -0.5 * (self.df + n) * torch.log1p(y.pow(2).sum(-1) / self.df) - Z
@property
def mean(self):
m = self.loc.clone()
m[self.df <= 1, :] = float("nan")
return m
@property
def variance(self):
m = self.scale_tril.pow(2).sum(-1) * (self.df / (self.df - 2)).unsqueeze(-1)
m[(self.df <= 2) & (self.df > 1), :] = float("inf")
m[self.df <= 1, :] = float("nan")
return m
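
# Minimal usage sketch (illustrative; assumes pyro is installed and the torch version
# provides torch.linalg.solve_triangular, which log_prob above relies on):
if __name__ == "__main__":
    d = MultivariateStudentT(df=torch.tensor(5.0), loc=torch.zeros(2), scale_tril=torch.eye(2))
    samples = d.rsample(torch.Size([3]))
    print(samples.shape)              # torch.Size([3, 2])
    print(d.log_prob(samples).shape)  # torch.Size([3])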
|
nilq/baby-python
|
python
|
"""
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from ._univariate_selection import chi2
from ._univariate_selection import f_classif
from ._univariate_selection import f_oneway
from ._univariate_selection import f_regression
from ._univariate_selection import SelectPercentile
from ._univariate_selection import SelectKBest
from ._univariate_selection import SelectFpr
from ._univariate_selection import SelectFdr
from ._univariate_selection import SelectFwe
from ._univariate_selection import GenericUnivariateSelect
from ._variance_threshold import VarianceThreshold
from ._rfe import RFE
from ._rfe import RFECV
from ._from_model import SelectFromModel
from ._mutual_info import mutual_info_regression, mutual_info_classif
from ._base import SelectorMixin
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectFromModel',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression',
'mutual_info_classif',
'mutual_info_regression',
'SelectorMixin']
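
# Illustrative usage sketch (assumes scikit-learn and its bundled datasets are installed):
# univariate filter selection with SelectKBest keeps the k features that score highest
# under the chi-squared test.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.feature_selection import SelectKBest, chi2
    X, y = load_iris(return_X_y=True)
    print(SelectKBest(chi2, k=2).fit_transform(X, y).shape)  # (150, 2)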
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-04-28 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0026_rename_preprintservice_subjects'),
]
operations = [
migrations.AlterField(
model_name='subject',
name='text',
field=models.CharField(max_length=256),
),
migrations.AlterUniqueTogether(
name='subject',
unique_together=set([('text', 'provider')]),
),
]
|
nilq/baby-python
|
python
|
import requests
from crawl_service.util.config import CONFIG
def new_session() -> requests.Session:
session = requests.Session()
session.proxies = CONFIG.get('proxies', dict())
return session
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_autoscaling_auto_scaling_configuration_actions
short_description: Perform actions on an AutoScalingConfiguration resource in Oracle Cloud Infrastructure
description:
- Perform actions on an AutoScalingConfiguration resource in Oracle Cloud Infrastructure
- For I(action=change_compartment), moves an autoscaling configuration into a different compartment within the same tenancy. For information
about moving resources between compartments, see
L(Moving Resources to a Different Compartment,https://docs.cloud.oracle.com/iaas/Content/Identity/Tasks/managingcompartments.htm#moveRes).
When you move an autoscaling configuration to a different compartment, associated resources such as instance
pools are not moved.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
auto_scaling_configuration_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the autoscaling configuration.
type: str
aliases: ["id"]
required: true
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment to move the autoscaling configuration
to.
type: str
required: true
action:
description:
- The action to perform on the AutoScalingConfiguration.
type: str
required: true
choices:
- "change_compartment"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Perform action change_compartment on auto_scaling_configuration
oci_autoscaling_auto_scaling_configuration_actions:
# required
auto_scaling_configuration_id: "ocid1.autoscalingconfiguration.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
action: change_compartment
"""
RETURN = """
auto_scaling_configuration:
description:
- Details of the AutoScalingConfiguration resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the autoscaling
configuration.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see L(Resource Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the autoscaling configuration.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
cool_down_in_seconds:
description:
- For threshold-based autoscaling policies, this value is the minimum period of time to wait between scaling actions.
The cooldown period gives the system time to stabilize before rescaling. The minimum value is 300 seconds, which
is also the default. The cooldown period starts when the instance pool reaches the running state.
- For schedule-based autoscaling policies, this value is not used.
returned: on success
type: int
sample: 56
is_enabled:
description:
- Whether the autoscaling configuration is enabled.
returned: on success
type: bool
sample: true
resource:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of resource.
returned: on success
type: str
sample: instancePool
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the resource that is managed by the autoscaling
configuration.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
policies:
description:
- Autoscaling policy definitions for the autoscaling configuration. An autoscaling policy defines the criteria that
trigger autoscaling actions and the actions to take.
returned: on success
type: complex
contains:
capacity:
description:
- The capacity requirements of the autoscaling policy.
returned: on success
type: complex
contains:
max:
description:
- For a threshold-based autoscaling policy, this value is the maximum number of instances the instance pool is allowed
to increase to (scale out).
- For a schedule-based autoscaling policy, this value is not used.
returned: on success
type: int
sample: 56
min:
description:
- For a threshold-based autoscaling policy, this value is the minimum number of instances the instance pool is allowed
to decrease to (scale in).
- For a schedule-based autoscaling policy, this value is not used.
returned: on success
type: int
sample: 56
initial:
description:
- For a threshold-based autoscaling policy, this value is the initial number of instances to launch in the instance pool
immediately after autoscaling is enabled. After autoscaling retrieves performance metrics, the number of
instances is automatically adjusted from this initial number to a number that is based on the limits that
you set.
- For a schedule-based autoscaling policy, this value is the target pool size to scale to when executing the schedule
that's defined in the autoscaling policy.
returned: on success
type: int
sample: 56
id:
description:
- The ID of the autoscaling policy that is assigned after creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
policy_type:
description:
- The type of autoscaling policy.
returned: on success
type: str
sample: scheduled
time_created:
description:
- The date and time the autoscaling configuration was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
is_enabled:
description:
- Whether the autoscaling policy is enabled.
returned: on success
type: bool
sample: true
execution_schedule:
description:
- The schedule for executing the autoscaling policy.
returned: on success
type: complex
contains:
type:
description:
- The type of execution schedule.
returned: on success
type: str
sample: cron
timezone:
description:
- The time zone for the execution schedule.
returned: on success
type: str
sample: UTC
expression:
description:
- A cron expression that represents the time at which to execute the autoscaling policy.
- "Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>`"
- You can use special characters that are supported with the Quartz cron implementation.
- You must specify `0` as the value for seconds.
- "Example: `0 15 10 ? * *`"
returned: on success
type: str
sample: expression_example
resource_action:
description:
- ""
returned: on success
type: complex
contains:
action_type:
description:
- The type of resource action.
returned: on success
type: str
sample: power
action:
description:
- ""
returned: on success
type: str
sample: STOP
rules:
description:
- ""
returned: on success
type: complex
contains:
action:
description:
- ""
returned: on success
type: complex
contains:
type:
description:
- The type of action to take.
returned: on success
type: str
sample: CHANGE_COUNT_BY
value:
description:
- To scale out (increase the number of instances), provide a positive value. To scale in (decrease the number of
instances), provide a negative value.
returned: on success
type: int
sample: 56
display_name:
description:
- A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
id:
description:
- ID of the condition that is assigned after creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
metric:
description:
- ""
returned: on success
type: complex
contains:
metric_type:
description:
- ""
returned: on success
type: str
sample: CPU_UTILIZATION
threshold:
description:
- ""
returned: on success
type: complex
contains:
operator:
description:
- The comparison operator to use. Options are greater than (`GT`), greater than or equal to
(`GTE`), less than (`LT`), and less than or equal to (`LTE`).
returned: on success
type: str
sample: GT
value:
description:
- ""
returned: on success
type: int
sample: 56
time_created:
description:
- The date and time the autoscaling configuration was created, in the format defined by RFC3339.
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
max_resource_count:
description:
- The maximum number of resources to scale out to.
returned: on success
type: int
sample: 56
min_resource_count:
description:
- The minimum number of resources to scale in to.
returned: on success
type: int
sample: 56
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"display_name": "display_name_example",
"freeform_tags": {'Department': 'Finance'},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"cool_down_in_seconds": 56,
"is_enabled": true,
"resource": {
"type": "instancePool",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
},
"policies": [{
"capacity": {
"max": 56,
"min": 56,
"initial": 56
},
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"policy_type": "scheduled",
"time_created": "2013-10-20T19:20:30+01:00",
"is_enabled": true,
"execution_schedule": {
"type": "cron",
"timezone": "UTC",
"expression": "expression_example"
},
"resource_action": {
"action_type": "power",
"action": "STOP"
},
"rules": [{
"action": {
"type": "CHANGE_COUNT_BY",
"value": 56
},
"display_name": "display_name_example",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"metric": {
"metric_type": "CPU_UTILIZATION",
"threshold": {
"operator": "GT",
"value": 56
}
}
}]
}],
"time_created": "2013-10-20T19:20:30+01:00",
"max_resource_count": 56,
"min_resource_count": 56
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.autoscaling import AutoScalingClient
from oci.autoscaling.models import ChangeAutoScalingCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AutoScalingConfigurationActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
change_compartment
"""
@staticmethod
def get_module_resource_id_param():
return "auto_scaling_configuration_id"
def get_module_resource_id(self):
return self.module.params.get("auto_scaling_configuration_id")
def get_get_fn(self):
return self.client.get_auto_scaling_configuration
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_auto_scaling_configuration,
auto_scaling_configuration_id=self.module.params.get(
"auto_scaling_configuration_id"
),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeAutoScalingCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_auto_scaling_configuration_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
auto_scaling_configuration_id=self.module.params.get(
"auto_scaling_configuration_id"
),
change_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
AutoScalingConfigurationActionsHelperCustom = get_custom_class(
"AutoScalingConfigurationActionsHelperCustom"
)
class ResourceHelper(
AutoScalingConfigurationActionsHelperCustom,
AutoScalingConfigurationActionsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=False
)
module_args.update(
dict(
auto_scaling_configuration_id=dict(
aliases=["id"], type="str", required=True
),
compartment_id=dict(type="str", required=True),
action=dict(type="str", required=True, choices=["change_compartment"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="auto_scaling_configuration",
service_client_class=AutoScalingClient,
namespace="autoscaling",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import List, Property, Str
from traitsui.api import View, VGroup, UItem
from traitsui.menu import Action
from pychron.dvc.dvc_irradiationable import DVCAble
class OKButton(Action):
name = 'OK'
enabled_when = 'ok_enabled'
STYLESHEET = 'QLabel {font-size: 14px; color: red}'
class BaseEntry(DVCAble):
value = Str
available = List
error_message = Str
ok_enabled = Property(depends_on='value')
tag = ''
def do(self):
return self._add_loop()
def _get_ok_enabled(self):
if self.value not in self.available:
self.error_message = ''
return True
else:
self.error_message = '{} already exists. Choose another'.format(self.tag)
return False
def _add_loop(self):
while 1:
info = self.edit_traits()
if info.result:
db = self.get_database()
ret = self._add_item()
if ret is None:
return False
                elif ret:
                    return True
                else:
                    return False
            else:
                # cancelled: return instead of re-opening the dialog forever
                return False
def _add_item(self):
raise NotImplementedError
def _new_view(self, *args, **kw):
for a, v in (('buttons', ['OK', 'Cancel']),
('resizable', True),
('kind', 'livemodal')):
if a not in kw:
kw[a] = v
v = View(*args, **kw)
return v
def traits_view(self):
# style_sheet='QLabel {font-size: 10px} QLineEdit {font-size: 10px}'
a = VGroup(UItem('value'),
UItem('error_message', style='readonly', style_sheet=STYLESHEET))
buttons = [OKButton(), 'Cancel']
return self._new_view(a,
width=400,
title='Add {}'.format(self.tag),
buttons=buttons)
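# Illustrative sketch (not part of pychron): BaseEntry leaves _add_item
# abstract; a concrete subclass is expected to persist self.value and return
# True on success, or a falsy value to make do() report failure. A minimal
# hypothetical subclass, with a made-up tag and no real database access,
# could look like this:
class ExampleEntry(BaseEntry):
    tag = 'example'
    def _add_item(self):
        # pretend persistence: just remember the value locally
        self.available.append(self.value)
        return True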
# ============= EOF =============================================
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand, CommandError
from game.models import *
import settings
from PIL import Image
import random
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
if lv == 1:
v = int(value, 16)*17
return v, v, v
if lv == 3:
return tuple(int(value[i:i+1], 16)*17 for i in range(0, 3))
return tuple(int(value[i:i+lv/3], 16) for i in range(0, lv, lv/3))
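# Illustrative usage (not part of the original command): hex_to_rgb() accepts
# 1-, 3- or 6-digit hex colours, with or without a leading '#', e.g.
#   hex_to_rgb('#abc')    -> (170, 187, 204)
#   hex_to_rgb('#66bc53') -> (102, 188, 83)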
class Command(BaseCommand):
args = ''
help = 'Run this command whenever 2 minutes go by.'
def handle(self, *args, **options):
Announcement.objects.all().delete()
all_actions = {}
player_locations = {}
for account in Account.objects.all():
if account.actions != '':
this_accounts_actions = []
actions = account.actions.split(',')
for action in actions:
if action == 'walk':
this_accounts_actions.append('walk-start')
this_accounts_actions.append('walk')
else:
this_accounts_actions.append(action)
if len(this_accounts_actions) < 10:
for i in range(10 - len(this_accounts_actions)):
this_accounts_actions.append('noop')
all_actions[account.id] = this_accounts_actions
account.inactive_turns = 0
else:
all_actions[account.id] = ['noop']*10
account.inactive_turns += 1
account.last_chat_message = account.chat_message
account.chat_message = ''
account.last_actions = account.actions
account.last_col = account.col
account.last_row = account.row
account.last_direction = account.direction
account.actions = ''
account.save()
print 'Action hash setup complete.'
for second in range(10):
for account in Account.objects.all():
this_action_name = all_actions[account.id][second]
if this_action_name != 'walk-start' and this_action_name != 'noop':
# Figure stamina for this action
this_action = get_action_by_name(this_action_name)
account.stamina += this_action['stamina']
if account.stamina > 10:
account.stamina = 10
if this_action_name == 'walk':
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if this_action_name == 'run':
# Factor this into a function sometime
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if account.direction == 'west':
account.col -= 1
if account.direction == 'east':
account.col += 1
if account.direction == 'north':
account.row -= 1
if account.direction == 'south':
account.row += 1
# Give flags to those who should have them
square = get_object_or_None(Square, col=account.col, row=account.row)
if square != None:
if TILES[square.tile] == 'red-flag' and account.team == 'blue':
account.has_flag = True
if TILES[square.tile] == 'blue-flag' and account.team == 'red':
account.has_flag = True
if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
account.has_flag = False
account.flags_gotten += 1
Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))
if this_action_name in ['north', 'south', 'east', 'west']:
account.direction = this_action_name
if account.col < 1: account.col = 1
if account.col > 48: account.col = 48
if account.row < 1: account.row = 1
if account.row > 73: account.row = 73
account.save()
if account.col not in player_locations:
player_locations[account.col] = {}
if account.row not in player_locations[account.col]:
player_locations[account.col][account.row] = []
if account not in player_locations[account.col][account.row]:
player_locations[account.col][account.row].append(account)
print 'Action resolutions finished'
for row in range(75):
for col in range(50):
if player_locations.has_key(col):
if player_locations[col].has_key(row):
players_in_this_square = player_locations[col][row]
if len(players_in_this_square) >= 2:
seen = {}
for account in players_in_this_square:
for other_account in players_in_this_square:
if account != other_account and (not seen.has_key(str(account.id) + '|' + str(other_account.id))) and (not seen.has_key(str(other_account.id) + '|' + str(account.id))):
if account.team != other_account.team:
if col < 25:
if account.team == 'blue':
account.col = BLUE_START['col']
account.row = BLUE_START['row']
other_account.enemies_tagged += 1
if other_account.team == 'blue':
other_account.col = BLUE_START['col']
other_account.row = BLUE_START['row']
account.enemies_tagged += 1
else:
if account.team == 'red':
account.col = RED_START['col']
account.row = RED_START['row']
other_account.enemies_tagged += 1
if other_account.team == 'red':
other_account.col = RED_START['col']
other_account.row = RED_START['row']
account.enemies_tagged += 1
account.save()
other_account.save()
seen[str(account.id) + '|' + str(other_account.id)] = True
seen[str(other_account.id) + '|' + str(account.id)] = True
squares = Square.objects.order_by('row', 'col')
im = Image.new('RGB', (50, 75), 'black')
for square in squares:
terrain = square.get_terrain_type()
if terrain == 'grass':
color = (102, 188, 83)
elif terrain == 'water':
color = (71, 132, 224)
elif terrain == 'corn':
color = (255, 255, 0)
elif terrain == 'rock':
color = (160, 160, 160)
elif terrain == 'trees':
color = (8, 74, 41)
elif terrain == 'dirt':
color = (205, 115, 32)
elif terrain == 'shrubbery':
color = (8, 74, 41)
elif terrain == 'road':
color = (200, 200, 200)
elif terrain == 'red-flag':
color = (150, 0, 30)
elif terrain == 'blue-flag':
color = (0, 0, 196)
if terrain == 'red-flag' or terrain == 'blue-flag':
im.putpixel((square.col, square.row), color)
im.putpixel((square.col-1, square.row), color)
im.putpixel((square.col-1, square.row-1), color)
im.putpixel((square.col-1, square.row+1), color)
im.putpixel((square.col+1, square.row), color)
else:
im.putpixel((square.col, square.row), color)
for account in Account.objects.filter(inactive_turns__lt=settings.TURNS_TILL_DEACTIVATION):
if account.team == 'red':
color = (255, 0, 0)
elif account.team == 'blue':
color = (0, 0, 255)
im.putpixel((account.col, account.row), color)
im.putpixel((account.col-1, account.row), color)
im.putpixel((account.col+1, account.row), color)
im.putpixel((account.col, account.row-1), color)
im.putpixel((account.col, account.row+1), color)
im = im.resize((250, 375), Image.NEAREST)
im.save('static/images/minimap.png', 'PNG')
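# Illustrative sketch (not part of the original command): the handler above
# repeats the same "move one square, then check for a flag" block several
# times (see the "Factor this into a function sometime" note). A hedged
# refactoring could collect that block into a helper like the one below; the
# names mirror the code above, but the command does not actually call it.
def step_and_check_flag(account):
    if account.direction == 'west':
        account.col -= 1
    if account.direction == 'east':
        account.col += 1
    if account.direction == 'north':
        account.row -= 1
    if account.direction == 'south':
        account.row += 1
    square = get_object_or_None(Square, col=account.col, row=account.row)
    if square is not None:
        if TILES[square.tile] == 'red-flag' and account.team == 'blue':
            account.has_flag = True
        if TILES[square.tile] == 'blue-flag' and account.team == 'red':
            account.has_flag = True
        if (account.has_flag and square.col < 25 and account.team == 'red') or (account.has_flag and square.col >= 25 and account.team == 'blue'):
            account.has_flag = False
            account.flags_gotten += 1
            Announcement.objects.create(text='%s gets a flag for %s' % (account.username, account.get_team_display()))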
|
nilq/baby-python
|
python
|
'''The user enters an integer; if the number is even the output is (even),
and if it is odd the output is (odd).'''
#input
num=int(input("Enter the number:"))
#logic
if (num%2==0):
print(f"{num} is even")
else:
print(f"{num} is odd")
|
nilq/baby-python
|
python
|
#
# Copyright 2018-2019 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
def copy_dir(source_directory,
target_directory):
"""
    Copies files from source_directory to target_directory.
    If the target directory doesn't exist it will be created.
    :param source_directory: source directory
    :type source_directory: str
    :param target_directory: destination directory
    :type target_directory: str
"""
if os.path.isdir(source_directory):
def deep_copy(source, target):
"""Copies recursively all files from source to destination
"""
names = os.listdir(source)
os.makedirs(target, exist_ok=True)
for name in names:
src_name = os.path.join(source, name)
tgt_name = os.path.join(target, name)
if os.path.isdir(src_name):
# source is a directory
deep_copy(src_name, tgt_name)
else:
# source is a file
print('Copying "{}" to "{}" ...'
.format(src_name, tgt_name))
shutil.copy2(src_name, tgt_name)
# copy files recursively
deep_copy(source_directory,
target_directory)
else:
print('Error. Directory "{}" was not found.'.format(source_directory))
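# Illustrative usage (not part of the original module); the paths here are
# hypothetical:
#
#   copy_dir('assets/source_model', 'assets/deployed_model')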
|
nilq/baby-python
|
python
|
# Generated by Django 2.2.9 on 2020-01-11 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bio_app', '0013_auto_20200111_2035'),
]
operations = [
migrations.AlterField(
model_name='clusters',
name='cluster_id',
field=models.CharField(max_length=255, verbose_name='cluster Id'),
),
]
|
nilq/baby-python
|
python
|
import pandas as pd
from bokeh.io import show, curdoc
from bokeh.layouts import layout
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.plotting import figure
from bokeh.sampledata.degrees import data
from bokeh.themes import Theme
data = data.set_index('Year')
categories = data.columns.tolist()
categories.reverse()
curdoc().theme = Theme(json={'attrs': {
'Figure': {
'toolbar_location': None,
'outline_line_color': None,
'min_border_right': 10,
},
'Axis': {
'major_tick_in': None,
'minor_tick_out': None,
'minor_tick_in': None,
'axis_line_color': '#CAC6B6',
'major_tick_line_color': '#CAC6B6',
},
'Legend': {
'background_fill_alpha': 0.8,
}
}})
def _make_source_for_year(year):
# Get data out of dataframe for a given year
year_df = pd.DataFrame(data.loc[year]).reset_index()
year_df = year_df.rename(columns={year: 'percent_female', 'index': 'category'})
source = ColumnDataSource(year_df)
return source
def all_for_year(year):
source = _make_source_for_year(year)
bar_opts = dict(y='category', height=0.5)
p = figure(title=str(year), y_range=FactorRange(factors=categories), x_range=(0, 100), tools='')
p.grid.grid_line_color = None
p.hbar(left=0, right='percent_female', color='#AE9E59', legend='Female', source=source, **bar_opts)
p.hbar(left='percent_female', right=100, color='#CAC6B6', legend='Male', source=source, **bar_opts)
return p
def two_categories_over_time():
bar_opts = dict(width=0.3, alpha=0.8)
p = figure(title="Percentage of women graduating over time in two fields.", y_range=(0, 100), tools='')
p.vbar(bottom=0, top=data['Psychology'], x=data.index - 0.2, color='#4F4478', legend='Psychology', **bar_opts)
p.vbar(bottom=0, top=data['Engineering'], x=data.index + 0.2, color='#827F8B', legend='Engineering', **bar_opts)
return p
l = layout([
[all_for_year(1970), all_for_year(2010)],
[two_categories_over_time()],
], sizing_mode='stretch_both')
show(l)
|
nilq/baby-python
|
python
|
from django import template
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.html import format_html, format_html_join
from core.constants import VIDEO_DURATION_DATA_ATTR_NAME
register = template.Library()
def _get_poster_attribute(video):
if video and video.thumbnail:
return f'poster="{video.thumbnail.url}" ' # trailing space is deliberate
return ''
@register.simple_tag
def render_video(block):
"""Renders a video block (eg in a lesson hero or a case study).
Includes a custom attribute on the video element so we can estimate
page view time in our post-save hook, without clashing with the automatically
added `duration` attribute that a browser may add to <video>.
"""
if not block:
return ''
video_duration = getattr(block['video'], 'duration', 0)
# The default, above, _should_ never be needed because field is mandatory in the CMS
video = block['video']
timestamp_to_allow_poster_image_to_work_on_mobile_safari = '#t=0.1'
sources_data = []
for source in video.sources:
if 'src' in source:
source['src'] += timestamp_to_allow_poster_image_to_work_on_mobile_safari
sources_data.append([flatatt(source)])
sources = format_html_join('\n', '<source{0}>', sources_data)
if video.subtitles:
rendered_subtitles = []
for subtitle_spec in video.subtitles:
rendered_subtitles.append(
render_to_string(
'core/includes/_video_subtitle.html',
subtitle_spec,
)
)
subtitles = '\n'.join(rendered_subtitles)
else:
subtitles = ''
rendered = format_html(
f"""
<video preload="metadata" controls controlsList="nodownload"
{_get_poster_attribute(video)}{VIDEO_DURATION_DATA_ATTR_NAME}="{video_duration}">
{sources}
{subtitles}
Your browser does not support the video tag.
</video>
<div class="video-transcript-container"></div>
"""
)
return rendered
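# Illustrative usage (not part of the original module): in a Django template
# the tag would be used roughly like this (the load name depends on the
# filename this template-tag module is registered under):
#
#   {% load video_tags %}
#   {% render_video page.hero_video %}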
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import neurovault.apps.statmaps.models
import neurovault.apps.statmaps.storage
class Migration(migrations.Migration):
dependencies = [
('statmaps', '0073_auto_20161111_0033'),
]
operations = [
migrations.AlterField(
model_name='collection',
name='private',
            field=models.BooleanField(default=False, verbose_name=b'Accessibility', choices=[(False, b'Public (The collection will be accessible by anyone and all the data in it will be distributed under CC0 license)'), (True, b'Private (The collection will not be listed in the NeuroVault index. It will be possible to share it with others at a private URL.)')]),
),
migrations.AlterField(
model_name='image',
name='surface_left_file',
field=models.FileField(storage=neurovault.apps.statmaps.storage.DoubleExtensionStorage(), upload_to=neurovault.apps.statmaps.models.upload_img_to, null=True, verbose_name=b'File with the unthresholded LEFT hemisphere fsaverage surface map (.mgh, .curv, .gii)', blank=True),
),
migrations.AlterField(
model_name='image',
name='surface_right_file',
field=models.FileField(storage=neurovault.apps.statmaps.storage.DoubleExtensionStorage(), upload_to=neurovault.apps.statmaps.models.upload_img_to, null=True, verbose_name=b'File with the unthresholded RIGHT hemisphere fsaverage surface map (.mgh, .curv, .gii)', blank=True),
),
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright 2021 Xiaomi Corporation (author: Wei Kang)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R get_best_matching_stats_test_py
import unittest
import k2
import torch
class TestGetBestMatchingStats(unittest.TestCase):
def test(self):
s = '[ [ [ 5 1 4 6 ] [ 5 1 2 6 ] [ 5 3 4 6 ] ] ]'
tokens = k2.RaggedInt(s)
scores = torch.tensor([1, 2, 3, 4, 5, 7, 8, 6, 0, 0, 0, 0],
dtype=torch.float32)
counts = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
dtype=torch.int32)
eos = 6
min_token = 1
max_token = 6
max_order = 2
mean, var, counts_out, ngram_order = k2.get_best_matching_stats(
tokens, scores, counts, eos, min_token, max_token, max_order)
mean_ref = torch.tensor([3, 4.5, 3, 4, 3, 4.5, 4.5, 5, 3, 4.5, 3, 4],
dtype=torch.float32)
var_ref = torch.tensor([4, 6.25, 0, 0, 4, 6.25, 5.25, 1, 4, 5.25, 0, 0],
dtype=torch.float32)
counts_out_ref = torch.tensor([2, 2, 1, 1, 2, 2, 0, 2, 2, 0, 1, 1],
dtype=torch.int32)
ngram_order_ref = torch.tensor([2, 2, 1, 2, 2, 2, 0, 1, 2, 0, 1, 2],
dtype=torch.int32)
assert torch.allclose(mean, mean_ref)
assert torch.allclose(var, var_ref)
assert torch.all(torch.eq(counts_out, counts_out_ref))
assert torch.all(torch.eq(ngram_order, ngram_order_ref))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/local/bin/python
import os, sys, time
space = ' '
# read file by filename
def read_file(filename):
f = open(filename, 'r')
content = f.readlines()
f.close()
return content
def handleTemp(temp):
# extract content from <TEXT> and void <TEXT>content</TEXT>
start = temp.find('<TEXT>') + len('<TEXT>')
end = temp.find('</TEXT>')
return temp[start:end]
def handleDocument(content):
documentid = ''
Text = ''
i = 0
length = len(content)
while i < length:
if '<DOCNO>' in content[i]:
no = content[i].split(' ')
documentid = no[1]
break
            # found the doc id; end this loop to avoid unnecessary if-checks on later lines
i += 1
while i < length:
if '<TEXT>' in content[i]:
temp = ''
while '</TEXT>' not in content[i]:
# replace the '\n' with space
temp += content[i][:-1] + ' '
i += 1
temp += content[i]
Text += handleTemp(temp)
i += 1
return (documentid, Text)
# split the file into document
def splitDoc(content):
length = len(content)
i = 0
documents = []
while i < length:
if '<DOC>' in content[i]:
# start to get the whole doc
doc = []
doc.append(content[i])
i += 1
while '</DOC>' not in content[i]:
doc.append(content[i])
i += 1
doc.append(content[i])
documents.append(handleDocument(doc))
i += 1
return documents
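# Illustrative input (not part of the original script): splitDoc() expects
# TREC-style documents and returns a list of (documentid, text) tuples, e.g.
#
#   <DOC>
#   <DOCNO> FT911-3 </DOCNO>
#   <TEXT>
#   some document text ...
#   </TEXT>
#   </DOC>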
def getNumber(content):
index = 0
    # returns [first number, second number, position to continue parsing from]
result = []
c = content.split(' ')
end = len(c[0]) + len(c[1]) + 2 # 2 space
result=[int(c[0]), int(c[1]), end]
return result
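    # NOTE: the character-scanning code below is unreachable because of the
    # return statement above; it is kept here unchanged.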
size = len(content)
while index < size:
if content[index] == ' ':
temp = ''
index += 1
while index < size and content[index] != ' ':
temp += content[index]
index += 1
result.append(int(temp))
else:
index += 1
if len(result) == 2:
while index < size and content[index] == ' ':
index += 1
result.append(index)
return result
def mergefile(name):
# data file
print 'mergefile'
file1 = open('cache1_' + name,'r')
file2 = open('cache2_' + name,'r')
file3 = open('cache3_' + name, 'w')
cate3 = open('cache3_' + name + '_category', 'w')
# category file
cate1 = open('cache1_' + name + '_category', 'r').readlines()
cate2 = open('cache2_' + name + '_category', 'r').readlines()
ptr1 = 0
ptr2 = 0
start = 0
while ptr1 < len(cate1) and ptr2 < len(cate2):
# line in category
# term start len
line1 = cate1[ptr1].split(' ')
line2 = cate2[ptr2].split(' ')
t1 = line1[0]
t2 = line2[0]
result = ''
if t1 < t2:
result += t1 + space + str(start) + space
file1.seek(int(line1[1]))
content = file1.read(int(line1[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
elif t1 > t2:
result += t2 + space + str(start) + space
file2.seek(int(line2[1]))
content = file2.read(int(line2[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr2 += 1
elif t1 == t2:
# if two terms are the same
result += t1 + space + str(start) + space
file1.seek(int(line1[1]))
content1 = file1.read(int(line1[2][:-1]))
# print content1
space1 = getNumber(content1)
# print space1
# sys.exit(-1)
file2.seek(int(line2[1]))
content2 = file2.read(int(line2[2][:-1]))
# print content2
space2 = getNumber(content2)
data = str(space1[0] + space2[0]) + space
data += str(space1[1] + space2[1]) + space + content1[space1[2]:-1]
data += space + content2[space2[2]:]
file3.write(data)
size = len(data)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
ptr2 += 1
while ptr1 < len(cate1):
line1 = cate1[ptr1].split(' ')
t1 = line1[0]
result = t1 + space + str(start) + space
file1.seek(int(line1[1]))
content = file1.read(int(line1[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr1 += 1
while ptr2 < len(cate2):
line2 = cate2[ptr2].split(' ')
t2 = line2[0]
result = t2 + space + str(start) + space
file2.seek(int(line2[1]))
content = file2.read(int(line2[2][:-1]))
file3.write(content)
size = len(content)
result += str(size) + '\n'
cate3.write(result)
start += size
ptr2 += 1
file1.close()
file2.close()
file3.close()
cate3.close()
os.remove('cache1_' + name)
os.remove('cache2_' + name)
os.remove('cache1_' + name + '_category')
os.remove('cache2_' + name + '_category')
os.rename('cache3_' + name, 'cache1_' + name)
os.rename('cache3_' + name + '_category', 'cache1_' + name + '_category')
def get_range(nums, move_able):
# return the range of the list, and return the index of the smallest number
small = 10000
next_val = 10000
next_index = 0
big = -1
index = 0
while index < len(nums):
num = nums[index][0]
if small > num:
small = num
if next_val > num and move_able[index]:
next_val = num
next_index = index
if big < num:
big = num
index += 1
return (next_index, big - small)
def get_min_span(matrix):
    # a matrix should be a list containing one sorted position list per term
    if len(matrix) == 1:
        return 0
    column = []
    row = len(matrix)
    for i in range(row):
        column.append([matrix[i][0], 0])
    move_able = []
    smallest = 10000
    for i in range(row):
        move_able.append(True)
    while True in move_able:
        next_move = get_range(column, move_able)
        if next_move[1] + 1 == row:
            smallest = next_move[1] + 1
            break
        if smallest > next_move[1] + 1:
            smallest = next_move[1] + 1
        next_val = next_move[0]
        column[next_val][1] += 1
        if len(matrix[next_val]) <= column[next_val][1] + 1:
            move_able[next_val] = False
        if move_able[next_val]:
            column[next_val][0] = matrix[next_val][column[next_val][1]]
    # print matrix, smallest
    return smallest
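# Illustrative behaviour (not part of the original script): get_min_span()
# appears to compute a proximity measure -- the size of the smallest window
# that covers one position from each term's (sorted) position list. With the
# hypothetical position lists below the tightest window is 1..3, so:
#
#   >>> get_min_span([[1, 5], [2, 6], [3, 7]])
#   3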
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from icemac.addressbook.interfaces import IPerson
import icemac.addressbook.testing
import pytest
import zope.component.hooks
# Fixtures to set-up infrastructure which are usable in tests,
# see also in ./fixtures.py (which are imported via src/../conftest.py):
@pytest.yield_fixture(scope='function')
def person_with_field_data(personWithFieldDataS):
"""Provide predefined person data, see `personWithFieldDataS`."""
for connection in icemac.addressbook.testing.pyTestStackDemoStorage(
personWithFieldDataS.zodb, 'PersonWithFieldFunction'):
yield connection
# Infrastructure fixtures
@pytest.yield_fixture(scope='session')
def personWithFieldDataS(
addressBookS, FullPersonFactory, PostalAddressFactory, KeywordFactory,
PhoneNumberFactory, EMailAddressFactory, HomepageAddressFactory,
FieldFactory):
"""Create base data used in person tests."""
for connection in icemac.addressbook.testing.pyTestStackDemoStorage(
addressBookS, 'SearchSession'):
address_book = connection.rootFolder['ab']
with zope.component.hooks.site(address_book):
field_name = FieldFactory(
address_book, IPerson, 'TextLine', u'foobar').__name__
icemac.addressbook.conftest._create_person(
address_book, FullPersonFactory, PostalAddressFactory,
KeywordFactory, PhoneNumberFactory, EMailAddressFactory,
HomepageAddressFactory, **{field_name: u'my value'})
yield connection
|
nilq/baby-python
|
python
|
log_enabled = True
# 1 = print everything
# 2 = print less
# 3 = print the least
log_level = 2
def log(message, message_type="info", level=3):
if not log_enabled:
return
log_message_type_symbols = {
"info": "[*]",
"warning": "[!]",
"error": "[x]",
"success": "[+]",
}
# Errors and warnings are logged anyways
if(message_type == "error" or message_type == "warning"):
print(log_message_type_symbols[message_type], message)
else:
if (level >= log_level):
print(log_message_type_symbols[message_type], message)
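# Illustrative behaviour (not part of the original module): with the default
# log_level of 2,
#
#   log("starting up")                 -> prints "[*] starting up"
#   log("verbose detail", level=1)     -> suppressed (1 < log_level)
#   log("disk is full", "error")       -> prints "[x] disk is full"
#                                         (errors and warnings always print)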
|
nilq/baby-python
|
python
|
from diary.views import get_next_or_none
from django.urls import reverse
def test_get_next_or_none__last(note):
assert get_next_or_none(note) is None
def test_get_next_or_none__next_exists(note2):
assert get_next_or_none(note2).title == 'My Title'
def test_NoteListView(client, note):
response = client.get(reverse('note-list'))
assert response.status_code == 200
assert b'<div hx-get="/note/create" hx-trigger="load" hx-swap="outerHTML"></div>' in response.content
|
nilq/baby-python
|
python
|
#
# Control an RFSpace SDR-IP, NetSDR, or CloudIQ.
#
# Example:
# sdr = sdrip.open("192.168.3.125")
# sdr.setrate(32000)
# sdr.setgain(-10)
# sdr.setrun()
# while True:
# buf = sdr.readiq()
# OR buf = sdr.readusb()
#
# Robert Morris, AB1HL
#
import socket
import sys
import os
import numpy
import scipy
import scipy.signal
import threading
import time
import struct
import weakutil
def x8(x):
s = bytearray([x & 0xff])
return s
def x16(x):
# least-significant first
s = bytearray([
x & 0xff,
(x >> 8) & 0xff ])
return s
def x32(x):
# least-significant first
s = bytearray([
x & 0xff,
(x >> 8) & 0xff,
(x >> 16) & 0xff,
(x >> 24) & 0xff ])
return s
# 40-bit frequency in Hz, lsb first
# but argument must be an int
def x40(hz):
s = b""
for i in range(0, 5):
s = s + bytearray([ hz & 0xff ])
hz >>= 8
return s
# turn a char into an int.
# yord[s[i]]
# in python27, s is str, s[i] is str, so call ord().
# in python3, s is bytes, s[i] is int, so no ord().
def yord(x):
if type(x) == int:
return x
else:
return ord(x)
def y16(s):
x = (yord(s[0]) +
(yord(s[1]) << 8))
return x
def y32(s):
x = (yord(s[0]) +
(yord(s[1]) << 8) +
(yord(s[2]) << 16) +
(yord(s[3]) << 24))
return x
# turn 5 bytes from NetSDR into a 40-bit number.
# LSB first.
def y40(s):
hz = (yord(s[0]) +
(yord(s[1]) << 8) +
(yord(s[2]) << 16) +
(yord(s[3]) << 24) +
(yord(s[4]) << 32))
return hz
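# For example, x40(258) packs to the five bytes 02 01 00 00 00 (LSB first),
# and y40() of those same bytes returns 258.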
# turn a byte array into hex digits
def hx(s):
buf = ""
for i in range(0, len(s)):
buf += "%02x " % (yord(s[i]))
return buf
mu = threading.Lock()
#
# if already connected, return existing SDRIP,
# otherwise a new one.
#
sdrips = { }
def open(ipaddr):
global sdrips, mu
mu.acquire()
if not (ipaddr in sdrips):
sdrips[ipaddr] = SDRIP(ipaddr)
sdr = sdrips[ipaddr]
mu.release()
return sdr
class SDRIP:
def __init__(self, ipaddr):
# ipaddr is SDR-IP's IP address e.g. "192.168.3.123"
self.mode = "usb"
self.ipaddr = ipaddr
self.mu = threading.Lock()
self.lasthz = 0
self.rate = None
self.frequency = None
self.running = False
self.mhz_overload = { }
self.mhz_gain = { }
# 16 or 24
# only 24 seems useful
self.samplebits = 24
# iq? i think only True works.
self.iq = True
self.nextseq = 0
self.reader_pid = None
self.connect()
# "usb" or "fm"
    # maybe only here to be read by weakaudio.py/SDRIP.
def set_mode(self, mode):
self.mode = mode
def connect(self):
# allocate a UDP socket and port for incoming data from the SDR-IP.
self.ds = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.ds.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024*1024)
self.ds.bind(('', 0)) # ask kernel to choose a free port
hostport = self.ds.getsockname() # hostport[1] is port number
# fork() a sub-process to read and buffer the data UDP socket,
# since the Python thread scheduler doesn't run us often enough if
# WSPR is compute-bound in numpy for tens of seconds.
r, w = os.pipe()
self.reader_pid = os.fork()
if self.reader_pid == 0:
os.close(r)
self.reader(w)
os._exit(0)
else:
self.pipe = r
os.close(w)
self.ds.close()
# commands over TCP to port 50000
self.cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.cs.connect((self.ipaddr, 50000))
# only this thread reads from the control TCP socket,
# and appends to self.replies.
self.replies_mu = threading.Lock()
self.replies = [ ]
th = threading.Thread(target=lambda : self.drain_ctl())
th.daemon = True
th.start()
time.sleep(0.1) # CloudIQ
# tell the SDR-IP where to send UDP packets
self.setudp(hostport[1])
# boilerplate
self.setad()
self.setfilter(0)
self.setgain(0)
#self.setgain(-20)
# "SDR-IP"
#print("name: %s" % (self.getitem(0x0001)))
# option 0x02 means reflock board is installed
#oo = self.getitem(0x000A) # Options
#oo0 = yord(oo[0])
#print("options: %02x" % (oo0))
if False:
# set calibration.
# 192.168.3.130 wants + 506
# 192.168.3.131 wants + 525
# (these are with the 10 mhz reflock ocxo, but not locked)
data = b""
data += x8(0) # ignored
if self.ipaddr == "192.168.3.130":
data += x32(80000000 + 506)
elif self.ipaddr == "192.168.3.131":
data += x32(80000000 + 525)
else:
print("sdrip.py: unknown IP address %s for calibration" % (self.ipaddr))
# data += x32(80000000 + 0)
data = None
if data != None:
self.setitem(0x00B0, data)
# A/D Input Sample Rate Calibration
# factory set to 80000000
x = self.getitem(0x00B0)
cal = y32(x[1:5])
print("sdrip %s cal: %s" % (self.ipaddr, cal))
# read the UDP socket from the SDR-IP.
def reader1(self):
while True:
buf = self.ds.recv(4096)
self.packets_mu.acquire()
self.packets.append(buf)
self.packets_mu.release()
# read the data UDP socket in a separate process and
# send the results on the pipe w.
def reader(self, w):
ww = os.fdopen(w, 'wb')
# spawn a thread that just keeps reading from the socket
# and appending packets to packets[].
self.packets = [ ]
self.packets_mu = threading.Lock()
th = threading.Thread(target=lambda : self.reader1())
th.daemon = True
th.start()
# move packets from packets[] to the UNIX pipe.
# the pipe write() calls may block, but it's OK because
# the reader1() thread keeps draining the UDP socket.
while True:
self.packets_mu.acquire()
ppp = self.packets
self.packets = [ ]
self.packets_mu.release()
if len(ppp) < 1:
# we expect 100 pkts/second
# but OSX seems to limit a process to 150 wakeups/second!
# time.sleep(0.005)
time.sleep(0.01)
for pkt in ppp:
try:
ww.write(struct.pack('I', len(pkt)))
ww.write(pkt)
ww.flush()
except:
#sys.stderr.write("sdrip: pipe write failed\n")
os._exit(1)
# consume and record TCP control messages from the NetSDR,
# and notice if it goes away.
def drain_ctl(self):
try:
while True:
reply = self.real_readreply()
if reply != None:
self.replies_mu.acquire()
self.replies.append(reply)
self.replies_mu.release()
except:
print("drain error:", sys.exc_info()[0])
sys.stdout.flush()
pass
sys.stderr.write("sdrip: control connection died\n")
os.kill(self.reader_pid, 9)
# read a 16-bit int from TCP control socket
def read16(self):
x0 = self.cs.recv(1) # least-significant byte
x1 = self.cs.recv(1) # most-significant byte
return (yord(x0) & 0xff) | ((yord(x1) << 8) & 0xff00)
# read a reply from the TCP control socket
# return [ type, item, data ]
def readctl(self):
len = self.read16() # overall length and msg type
mtype = (len >> 13) & 0x7
len &= 0x1fff
if len == 2:
# NAK -- but for what?
sys.stderr.write("sdrip: NAK\n")
return None
item = self.read16() # control item
data = b""
xlen = len - 4
while xlen > 0:
dd = self.cs.recv(1)
data += dd
xlen -= 1
return [ mtype, item, data ]
# read one reply from the tcp control socket.
def real_readreply(self):
reply = self.readctl()
if reply == None:
# NAK
return None
# print("reply: %d %04x %s" % (reply[0], reply[1], hx(reply[2])))
# reply[0] is mtype (0=set, 1=get)
# reply[1] is item
# reply[2] is date
if reply[0] == 1 and reply[1] == 5:
# A/D overload
self.got_overload()
return reply
def got_overload(self):
mhz = self.lasthz // 1000000
self.mhz_overload[mhz] = time.time()
ogain = self.mhz_gain.get(mhz, 0)
gain = ogain - 10
if gain < -30:
gain = -30
self.mhz_gain[mhz] = gain
sys.stderr.write("sdrip: overload mhz=%d %d %d\n" % (mhz, ogain, gain))
# wait for drain thread to see the reply we want.
def readreply(self, item):
self.replies_mu.acquire()
lasti = len(self.replies) - 10 # XXX
lasti = max(0, lasti)
self.replies_mu.release()
while True:
self.replies_mu.acquire()
while lasti < len(self.replies):
reply = self.replies[lasti]
lasti = lasti + 1
if reply[0] == 0 and reply[1] == item:
self.replies_mu.release()
return reply[2]
if len(self.replies) > 20:
self.replies = [ ]
lasti = 0
self.replies_mu.release()
time.sleep(0.01)
# send a Request Control Item, wait for and return the result
def getitem(self, item, extra=None):
try:
self.mu.acquire()
mtype = 1 # type=request control item
buf = b""
buf += x8(4) # overall length, lsb
buf += x8((mtype << 5) | 0) # 0 is len msb
buf += x16(item)
if extra != None:
buf += extra
self.cs.send(buf)
ret = self.readreply(item)
return ret
finally:
self.mu.release()
def setitem(self, item, data):
try:
self.mu.acquire()
mtype = 0 # set item
lx = 4 + len(data)
buf = b""
buf += x8(lx)
buf += x8((mtype << 5) | 0)
buf += x16(item)
buf += data
self.cs.send(buf)
ret = self.readreply(item)
return ret
finally:
self.mu.release()
def print_setup(self):
print(("freq 0: %d" % (self.getfreq(0)))) # 32770 if down-converting
print(("name: %s" % (self.getname())))
print(("serial: %s" % (self.getserial())))
print(("interface: %d" % (self.getinterface())))
# print("boot version: %s" % (self.getversion(0)))
# print("application firmware version: %s" % (self.getversion(1)))
# print("hardware version: %s" % (self.getversion(2)))
# print("FPGA config: %s" % (self.getversion(3)))
print(("rate: %d" % (self.getrate())))
print(("freq 0: %d" % (self.getfreq(0)))) # 32770 if down-converting
print(("A/D mode: %s" % (self.getad(0))))
print(("filter: %d" % (self.getfilter(0))))
print(("gain: %d" % (self.getgain(0))))
print(("fpga: %s" % (self.getfpga())))
print(("scale: %s" % (self.getscale(0))))
# print("downgain: %s" % (self.getdowngain()))
# set Frequency
def setfreq1(self, chan, hz):
hz = int(hz)
data = b""
data += bytearray([chan]) # 1=display, 0=actual receiver DDC
data += x40(hz)
self.setitem(0x0020, data)
self.lasthz = hz
def setfreq(self, hz):
self.setfreq1(0, hz) # DDC
self.setfreq1(1, hz) # display
# a sleep seems to be needed for the case in which
# a NetSDR is switching on the down-converter.
if hz > 30000000 and (self.frequency == None or self.frequency < 30000000):
time.sleep(0.5)
self.frequency = hz
# reduce gain if recently saw overload warning
mhz = hz // 1000000
gain = 0
if mhz in self.mhz_gain:
if time.time() - self.mhz_overload[mhz] > 5 * 60:
self.mhz_overload[mhz] = time.time()
self.mhz_gain[mhz] += 10
if self.mhz_gain[mhz] > 0:
self.mhz_gain[mhz] = 0
gain = self.mhz_gain[mhz]
if mhz <= 4 and gain > -10:
gain = -10
self.mhz_gain[mhz] = gain
self.mhz_overload[mhz] = time.time()
self.setgain(gain)
def getfreq(self, chan):
x = self.getitem(0x0020, x8(chan))
hz = y40(x[1:6])
return hz
# set Receiver State to Run
# only I/Q seems to work, not real.
def setrun(self):
self.running = True
data = b""
if self.iq:
data += x8(0x80) # 0x80=I/Q, 0x00=real
else:
data += x8(0x00) # 0x80=I/Q, 0x00=real
data += x8(0x02) # 1=idle, 2=run
if self.samplebits == 16:
data += x8(0x00) # 80=24 bit continuous, 00=16 bit continuous
else:
data += x8(0x80) # 80=24 bit continuous, 00=16 bit continuous
data += x8(0x00) # unused
self.setitem(0x0018, data)
self.nextseq = 0
# self.print_setup()
# stop receiver
def stop(self):
self.running = False
data = b""
if self.iq:
data += x8(0x80) # 0x80=I/Q, 0x00=real
else:
data += x8(0x00) # 0x80=I/Q, 0x00=real
data += x8(0x01) # 1=idle, 2=run
if self.samplebits == 16:
data += x8(0x00) # 80=24 bit continuous, 00=16 bit continuous
else:
data += x8(0x80) # 80=24 bit continuous, 00=16 bit continuous
data += x8(0x00) # unused
self.setitem(0x0018, data)
# DDC Output Sample Rate
# rate is samples/second
# must be an integer x4 division of 80 million.
# the minimum is 32000.
def setrate(self, rate):
self.rate = rate
data = b""
data += x8(0) # ignored
data += x32(rate)
self.setitem(0x00B8, data)
def getrate(self):
x = self.getitem(0x00B8, x8(0))
rate = y32(x[1:5])
return rate
# A/D Modes
# set dither and A/D gain
def setad(self):
data = b""
data += x8(0) # ignored
# bit zero is dither, bit 1 is A/D gain 1.5
#data += x8(0x3)
data += x8(0x1)
self.setitem(0x008A, data)
# [ dither, A/D gain ]
def getad(self, chan):
x = self.getitem(0x008A, x8(0))
dither = (yord(x[1]) & 1) != 0
gain = (yord(x[1]) & 2) != 0
return [ dither, gain ]
# RF Filter Select
# 0=automatic
# 11=bypass
# 12=block everything (mute)
def setfilter(self, f):
data = b""
data += x8(0) # channel
data += x8(f)
self.setitem(0x0044, data)
def getfilter(self, chan):
x = self.getitem(0x0044, x8(chan))
return yord(x[1])
# RF Gain
# gain is 0, -10, -20 -30 dB
def setgain(self, gain):
data = b""
data += x8(0) # channel 1
data += x8(gain)
self.setitem(0x0038, data)
def getgain(self, chan):
x = self.getitem(0x0038, x8(chan))
return yord(x[1])
# e.g. "NetSDR"
def getname(self):
x = self.getitem(0x0001)
return x
# e.g. "PS000553"
def getserial(self):
x = self.getitem(0x0002)
return x
# 123 means version 1.23
# returns 10 for my NetSDR
def getinterface(self):
x = self.getitem(0x0003)
return y16(x[0:2])
# ID=0 boot code
# ID=1 application firmware
# ID=2 hardware
# ID=3 FPGA configuration
# XXX seems to cause protocol problems, NetSDR sends NAKs or something.
def getversion(self, id):
x = self.getitem(0x0004, x8(id))
if x == None:
# NAK
return None
if id == 3:
return [ yord(x[1]), yord(x[2]) ] # ID, version
else:
return y16(x[1:3]) # version * 100
# [ FPGA config number, FPGA config ID, FPGA revision, descr string ]
# e.g. [1, 1, 7, 'Std FPGA Config \x00']
def getfpga(self):
x = self.getitem(0x000C)
return [ yord(x[0]),
yord(x[1]),
yord(x[2]),
x[3:] ]
# Receiver A/D Amplitude Scale
def getscale(self, chan):
x = self.getitem(0x0023, x8(chan))
return y16(x[1:3])
# VHF/UHF Down Converter Gain
# XXX seems to yield a NAK
def getdowngain(self):
x = self.getitem(0x003A)
auto = yord(x[0])
lna = yord(x[1])
mixer = yord(x[2])
ifout = yord(x[3])
return [ auto, lna, mixer, ifout ]
# Data Output UDP IP and Port Address
# just set the port, not the host address.
def setudp(self, port):
# find host's IP address.
hostport = self.cs.getsockname()
ipaddr = socket.inet_aton(hostport[0]) # yields a four-byte string, wrong order
data = b""
data += bytearray([
ipaddr[3],
ipaddr[2],
ipaddr[1],
ipaddr[0], ])
data += x16(port)
self.setitem(0x00C5, data)
# wait for and decode a UDP packet of I/Q samples.
# returns a buffer with interleaved I and Q float64.
# return an array of complex (real=I, imag=Q).
def readiq(self):
# read from the pipe; a 4-byte length, then the packet.
x4 = os.read(self.pipe, 4)
if len(x4) != 4:
sys.stderr.write("sdrip read from child failed\n")
os._exit(1)
[plen] = struct.unpack("I", x4)
assert plen > 0 and plen < 65536
buf = b""
while len(buf) < plen:
x = os.read(self.pipe, plen - len(buf))
buf = buf + x
# parse SDR-IP header into length, msg type
lx = yord(buf[0])
lx |= (yord(buf[1]) << 8)
mtype = (lx >> 13) & 0x7 # 0x4 is data
lx &= 0x1fff # should == len(buf)
# packet sequence number (wraps to 1, not 0)
seq = yord(buf[2]) | (yord(buf[3]) << 8)
gap = 0
if seq != self.nextseq and (seq != 1 or self.nextseq != 65536):
# one or more packets were lost.
# we'll fill the gap with zeros.
sys.stderr.write("seq oops got=%d wanted=%d\n" % (seq, self.nextseq))
if seq > self.nextseq:
gap = seq - self.nextseq
self.nextseq = seq + 1
if self.samplebits == 16:
samples = numpy.fromstring(buf[4:], dtype=numpy.int16)
else:
s8 = numpy.fromstring(buf[4:], dtype=numpy.uint8)
x0 = s8[0::3]
x1 = s8[1::3]
x2 = s8[2::3]
# top 8 bits, sign-extended from x2
high = numpy.greater(x2, 127)
x3 = numpy.where(high,
numpy.repeat(255, len(x2)),
numpy.repeat(0, len(x2)))
z = numpy.empty([len(x0)*4], dtype=numpy.uint8)
z[0::4] = x0
z[1::4] = x1
z[2::4] = x2
z[3::4] = x3
zz = z.tostring()
#s32 = numpy.fromstring(zz, dtype=numpy.int32)
#samples = s32.astype(numpy.int16)
samples = numpy.fromstring(zz, dtype=numpy.int32)
samples = samples.astype(numpy.float64)
if gap > 0:
            pad = numpy.zeros(len(samples)*gap, dtype=numpy.float64)
samples = numpy.append(pad, samples)
ii1 = samples[0::2]
qq1 = samples[1::2]
cc1 = ii1 + 1j*qq1
return cc1
#
# read from SDR-IP, demodulate as USB.
#
def readusb(self):
iq = self.readiq()
usb = weakutil.iq2usb(iq)
return usb
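# Illustrative sketch (not part of the original module): readiq() widens the
# NetSDR's packed 24-bit little-endian samples to 32 bits by appending a
# fourth byte that sign-extends the top byte. The same idea in isolation, on a
# hypothetical 6-byte buffer holding the samples 1 and -1:
#
#   s8 = numpy.frombuffer(b'\x01\x00\x00\xff\xff\xff', dtype=numpy.uint8)
#   x0, x1, x2 = s8[0::3], s8[1::3], s8[2::3]
#   ext = numpy.where(x2 > 127, 255, 0).astype(numpy.uint8)
#   z = numpy.stack([x0, x1, x2, ext], axis=1).tobytes()
#   numpy.frombuffer(z, dtype=numpy.int32)  # -> array([ 1, -1]) on little-endian hosts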
|
nilq/baby-python
|
python
|
#
# This file is part of Python Client Library for STAC.
# Copyright (C) 2019 INPE.
#
# Python Client Library for STAC is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Python Client Library for STAC."""
from .stac import Stac
from .utils import Catalog, Collection, Item, ItemCollection, Link, Geometry, Provider, Extent
from .version import __version__
__all__ = ('__version__',
'stac', )
|
nilq/baby-python
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import shutil
from nova.openstack.common import log as logging
from nova.virt.disk import api as disk_api
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
def cache_image(context, target, image_id, user_id, project_id):
if not os.path.exists(target):
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
def inject_into_image(image, key, net, metadata, admin_password,
files, partition, use_cow=False):
try:
disk_api.inject_data(image, key, net, metadata, admin_password,
files, partition, use_cow)
except Exception as e:
LOG.warn(_("Failed to inject data into image %(image)s. "
"Error: %(e)s") % locals())
def unlink_without_raise(path):
try:
os.unlink(path)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
LOG.warn(_("Failed to unlink %(path)s, error: %(e)s") % locals())
def rmtree_without_raise(path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
except OSError as e:
LOG.warn(_("Failed to remove dir %(path)s, error: %(e)s") % locals())
def write_to_file(path, contents):
with open(path, 'w') as f:
f.write(contents)
def create_link_without_raise(source, link):
try:
os.symlink(source, link)
except OSError as e:
if e.errno == errno.EEXIST:
return
else:
LOG.warn(_("Failed to create symlink from %(source)s to %(link)s"
", error: %(e)s") % locals())
def random_alnum(count):
import random
import string
chars = string.ascii_uppercase + string.digits
return "".join(random.choice(chars) for _ in range(count))
def map_network_interfaces(network_info, use_ipv6=False):
# TODO(deva): fix assumption that device names begin with "eth"
# and fix assumption about ordering
if not isinstance(network_info, list):
network_info = [network_info]
interfaces = []
for id, (network, mapping) in enumerate(network_info):
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
interface = {
'name': 'eth%d' % id,
'address': mapping['ips'][0]['ip'],
'gateway': mapping['gateway'],
'netmask': mapping['ips'][0]['netmask'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
interfaces.append(interface)
return interfaces
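# Illustrative usage (not part of the original module): map_network_interfaces
# only reads the keys referenced above, so a minimal hypothetical entry is
# enough to exercise it:
#
#   network_info = [(None, {'ips': [{'ip': '10.0.0.5', 'netmask': '255.255.255.0'}],
#                           'gateway': '10.0.0.1',
#                           'dns': ['8.8.8.8']})]
#   map_network_interfaces(network_info)
#   # -> [{'name': 'eth0', 'address': '10.0.0.5', 'gateway': '10.0.0.1',
#   #      'netmask': '255.255.255.0', 'dns': '8.8.8.8', 'address_v6': None,
#   #      'gateway_v6': None, 'netmask_v6': None}]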
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import sys
from distutils.dir_util import copy_tree
from pathlib import Path
import pytest
from _pytest.pytester import Testdir
from pytest import ExitCode
from tests.util import RESOURCES
pytest_plugins = "pytester"
NB_VERSION = 4
import shutil
@pytest.fixture
def fake_repo(testdir: Testdir):
copy_tree(RESOURCES.as_posix(), ".")
shutil.rmtree(".deepcov", ignore_errors=True)
return testdir
def test_when_no_xml_then_output_correctly(testdir: Testdir, fake_repo: object):
shutil.rmtree(".deepcov", ignore_errors=True)
hook_recorder = testdir.inline_run()
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert Path(".deepcov/junit.xml").exists()
assert Path(".deepcov/.coverage").exists()
def test_when_other_xml_then_output_correctly(testdir: Testdir, fake_repo: object):
shutil.rmtree(".deepcov", ignore_errors=True)
hook_recorder = testdir.inline_run("--junit-xml=junit.xml")
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert Path(".deepcov/junit.xml").exists()
assert Path("junit.xml").exists()
def test_when_trace_present_then_disables_cov(testdir: Testdir, fake_repo: object):
print(os.getcwd())
shutil.rmtree(".deepcov", ignore_errors=True)
assert not Path(".deepcov/junit.xml").exists()
sys.settrace(lambda x, y, z: None)
hook_recorder = testdir.inline_run()
assert hook_recorder.ret == ExitCode.TESTS_FAILED
assert not Path(".deepcov/junit.xml").exists()
def test_when_collect_only_then_no_output(fake_repo: Testdir):
assert not Path(".deepcov/junit.xml").exists()
hook_recorder = fake_repo.inline_run("--co")
assert hook_recorder.ret == ExitCode.OK
assert not Path(".deepcov/junit.xml").exists()
|
nilq/baby-python
|
python
|
import os
import re
import subprocess
########################################
# Globals ##############################
########################################
g_verbose = False
IGNORE_PATHS = ("/lib/modules",)
########################################
# Functions ############################
########################################
def executable_check(op):
"""Check for existence of a single binary."""
try:
proc = subprocess.Popen([op], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
return False
try:
if not proc.poll():
proc.kill()
proc.wait()
except OSError:
print("WARNING: subprocess '%s' did not terminate properly" % (op))
return True
return True
def executable_find(proposition, default_list, name):
"""Try to find given executable from proposition and default list."""
if proposition:
if not executable_check(proposition):
raise RuntimeError("could not use supplied '%s' executable '%s'" % (name, proposition))
return proposition
ret = executable_search(default_list, name)
if not ret:
raise RuntimeError("suitable '%s' executable not found" % (name))
return ret
def executable_search(op, description=None):
"""Check for existence of binary, everything within the list will be tried."""
checked = []
ret = None
if is_listing(op):
for ii in op:
if not ii in checked:
if executable_check(ii):
ret = ii
break
else:
checked += [ii]
elif isinstance(op, str):
if not op in checked:
if executable_check(op):
ret = op
checked += [op]
else:
raise RuntimeError("weird argument given to executable search: %s" % (str(op)))
if description and is_verbose():
output_message = "Looking for '%s' executable... " % (description)
if ret:
print("%s'%s'" % (output_message, ret))
else:
print("%snot found" % (output_message))
return ret
def file_is_ascii_text(op):
"""Check if given file contains nothing but ASCII7 text."""
if not os.path.isfile(op):
return False
fd = open(op, "rb")
while True:
line = fd.readline()
if 0 >= len(line):
fd.close()
return True
try:
line.decode("ascii")
except UnicodeDecodeError:
fd.close()
return False
def get_indent(op):
"""Get indentation for given level."""
ret = ""
for ii in range(op):
# Would tab be better?
ret += " "
return ret
def is_listing(op):
"""Tell if given parameter is a listing."""
return isinstance(op, (list, tuple))
def is_verbose():
"""Tell if verbose mode is on."""
return g_verbose
def labelify(op):
"""Take string as input. Convert into string that passes as label."""
return re.sub(r'[\/\.]', '_', op)
def listify(lhs, rhs=None):
"""Make a list of one or two elements if reasonable."""
if (lhs is None) and (rhs is None):
return []
if lhs is None:
if is_listing(rhs):
return rhs
return [rhs]
if rhs is None:
if is_listing(lhs):
return lhs
return [lhs]
if is_listing(lhs) and is_listing(rhs):
return lhs + rhs
if is_listing(lhs):
return lhs + [rhs]
if is_listing(rhs):
return [lhs] + rhs
return [lhs, rhs]
def locate(pth, fn, previous_paths=None):
"""Search for given file from given path downward."""
if is_listing(pth):
for ii in pth:
ret = locate(ii, fn, previous_paths)
if ret:
return ret
return None
# If path is not given or is empty, assume current path.
if not pth:
pth = "."
# Initialize previous paths on first execution.
if not previous_paths:
previous_paths = [os.path.realpath(pth)]
# Some specific directory trees would take too much time to traverse.
if pth in IGNORE_PATHS:
return None
# Recurse, expect filesystem errors.
try:
for ii in os.listdir(pth):
ret = os.path.normpath(pth + "/" + ii)
if (isinstance(fn, str) and (ii == fn)) or ((not isinstance(fn, str)) and fn.match(ii)):
if os.path.isfile(ret):
return ret
elif os.path.isdir(ret):
real_path = os.path.realpath(ret)
if not real_path in previous_paths:
ret = locate(ret, fn, previous_paths + [real_path])
if ret:
return ret
except OSError as ee: # Permission denied or the like.
if 13 == ee.errno:
return None
raise ee
return None
def run_command(lst, decode_output=True):
"""Run program identified by list of command line parameters."""
if is_verbose():
print("Executing command: %s" % (" ".join(lst)))
proc = subprocess.Popen(lst, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(proc_stdout, proc_stderr) = proc.communicate()
if decode_output and not isinstance(proc_stdout, str):
proc_stdout = proc_stdout.decode()
if decode_output and not isinstance(proc_stderr, str):
proc_stderr = proc_stderr.decode()
returncode = proc.returncode
if 0 != proc.returncode:
raise RuntimeError("command failed: %i, stderr output:\n%s" % (proc.returncode, proc_stderr))
return (proc_stdout, proc_stderr)
def set_verbose(op):
"""Set verbosity status."""
global g_verbose
g_verbose = op
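# Usage sketch (added for illustration, not part of the original module): how
# the helpers above are typically combined to locate and run a tool.
if __name__ == "__main__":
    set_verbose(True)
    # Find a C compiler; no explicit proposition is supplied here.
    compiler = executable_find(None, ["gcc", "clang", "cc"], "compiler")
    # run_command() raises RuntimeError on a non-zero exit status.
    stdout, stderr = run_command([compiler, "--version"])
    print(stdout)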
|
nilq/baby-python
|
python
|
import random
import time
import cv2
from abc import ABC, abstractmethod
from core.dataClasses.frame import Frame
class ImageProcessingInt(ABC):
"""
    Abstract base class (interface) for image processing classes.
"""
@abstractmethod
def process(self, _observer, _scheduler):
"""
        Imports a video clip and samples it.
        Sampled images are processed and encapsulated into the Frame class,
        then emitted to the manager via _observer.on_next().
:param _observer: rx.core.typing.Observer
:param _scheduler: rx.core.typing.Scheduler
:return:
"""
        raise NotImplementedError  # NotImplementedError is the correct exception; NotImplemented is a constant
class ImageProcessing(ImageProcessingInt):
def __init__(self, _path):
self._path = _path
def process(self, _observer, _scheduler):
video = cv2.VideoCapture(self._path)
        if not video.isOpened():
            _observer.on_error('FILE NOT FOUND OR WRONG CODEC')
            return  # stop here: after on_error no further items should be emitted
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
if int(major_ver) < 3:
fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else:
fps = video.get(cv2.CAP_PROP_FPS)
curr_frame = 0
while video.isOpened():
ret, frame = video.read()
if ret:
# TODO change that
f = Frame(curr_frame)
f.time_stamp_ = curr_frame # curr_frame / fps
f.img_ = frame
f.fps_ = fps
_observer.on_next(f)
else:
break
curr_frame += 1
video.release()
_observer.on_completed()
class ImageProcessingMock(ImageProcessingInt):
def __init__(self, _):
self._limit = 120
def process(self, _observer, _scheduler):
for i in range(self._limit):
time.sleep(random.uniform(0.01, 0.05))
# each time "send" processed image by evoking _observer.on_next( /analysed Frame/ ) method
_observer.on_next(Frame(i))
# when process is completed notify Manager by calling _observer.on_completed()
_observer.on_completed()
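# Minimal wiring sketch (an assumption, not part of the original module):
# process(_observer, _scheduler) has the same shape as the subscribe callback
# expected by rx.create() in RxPY 3, so a processor can be exposed as an
# observable roughly like this.
#
#   import rx
#   processor = ImageProcessingMock(None)
#   rx.create(processor.process).subscribe(
#       on_next=lambda frame: print("got frame", frame),
#       on_completed=lambda: print("processing completed"),
#   )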
|
nilq/baby-python
|
python
|
from .attckobject import AttckObject
class AttckTactic(AttckObject):
def __init__(self, attck_obj = None, **kwargs):
'''The AttckTactic class is used to gather information about all Mitre ATT&CK Framework Tactics.
To access this class directly you must first instantiate it and provide the appropriate inputs, but it is easier to use the Attck class wrapper.
Args:
attck_obj ([json]): This should be the raw Mitre ATT&CK json object. Defaults to None, but should be provided
'''
self.attck_obj = attck_obj
self.id = super(AttckTactic, self)._set_id(kwargs)
self.created_by_ref = super(AttckTactic, self)._set_attribute(kwargs, 'created_by_ref')
self.type = super(AttckTactic, self)._set_attribute(kwargs, 'type')
self.name = super(AttckTactic, self)._set_attribute(kwargs, 'name')
self.description = super(AttckTactic, self)._set_attribute(kwargs, 'description')
self.external_reference = super(AttckTactic, self)._set_reference(kwargs)
self.created = super(AttckTactic, self)._set_attribute(kwargs, 'created')
self.modified = super(AttckTactic, self)._set_attribute(kwargs, 'modified')
self.stix = super(AttckTactic, self)._set_attribute(kwargs, 'id')
self.short_name = super(AttckTactic, self)._set_attribute(kwargs, 'x_mitre_shortname')
self.wiki = super(AttckTactic, self)._set_wiki(kwargs)
@property
def techniques(self):
'''Returns all techniques as a list that are related to this tactic'''
from .technique import AttckTechnique
technique_list = []
for item in self.attck_obj['objects']:
if 'kill_chain_phases' in item:
for prop in item['kill_chain_phases']:
if str(prop['phase_name']).lower() == str(self.short_name).lower():
technique_list.append(AttckTechnique(**item))
return technique_list
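# Usage sketch (an assumption, not part of the original module): building
# tactics directly from a raw ATT&CK STIX bundle, as the constructor docstring
# describes. The file name is hypothetical; in ATT&CK STIX data, tactics carry
# the object type 'x-mitre-tactic'.
#
#   import json
#   with open("enterprise-attack.json") as fh:  # hypothetical local bundle
#       attck_json = json.load(fh)
#   tactics = [AttckTactic(attck_obj=attck_json, **obj)
#              for obj in attck_json['objects']
#              if obj.get('type') == 'x-mitre-tactic']
#   for tactic in tactics:
#       print(tactic.name, len(tactic.techniques))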
|
nilq/baby-python
|
python
|
from sys import argv
script, input_file = argv
def print_all(f):
print(f.read())
# This will reset the position of the pointer to the start
def rewind(f):
f.seek(0)
def print_a_line(line_no,f):
print(line_no, f.readline())
current_file = open(input_file)
print("First print the whole file: ")
print_all(current_file)
print("Now let's rewind, Kind of like a tape: ")
rewind(current_file)
print("Lets print three lines: ")
current_line = 1
print_a_line(current_line,current_file)
current_line += 1
print_a_line(current_line,current_file)
current_line += 1
print_a_line(current_line,current_file)
current_file.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of Advent of Code 2020
# https://github.com/scorphus/advent-of-code-2020
# Licensed under the BSD-3-Clause license:
# https://opensource.org/licenses/BSD-3-Clause
# Copyright (c) 2020, Pablo S. Blum de Aguiar <scorphus@gmail.com>
SEA_MONSTER = (
" # ",
"# ## ## ###",
" # # # # # # ",
)
def part1(lines):
tiles = dict(read(lines))
img = assemble(tiles)
return img[0][0][2] * img[0][-1][2] * img[-1][0][2] * img[-1][-1][2]
def part2(lines):
tiles = dict(read(lines))
img = assemble(tiles)
img = concat(img)
monster = [int(seg.replace(" ", "0").replace("#", "1"), 2) for seg in SEA_MONSTER]
monster_length = len(SEA_MONSTER[0])
monster_weight = "".join(SEA_MONSTER).count("#")
monsters = find_sea_monsters(img, monster, monster_length)
return "".join("".join(r) for r in img).count("1") - monsters * monster_weight
def read(lines):
tiles = "".join(lines).split("\n\n")
for title, tile in (t.rstrip().split("\n", maxsplit=1) for t in tiles):
tid = int(title.rstrip(":").split(maxsplit=1)[1])
yield tid, list(parse(tile))
def parse(tile):
tile = [list(r) for r in tile.replace("#", "1").replace(".", "0").splitlines()]
for i in range(8):
yield borders(tile), tile
tile = list(rotate(tile))
if i == 3:
tile = [r[::-1] for r in tile]
def rotate(tile):
for x in range(len(tile[0])):
yield [r[-x - 1] for r in tile]
def borders(tile):
left = int("".join(t[0] for t in tile), 2)
right = int("".join(t[-1] for t in tile), 2)
top = int("".join(tile[0]), 2)
bot = int("".join(tile[-1]), 2)
return left, right, top, bot
def assemble(tiles):
size = int(len(tiles) ** 0.5)
return assemble_dfs(tiles, [[None] * size for _ in range(size)], set())
def assemble_dfs(tiles, img, placed, row=0, col=0):
rc = row, col + 1
if col == len(img) - 1:
rc = row + 1, 0
for tid, tile in tiles.items():
if tid not in placed:
placed.add(tid)
for i, ((left, right, top, bot), ith_tile) in enumerate(tile):
if (row > 0 and img[row - 1][col][1] != top) or (
col > 0 and img[row][col - 1][0] != left
):
continue
img[row][col] = right, bot, tid, i, ith_tile
assemble_dfs(tiles, img, placed, *rc)
if len(placed) == len(tiles):
return img
placed.remove(tid)
def concat(img):
size = len(img) * (len(img[0][0][-1]) - 2)
final_img = [[] for _ in range(size)]
r = 0
for row in img:
for *_, tile in row:
for y, line in enumerate(tile[1:-1]):
final_img[r + y] += line[1:-1]
r += len(tile) - 2
return final_img
def find_sea_monsters(img, monster, monster_length):
for i in range(8):
count = 0
img_dec = [int("".join(row), 2) for row in img]
for r, rows in enumerate(zip(img_dec[:-2], img_dec[1:-1], img_dec[2:]), 1):
for s in range(len(img[0]) - monster_length):
count += all(r & m << s == m << s for r, m in zip(rows, monster))
if count:
return count
img = list(rotate(img)) # pragma: no cover (🤷🏻♂️)
if i == 3: # pragma: no cover (🤷🏻♂️)
img = [r[::-1] for r in img]
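# Worked example (added for clarity, not part of the original solution) of the
# bitmask test used in find_sea_monsters(): a monster row m "fits" an image row
# r at shift s exactly when every set bit of (m << s) is also set in r, i.e.
# r & (m << s) == (m << s).
#
#   r = 0b101110   # image row
#   m = 0b000110   # monster row
#   r & (m << 1) == (m << 1)   # True:  0b001100 is fully contained in r
#   r & (m << 3) == (m << 3)   # False: 0b110000 is not fully contained in r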
|
nilq/baby-python
|
python
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API endpoints for scope package."""
from http import HTTPStatus
from flask import jsonify
from flask_restplus import Namespace, Resource, cors
from ..models.scope_package import ScopePackage
from ..schemas.scope_package import ScopePackageSchema
from ..utils.auth import auth
from ..utils.util import cors_preflight
API = Namespace('ScopePackage', description='ScopePackage')
@cors_preflight('GET,OPTIONS')
@API.route('', methods=['GET', 'OPTIONS'])
class ScopePackageResource(Resource):
"""Resource for managing get scope packages."""
@staticmethod
@cors.crossdomain(origin='*')
@auth.require
def get():
"""Get all scope package."""
scope_packages = ScopePackage.find_all()
return jsonify({
'scopePackages': ScopePackageSchema().dump(scope_packages, many=True)
}), HTTPStatus.OK
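# Registration sketch (an assumption, not part of this module): the application
# factory would attach this namespace to its flask_restplus Api instance, e.g.:
#
#   from flask import Flask
#   from flask_restplus import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_namespace(API)  # exposes GET /, handled by ScopePackageResource.get()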
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for static analysis test of dart packages generated by dart-pkg"""
import argparse
import errno
import json
import multiprocessing
import os
import shutil
import subprocess
import sys
DART_ANALYZE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"dart_analyze.py")
# List of analysis results.
result_list = []
def collect_result(result):
result_list.append(result)
def analyze_entrypoints(dart_sdk, package_root, entrypoints):
cmd = [ "python", DART_ANALYZE ]
cmd.append("--dart-sdk")
cmd.append(dart_sdk)
cmd.append("--entrypoints")
cmd.extend(entrypoints)
cmd.append("--package-root")
cmd.append(package_root)
cmd.append("--show-sdk-warnings")
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print('Failed analyzing %s' % entrypoints)
print(e.output)
return e.returncode
return 0
def analyze_package(dart_sdk, package_root, package):
package_name = package[0]
package_entrypoints = package[1]
print('Analyzing dart-pkg %s ' % package_name)
return analyze_entrypoints(dart_sdk, package_root, package_entrypoints)
# Filter entrypoints for files that exist.
def filter_entrypoints(package_name, entrypoints):
result = []
for entrypoint in entrypoints:
if os.path.isfile(entrypoint):
result.append(entrypoint)
else:
print('WARNING: Could not find %s from %s ' % (entrypoint, package_name))
return result
def main():
parser = argparse.ArgumentParser(description='Generate a dart-pkg')
parser.add_argument('--dart-sdk',
action='store',
metavar='dart_sdk',
help='Path to the Dart SDK.')
parser.add_argument('--dart-pkg-dir',
action='store',
metavar='dart_pkg_dir',
help='Directory of dart packages',
required=True)
parser.add_argument('--package-root',
metavar='package_root',
help='packages/ directory',
required=True)
parser.add_argument('package_name',
nargs='?',
default=None)
args = parser.parse_args()
# Make sure we have a Dart SDK.
dart_sdk = args.dart_sdk
if dart_sdk is None:
dart_sdk = os.environ.get('DART_SDK')
if dart_sdk is None:
print "Pass --dart-sdk, or define the DART_SDK environment variable"
return 1
jobs = []
# Determine which packages to analyze
for filename in os.listdir(args.dart_pkg_dir):
if filename.endswith('.entries'):
if not args.package_name or (filename == args.package_name + '.entries'):
with open(os.path.join(args.dart_pkg_dir, filename)) as f:
entrypoints = f.read().splitlines()
package_name = os.path.splitext(filename)[0]
entrypoints = filter_entrypoints(package_name, entrypoints)
if entrypoints != []:
jobs.append([package_name, entrypoints])
# Create a process pool.
pool = multiprocessing.Pool(multiprocessing.cpu_count())
# Spawn jobs.
for job in jobs:
pool.apply_async(analyze_package,
args = (dart_sdk, args.package_root, job, ),
callback = collect_result)
# Wait for them to complete.
pool.close();
pool.join();
# Return the error code if any packages failed.
for result in result_list:
if result != 0:
return result
return 0
if __name__ == '__main__':
sys.exit(main())
|
nilq/baby-python
|
python
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Dict
from lean.components.config.lean_config_manager import LeanConfigManager
from lean.models.api import QCSecurityType
from lean.models.data import MarketHoursDatabaseEntry
class MarketHoursDatabase:
"""The MarketHoursDatabase class handles access to the market-hours-database.json file."""
def __init__(self, lean_config_manager: LeanConfigManager) -> None:
"""Creates a new MarketHoursDatabase instance.
:param lean_config_manager: the LeanConfigManager instance that is used when retrieving the data directory
"""
self._lean_config_manager = lean_config_manager
def get_entry(self, security_type: QCSecurityType, market: str, ticker: str) -> MarketHoursDatabaseEntry:
"""Reads the market hours database and returns the entry for the given data.
An error is raised if the market hours database does not contain an entry matching the given data.
:param security_type: the security type of the data
:param market: the market of the data
:param ticker: the ticker of the data
:return: the market hours database entry for the data
"""
entries = self._get_all_entries()
keys_to_check = [f"{security_type.value}-{market}-{ticker.upper()}", f"{security_type.value}-{market}-[*]"]
for key in keys_to_check:
if key in entries:
return entries[key]
raise ValueError(f"Could not find entry in market hours database, checked following keys: {keys_to_check}")
def _get_all_entries(self) -> Dict[str, MarketHoursDatabaseEntry]:
"""Reads the market hours database and returns all parsed entries by name.
:return: a dict containing all market hours database entries by name
"""
data_dir = self._lean_config_manager.get_data_directory()
market_hours_database_path = data_dir / "market-hours" / "market-hours-database.json"
market_hours_database = json.loads(market_hours_database_path.read_text(encoding="utf-8"))
return {key: MarketHoursDatabaseEntry(**value) for key, value in market_hours_database["entries"].items()}
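# Usage sketch (assumptions: a configured LeanConfigManager instance exists and
# QCSecurityType exposes an Equity member; neither is shown in this module).
#
#   market_hours_db = MarketHoursDatabase(lean_config_manager)
#   entry = market_hours_db.get_entry(QCSecurityType.Equity, "usa", "SPY")
#   # falls back to the "<type>-<market>-[*]" wildcard entry if SPY has no
#   # ticker-specific entry, and raises ValueError if neither key exists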
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import translation
from challenge.models import Challenge
from codingPage.models import Log, Command
from connections.models import Feedback
def convert_to_code(data):
translation = ''
for char in data:
action = Command.objects.get(id=int(char)).action
translation += action + '\n'
return translation
# Create your views here.
def index(request):
# Retrieve latest commands from db
latest_log = Log.objects.latest('sent_datetime')
id_num = latest_log.get_challenge_id()
chal = Challenge.objects.get(id=id_num)
data = Log.objects.get(challenge = chal).data
translation = convert_to_code(data)
payload = {
'title': 'Dashboard',
# Retrieve challenge data from db
'challenge': chal,
'commands': data,
'feedback': Feedback.objects.latest('sent_datetime'),
'translation': translation,
'jsfile': 'dashboard'
}
return render(request, "dashboard.html", payload)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import optparse, os, shutil, sys, tempfile, glob, shlex, vcf, pysam
from subprocess import *
import subprocess, math, random
CHUNK_SIZE = 2**20 #1mb
def createOutputHTML (outputF, sampleNames):
ofh = open(outputF, "w")
print outputF
ofh.write( '<html>\n<head>\n<title>Galaxy - CNVKIT VCF Output</title>\n</head>\n<body>\n<p/>\n<ul>\n' )
outputDir='%s_files' % ''.join(outputF.split('.')[:-1])
for sample in sampleNames:
values = sample.split(" ")
sn = values[0]
outVCF = "%s/%s.vcf" % (outputDir, sn)
print "\noutVCF: %s\n" % outVCF
# if os.path.exists(outVCF):
ofh.write('<li><a href="%s">%s</a></li>\n' % ( outVCF, sn ) )
ofh.write( '</ul>\n</body>\n</html>\n' )
ofh.close()
def open_file_from_option( filename, mode = 'rb' ):
if filename:
return open( filename, mode = mode )
return None
def run_cmd ( cmd , descriptor):
stderr_name = tempfile.NamedTemporaryFile( prefix = "cmd_stderr" ).name
proc = Popen( args=cmd, shell=True, stderr=open( stderr_name, 'wb' ) )
exit_code = proc.wait()
if exit_code:
for line in open( stderr_name ):
print >> sys.stderr, line
os.unlink( stderr_name ) #clean up
raise Exception( "Error running command: %s " % descriptor )
os.unlink( stderr_name ) #clean up
def create_additional_bam_copy(inFile, baseline_indir):
fh = open(inFile, "r")
fpath = None
for line in fh:
fpath = line.rstrip("\n")
fh.close()
name = os.path.basename(fpath)
linkname = "%s/%s.dup.bam" % (baseline_indir, name)
os.symlink(fpath, linkname)
# add link to existing file
fh = open(inFile, "a")
fh.write(linkname)
fh.close()
def lineCount(inFile):
fh = open(inFile, "r")
counter = 0
for line in fh:
counter += 1
return counter
def __main__():
parser = optparse.OptionParser()
parser.add_option( '', '--input_dir', dest='input_dir', action='store', type="string", help='Input directory path of BAM files' )
parser.add_option( '', '--input_dir_file', dest='input_dir_file', action='store', type="string", help='Input directory File containing path of BAM files' )
parser.add_option( '', '--input_files', dest='input_files', action='store', type="string", help='Input File list containing path of BAM files' )
parser.add_option( '', '--out-dir', dest='output_dir', action='store', type="string", default=None, help='If specified, the output directory for extra files.' )
parser.add_option( '', '--log', dest='swift_log', action='store', type="string", default=None, help='swift summary output.' )
parser.add_option( '', '--output', dest='outputF', action='store', type="string", default=None, help='mpileup output.' )
parser.add_option( '-p', '--pass_through', dest='pass_through_options', action='append', type="string", help='These options are passed through directly to contra, without any modification.' )
parser.add_option( '-c', '--config', dest='config_file', action="store", type="string", default=None )
parser.add_option( '-t', '--target', dest='bed_file', action="store", type="string", default=None )
parser.add_option( '-r', '--reference_path', dest='reference_path', help="reference file" )
parser.add_option( '', '--percent-bam-files-for-baseline', type="float", dest='percent', help='contra baseline group: percentage of BAMs to use' )
parser.add_option( '', '--baseline-input-bam', dest='group_by_keyword', help='contra baseline group: to group or not to group' )
parser.add_option( '', '--group-field', dest='rg_field', help='contra baseline group: RG field to use for grouping' )
parser.add_option( '', '--keyword-separator', dest='field_separator', help='contra baseline group: RG field separator' )
parser.add_option( '', '--keyword-field-order', dest='field_order', help='contra baseline group: RG field order' )
(options, args) = parser.parse_args()
swift_bin = 'swift'
sites_file = '/opt/galaxy/tools/cnvkit/cnvkit_sites.xml'
tc_file = '/opt/galaxy/tools/swift/tc.data'
swift_file = '/opt/galaxy/tools/cnvkit/cnvkit.swift'
cnvkit_bin = "/mnt/galaxyTools/tools/pymodules/python2.7/bin"
pythonpath = "/mnt/galaxyTools/tools/pymodules/python2.7/lib/python"
r_path = "/mnt/galaxyTools/tools/R/3.2.2/bin/bin"
r_libs = "/mnt/galaxyTools/tools/R/3.2.2/site-library"
r_ld = "/mnt/galaxyTools/tools/R/3.2.2/ld_libs"
if not os.path.exists(options.output_dir):
os.mkdir(options.output_dir)
output_dir = "%s/output" % options.output_dir
inputDirectory = "%s/bams" % options.output_dir
baseline_dir = "%s/baseline" % options.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
os.mkdir(inputDirectory)
os.mkdir(baseline_dir)
tmp_dir = tempfile.mkdtemp( dir=options.output_dir , prefix='tmp-TOOL-' )
inputLinkedFiles = []
basenames = []
if not options.input_files:
if options.input_dir_file:
infile = open(options.input_dir_file, 'r')
inputDirLine = infile.readline()
inputRealDirectory = inputDirLine.rstrip('\n\r')
elif options.input_dir:
inputRealDirectory = options.input_dir
inputFiles = glob.glob("%s/*.bam" % inputRealDirectory )
# create a link of the BAMs inside the inputs directory
# this is to prevent the Warning of the BAI files being older than BAMs
#for inputF in inputFiles:
# #os.symlink(inputF, "%s/%s" % (inputDirectory, os.path.basename(inputF)))
# inputLinkedFiles.append("%s/%s" % (inputDirectory, os.path.basename(inputF)))
else:
inputFiles = options.input_files.strip(" ").split(" ")
#for inputF in inputFiles:
# #os.symlink(inputF, "%s/%s" % (inputDirectory, os.path.basename(inputF)))
# inputLinkedFiles.append("%s/%s" % (inputDirectory, os.path.basename(inputF)) )
# get the input BAMs into a list
sampleNames = []
groupNames = []
baselineFiles = []
# get the sample name and bam groups if necessary
# for the bam file and store in list in the same order as the Input file list
#### Run the configManta command for each input on the head node since it's a low cost job
for inputF in inputFiles:
samfile = pysam.AlignmentFile(inputF, 'rb')
sn = samfile.header['RG'][0]['SM']
sampleNames.append("%s" % (sn))
# create the output directory for the sample
sample_outD = "%s/%s" % (output_dir, sn)
os.mkdir(sample_outD)
# create symlink for input file
os.symlink(inputF, "%s/%s.bam" % (inputDirectory, sn))
inputLinkedFiles.append("%s/%s.bam" % (inputDirectory, sn))
# get group names if needed and generate the input baseline files
base_param = ""
if options.group_by_keyword is not None and options.rg_field is not None:
if options.group_by_keyword == "all":
value = "all_bam"
else:
value = samfile.header['RG'][0][options.rg_field]
if options.field_separator is not None:
value = value.split(options.field_separator)[int(options.field_order)-1]
fh_base = open("%s/%s.txt" % (baseline_dir, value), 'a')
fh_base.write("%s\n" % inputF)
fh_base.close()
base_param = "-c %s/%s.baseline.txt" % (baseline_dir, value)
if value not in groupNames:
groupNames.append(value)
if "%s/%s.txt" % (baseline_dir, value) not in baselineFiles:
baselineFiles.append("%s/%s.txt" % (baseline_dir, value) )
# pass through options
ptc = ""
if options.pass_through_options:
ptc = ' '.join( options.pass_through_options )
# create the baseline files if needed
baselineInputs = []
if options.group_by_keyword is not None:
# Make sure there are more than 1 BAM file for each Baseline group.
# if there is only 1 input BAM, then make a link with a new name and add to file
for inFile in baselineFiles:
fileCount = lineCount(inFile)
if fileCount < 2:
create_additional_bam_copy(inFile, baseline_dir)
fileCount = 2
## create a new baseline file with only x% of inputs depending on the user spec.
file_qty_to_use = math.ceil(options.percent * fileCount)
if file_qty_to_use < 2:
file_qty_to_use = math.ceil(1 * fileCount)
with open(inFile, "rb") as source:
lines = [line.rstrip("\n") for line in source]
print "FILE QTY TO ISE: %s" % file_qty_to_use
random_choice = random.sample(lines, int(file_qty_to_use))
newBaseline_input = "%s.selected.txt" % inFile
baselineInputs= baselineInputs+random_choice
#baselineInputs.append(newBaseline_input)
with open(newBaseline_input, "wb") as sink:
sink.write("\n".join(random_choice))
print baselineInputs
baseline_bam = ' '.join(baselineInputs)
# prepare baseline command
baseline_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch %s -n --output-dir %s --output-reference %s/flat_reference.cnn %s" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, baseline_bam, baseline_dir, baseline_dir, ptc )
print "baseline_cmd: %s " % baseline_cmd
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-baseline-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-baseline-stdout-", dir=tmp_dir )
return_code = None
if return_code is None or not return_code:
proc = subprocess.Popen( args=baseline_cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
# prepare tool command
#tool_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch INPUTBAM -n --output-dir OUTPUTDIR --output-reference REFNAME %s; cnvkit.py call INPUTCNS -y -m threshold -t=-1.1,-0.4,0.3,0.7 -o OUTPUTCALLFILE; cnvkit.py export vcf INPUTCALLFILE -o OUTPUTVCF; cp INT_VCF FINAL_VCF" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, ptc)
tool_cmd = "export R_LIBS_SITE=%s:\$R_LIBS_SITE; export LD_LIBRARY_PATH=%s:\$LD_LIBRARY_PATH; export PATH=%s:%s:\$PATH;export PYTHONPATH=%s:\$PYTHONPATH; cnvkit.py batch INPUTBAM --output-dir OUTPUTDIR -r %s/flat_reference.cnn; cnvkit.py call INPUTCNS -y -m threshold -t=-1.1,-0.4,0.3,0.7 -o OUTPUTCALLFILE; cnvkit.py export vcf INPUTCALLFILE -o OUTPUTVCF; cp INT_VCF FINAL_VCF" % (r_libs, r_ld, r_path, cnvkit_bin, pythonpath, baseline_dir)
#if no stderr file is specified, we'll use our own
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-stdout-", dir=tmp_dir )
# prepare command line
swift_params = list()
swift_params.append('-outputdir=' + output_dir)
swift_params.append('-samplenames=\"%s\"' % ",".join(sampleNames))
swift_params.append('-inputfiles=\"%s\"' % ",".join(inputLinkedFiles))
## construct the swift command
swift_cmd = "%s -sites.file %s -tc.file %s %s " % (swift_bin, sites_file, tc_file, swift_file)
cmd = "%s %s %s" % (swift_cmd, ' '.join(swift_params), '-tool_cmd=\"'+tool_cmd+'\"')
print cmd
return_code = None
if return_code is None or not return_code:
proc = subprocess.Popen( args=cmd, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
for vcffile in glob.glob("%s/*.vcf" % output_dir):
shutil.copy(vcffile, options.output_dir)
# create list output files in the HTML output
try:
createOutputHTML(options.outputF, sampleNames)
except Exception, e:
sys.stdout.write("problem while generating final VCF " + str(e))
#try:
# if os.path.exists(tmp_dir):
# shutil.rmtree(tmp_dir)
# #if os.path.exists(output_dir):
# # shutil.rmtree(output_dir)
#except:
# pass
swift_log_files = glob.glob("%s/*.log" % tmp_dir)
cmdSummary = "/opt/galaxy/tools/swift/parse_swift_log.py "
for logF in swift_log_files:
if "swift.log" in logF:
continue
cmdSummary += " -l %s " % logF
cmdSummary += " -o %s" % options.swift_log
return_code = None
stderr = tempfile.NamedTemporaryFile( prefix="TOOL-stderr-", dir=tmp_dir )
stdout = tempfile.NamedTemporaryFile( prefix="TOOL-stdout-", dir=tmp_dir )
if return_code is None or not return_code:
proc = subprocess.Popen( args=cmdSummary, stdout=stdout, stderr=stderr, shell=True, cwd=tmp_dir )
return_code = proc.wait()
if return_code:
stderr_target = sys.stderr
else:
if stdout:
stderr_target = stdout
else:
stderr_target = sys.stdout
stderr.flush()
stderr.seek(0)
while True:
chunk = stderr.read( CHUNK_SIZE )
if chunk:
stderr_target.write( chunk )
else:
break
stderr.close()
if __name__=="__main__":
__main__()
|
nilq/baby-python
|
python
|
from enum import Enum
class ApiCode(Enum):
SUCCESS = 1000
JINJA2_RENDER_FAILURE = 1001
DEPLOY_START_FAILURE = 1002
DEPLOY_STOP_FAILURE = 1003
DEPLOY_STATUS_FAILURE = 1004
DEPLOY_REPLAY_FAILURE = 1005
GET_FILE_FAILURE = 1006
GET_CONTAINER_FILE_FAILURE = 1007
COMMAND_EXEC_FAILURE = 1008
DEPLOY_REPLAY_FAILURE_STILL_ACTIVE = 1009
FOLDER_ZIP_FAILURE = 1010
DOCKER_DAEMON_NOT_RUNNING = 1011
MISSING_PARAMETER_POST = 1012
GET_LOGS_FAILED = 1013
MAX_DEPLOYMENTS_REACHED = 1014
CONTAINER_UNREACHABLE = 1015
GET_DEPLOYER_NETWORK_FAILED = 1016
CONTAINER_NET_CONNECT_FAILED = 1017
CONTAINER_NET_DISCONNECT_FAILED = 1018
GET_ENV_VAR_FAILURE = 1019
EMPTY_REQUEST_BODY_PROVIDED = 1020
UPLOAD_FILE_FAILURE = 1021
HTTP_HEADER_NOT_PROVIDED = 1022
KUBERNETES_SERVER_ERROR = 1023
UNAUTHORIZED = 1024
INVALID_JSON_PAYLOAD = 1025
SET_ENV_VAR_FAILURE = 1026
FOLDER_UNZIP_FAILURE = 1027
DEPLOYMENTS_FOLDER_CLEANUP_FAILURE = 1028
GENERAL = 1100
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
from torch_geometric.nn import global_mean_pool
import torch.nn.functional as F
from models.nn_utils import chebyshev
class SCNLayer(nn.Module):
def __init__(self, feature_size, output_size, enable_bias=True, k=1):
super().__init__()
self.k = k
self.conv = nn.Linear(k * feature_size, output_size, bias=enable_bias)
def forward(self, L, x):
X = chebyshev(L, x, self.k)
return self.conv(X)
class SuperpixelSCN(nn.Module):
# This model is based on model described by Stefanie Ebli et al. in Simplicial Neural Networks
# Github here https://github.com/stefaniaebli/simplicial_neural_networks?utm_source=catalyzex.com
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=True):
super().__init__()
conv_size = 32
# Degree 0 convolutions.
self.C0_1 = SCNLayer(num_node_feats, conv_size, enable_bias=bias)
self.C0_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C0_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
# Degree 1 convolutions.
self.C1_1 = SCNLayer(num_edge_feats, conv_size, enable_bias=bias)
self.C1_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C1_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
# Degree 2 convolutions.
self.C2_1 = SCNLayer(num_triangle_feats, conv_size, enable_bias=bias)
self.C2_2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.C2_3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer0 = nn.Linear(3 * conv_size, output_size)
self.layer1 = nn.Linear(3 * conv_size, output_size)
self.layer2 = nn.Linear(3 * conv_size, output_size)
self.combined_layer = nn.Linear(output_size * 3, output_size)
def forward(self, simplicialComplex):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
batch = simplicialComplex.unpack_batch()
out0_1 = nn.LeakyReLU()(self.C0_1(L0, X0))
out0_2 = nn.LeakyReLU()(self.C0_2(L0, out0_1))
out0_3 = nn.LeakyReLU()(self.C0_3(L0, out0_2))
out1_1 = nn.LeakyReLU()(self.C1_1(L1, X1))
out1_2 = nn.LeakyReLU()(self.C1_2(L1, out1_1))
out1_3 = nn.LeakyReLU()(self.C1_3(L1, out1_2))
out2_1 = nn.LeakyReLU()(self.C2_1(L2, X2))
out2_2 = nn.LeakyReLU()(self.C2_2(L2, out2_1))
out2_3 = nn.LeakyReLU()(self.C2_3(L2, out2_2))
out0 = self.layer0(torch.cat([out0_1, out0_2, out0_3], dim=1))
out1 = self.layer1(torch.cat([out1_1, out1_2, out1_3], dim=1))
out2 = self.layer2(torch.cat([out2_1, out2_2, out2_3], dim=1))
out0 = global_mean_pool(out0, batch[0])
out1 = global_mean_pool(out1, batch[1])
out2 = global_mean_pool(out2, batch[2])
out = torch.cat([out0, out1, out2], dim=1)
return F.softmax(self.combined_layer(out), dim=1)
class PRELU(nn.PReLU):
def forward(self, input):
return F.prelu(input, self.weight)
class PlanetoidSCN(nn.Module):
def __init__(self, num_node_feats, output_size, bias=True):
super().__init__()
f_size = output_size
self.layer_n = SCNLayer(num_node_feats, f_size, bias)
self.layer_e = SCNLayer(num_node_feats, f_size, bias)
self.layer_t = SCNLayer(num_node_feats, f_size, bias)
self.f = PRELU()
self.tri_layer = nn.Linear(output_size, output_size)
def forward(self, simplicialComplex, B1, B2):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
X0[X0 != 0] = 1
X1_in, X1_out = X0[X1[:, 0]], X0[X1[:, 1]]
X1 = torch.logical_and(X1_in, X1_out).float()
X2_i, X2_j, X2_k = X0[X2[:, 0]], X0[X2[:, 1]], X0[X2[:, 2]]
X2 = torch.logical_and(X2_i, torch.logical_and(X2_j, X2_k)).float()
X0 = self.f(self.layer_n(L0, X0))
X1 = self.f(self.layer_e(L1, X1))
X2 = self.f(self.layer_t(L2, X2))
X0 = (X0 + torch.sparse.mm(B1, X1) + torch.sparse.mm(B1, self.tri_layer(torch.sparse.mm(B2, X2)))) / 3
return X0
class FlowSCN(nn.Module):
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=False, f=nn.LeakyReLU()):
super().__init__()
conv_size = 32
self.layer1 = SCNLayer(num_edge_feats, conv_size, enable_bias=bias)
self.layer2 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer3 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.layer4 = SCNLayer(conv_size, conv_size, enable_bias=bias)
self.mlp1 = nn.Linear(conv_size, conv_size)
self.mlp2 = nn.Linear(conv_size, output_size)
self.f = f
def forward(self, simplicialComplex):
_, X1, _ = simplicialComplex.unpack_features()
_, L1, _ = simplicialComplex.unpack_laplacians()
batch = simplicialComplex.unpack_batch()
X1 = self.f(self.layer1(L1, X1))
X1 = self.f(self.layer2(L1, X1))
X1 = self.f(self.layer3(L1, X1))
X1 = self.f(self.layer4(L1, X1))
X1 = global_mean_pool(X1.abs(), batch[1])
X1 = F.relu(self.mlp1(X1))
return F.softmax(self.mlp2(X1), dim=1)
class TestSCN(nn.Module):
def __init__(self, num_node_feats, num_edge_feats, num_triangle_feats, output_size, bias=False, f=nn.Identity()):
super().__init__()
self.layer1 = SCNLayer(num_node_feats, output_size, enable_bias=bias)
self.layer2 = SCNLayer(num_edge_feats, output_size, enable_bias=bias)
self.layer3 = SCNLayer(num_triangle_feats, output_size, enable_bias=bias)
self.f = f
def forward(self, simplicialComplex):
X0, X1, X2 = simplicialComplex.unpack_features()
L0, L1, L2 = simplicialComplex.unpack_laplacians()
X0 = self.f(self.layer1(L0, X0))
X1 = self.f(self.layer2(L1, X1))
X2 = self.f(self.layer3(L2, X2))
return X0, X1, X2
|
nilq/baby-python
|
python
|
import os
from base64 import urlsafe_b64decode, urlsafe_b64encode
from pathlib import Path
from github import Github
gh_access_token = ''
def get_project_root():
"""Returns project root folder."""
return Path(__file__).parent
def gh_session():
"""Returns a PyGithub session."""
if gh_access_token:
return Github(gh_access_token)
return Github()
def reverse_enum(f, start=None):
start = start or 0
fl = list(f)
for i in reversed(range(len(fl))):
yield i + start, fl[i]
def norm_path(file_path):
path = file_path.replace(os.sep, '/')
if path.startswith(('a/', 'b/')):
return path[2:]
if path.startswith('/'):
return path[1:]
return path
def b64_encode(string):
encoded = urlsafe_b64encode(bytes(string, 'utf-8'))
return encoded.decode('utf-8').rstrip('=')
def b64_decode(b64_hash):
padding = 4 - (len(b64_hash) % 4)
string = b64_hash + ('=' * padding)
return urlsafe_b64decode(string).decode('utf-8')
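# Usage sketch (added for illustration): the helpers above round-trip cleanly
# and normalise diff-style paths.
if __name__ == "__main__":
    encoded = b64_encode("src/main.py")            # unpadded, URL-safe base64
    assert b64_decode(encoded) == "src/main.py"
    assert norm_path("a/src/main.py") == "src/main.py"
    assert norm_path("/src/main.py") == "src/main.py"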
|
nilq/baby-python
|
python
|
""" Test DB.py in isolation.
Call with twisted.trial eg.
trial test_DB.py
In the context of evoke, and by extension in evoke apps,
only init_db and execute are used by external functions.
The aim of the current exercise is to maintain the current interface
whilst rationalising code and introducing an asyncrous interface
"""
from twisted.trial import unittest
from twisted.internet.defer import inlineCallbacks # , returnValue
from .DB import DB, init_db, execute, aexecute
connect_config = ('', 'root', 'Elgar104')
class InitDBTestCase(unittest.TestCase):
"test init_db including legacy connect_config formats"
def setUp(self):
""
def tearDown(self):
""
def testTupleConnect(self):
"connect using a tuple"
init_db(connect_config)
def testSpaceDelimitedConnect(self):
"connect using a space delimited string"
init_db(' '.join(connect_config))
def testCommaDelimitedConnect(self):
"connect using a space delimited string"
init_db(','.join(connect_config))
def testDBConnect(self):
"connect by passing connect_config direct to DB object"
DB(connect_config)
class ExecuteTestCase(unittest.TestCase):
""" Not full coverage but the least that would
possibly let us know that we have an execute
connection that can handle substitutions.
"""
def setUp(self):
""
init_db(connect_config)
# #### TODO create test table
def tearDown(self):
""
# #### TODO drop test table
def testNothing(self):
"do nothing except setUp and tearDown"
def testCalculation(self):
"run a simple calculation on the database"
sql = 'select 2+3 as res'
l = execute(sql)
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
def testSubstitution(self):
"run simple calculation on db with parameter substitution"
l = execute('select %s+%s as res', args=(2, 3))
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
def testInsert(self):
"an INSERT should return its uid"
# assumes existence of test.test table
sql = "insert into test.test(thing) values ('')"
n = execute(sql)
self.assertEqual(type(n), int)
class AsyncExecuteTestCase(unittest.TestCase):
""" Not full coverage but the least that would
possibly let us know that we have an execute
connection that can handle substitutions.
        Asynchronous edition
"""
def setUp(self):
""
init_db(connect_config)
def tearDown(self):
""
def testNothing(self):
"do nothing except setUp and tearDown"
def testCalculation(self):
"run a simple calculation on the database"
sql = 'select 2+3 as res'
d = aexecute(sql)
# test the results using a callback function
def testCallback(l):
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
d.addCallback(testCallback)
# always return a deferred
return d
@inlineCallbacks
def testInlineCallbackCalculation(self):
"""run a simple calculation on the database
Same test as last time, but recast as
an inlineCallback rather than with a
callback function.
"""
sql = 'select 2+3 as res'
l = yield aexecute(sql)
# test the results
self.assertEqual(len(l), 1)
self.assertEqual(l[0]['res'], 5)
@inlineCallbacks
def testInsert(self):
"an INSERT should return its uid"
# assumes existence of test.test table
sql = "insert into test.test(thing) values ('')"
n = yield aexecute(sql)
self.assertEqual(type(n), int)
|
nilq/baby-python
|
python
|
import platform
from global_graph_parser.G_grammarListener import G_grammarListener
from graphviz import Digraph
class MyListener(G_grammarListener):
"""
    There are two methods (enter and exit) for each rule of the grammar.
    When the walker encounters the node for rule Choice, for example,
    it triggers enterChoice(). After the walker visits all children
    of the Choice node, it triggers exitChoice().
    NOTE: For our purpose, we can't do anything useful in the enter methods
    (except for enterInit), so we leave them empty. We need to walk down
    the parse tree and store information on a stack before we are able
    to build the graph.
"""
def __init__(self, graph_name):
self.graph_name = graph_name
        self.stack = []  # on this stack we save information gathered while walking between nodes
self.count = 0 # needed to number each node
self.g = Digraph(graph_name, filename=graph_name, format='pdf') # initializes graph
        # work out the location where we'll save the rendered graph
path_file = open("path.txt", 'r')
paths = []
for line in path_file:
paths.append(line.strip())
path_file.close()
# Windows
if platform.system() == "Windows":
pass
# macOs
if platform.system() == "Darwin":
self.path = paths[0]
# Linux
if platform.system() == "Linux":
self.path = paths[1]
# Enter a parse tree produced by a Init production.
def enterInit(self, ctx):
self.g.node(str(self.count), label="", shape="circle") # start node
self.count += 1
# Exit a parse tree produced by a Init production.
def exitInit(self, ctx):
node = self.stack.pop()
self.g.edge("0", str(node[1]))
self.g.edge(str(node[2]), str(self.count))
self.g.node(str(self.count), label="", shape="doublecircle") # end node
self.g.view(self.graph_name, self.path, False) # draw the graph
# Enter a parse tree produced by a interaction production.
def enterInteraction(self, ctx):
pass
# Exit a parse tree produced by a interaction production.
def exitInteraction(self, ctx):
node = ['interaction', self.count, self.count]
self.stack.append(node)
self.count += 1
self.g.node(str(node[1]), label=ctx.getText(), shape="rect")
# Enter a parse tree produced by a Sequential production.
def enterSequential(self, ctx):
pass
# Exit a parse tree produced by a Sequential production.
def exitSequential(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
node = ['sequential', left[1], right[2]]
self.stack.append(node)
self.g.edge(str(left[2]), str(right[1]))
# Enter a parse tree produced by a Choice production.
def enterChoice(self, ctx):
pass
# Exit a parse tree produced by a Choice production.
def exitChoice(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
if left[0] == 'choice':
            # if we have 3 or more nested choices, we merge them so they
            # share the same start and end node. If this behaviour is not
            # required, just comment out this if-statement.
node = ['choice', left[1], left[2]]
self.stack.append(node)
self.g.edge(str(left[1]), str(right[1]))
self.g.edge(str(right[2]), str(left[2]))
else:
choice_node_start = str(self.count)
self.count += 1
choice_node_end = str(self.count)
self.count += 1
node = ['choice', choice_node_start, choice_node_end]
self.stack.append(node)
self.g.node(choice_node_start, label="+", shape="diamond")
self.g.edge(choice_node_start, str(left[1]))
self.g.edge(choice_node_start, str(right[1]))
self.g.node(choice_node_end, label="+", shape="diamond")
self.g.edge(str(left[2]), choice_node_end)
self.g.edge(str(right[2]), choice_node_end)
# Enter a parse tree produced by a fork production.
def enterFork(self, ctx):
pass
# Exit a parse tree produced by a fork production.
def exitFork(self, ctx):
right = self.stack.pop()
left = self.stack.pop()
if left[0] == 'fork':
            # if we have 3 or more nested forks, we merge them so they
            # share the same start and end node. If this behaviour is not
            # required, just comment out this if-statement.
node = ['fork', left[1], left[2]]
self.stack.append(node)
self.g.edge(str(left[1]), str(right[1]))
self.g.edge(str(right[2]), str(left[2]))
else:
fork_node_start = str(self.count)
self.count += 1
fork_node_end = str(self.count)
self.count += 1
node = ['fork', fork_node_start, fork_node_end]
self.stack.append(node)
self.g.node(fork_node_start, label="|", shape="square")
self.g.edge(fork_node_start, str(left[1]))
self.g.edge(fork_node_start, str(right[1]))
self.g.node(fork_node_end, label="|", shape="square")
self.g.edge(str(left[2]), fork_node_end)
self.g.edge(str(right[2]), fork_node_end)
# Enter a parse tree produced by a loop production.
def enterLoop(self, ctx):
pass
# Exit a parse tree produced by a loop production.
def exitLoop(self, ctx):
node_to_loop = self.stack.pop()
loop_node_start = str(self.count)
self.count += 1
loop_node_end = str(self.count)
self.count += 1
node = ['loop', loop_node_start, loop_node_end]
self.stack.append(node)
self.g.node(loop_node_start, label="+", shape="diamond")
self.g.edge(loop_node_start, str(node_to_loop[1]))
self.g.node(loop_node_end, label="+", shape="diamond")
self.g.edge(str(node_to_loop[2]), loop_node_end)
self.g.edge(loop_node_end, loop_node_start)
# Enter a parse tree produced by a Parenthesis production.
def enterParenthesis(self, ctx):
pass
# Exit a parse tree produced by a Parenthesis production.
def exitParenthesis(self, ctx):
pass
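# Walking sketch (assumptions: the generated lexer/parser are named
# G_grammarLexer / G_grammarParser and the start rule is `init`, following
# ANTLR's naming conventions; none of this is shown in the original module).
#
#   from antlr4 import FileStream, CommonTokenStream, ParseTreeWalker
#   from global_graph_parser.G_grammarLexer import G_grammarLexer
#   from global_graph_parser.G_grammarParser import G_grammarParser
#
#   stream = FileStream("protocol.gg")         # hypothetical input file
#   tokens = CommonTokenStream(G_grammarLexer(stream))
#   tree = G_grammarParser(tokens).init()      # start rule assumed to be `init`
#   ParseTreeWalker().walk(MyListener("my_graph"), tree)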
|
nilq/baby-python
|
python
|
def async_volunteer_group_adder(volunteer_group,volunteers):
through_model = volunteers[0].groups.through
dups = through_model.objects.all().filter(volunteergroup_id=volunteer_group.pk).values_list('volunteer_id', flat=True)
data = []
for pk in volunteers.values_list('pk', flat=True):
if pk not in dups:
data.append(through_model(volunteer_id=pk, volunteergroup_id=volunteer_group.pk) )
through_model.objects.bulk_create(data)
print("Completed Added group {}".format(volunteer_group))
|
nilq/baby-python
|
python
|
from django import forms
from . models import Comment
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
exclude = ["post"]
labels = {
"user_name": "Your Name",
"user_email": "Your E-Mail",
"text": "Your Comment",
}
|
nilq/baby-python
|
python
|
from django.core.management.base import BaseCommand, CommandError
import pandas as pd
import os
import time
import json
from sdap.studies.models import ExpressionStudy, ExpressionData, Database, JbrowseData, Species
from django.core.files import File
from sdap.users.models import User
from django.conf import settings
def sync_study(row):
studies = ExpressionStudy.objects.filter(pmid=row['PubMedID'], technology=parse_values(row['technology']), species=parse_values(row['species']))
if studies.count() == 0:
return False
if studies.count() > 1 :
print("Error : More than one study matching " + row['PubMedID'])
return True
dict = {
"article": row['article'],
"status": "PUBLIC",
"ome": parse_values(row['ome']),
"experimental_design": parse_values(row['experimental_design']),
"topics": parse_values(row['biological_topics']),
"tissues": parse_values(row['tissue_or_cell']),
"sex": parse_values(row['sex']),
"dev_stage":parse_values(row['developmental_stage']),
"age": parse_values(row['age']),
"antibody": parse_values(row['antibody']),
"mutant": parse_values(row['mutant']),
"cell_sorted": parse_values(row['cell_sorted']),
"keywords": parse_values(row['keywords']),
"samples_count": len(parse_values(row['sample_ID'])),
}
need_update = False
for key, value in dict.items():
if not getattr(studies[0], key) == value:
need_update = True
if need_update:
print("Updating " + row['PubMedID'])
studies.update(**dict)
jbrowse_id = row['RGVID']
for study in studies:
if study.jbrowse_data:
study.jbrowse_data.all().delete()
if "JBrowseStatus" in row and row["JBrowseStatus"] == "yes":
species = Species.objects.get(name=row['species'])
data = JbrowseData(jbrowse_id=jbrowse_id, species=species, study=study)
data.save()
return True
def process_study(row, database, superuser, study_folder):
species_dict = {
'Homo sapiens': '9606',
'Mus musculus': '10090',
'Rattus norvegicus': '10116',
'Bos taurus': '9913',
'Macaca mulatta': '9544',
'Sus scrofa': '9823',
'Gallus gallus': '9031',
'Danio rerio': '7955',
'Canis lupus familiaris': '9615',
}
if Species.objects.filter(name=row['species']).count() == 0:
print(row['species'] + " not in registered species : skipping")
return
if sync_study(row):
return
dict = {
"article": row['article'],
"pmid": row['PubMedID'],
"status": "PUBLIC",
"ome": parse_values(row['ome']),
"technology": parse_values(row['technology']),
"species": parse_values(row['species']),
"experimental_design": parse_values(row['experimental_design']),
"topics": parse_values(row['biological_topics']),
"tissues": parse_values(row['tissue_or_cell']),
"sex": parse_values(row['sex']),
"dev_stage":parse_values(row['developmental_stage']),
"age": parse_values(row['age']),
"antibody": parse_values(row['antibody']),
"mutant": parse_values(row['mutant']),
"cell_sorted": parse_values(row['cell_sorted']),
"keywords": parse_values(row['keywords']),
"samples_count": len(parse_values(row['sample_ID'])),
"database": database,
"created_by": superuser
}
print("Creating study " + dict["article"])
study = ExpressionStudy(**dict)
study.save()
jbrowse_id = row['RGVID']
if "JBrowseStatus" in row and row["JBrowseStatus"] == "yes":
species = Species.objects.get(name=row['species'])
data = JbrowseData(jbrowse_id=jbrowse_id, species=species, study=study)
data.save()
for path in parse_values(row['path']):
print("Creating file with path: " + path)
if not os.path.exists("/app/loading_data/" + path):
print("Missing file : skipping")
continue
data_dict = {
"name": "data_genelevel",
"species": Species.objects.get(name=row['species']),
"technology": row['technology'],
"study": study,
"created_by": superuser
}
if path.split('/')[-1] != "data_genelevel.txt":
data_dict['name'] = path.split('/')[-1].replace(".txt","").replace("_", " ")
expression_file = ExpressionData(**data_dict)
expression_file.file.save(path.split('/')[-1], File(open(study_folder + path)), save=False)
expression_file.save()
def populate_data(metadata_file, studies_folder):
if not os.path.exists(metadata_file):
print("Error : no metadata.csv file found.")
return
dbs = Database.objects.all()
database = dbs[0]
users = User.objects.filter(username='admin')
superuser = users[0]
df = pd.read_csv(metadata_file, sep=",")
df = df.fillna('')
for index, row in df.iterrows():
process_study(row, database, superuser, studies_folder)
def parse_values(values):
value_list = []
if values:
value_list = values.split("|")
return value_list
class Command(BaseCommand):
help = 'Add new studies to the DB'
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('metadata_file', type=str, help='Path to metadata file', default="/rgv_data/studies/metadata.csv")
parser.add_argument('studies_folder', type=str, help='Folder containing the studies folder', default="/rgv_data/")
def handle(self, *args, **options):
folder = options['studies_folder']
if not folder.endswith('/'):
folder += "/"
populate_data(options['metadata_file'], folder)
|
nilq/baby-python
|
python
|
"""
The module for loading a dataset from a given file and parsing it into a dictionary.
The provided functions return a list of samples paired with their labels.
"""
import random
def load_file(file_name, test_file):
    print(file_name)
    sample_folder = "../samples"
    file_path = sample_folder + "/" + file_name
    # read the whole file and close the handle instead of leaving it open
    with open(file_path, "r") as text_file:
        lines_of_file = text_file.read().split('\n')
    loaded_lines = dict()
    if not test_file:
        for line in lines_of_file:
            elements = line.split(":::")
            if elements[0] == '':
                continue
            if len(elements) >= 3:
                elements_structured = (elements[1].encode('utf8'), elements[2].encode('utf8'), file_name[-6:-4].upper())
            else:
                elements_structured = elements[1].encode('utf8')
            loaded_lines[elements[0]] = elements_structured
    else:
        for line in lines_of_file:
            elements = line.split(" ")
            if elements[0] == '' or len(elements) < 3:
                continue
            loaded_lines[elements[0]] = ' '.join(elements[2:-1])
    return loaded_lines
def load_files_formatted(truth_files, tweet_files):
if truth_files != None:
is_test = False;
truths = []
for t in truth_files:
truths = truths + [load_file(t, is_test)];
else:
is_test = True;
#truths = load_file("truth_es.txt", False);
tweets = []
for t in tweet_files:
tweets = tweets + [load_file(t, is_test)];
formatted_data = [];
for i in range(len(tweets)):
for p in tweets[i]:
if is_test:
label = (b'AGAINST', b'MALE', 'ES');
else:
label = truths[i][p];
formatted_data.append((tweets[i][p], label, p)); #(tweet, labels, ID)
return formatted_data
def load_files_formatted_split(truth_files, tweet_files, train_prop = 0.9, test_prop = 0.1):
data = load_files_formatted(truth_files, tweet_files);
#random shuffle
random.seed(0xF12ABC12123); #random, but constant seed
random.shuffle(data)
#get proportion and return it
upper_bound = int(round(len(data) * train_prop))
return (data[:upper_bound], data[upper_bound:])
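# Minimal usage sketch (the file names below are hypothetical examples under
# ../samples; the real corpus files are not part of this module):
if __name__ == "__main__":
    train_data, dev_data = load_files_formatted_split(
        truth_files=["truth_es.txt"],
        tweet_files=["tweets_es.txt"],
    )
    print(len(train_data), len(dev_data))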
|
nilq/baby-python
|
python
|
# Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: isolated.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the isolated.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJy9Ws1vG0l2H4qWTLc1njbHMyNrvmo1q7E4Q5H6sMdrabJYSqTk9lCklh/22oOB3WoWpd5pdn'
'PZTcmar0OQwy4SLJB7guzmuPf8AbkGOQcIcsklQP6BADkF+b1X1c2WLdvJBogg21WvXr2vevU+'
'qm385fvGFTcMPDuSvdJwFERB/nIyP16d//AwCA49Wealg3G/HLkDGUb2YKiwF36fMWbbkR2FLf'
'mrMVbym8Zl2h1Gjwl1LiMyS5fX5kuKTimmU+rEdFqGQidA/rZhjCTYjyM38OemsPfK2jullECl'
'VrLcSqHm3zUuDe1D+Th0v5FzWeybbuUI0MZ8oWm8rkUMh4EfyvxPjdmBtMPxSA6kH4UQMstCph'
'nxjrZvD8OjIGqdwV/41ylNMV7P3zEMaDP6Hyt9ibFZ5znj4njoBXYvZIWzrXia/8h4XQ8fH5zC'
'RqxYtjWrgVsEy79nXOoFJ74icIERJoD8DeONZKKJTDPOlQSsyHxqXHUCP7JdP3w8UkcZzs0wqh'
'kv6CMO8wUjgT32guDr8TCcu8i4b8TwugLn541cQi7HKMmc1vq268Gs4dwltRbPP9k1jMlRA/Pt'
'Vq3drHc7VrPxuNto79e2rR2rVjVfyxvGzJ7V6HZqZiafMy7cbXZb5lT+opGtVh6a2bW6kbP0se'
'Z/ZkzzseWvP3/UWrn5c7wg9puF1+7989tGzrxgvmbumhnj7zK5WZ7k1/6QEdvB8HTkHh5FYm1l'
'9SeicyRFvbttico4OgpGYUlUPE8wQiigoRwdg4UhuqEUQV9ER24owmA8cqRwgp4UmB4Gx3Lky5'
'4Y+z05AooUlaHtEGHXkRCoKO7LUQgDibXSigEEOxKO7YsDKfoBNgnX5111a7vWaNdE34U3GkYu'
'N2XOQOxrGGZzr5k5jAvGbG4GYwPjK9CNZ7kM5jnTNL7k2RTWZ80p8+b8F4INAz2i8ciH4Ji5Ye'
'Q6IRiPlDJ0E8TI9g+lgFgkhj54KDS5uRDHjImD3SwEu5yCTAEya86lIFnMPzJXjM9z0xDnKsR9'
'28zMl8TEW0QUiHEoWZI2zAxblWJxWQDwnKXd4HfVnDbfMK7wjNTLQ733wW06kYggcynIFCDvmu'
'8lezLmm8B4PcHIaEguBZkC5LI5m+yZMq8B43KCQVQIMpOCEM4l00j2ZM23gGEkGFnsIch0CjIF'
'SM68ZHyauwBdrsM2C7DN+0L7NlskvgzKJGyKC6zodWy9ZvxLhqdki/dBvTj/DxmhAjR5JJ2iZ2'
'OMMDg6JUPD1cKhdNy+KxN3i80srL7wg2iCUDRiWj3Zt8cejgQkaIszHo1Ak52G932nEL9jrkEA'
'mg6tq38M5u/KUJy4uFSQYeCGoesfCptlPBX2SDLrU4kbEQyGY9KYjKeVy82weu+mIBlA3jNvpC'
'BZQD4xPzVuaEjG/BB73pt/Jzbo856sEUGeUGdTENr8uvlOCpIFZB4iVDVkyvwR9rw9f1Ps2U/d'
'wXgg/PHgABcf4eFMtkm0jwJ9A1O8p3LTTOdiCpIBJGdeTUGygFwz32JPyZiLFAC0p6hQRzzP9Z'
'QMo+ew9a8zPCVPWQK/T+b/IsNi6jgQxsLCKUYSgQxEnaNR4AdecOg6tieCEaIaDq1EsRLnpSPX'
'UeDJ0Ih9yQO5TRqN5I1Q2HAczz4FGlxZxk4Rm4MO3RlJe3LWLF7uAguYhswAcpmvdQzJAHLdXE'
'xBsoAsIS5uAIKLoGL+fPGZo4jznQhVpEmFQm0xMn+RLVbgGRmsBHk+nb9OtEbRJPxrolr4KS1q'
'KXHUKS1qCY76cQqSBaRgfmLsa0jGXMWea/M/E43Eg1y/5x67vTEMr+sHnS9wOzwZxbdXB+7+yB'
'7IlBwZeNVq4lVT2qNXodYbKUgWkLz5pvFXGQ2aMm9h0/z8n2dSknAZopif0LmHY8eRYdgfe96p'
'lu0F0sBRADm2vTGd/ZCyqB+pqERqYB5iJ9MvCrefACkpnCA0ICv0UkrRVbl1Riky3S0+qwkkC8'
'iceT0xbta8zdf0BcZN6iqlIdd5rzBuFnLcPiMHBffbyZVVEOJLV/ZvYuNeMDc5HP32pcbV/FWo'
'joX7/7HvBei1eUYvCjab0OudFCQLCIXBLzRk2vwp9nwwv5nSqpxcs7h4FCiqHSXPy0w7rcldTE'
'EygORwoBNIFpD3kPcHGjJjVqgOmP9KdIIIZzqJw24kB6GgmpfKsiFnVGUnXH0dshwq0Yo0sw8P'
'R/LQpuxgCNsZBSGCFBJWa387TAk5AyErZ4ScgZAVCDmXgmQBocLjcw25aFax5y1EpGeFTGyEQ0'
'egdcYUxsXP99spnhfBs3qG50XwrHK1N4FkAXkTVcGyhuTMHeb5fups/MBfXltZSbimmOTAZOcM'
'kxyY7JxhkgOTHWJyMMN90rrxX2XjVW1n/o1nWquFTeNS0l1RSxVKuEwv5FYMLZWe5q8Z077tB6'
'rVmm6pydYPxptw5Wfbta0rCcV9Au1nHn166EZH44MSsMuHyI7+4UTEYXQ6lOFE0v/MZP52Kru7'
'v/WHqQ92FeX9uBF8ID3vCx+XsUN77v1ZybhkfoAM85sMSu9/nEVb8QG3FX8/K3iPE3hia9zvI4'
'uKZaGoIR327MiGn0Vy5BxxoQ13HNgoj9K9yMpP9AZh+U5JvKANOYqiYbhRLveQqr1gCEaxOUjX'
'oRZi+UAJUUasaMke8tzIPVBFt412g6puuL1uYwhy4Po2CkWSCyHjBNZD2ud/gzHkHAQ9FIYO35'
'Ai529wHrgRRSzwREil0HWk+4d+4HnBCZV5dJgubeKkDzoy2oBI9PPJM4KF5KPpxmowDqmOpHDC'
'VO0DtFlY0hYzqG5EIi+qoEL1B1FIc/R7z4gDfo5n4+BHpRcJAWYpW8RCQMfeGIIlchgTQf5Pci'
'T1Uy9wxvRoYceHVIb9A6qnBDwF9ZfthRNT8wFh0RBp6ROlGtLlnUTYR6AlgdK+5QeTtVCFy9Dg'
'EolJoQsGU6reOFNQeJJ+L6DKkMNoMAgQp5RN4J1UHcI5RR8LRtwg96MTchPtQXFr4WCXS441It'
'/xlRdxXuKsdtdqi3Zzp/Og0qoJjPdbzftWtVYVWw+xWBPbzf2HLWv3bkfcbdartVZbVBpVQBud'
'lrXV7TRbbUMsVNrYusArlcZDUfvFfqvWbotmS1h7+3UL1EC+VWl0rFq7KKzGdr1btRq7RQEKot'
'HsGGjD96wO8DrNIrN9fp9o7oi9Wmv7LqaVLatudR4ywx2r0yBmO82WISpiv9LqWNvdeqUl9rut'
'/SZae9KsarW36xVrr1ZFC9UAT1G7X2t0RPtupV4/q6ghmg8atRZJn1ZTbNUgZWWrXiNWrGfVat'
'W2O6TQZLQN40HAOvo5fonBCPaoQZ1K62FRE23Xft4FFhZFtbJX2YV2S6+yCg5mu9uq7ZHUMEW7'
'u9XuWJ1upyZ2m80qG7tda923tmvtTVFvttlg3XYNglQrnQqzBg2YC+sYb3XbFhvOanRqrVZ3n9'
'6PCjjlB7AMpKxgb5Ut3GyQtuQrtWbrIZElO/AJFMWDuzXAW2RUtlaFzNCG1bY7aTQwhBGh0kRP'
'0ajt1q3dWmO7RstNIvPAatcKODCrTQgWM4YPgGmXtaaDglyGGqdct8jnKawdUanet0hyjQ0PaF'
'vaXdhs23e1zfVrj0A2mcMoZy5gtEnA3KIeE/QjjD5k6Id6TNAfY7TF0Mt6TFBqFosMzegxQT/G'
'qMzQeEyjG/QEwVBDjwm6hNGPGPpjPf6Pd/nF4gedAuf/7V14eZJ907WoLYYBMh6HN3pgQuUth4'
'gi1Aci4Nj+qYJ/E/gcVbwAFZBBZRCQ7FEREYeyQI9eCmyEprHap+sDjqmoIZ1J5ogXKDFQscDz'
'dNvPL32KkEspFcUWwhYFN/Szchg4R/T+1e1si4Hb8zmyUzl4z/bHlA5Wi2L1zu2VYhywEf48OU'
'TkF7uoHQMEaD+RXpwcuSAnnyLGcYOBQH0O1oHtfI0o2eMC8FQCAmNQIKTUP3D9caSb5c9WEv28'
'gBrxurSHE5WBsRAOsF/2FhB6VSL2A+EBy9BoIrIPPH6t9KUku1JFzCXJkHKsSuxj9TYjvly7uY'
'ywjVNxfZAFDaL+1dLLiw86zzJjFlSPEj8rginlBrGysrK6zL+dlZUN/n1Eqt/Bz/Lq2vL6amdt'
'fePWHfyW7sQ/j0pi69Sgg0RywnlDwEiryNRRrUg4Cz36x+0Uv1BA6WOJpp3PVyUn8WVrZ9sQ6+'
'vrdya6nJyclFwZ9UvB6LA86jv0hzBK0dOoQJUb9Wb0/EEFs/hI1J7a1IiHmOihWN1AIcdvV6m7'
'wAxx4a1fiCdkmaXCk5IufSZISRG6qVYm5XMoo8f6gJd4e6NbrxcK5+Kxvy+tYHEi09qrZDqUEV'
'EJ+j37NCUbdEVSZwboL0V0rDmeQf84Oi4KFmjzj1XpuBQd0+xlGikklCAOappVeM8ZDddfqOED'
'119fE092ZdQ+DdEI0nIl3HE92Tl7EDtWvdZBHhb9SIvxoj0f96NY0i5y1Gc3IbDzdSj+RCwtLS'
'lIoR+Veid3ETiqcBraVRCffy7W1wriO8Fr9eAkXortVi4jgEJeNPwhk6TLAlVTMSwsJQgqSq1+'
'9vw1SqjR9tXPbt68eXv9s5VJ2NAPcV3ffRpTQTB7lkrpjzvMJaU/TKGMUubDop8CuqCUOK/wYK'
'JD5orpLKbosAMUzjjAzRc6wD372BZP1EGW9JM1oey5HurzlANQNEWkJSiO8sUbXuLm2JdAS748'
'2Rq7HiripQIp1tYW0iyUYQqKFv0QTkPpjlhMmmtMpbpWmy1QKB0QZZZlYoNbL7RB+qWectb+KS'
'pxP1b8XPGXCs+eDa7D9sQaWKcIeK+NImzPHg4RFAGwfAVRPW2Rk2PKTmicySnPpHMVUHUmNTgs'
'/6+ismJFGd2mZF5UZBSUmC18S9n0++VvB2hpjvAvgtb3nW8ppX2/8S0yK/6G837/ZelbKiLIkb'
'//6tGCgbxNb3FqNxGyvRP7FMI/jd/UVIbsU27suYdomyjV4xw0p6JgVihzFTPMiVuRUxCz5Gz9'
'jRwFy0O711PNVXQSxNSk7RypSiWubqgq0hetqOsKSm+HAT1rUfKMty65JVnSwNXza6ACBCP+wV'
'BRVpwWHqFqGPf7CA30PurY6mVSsh9wfba0gLJoobB5BmqoMupXYxd1RwlhTD0LKWcIuWN1oagI'
'YQmvF5uSnh6oxlqyw4QbPVUaJEaBDsCnHtFXif55VyJD2mdYDW00pgmbA8jFlQ7lfcdBhSYO0E'
'YzT9qrWupYh/A5OagYDPp93EsuYnZQJUl114piYW1l9TbFzNVbnZXVjfWVjdVbpZVVmE95N0Iv'
'zZOgO7RDVKOMyfzR2CfV5K0ifX6+XdIXCAGr7YzcIe4PGTxdwNiCkoYIDn4pnUjVPm4YO7vyR3'
'Z/GImqyh7uUxRY7WabL9lS4ZyyrTQIvkGcsfl2SX+52y73AicsP5AH5Yko5Zbs4zr4jizvesGB'
'7T1usgxhmQQqp5gU+GXnKIAbWHGkKfI9VyKJJ1RH8TfDePAkVkh/TdLaSoh/nopQ6gmiRp+3pj'
'SC1KWhimyky1rZcw9GMDAXo6WjaOB9xKN4b4FfJIzEkWMm9D4hbiw+XF4cLC/2Oot3Nxb3Nhbb'
'pcX+oxsot92v5YkbqsdiMtDklODPitq9oGezs94IIStME6f6HRWsenqK7PPVknrH03Hul9jJ0t'
'Ngmatoe+jygcRQVVsrWcvP02Y9YwaLa1X8GqJAhgwO+P3M1nqi3IfUQ74gaJoOpS9Htrpq8TUL'
'k3d6HWWRbpJvzz/wt47fTb49/2mGP2D9NiNak94v9n9wILdnO+MQnXT9YZxfgIg9enCj/xfxko'
'bBOK9jeAS5HQ/Ockwt1NXk4/C0kvFiCpQhEH0Rm4CyBKJPYv8e65Yxf0378vP/lBGNwF/2+QPB'
'sTzbdtpxe0Ud1/ltZ0NvTDox/m4Tqje8CTF+aQwj+k5+ZB9Lfq5PeDJpvTH+/yPcyeKMqIOM2+'
'xn7ae7q6L+Y5xrI/pm+OuzNsoo9XPm6ylQlkCmeTX+APDfdjTQIQ==')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
IsolatedServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'isolated.proto']['descriptor'],
'service_descriptor': _INDEX[u'isolated.proto']['services'][u'Isolated'],
}
|
nilq/baby-python
|
python
|
from aiozk import protocol
from aiozk.exc import TransactionFailed
class Transaction:
"""Transaction request builder"""
def __init__(self, client):
"""
:param client: Client instance
:type client: aiozk.ZKClient
"""
self.client = client
self.request = protocol.TransactionRequest()
def check_version(self, path, version):
"""
Check znode version
:param str path: Znode path
:param int version: Znode version
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.CheckVersionRequest(path=path, version=version)
)
def create(
self, path, data=None, acl=None,
ephemeral=False, sequential=False, container=False
):
"""
Create new znode
:param str path: Znode path
:param data: Data to store in node
:type data: str or bytes
:param acl: List of ACLs
:type acl: [aiozk.ACL]
:param bool ephemeral: Ephemeral node type
:param bool sequential: Sequential node type
:param bool container: Container node type
:return: None
:raises ValueError: when *containers* feature is not supported by
Zookeeper server (< 3.5.1)
"""
if container and not self.client.features.containers:
raise ValueError("Cannot create container, feature unavailable.")
path = self.client.normalize_path(path)
acl = acl or self.client.default_acl
if self.client.features.create_with_stat:
request_class = protocol.Create2Request
else:
request_class = protocol.CreateRequest
request = request_class(path=path, data=data, acl=acl)
request.set_flags(ephemeral, sequential, container)
self.request.add(request)
def set_data(self, path, data, version=-1):
"""
Set data to znode
:param str path: Znode path
:param data: Data to store in node
:type data: str or bytes
:param int version: Current version of node
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.SetDataRequest(path=path, data=data, version=version)
)
def delete(self, path, version=-1):
"""
Delete znode
:param str path: Znode path
:param int version: Current version of node
:return: None
"""
path = self.client.normalize_path(path)
self.request.add(
protocol.DeleteRequest(path=path, version=version)
)
async def commit(self):
"""
Send all calls in transaction request and return results
:return: Transaction results
:rtype: aiozk.transaction.Result
:raises ValueError: On no operations to commit
"""
if not self.request.requests:
raise ValueError("No operations to commit.")
response = await self.client.send(self.request)
pairs = zip(self.request.requests, response.responses)
result = Result()
for request, reply in pairs:
if isinstance(reply, protocol.CheckVersionResponse):
result.checked.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.CreateResponse):
result.created.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.SetDataResponse):
result.updated.add(self.client.denormalize_path(request.path))
elif isinstance(reply, protocol.DeleteResponse):
result.deleted.add(self.client.denormalize_path(request.path))
return result
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exception, tb):
# propagate error by returning None
if exception:
return
result = await self.commit()
if not result:
raise TransactionFailed
class Result:
"""
Transaction result aggregator
Contains attributes:
- **checked** Set with results of ``check_version()`` methods
- **created** Set with results of ``create()`` methods
- **updated** Set with results of ``set_data()`` methods
- **deleted** Set with results of ``delete()`` methods
"""
def __init__(self):
self.checked = set()
self.created = set()
self.updated = set()
self.deleted = set()
def __bool__(self):
return sum([
len(self.checked),
len(self.created),
len(self.updated),
len(self.deleted),
]) > 0
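# Minimal usage sketch (assumes an already-connected aiozk.ZKClient instance;
# the znode paths and payloads below are hypothetical):
async def _transaction_example(client):
    async with Transaction(client) as txn:
        txn.check_version('/config', version=3)
        txn.create('/config/feature-x', data=b'on')
        txn.set_data('/config', data=b'updated', version=3)
    # __aexit__ commits the batched operations and raises TransactionFailed
    # when none of them succeeded.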
|
nilq/baby-python
|
python
|
from src.util.config import (
load_config,
save_config,
)
from argparse import ArgumentParser
from typing import Dict
from src.util.default_root import DEFAULT_ROOT_PATH
def make_parser(parser: ArgumentParser):
parser.add_argument(
"--set-node-introducer",
help="Set the introducer for node - IP:Port",
type=str,
nargs="?",
default="",
)
parser.add_argument(
"--set-fullnode-port",
help="Set the port to use for the fullnode",
type=str,
nargs="?",
default="",
)
parser.add_argument(
"--set-log-level",
help="Set the instance log level, Can be CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET",
type=str,
nargs="?",
default="",
)
parser.set_defaults(function=configure)
def help_message():
print("usage: chia configure -flag")
print(
"""
chia configure [arguments] [inputs]
--set-node-introducer [IP:Port] (Set the introducer for node),
--set-fullnode-port [Port] (Set the full node default port, useful for beta testing),
--set-log-level [LogLevel] (Can be CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)
"""
)
def configure(args, parser):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if args.set_node_introducer:
try:
if args.set_node_introducer.index(":"):
host, port = (
":".join(args.set_node_introducer.split(":")[:-1]),
args.set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated.")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if args.set_fullnode_port:
config["full_node"]["port"] = int(args.set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(args.set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(args.set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(args.set_fullnode_port)
config["introducer"]["port"] = int(args.set_fullnode_port)
print("Default full node port updated.")
change_made = True
if args.set_log_level:
        valid_levels = ("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET")
        if args.set_log_level in valid_levels:
config["logging"]["log_level"] = args.set_log_level
print("Logging level updated. Check CHIA_ROOT/log/debug.log")
change_made = True
if change_made:
print("Restart any running chia services for changes to take effect.")
save_config(args.root_path, "config.yaml", config)
else:
help_message()
return 0
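# Sketch of how the parser above can be driven directly (illustrative only;
# the real chia CLI wires this up elsewhere and also sets args.root_path,
# which configure() needs when saving):
if __name__ == "__main__":
    _parser = ArgumentParser(description="chia configure")
    make_parser(_parser)
    _args = _parser.parse_args(["--set-log-level", "INFO"])
    _args.root_path = DEFAULT_ROOT_PATH  # assumption: normally provided by the CLI entry point
    _args.function(_args, _parser)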
|
nilq/baby-python
|
python
|
from gym.spaces import Box, MultiDiscrete
import logging
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.ddpg.ddpg_tf_policy import ComputeTDErrorMixin, \
TargetNetworkMixin
from ray.rllib.agents.dqn.dqn_tf_policy import postprocess_nstep_and_prio
from ray.rllib.agents.sac.sac_ensemble_tf_model import SACEnsembleTFModel
from ray.rllib.agents.sac.sac_torch_model import SACTorchModel
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Beta, MultiCategorical, \
DiagGaussian, MultiSquashedGaussian
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_ensemble_policy_template import build_tf_ensemble_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import get_variable, try_import_tf, \
try_import_tfp
tf1, tf, tfv = try_import_tf()
tfp = try_import_tfp()
logger = logging.getLogger(__name__)
def build_sac_ensemble_model(policy, obs_space, action_space, config):
# 2 cases:
# 1) with separate state-preprocessor (before obs+action concat).
# 2) no separate state-preprocessor: concat obs+actions right away.
if config["use_state_preprocessor"]:
num_outputs = 256 # Flatten last Conv2D to this many nodes.
else:
num_outputs = 0
# No state preprocessor: fcnet_hiddens should be empty.
if config["model"]["fcnet_hiddens"]:
logger.warning(
"When not using a state-preprocessor with SAC, `fcnet_hiddens`"
" will be set to an empty list! Any hidden layer sizes are "
"defined via `policy_model.fcnet_hiddens` and "
"`Q_model.fcnet_hiddens`.")
config["model"]["fcnet_hiddens"] = []
# Force-ignore any additionally provided hidden layer sizes.
# Everything should be configured using SAC's "Q_model" and "policy_model"
# settings.
policy.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=SACTorchModel
if config["framework"] == "torch" else SACEnsembleTFModel,
name="sac_model",
actor_hidden_activation=config["policy_model"]["fcnet_activation"],
actor_hiddens=config["policy_model"]["fcnet_hiddens"],
critic_hidden_activation=config["Q_model"]["fcnet_activation"],
critic_hiddens=config["Q_model"]["fcnet_hiddens"],
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
alpha=config["alpha"],
target_entropy=config["target_entropy"],
ensemble_size=config["partial_ensemble_size"],
timescale=config["timescale"],
shared_actor=config["shared_actor"],)
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework=config["framework"],
model_interface=SACTorchModel
if config["framework"] == "torch" else SACEnsembleTFModel,
name="target_sac_model",
actor_hidden_activation=config["policy_model"]["fcnet_activation"],
actor_hiddens=config["policy_model"]["fcnet_hiddens"],
critic_hidden_activation=config["Q_model"]["fcnet_activation"],
critic_hiddens=config["Q_model"]["fcnet_hiddens"],
twin_q=config["twin_q"],
initial_alpha=config["initial_alpha"],
alpha=config["alpha"],
target_entropy=config["target_entropy"],
ensemble_size=config["partial_ensemble_size"],
timescale=config["timescale"],
shared_actor=config["shared_actor"],)
return policy.model
def slice_loss(x, idx, mode='slice'):
xshape = x.shape.as_list()
if mode == 'slice':
begin = [0] * len(xshape)
size = [-1] * len(xshape)
begin[1] = idx
size[1] = 1
return tf.reduce_mean(tf.slice(x, begin, size))
    elif mode == 'mask':
        # The one-hot depth is the ensemble axis (axis 1) of the loss tensor;
        # the original code referenced an undefined global `E` here.
        ensemble_size = xshape[1]
        onehot_vec = tf.expand_dims(tf.one_hot(idx, depth=ensemble_size), 0)
        if len(xshape) == 3:
            onehot_vec = tf.expand_dims(onehot_vec, -1)
        masked_x = tf.multiply(x, onehot_vec)
        return tf.reduce_mean(tf.reduce_sum(masked_x, axis=1))
else:
raise ValueError
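# Worked example (illustrative values only): for a per-member loss tensor x of
# shape [batch, E] with
#   x = [[1., 2.], [3., 4.]]          # batch=2, E=2
# slice_loss(x, 1, mode='slice') averages only column 1, giving 3.0, i.e. the
# loss of the ensemble member currently being trained; mode='mask' reaches the
# same value by zeroing the other members with a one-hot mask before summing.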
def postprocess_trajectory(policy,
sample_batch,
other_agent_batches=None,
episode=None):
if 'infos' not in sample_batch:
sample_batch['members'] = np.ones_like(sample_batch[SampleBatch.REWARDS]).astype(np.int32)
print("infos field not in sample_batch !!!")
else:
sample_batch['members'] = np.array([info['active_member'] for info in sample_batch['infos']], dtype=np.int32)
return postprocess_nstep_and_prio(policy, sample_batch)
def get_dist_class(config, action_space):
if isinstance(action_space, MultiDiscrete):
return MultiCategorical
else:
if config["normalize_actions"]:
return MultiSquashedGaussian if \
not config["_use_beta_distribution"] else Beta
else:
return DiagGaussian
def get_distribution_inputs_and_class(policy,
model,
obs_batch,
*,
explore=True,
**kwargs):
# Get base-model output.
model_out, state_out = model({
"obs": obs_batch,
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get action model output from base-model output.
distribution_inputs = model.get_policy_output(model_out)
action_dist_class = get_dist_class(policy.config, policy.action_space)
return distribution_inputs, action_dist_class, state_out
def sac_actor_critic_loss(policy, model, _, train_batch):
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
model_out_t, _ = model({
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
model_out_tp1, _ = model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
target_model_out_tp1, _ = policy.target_model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Broadcast the action of active ensemble member to all other ensemble members,
# because this action is the one responsible for the transition.
E = policy.config['partial_ensemble_size']
dones = tf.tile(tf.expand_dims(train_batch[SampleBatch.DONES], 1), [1, E])
rewards = tf.tile(tf.expand_dims(train_batch[SampleBatch.REWARDS], 1), [1, E])
member_mat = tf.one_hot(train_batch['members'], depth=E)
# Discrete case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
log_pis_t = tf.nn.log_softmax(model.get_policy_output(model_out_t), -1)
policy_t = tf.math.exp(log_pis_t)
log_pis_tp1 = tf.nn.log_softmax(
model.get_policy_output(model_out_tp1), -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(model_out_t)
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1)
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
######################### CROSS ENTROPY #########################
# old:
# q_tp1 -= model.alpha * log_pis_tp1
# new:
if policy.config["asymmetric"]:
print(f"============ Asymmetric Ensemble===========")
cum_log_pis_tp1 = tf.math.cumsum(log_pis_tp1, axis=1)
arange = tf.range(start=1, limit=E + 1, delta=1, dtype=tf.float32, name='range')
inv_arange = tf.math.divide(1., arange)
w = tf.tile(tf.expand_dims(inv_arange, 1), [1, q_t.shape.as_list()[-1]])
ens_log_pis_tp1 = w * cum_log_pis_tp1
q_tp1 -= model.alpha * ens_log_pis_tp1
else:
beta = 1 / E * tf.ones((E, E), dtype=tf.float32)
q_tp1 -= model.alpha * tf.matmul(beta, log_pis_tp1)
#################################################################
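        # Illustration: with E == 3 the symmetric branch uses
        #   beta = [[1/3, 1/3, 1/3],
        #           [1/3, 1/3, 1/3],
        #           [1/3, 1/3, 1/3]]
        # so every member's target Q is reduced by alpha times the mean
        # next-step log-prob over all members, while the asymmetric branch
        # weights member k by 1/k over the cumulative log-probs of members 1..k.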
# Actually selected Q-values (from the actions batch).
actions_mat = tf.cast(member_mat, train_batch[SampleBatch.ACTIONS].dtype) * train_batch[SampleBatch.ACTIONS]
actions = tf.reduce_sum(actions_mat, axis=1)
bcast_actions = tf.tile(tf.expand_dims(actions, 1), [1, E])
one_hot = tf.one_hot(bcast_actions, depth=q_t.shape.as_list()[-1])
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = \
(1.0 - tf.cast(dones, tf.float32)) * \
q_tp1_best
# Continuous actions case.
else:
# Sample single actions from distribution.
action_dist_class = get_dist_class(policy.config, policy.action_space)
action_dist_t = action_dist_class(
model.get_policy_output(model_out_t), policy.model)
policy_t = action_dist_t.sample() if not deterministic else \
action_dist_t.deterministic_sample()
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t, reduce=False), -1)
action_dist_tp1 = action_dist_class(
model.get_policy_output(model_out_tp1), policy.model)
policy_tp1 = action_dist_tp1.sample() if not deterministic else \
action_dist_tp1.deterministic_sample()
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1, reduce=False), -1)
# Q-values for the actually selected actions.
ex_member_mat = tf.tile(tf.expand_dims(member_mat, 2), [1, 1, policy_t.shape.as_list()[-1]])
active_actions = tf.reduce_sum(ex_member_mat * train_batch[SampleBatch.ACTIONS], axis=1, keepdims=True)
active_action_mat = tf.tile(active_actions, [1, E, 1])
q_t = model.get_q_values(model_out_t, active_action_mat)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t, active_action_mat)
# Q-values for current policy in given current state.
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy = model.get_twin_q_values(
model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0)
# target q network evaluation
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
######################### CROSS ENTROPY #########################
# old:
# q_tp1 -= model.alpha * log_pis_tp1
# new:
if policy.config["asymmetric"]:
print(f"============ Asymmetric Ensemble===========")
arange = tf.range(start=1, limit=E + 1, delta=1, dtype=tf.float32, name='range')
inv_arange = tf.math.divide(1., arange)
w = tf.tile(tf.expand_dims(inv_arange, 1), [1, q_t.shape.as_list()[-1]])
cum_log_pis_tp1 = tf.math.cumsum(log_pis_tp1, axis=1)
ens_log_pis_tp1 = w * cum_log_pis_tp1
q_tp1 -= model.alpha * ens_log_pis_tp1
else:
beta = 1 / E * tf.ones((E, E), dtype=tf.float32)
q_tp1 -= model.alpha * tf.matmul(beta, log_pis_tp1)
#################################################################
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - tf.cast(dones, tf.float32)) * q_tp1_best
assert policy.config["n_step"] == 1, "TODO(hartikainen) n_step > 1"
# compute RHS of bellman equation
q_t_selected_target = tf.stop_gradient(
rewards + policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)
# Compute the TD-error (potentially clipped).
base_td_error = tf.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
crnt_trnng_idx = tf.cast(policy.model.flrd_cntr, tf.int32)
critic_ens_loss = 0.5 * tf.square(q_t_selected_target - q_t_selected)
slice_mode = 'slice'
critic_loss = [slice_loss(critic_ens_loss, crnt_trnng_idx, mode=slice_mode)]
if policy.config["twin_q"]:
twin_c_ens_loss = 0.5 * tf.square(q_t_selected_target - twin_q_t_selected)
critic_loss.append(slice_loss(twin_c_ens_loss, crnt_trnng_idx, mode=slice_mode))
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
# ens_pis_t = tf.reduce_mean(policy_t, axis=1)
# ens_log_pis_t = tf.log(ens_pis_t)
# alpha_loss = tf.reduce_mean(
# mask *
# tf.reduce_sum(
# tf.multiply(
# tf.stop_gradient(ens_pis_t), -model.log_alpha *
# tf.stop_gradient(ens_log_pis_t + model.target_entropy)),
# axis=-1))
actor_ens_loss = tf.reduce_sum(tf.multiply(policy_t, model.alpha * log_pis_t - tf.stop_gradient(q_t)), axis=-1)
actor_loss = slice_loss(actor_ens_loss, crnt_trnng_idx, mode=slice_mode)
else:
# alpha_loss = -tf.reduce_mean(
# model.log_alpha *
# tf.stop_gradient(log_pis_t + model.target_entropy))
actor_ens_loss = model.alpha * log_pis_t - q_t_det_policy
actor_loss = slice_loss(actor_ens_loss, crnt_trnng_idx, slice_mode)
# save for stats function
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
# policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# in a custom apply op we handle the losses separately, but return them
# combined in one loss for now
return actor_loss + tf.add_n(critic_loss) # + alpha_loss
def gradients_fn(policy, optimizer, loss):
# Eager: Use GradientTape.
if policy.config["framework"] in ["tf2", "tfe"]:
tape = optimizer.tape
pol_weights = policy.model.policy_variables()
actor_grads_and_vars = list(zip(tape.gradient(
policy.actor_loss, pol_weights), pol_weights))
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
grads_1 = tape.gradient(
policy.critic_loss[0], q_weights[:half_cutoff])
grads_2 = tape.gradient(
policy.critic_loss[1], q_weights[half_cutoff:])
critic_grads_and_vars = \
list(zip(grads_1, q_weights[:half_cutoff])) + \
list(zip(grads_2, q_weights[half_cutoff:]))
else:
critic_grads_and_vars = list(zip(tape.gradient(
policy.critic_loss[0], q_weights), q_weights))
# alpha_vars = [policy.model.log_alpha]
# alpha_grads_and_vars = list(zip(tape.gradient(
# policy.alpha_loss, alpha_vars), alpha_vars))
# Tf1.x: Use optimizer.compute_gradients()
else:
actor_grads_and_vars = policy._actor_optimizer.compute_gradients(
policy.actor_loss, var_list=policy.model.policy_variables())
q_weights = policy.model.q_variables()
if policy.config["twin_q"]:
half_cutoff = len(q_weights) // 2
base_q_optimizer, twin_q_optimizer = policy._critic_optimizer
critic_grads_and_vars = base_q_optimizer.compute_gradients(
policy.critic_loss[0], var_list=q_weights[:half_cutoff]
) + twin_q_optimizer.compute_gradients(
policy.critic_loss[1], var_list=q_weights[half_cutoff:])
else:
critic_grads_and_vars = policy._critic_optimizer[
0].compute_gradients(
policy.critic_loss[0], var_list=q_weights)
# alpha_grads_and_vars = policy._alpha_optimizer.compute_gradients(
# policy.alpha_loss, var_list=[policy.model.log_alpha])
# Clip if necessary.
if policy.config["grad_clip"]:
clip_func = tf.clip_by_norm
else:
clip_func = tf.identity
# Save grads and vars for later use in `build_apply_op`.
policy._actor_grads_and_vars = [
(clip_func(g), v) for (g, v) in actor_grads_and_vars if g is not None]
policy._critic_grads_and_vars = [
(clip_func(g), v) for (g, v) in critic_grads_and_vars if g is not None]
# policy._alpha_grads_and_vars = [
# (clip_func(g), v) for (g, v) in alpha_grads_and_vars if g is not None]
    grads_and_vars = (
        policy._actor_grads_and_vars
        + policy._critic_grads_and_vars
        # + policy._alpha_grads_and_vars
    )
return grads_and_vars
def apply_gradients(policy, optimizer, grads_and_vars):
actor_apply_ops = policy._actor_optimizer.apply_gradients(
policy._actor_grads_and_vars)
cgrads = policy._critic_grads_and_vars
half_cutoff = len(cgrads) // 2
if policy.config["twin_q"]:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads[:half_cutoff]),
policy._critic_optimizer[1].apply_gradients(cgrads[half_cutoff:])
]
else:
critic_apply_ops = [
policy._critic_optimizer[0].apply_gradients(cgrads)
]
if policy.config["framework"] in ["tf2", "tfe"]:
# policy._alpha_optimizer.apply_gradients(policy._alpha_grads_and_vars)
assert False, 'implement counter apply op'
return
else:
# alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
# policy._alpha_grads_and_vars,
# global_step=tf1.train.get_or_create_global_step())
return tf.group([actor_apply_ops, policy.model.cntr_inc_op] + critic_apply_ops)
# # alpha_apply_ops = policy._alpha_optimizer.apply_gradients(
# # policy._alpha_grads_and_vars,
# # global_step=tf.train.get_or_create_global_step())
# apply_ops = [actor_apply_ops] + critic_apply_ops
# apply_ops += [policy.model.cntr_inc_op]
#
# # if policy.config["alpha"] is None:
# # apply_ops += [alpha_apply_ops]
# return tf.group(apply_ops)
def stats(policy, train_batch):
return {
# "policy_t": policy.policy_t,
# "td_error": policy.td_error,
"mean_td_error": tf.reduce_mean(policy.td_error),
"actor_loss": tf.reduce_mean(policy.actor_loss),
"critic_loss": tf.reduce_mean(policy.critic_loss),
# "alpha_loss": tf.reduce_mean(policy.alpha_loss),
"alpha_value": tf.reduce_mean(policy.alpha_value),
"target_entropy": tf.constant(policy.target_entropy),
"mean_q": tf.reduce_mean(policy.q_t),
"max_q": tf.reduce_max(policy.q_t),
"min_q": tf.reduce_min(policy.q_t),
"counter": policy.model.cntr,
"floored_counter": policy.model.flrd_cntr,
}
class ActorCriticOptimizerMixin:
def __init__(self, config):
# - Create global step for counting the number of update operations.
# - Use separate optimizers for actor & critic.
if config["framework"] in ["tf2", "tfe"]:
self.global_step = get_variable(0, tf_name="global_step")
self._actor_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["actor_learning_rate"])
self._critic_optimizer = [
tf.keras.optimizers.Adam(
learning_rate=config["optimization"][
"critic_learning_rate"])
]
if config["twin_q"]:
self._critic_optimizer.append(
tf.keras.optimizers.Adam(
learning_rate=config["optimization"][
"critic_learning_rate"]))
self._alpha_optimizer = tf.keras.optimizers.Adam(
learning_rate=config["optimization"]["entropy_learning_rate"])
else:
self.global_step = tf1.train.get_or_create_global_step()
self._actor_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["actor_learning_rate"])
self._critic_optimizer = [
tf1.train.AdamOptimizer(
learning_rate=config["optimization"][
"critic_learning_rate"])
]
if config["twin_q"]:
self._critic_optimizer.append(
tf1.train.AdamOptimizer(
learning_rate=config["optimization"][
"critic_learning_rate"]))
self._alpha_optimizer = tf1.train.AdamOptimizer(
learning_rate=config["optimization"]["entropy_learning_rate"])
def setup_early_mixins(policy, obs_space, action_space, config):
ActorCriticOptimizerMixin.__init__(policy, config)
def setup_mid_mixins(policy, obs_space, action_space, config):
ComputeTDErrorMixin.__init__(policy, sac_actor_critic_loss)
def setup_late_mixins(policy, obs_space, action_space, config):
TargetNetworkMixin.__init__(policy, config)
def validate_spaces(pid, observation_space, action_space, config):
if not isinstance(action_space, (Box, MultiDiscrete)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"SAC.".format(action_space, pid))
if isinstance(action_space, Box) and len(action_space.shape) != 2:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, pid, action_space.shape) +
"Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API.")
SACEnsembleTFPolicy = build_tf_ensemble_policy(
name="SACTFPolicy",
get_default_config=lambda: ray.rllib.agents.sac.sac_ensemble.DEFAULT_CONFIG,
make_model=build_sac_ensemble_model,
postprocess_fn=postprocess_trajectory,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=sac_actor_critic_loss,
stats_fn=stats,
gradients_fn=gradients_fn,
apply_gradients_fn=apply_gradients,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
mixins=[
TargetNetworkMixin, ActorCriticOptimizerMixin, ComputeTDErrorMixin
],
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
obs_include_prev_action_reward=False)
|
nilq/baby-python
|
python
|
import unittest
from test.testutil import set_default_576_324_videos_for_testing, \
set_default_576_324_videos_for_testing_scaled, \
set_default_cambi_video_for_testing_b, \
set_default_cambi_video_for_testing_10b
from vmaf.core.cambi_feature_extractor import CambiFeatureExtractor, CambiFullReferenceFeatureExtractor
from vmaf.core.cambi_quality_runner import CambiQualityRunner, CambiFullReferenceQualityRunner
from vmaf.tools.misc import MyTestCase
class CambiFeatureExtractorTest(MyTestCase):
def tearDown(self):
if hasattr(self, 'fextractor'):
self.fextractor.remove_results()
super().tearDown()
def test_run_cambi_fextractor(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.6892500624999999, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.0014658541666666667, places=4)
def test_run_cambi_fextractor_scaled(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing_scaled()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.9204257916666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.004251791666666667, places=4)
def test_run_cambi_fextractor_scaled_b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_b()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
1.218365, places=4)
def test_run_cambi_fextractor_10b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_10b()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.01451, places=4)
def test_run_cambi_fextractor_max_log_contrast(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'max_log_contrast': 4}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.9182153958333333, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.0024499791666667, places=4)
self.fextractor = CambiFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'max_log_contrast': 0}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_feature_cambi_score'],
0.015840666666666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_feature_cambi_score'],
0.000671125, places=4)
def test_run_cambi_fextractor_full_reference(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFullReferenceFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.689250, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_source_score'],
0.00146585416, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_full_reference_score'],
0.687784, places=4)
def test_run_cambi_fextractor_full_reference_scaled_ref(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = CambiFullReferenceFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={'src_width': 480, 'src_height': 270}
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.689250, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_source_score'],
0.0042517916, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_full_reference_score'],
0.6849983125, places=4)
class CambiQualityRunnerTest(MyTestCase):
def test_run_cambi_runner(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.6892500624999999, places=4)
self.assertAlmostEqual(results[1]['Cambi_score'],
0.0014658541666666667, places=4)
def test_run_cambi_runner_scale(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing_scaled()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.9204257916666666, places=4)
self.assertAlmostEqual(results[1]['Cambi_score'],
0.004251791666666667, places=4)
def test_run_cambi_runner_scale_b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_b()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
1.218365, places=4)
def test_run_cambi_runner_10b(self):
_, _, asset, asset_original = set_default_cambi_video_for_testing_10b()
self.qrunner = CambiQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
optional_dict={}
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_score'],
0.01451, places=4)
def test_run_cambi_runner_fullref(self):
_, _, asset, asset_original = set_default_576_324_videos_for_testing()
self.qrunner = CambiFullReferenceQualityRunner(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
)
self.qrunner.run(parallelize=True)
results = self.qrunner.results
# score: arithmetic mean score over all frames
self.assertAlmostEqual(results[0]['Cambi_FR_score'],
0.687784125, places=4)
self.assertAlmostEqual(results[0]['Cambi_FR_feature_cambi_score'],
0.68925006249, places=4)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nilq/baby-python
|
python
|
import os.path
import sys
base_path = os.path.abspath('..')
aragog_app = os.path.join(base_path, 'app')
sys.path.insert(0, aragog_app)
|
nilq/baby-python
|
python
|
import os
import sys
from argparse import ArgumentParser
from typing import List, Tuple
from requests import HTTPError # noqa
from adventofcode.config import ROOT_DIR
from adventofcode.scripts.get_inputs import get_input
from adventofcode.util.console import console
from adventofcode.util.input_helpers import get_input_for_day
def add_day():
year, day = _parse_args(sys.argv[1:])
console.print(f'Creating solution day file for year {year} day {day}')
# Solution file
module_path = os.path.join(ROOT_DIR, f'year_{year}')
solution_file = os.path.join(module_path, f'day_{day:02}_{year}.py')
create_module_dir(module_path)
write_solution_template(solution_file, year, day)
# Test file
test_module_path = os.path.abspath(os.path.join(ROOT_DIR, '../../tests', f'year_{year}'))
test_file = os.path.join(test_module_path, f'test_day_{day:02}_{year}.py')
create_module_dir(test_module_path)
write_test_template(test_file, year, day)
# Empty test input
test_input_module_path = os.path.abspath(os.path.join(ROOT_DIR, '../../tests', f'year_{year}', f'inputs'))
test_file_input = os.path.join(test_input_module_path, f'day_{day:02}.txt')
create_dir(test_input_module_path)
write_template(test_file_input, "")
verify_input_exists(year, day)
def write_solution_template(path: str, year: int, day: int) -> None:
if not os.path.exists(path):
write_template(path, read_solution_template(year, day))
console.print(f'[green]Wrote template to {path}')
else:
console.print(f'[yellow]Did not write template for year {year} day {day}, the file already exists.')
def write_test_template(path: str, year: int, day: int) -> None:
if not os.path.exists(path):
write_template(path, read_test_template(year, day))
console.print(f'[green]Wrote test template to {path}')
else:
console.print(f'[yellow]Did not write test template for year {year} day {day}, the file already exists.')
def create_module_dir(path: str) -> None:
create_dir(path)
if not os.path.exists(init_file := os.path.join(path, '__init__.py')):
        # Open in write mode so the empty __init__.py is actually created
        # (read mode would raise FileNotFoundError for a missing file).
        with open(init_file, 'w'):
            pass
def create_dir(path: str) -> None:
if not os.path.exists(path):
os.mkdir(path)
def verify_input_exists(year: int, day: int) -> None:
try:
_ = get_input_for_day(year, day)
console.print(f'Input data already exists for year {year} day {day}, skipping download')
return
except FileNotFoundError:
try:
get_input(year, day)
console.print(f'Automatically downloaded input data for year {year} day {day}')
return
except HTTPError as e:
console.print(f'[red]Could not retrieve input data for year {year} day {day} automatically: {e}')
except FileNotFoundError:
console.print(f'[red]Could not retrieve input data for year {year} day {day}: .session not set correctly')
raise ValueError('unknown exception occurred in verify_input_exists')
def _read_solution_template(template_path: str, year: str, day: str) -> str:
with open(template_path) as f:
template = f.read()
template = template.replace('{year}', year)
template = template.replace('{day}', day)
return template
def _read_test_template(template_path: str, year: str, day: str, file_day: str) -> str:
with open(template_path) as f:
template = f.read()
template = template.replace('{year}', year)
template = template.replace('{day}', day)
template = template.replace('{file_day}', file_day)
return template
def read_solution_template(year: int, day: int) -> str:
template_path = os.path.join(ROOT_DIR, 'scripts/templates/day_template.txt')
return _read_solution_template(template_path, str(year), str(day))
def read_test_template(year: int, day: int) -> str:
template_path = os.path.join(ROOT_DIR, 'scripts/templates/test_template.txt')
return _read_test_template(template_path, str(year), str(day), f'{day:02}')
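# Sketch of the substitution performed above (the real template files live in
# scripts/templates/ and are not reproduced here): a hypothetical template line
#   "def test_day_{file_day}_{year}():"
# becomes "def test_day_01_2021():" for year=2021, day=1, since {file_day} is
# passed as the zero-padded day number.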
def write_template(filename: str, template: str):
with open(filename, 'w') as f:
f.write(template)
def _parse_args(args: List[str]) -> Tuple[int, int]:
parser = ArgumentParser(description='Add a day')
parser.add_argument('year', type=int, help='The year of the exercise')
parser.add_argument('day', type=int, help='The day of the exercise')
parsed = parser.parse_args(args)
return parsed.year, parsed.day
if __name__ == '__main__':
add_day()
|
nilq/baby-python
|
python
|
###########################
#
# #327 Rooms of Doom - Project Euler
# https://projecteuler.net/problem=327
#
# Code by Kevin Marciniak
#
###########################
|
nilq/baby-python
|
python
|
from setuptools import setup
package_name = "simdash"
description = "A web based dashboard for visualizing simulations"
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name=package_name,
description=description,
maintainer="Parantapa Bhattacharya",
maintainer_email="pb+pypi@parantapa.net",
long_description=long_description,
long_description_content_type="text/markdown",
packages=[package_name, "%s.database" % package_name, "%s.viz" %package_name],
scripts=["bin/%s" % package_name],
use_scm_version=True,
setup_requires=['setuptools_scm'],
install_requires=[
"click",
"click_completion",
"logbook",
"flask",
"altair",
"pandas",
"toml",
],
url="http://github.com/NSSAC/%s" % package_name,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.urls import path, include
from . import views
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register('profile', views.Profile_viewSet)
router.register('posts', views.Post_viewSet)
router.register('users', views.User_viewSet)
urlpatterns = [
path('',views.index, name = 'homepage' ),
path('signup/', views.signup, name='signup'),
path('', include('django.contrib.auth.urls')),
path('api/', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('profile/<username>/info', views.user_profile, name='profile'),
path('project/<post>', views.project_rating, name='project'),
path('profile/<username>/edit', views.edit_profile, name='editprofile'),
path('search/', views.search_project, name='search'),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from lettuce import step, world
from common.rest_utils import RestUtils
from common.api_utils import APIUtils
from common.test_utils import TestUtils
from common.mongo_utils import MongoUtils
rest_utils = RestUtils()
test_utils = TestUtils()
api_utils = APIUtils()
mongo_utils = MongoUtils()
@step(u'the DB is working')
def the_db_is_working(step):
mongo_utils.the_db_is_working(step)
@step(u'the DB has stopped working')
def the_db_has_stopped_working(step):
mongo_utils.the_db_has_stopped_working(step)
@step(u'I send to (.*) the data (.*)')
def i_send_to_url_the_data(step, url, data):
rest_utils.send_to_url_the_data(step, url, data)
@step(u'I get a success response of type (\d+) with location (.+):')
def i_get_a_success_response_of_type_with_location(step, status_code, location_index):
rest_utils.get_a_success_response_of_type_with_location(step, status_code, location_index)
@step(u'I get an error response of type (\d+) with error code (\w+)')
def i_get_an_error_response_of_type_with_error_code(step, status_type, error_code):
rest_utils.get_an_error_response_of_type_with_error_code(step, status_type, error_code)
@step(u'I send to (.*) the instance data (\d+):')
def i_send_to_url_the_instance_data(step, url, class_index):
api_utils.send_to_url_the_instance_data(step, url, class_index)
@step(u'a class has already been published with data (\d+):')
def a_class_has_already_been_published_with_data(step, old_class_index):
""" Rest wolrd variables for scenario execution"""
api_utils.a_class_has_already_been_published_with_data(step, old_class_index)
@step(u'a class has not already been published with data (.*):')
def a_class_has_not_already_been_published_with_data(step, old_class_index):
api_utils.a_class_has_not_already_been_published_with_data(step, old_class_index)
@step(u'an instance has already been published with data (\d+):')
def an_instance_has_already_been_published_with_data(step, old_instance_index):
api_utils.an_instance_has_already_been_published_with_data(step, old_instance_index)
@step(u'the response contains the instance data')
def the_response_contains_the_instance_data(step):
api_utils.the_response_contains_the_instance_data()
@step(u'the location returns the instance data')
def the_location_returns_the_instance_data(step):
api_utils.the_url_returns_the_instance_data(step, world.location)
@step(u'I send to (.*) the rule data (\d+):')
def i_send_to_url_the_rule_data(step, url, rule_index):
api_utils.send_to_url_the_rule_data(step, url, rule_index)
@step(u'the response contains the rule data')
def the_response_contains_the_rule_data(step):
api_utils.the_response_contains_the_rule_data()
@step(u'the location returns the rule data')
def the_location_returns_the_rule_data(step):
api_utils.the_url_returns_the_rule_data(step, world.location)
@step(u'the following bindings rules are available (.*):')
def the_following_bindings_rules_are_available(step, operation_index):
api_utils.the_following_bindings_rules_are_avalilabe(step, operation_index)
@step(u'there is a context rule already been published with data (\d+):')
def there_is_a_context_rule_already_been_published_with_data(step, old_rule_index):
api_utils.there_is_a_context_rule_already_been_published_with_data(step, old_rule_index)
@step(u'the following bindings in (\d+) are available for the context rules:')
def and_the_following_bindings_in_bindings_index_are_available_for_the_context_rules(step, binding_index):
api_utils.the_following_bindings_are_available_for_the_context_rules(step, binding_index)
@step(u'I request the resource (.*) with parameters (\d+):')
def request_the_resource_with_parameters(step, url, params_index):
rest_utils.request_the_resource(step, url, params_index)
@step(u'I get a success response of type (\d+) with a result set of size (\d+)')
def get_a_success_response_of_type_with_resultset_of_size(step, status_code, size):
rest_utils.get_a_success_response_of_type_with_resultset_of_size(step, status_code, size)
@step(u'the result set contains the instance (\d+) in position (\d+):')
def the_resultset_contains_instance_in_position(step, instance_index, position):
api_utils.the_resultset_contains_instance_in_position(step, instance_index, position)
@step(u'the result set contains the instance (\d+):')
def the_resultset_contains_instance(step, instance_index):
api_utils.the_resultset_contains_instance_in_position(step, instance_index)
@step(u'And the previous bindings are pusblished for the context (\d+):')
def and_the_previous_bindings_are_pusblished_for_the_context_operation_index(step, context_index):
api_utils.send_to_url_the_rule_data(step, "$base_api_url/$bindings_url", context_index)
@step(u'the exceptionText contains (\d+)')
def the_exceptiontext_contains_exceptiontext(step, exceptionText_index):
api_utils.the_exceptiontext_contains_exceptiontext(step, exceptionText_index)
@step(u'the instance published in position (\d+) has been deleted')
def the_instance_published_has_been_deleted(step, position):
i_delete_url(step, "$base_api_url/$classes_url/$class_name/$instances_url/" + api_utils.get_instance_id(position))
@step(u'I delete resource (\d+):')
def i_delete_resource(step, resource_index):
i_delete_url(step, step.hashes[int(resource_index)]["resource"])
@step(u'I delete (.*)')
def i_delete_url(step, url):
rest_utils.delete_url(step, url)
@step(u'I check the resource (\d+):')
def i_check_the_resource(step, resource_index):
request_the_resource(step, step.hashes[int(resource_index)]["resource"])
@step(u'I request the resource (.*)')
def request_the_resource(step, url):
rest_utils.request_the_resource(step, url)
@step(u'I update (.*) with the user data (\d+):')
def i_update_url_with_the_user_data(step, url, user_data_index):
api_utils.send_to_url_the_user_data(step, url+step.hashes[int(user_data_index)]["username"], user_data_index)
@step(u'the response contains the user data')
def the_response_contains_the_user_data(step):
api_utils.the_response_contains_the_user_data()
@step(u'the location returns the user data')
def the_location_returns_the_user_data(step):
api_utils.the_url_returns_the_user_data(step, world.location)
@step(u'I get a success response of type (\d+)')
def i_get_a_success_response_of_type(step, status_code):
rest_utils.get_a_success_response_of_type(step, status_code)
@step(u'the URL (.*) returns the error code (\d+) with error code (\w+)')
def the_url_returns_an_error_of_type_with_error_code(step, url, status_code, error_code):
api_utils.the_url_returns_an_error_of_type_with_error_code(step, url, status_code, error_code)
@step(u'I send to (.*) the class data (\d+):')
def i_send_to_url_the_class_data(step, url, class_index):
api_utils.send_to_url_the_class_data(step, url, class_index)
@step(u'the DB has no classes already published')
def the_db_has_no_classes_already_published(step):
pass # the database is cleaned before each scenario
@step(u'the user performing the operation is:')
def the_user_performing_the_operation_is(step):
test_utils.reset_world()
world.request_user = step.hashes[0]["username"]
world.request_password = step.hashes[0]["password"]
assert True
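# Illustrative only: a feature-file scenario these step definitions would match
# (hypothetical table data; the real .feature files are not part of this module):
#
#   Scenario: Publish an instance
#     Given the DB is working
#     When I send to $base_api_url/$instances_url the instance data 0:
#       | instanceName | data |
#     Then I get a success response of type 201 with location 0:
#       | location |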
|
nilq/baby-python
|
python
|
from gerais import *
from tkinter import messagebox
# =================== Users ===================
# ==== Check whether a user exists ====
def existeUsuario(dic,chave):
if chave in dic.keys():
return True
else:
return False
# ==== Insert a user ====
def insereUsuario(dic):
email = input("Digite o email:")
municipio = input("Digite o municipio:")
if existeUsuario(dic, email):
print("Usuario já cadastrado!")
pausa()
else:
nome = input("Digite o nome: ")
dic[email]=(nome, municipio)
print("Dados inseridos com sucesso!")
pausa()
# ==== Display a user ====
def mostraUsuario(dic,chave):
if existeUsuario(dic,chave):
dados = dic[chave]
print(f"Nome: {dados[0]}")
print(f"Email: {chave}")
print(f"Município: {dados[1]}")
else:
print("Usuario não cadastrada!")
# ==== Update a user ====
def alteraUsuario(dic,chave):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
confirma = input("Tem certeza que deseja alterá-lo? (S/N): ").upper()
if confirma == 'S':
nome = input("Digite o nome: ")
municipio = input("Digite o município: ")
dic[chave]=(nome, municipio)
print("Dados alterados com sucesso!")
pausa()
else:
print("Alteração cancelada!")
pausa()
else:
print("Usuario não cadastrado!")
pausa()
# ==== Remove a user ====
def removeUsuario(dic,chave):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
confirma = input("Tem certeza que deseja apagar? (S/N): ").upper()
if confirma == 'S':
del dic[chave]
print("Dados apagados com sucesso!")
pausa()
else:
print("Exclusão cancelada!")
pausa()
else:
print("Pessoa não cadastrada!")
pausa()
# ==== Show all users ====
def mostraTodosUsuarios(dic):
print("Relatório: Todas os usuarios\n")
print("EMAIL - NOME - MUNICÍPIO\n")
for email in dic:
tupla = dic[email]
linha = email + " - " + tupla[0] + " - " + tupla[1]
print(linha)
print("")
pausa()
# ====== Write data to the file ======
def gravaUsuarios(dic):
arq = open("usuarios.txt", "w")
for email in dic:
tupla = dic[email]
linha = email+";"+tupla[0]+";"+tupla[1]+"\n"
arq.write(linha)
arq.close()
# ====== Loads data from the file ======
def recuperaUsuarios(dic):
if (existe_arquivo("usuarios.txt")):
arq = open("usuarios.txt", "r")
for linha in arq:
linha = linha[:len(linha)-1]
lista = linha.split(";")
nome = lista[1]
email = lista[0]
municipio = lista[2]
dic[email] = (nome, municipio)
# encode("windows-1252").decode("utf-8")
# def verificaMunicipio(municipio):
# ==== Users menu ====
def menuUsuarios(dicUsuarios):
opc = 0
while ( opc != 6 ):
print("\nGerenciamento de usuarios:\n")
print("1 - Insere Usuario")
print("2 - Altera Usuario")
print("3 - Remove Usuario")
print("4 - Mostra um Usuario")
print("5 - Mostra todos os Usuarios")
print("6 - Sair do menu de Usuarios")
opc = int( input("Digite uma opção: ") )
if opc == 1:
insereUsuario(dicUsuarios)
elif opc == 2:
email = input("Email a ser alterado: ")
alteraUsuario(dicUsuarios, email)
elif opc == 3:
email=input("Email a ser removido: ")
removeUsuario(dicUsuarios, email)
elif opc == 4:
email=input("Email a ser consultado: ")
mostraUsuario(dicUsuarios, email)
pausa()
elif opc == 5:
mostraTodosUsuarios(dicUsuarios)
elif opc == 6:
gravaUsuarios(dicUsuarios)
# ==== Inserts a user (GUI version) ====
def insereUsuarioInterface(dic, email, nome, municipio):
if existeUsuario(dic, email):
print("Usuario já cadastrado!")
messagebox.showinfo("Info", "Usuario já cadastrado!")
else:
dic[email]=(nome, municipio)
print("Dados inseridos com sucesso!")
messagebox.showinfo("Info", "Dados inseridos com sucesso!")
# ==== Removes a user (GUI version) ====
def removeUsuarioInterface(dic,chave):
if existeUsuario(dic,chave):
del dic[chave]
print("Dados apagados com sucesso!")
messagebox.showinfo("Info", "Dados apagados com sucesso!")
else:
print("Pessoa não cadastrada!")
messagebox.showinfo("Info", "Pessoa não cadastrada!")
# ==== Updates a user (GUI version) ====
def alteraUsuarioInterface(dic, chave, nome, municipio):
if existeUsuario(dic,chave):
mostraUsuario(dic,chave)
dic[chave]=(nome, municipio)
print("Dados alterados com sucesso!")
messagebox.showinfo("Info", "Dados alterados com sucesso!")
else:
print("Usuario não cadastrado!")
messagebox.showinfo("Info", "Usuario não cadastrado!")
# ==== Displays a user (GUI version) ====
def mostraUsuarioInterface(dic,chave):
if existeUsuario(dic,chave):
dados = dic[chave]
return(dados[0], chave, dados[1])
else:
return(False, False, False)
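# Minimal usage sketch (illustrative, not part of the original module; assumes the
# gerais helpers pausa/existe_arquivo imported above are available):
if __name__ == "__main__":
    users = {"ana@example.com": ("Ana", "Recife")}
    print(existeUsuario(users, "ana@example.com"))   # True
    mostraUsuario(users, "ana@example.com")          # prints name, e-mail and municipality
    gravaUsuarios(users)                             # writes usuarios.txt as "email;name;municipality"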
|
nilq/baby-python
|
python
|
from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.common.utils import parse_datetime
from waldur_mastermind.marketplace import callbacks, models
from waldur_mastermind.marketplace.tests import factories
@freeze_time('2018-11-01')
class CallbacksTest(test.APITransactionTestCase):
def test_when_resource_is_created_new_period_is_opened(self):
# Arrange
start = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=plan)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
resource=resource,
)
# Act
callbacks.resource_creation_succeeded(resource)
# Assert
self.assertTrue(
models.ResourcePlanPeriod.objects.filter(
resource=resource, plan=plan, start=start, end=None
).exists()
)
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
def test_when_plan_is_changed_old_period_is_closed_new_is_opened(self):
# Arrange
old_start = parse_datetime('2018-10-01')
new_start = parse_datetime('2018-11-01')
old_plan = factories.PlanFactory()
new_plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=old_plan)
old_period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=old_plan, start=old_start, end=None
)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
type=models.OrderItem.Types.UPDATE,
resource=resource,
plan=new_plan,
)
# Act
callbacks.resource_update_succeeded(resource)
# Assert
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
old_period.refresh_from_db()
self.assertEqual(old_period.end, new_start)
self.assertTrue(
models.ResourcePlanPeriod.objects.filter(
resource=resource, plan=new_plan, start=new_start, end=None
).exists()
)
def test_when_resource_is_terminated_old_period_is_closed(self):
# Arrange
start = parse_datetime('2018-10-01')
end = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(plan=plan)
period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=plan, start=start, end=None
)
order_item = factories.OrderItemFactory(
state=models.OrderItem.States.EXECUTING,
type=models.OrderItem.Types.TERMINATE,
resource=resource,
plan=plan,
)
# Act
callbacks.resource_deletion_succeeded(resource)
# Assert
order_item.refresh_from_db()
self.assertEqual(order_item.state, models.OrderItem.States.DONE)
period.refresh_from_db()
self.assertEqual(period.end, end)
def test_when_resource_is_terminated_directly_old_period_is_closed(self):
# Arrange
start = parse_datetime('2018-10-01')
end = parse_datetime('2018-11-01')
plan = factories.PlanFactory()
resource = factories.ResourceFactory(
plan=plan, state=models.Resource.States.ERRED
)
period = models.ResourcePlanPeriod.objects.create(
resource=resource, plan=plan, start=start, end=None
)
# Act
resource.state = models.Resource.States.TERMINATED
resource.save()
# Assert
period.refresh_from_db()
self.assertEqual(period.end, end)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
class TypedList:
''' List-like class that allows only a single type of item '''
def __init__(self, example_element, initial_list = []):
self.type = type(example_element)
if not isinstance(initial_list, list):
raise TypeError("Second argument of TypedList must "
"be a list.")
for element in initial_list:
self.__check(element)
self.elements = initial_list[:]
def __check(self, element):
if type(element) != self.type:
raise TypeError("Attempted to add an element of "
"incorrect type to a typed list.")
def __setitem__(self, i, element):
self.__check(element)
self.elements[i] = element
def __getitem__(self, i):
return self.elements[i]
def __str__(self):
to_string = '{}'.format(self.elements)
return to_string
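# Minimal usage sketch (illustrative, not part of the original class): TypedList takes
# an example element and rejects later assignments of any other type.
if __name__ == "__main__":
    tl = TypedList(0, [1, 2, 3])     # an int-only list
    tl[1] = 9                        # accepted: same type as the example element
    try:
        tl[0] = "oops"               # rejected with a TypeError
    except TypeError as err:
        print(err)
    print(tl)                        # -> [1, 9, 3]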
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import List
import torch
from reagent import types as rlt
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import FullyConnectedNetwork
class FullyConnectedCritic(ModelBase):
def __init__(
self,
state_dim: int,
action_dim: int,
sizes: List[int],
activations: List[str],
use_batch_norm: bool = False,
use_layer_norm: bool = False,
output_dim: int = 1,
):
super().__init__()
assert state_dim > 0, "state_dim must be > 0, got {}".format(state_dim)
assert action_dim > 0, "action_dim must be > 0, got {}".format(action_dim)
self.state_dim = state_dim
self.action_dim = action_dim
assert len(sizes) == len(
activations
), "The numbers of sizes and activations must match; got {} vs {}".format(
len(sizes), len(activations)
)
self.fc = FullyConnectedNetwork(
[state_dim + action_dim] + sizes + [output_dim],
activations + ["linear"],
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
)
def input_prototype(self):
# for inference: (batchsize, feature_dim)
return (
rlt.FeatureData(torch.randn(1, self.state_dim)),
rlt.FeatureData(torch.randn(1, self.action_dim)),
)
def forward(self, state: rlt.FeatureData, action: rlt.FeatureData):
assert (
len(state.float_features.shape) == len(action.float_features.shape)
and len(action.float_features.shape) == 2
and (state.float_features.shape[0] == action.float_features.shape[0])
), (
f"state shape: {state.float_features.shape}; action shape: "
f"{action.float_features.shape} not equal to (batch_size, feature_dim)"
)
cat_input = torch.cat((state.float_features, action.float_features), dim=-1)
return self.fc(cat_input)
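# Illustrative sketch (assumes the reagent package and torch are importable, as above):
# build a small critic and score the random (state, action) pair from input_prototype().
if __name__ == "__main__":
    critic = FullyConnectedCritic(
        state_dim=4, action_dim=2, sizes=[8, 8], activations=["relu", "relu"]
    )
    state, action = critic.input_prototype()
    q_value = critic(state, action)
    print(q_value.shape)  # expected: torch.Size([1, 1])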
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .models import *
import apiclient
from apiclient.discovery import build
from django.core.mail import send_mail,EmailMessage
from apiclient.errors import HttpError
from oauth2client.tools import argparser
from django.core.cache import cache
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
import os
import httplib2
import sys
#import urllib3
import json
# from music.tasks import send_feedback_email_task
from celery.decorators import task
from celery.utils.log import get_task_logger
from dbms import celery_app
logger = get_task_logger(__name__)
#import classes
from music.classes import *
DEVELOPER_KEY = "AIzaSyC4lxc1NfUV09y_vX9kTiRKvSbK6bc6rP0"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# # This OAuth 2.0 access scope allows for full read/write access to the
# # authenticated user's account.
# YOUTUBE_READ_WRITE_SCOPE = "https://www.googleapis.com/auth/youtube"
# CLIENT_SECRETS_FILE = "client_secrets.json"
# # This variable defines a message to display if the CLIENT_SECRETS_FILE is
# # missing.
# MISSING_CLIENT_SECRETS_MESSAGE = """
# WARNING: Please configure OAuth 2.0
# To make this sample run you will need to populate the client_secrets.json file
# found at:
# %s
# with information from the {{ Cloud Console }}
# {{ https://cloud.google.com/console }}
# For more information about the client_secrets.json file format, please visit:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
# """ % os.path.abspath(os.path.join(os.path.dirname(__file__),
# CLIENT_SECRETS_FILE))
# def get_authenticated_service(args):
# flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
# scope=YOUTUBE_READ_WRITE_SCOPE,
# message=MISSING_CLIENT_SECRETS_MESSAGE)
# #storage = Storage("%s-oauth2.json" % sys.argv[0])
# storage = Storage("subscriptions-oauth2.json")
# credentials = storage.get()
# if credentials is None or credentials.invalid:
# credentials = run_flow(flow, storage, args)
# return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
# http=credentials.authorize(httplib2.Http()))
# # argparser.add_argument("--user", help="ID of the channel to subscribe to.",
# # default="Phaneendra Babu")
# # args = argparser.parse_args()
# #args = argparser.parse_args()
# args=""
# youtube = get_authenticated_service(args)
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
# class Video:
# def __init__(self):
# self.title = ""
# self.id = ""
# self.description = ""
# self.thumbnail_url = ""
# self.thumbnail_width = 0
# self.thumbnail_height = 0
# self.channelTitle = ""
# self.duration = ""
# self.caption = ""
# self.viewCount = 0
# self.likeCount = 0
# class Channel:
# def __init__(self):
# self.title = ""
# self.id = ""
# self.description = ""
# self.thumbnail_url = ""
# self.thumbnail_width = 100
# self.thumbnail_height = 100
# class Playlist:
# def __init__(self):
# self.id=""
# self.title=""
# self.channelId=""
# self.channelTitle=""
# self.thumbnail_url=""
# self.thumbnail_width = 100
# self.thumbnail_height = 100
# class PlayListItem:
# def __init__(self):
# self.playlistId=""
# self.id=""
# self.title=""
# self.description=""
# self.thumbnail_url=""
# self.thumbnail_width=100
# self.thumbnail_height=100
# self.channelTitle=""
# Create your views here.
def home(request):
return render(request, 'music/home.html', {})
def login(request):
m=""
#m=send_feedback_email_task.delay("HI It is our DBMS Project").result
print("Hello")
print("Message: %s" % m)
#print(add.delay(4,5).get())
return render(request, 'music/login.html', {})
def register(request):
return render(request, 'music/register.html', {})
def savedetails(request):
firstname = request.POST["firstname"]
lastname = request.POST["lastname"]
email = request.POST["email"]
mobile = request.POST["mobile"]
username = request.POST["username"]
password = request.POST["password"]
try:
o = Login.objects.get(username=username)
return render(request, 'music/register.html', {'error_message': username + " already taken"})
except (KeyError, Login.DoesNotExist):
l = Login(username=username, password=password)
l.save()
l.detail_set.create(firstname=firstname, lastname=lastname, email=email, mobile=mobile)
return render(request, 'music/login.html', {'error_message': "Account Successfully Registered.Login Here"})
def validate(request):
uname = request.POST["username"]
pwd = request.POST["password"]
try:
user = Login.objects.get(username=uname)
except (KeyError, Login.DoesNotExist):
return render(request, 'music/login.html', {'error_message': "Username is not found in database"})
else:
if pwd == user.password:
# return HttpResponseRedirect('music:user', args=(user.id,))
detail = Detail.objects.get(pk=user.id)
send_mail("Conformation of DBMS Accout","PLease Click Below link to confirm your email you registered on DBMS",
'cs13b1037@iith.ac.in',['cs13b1042@iith.ac.in'],fail_silently=True)
#popular_videos = cache.get_or_set('popular',popular(),100000)
popular_videos=get_popular_videos()
# popular_videos=cache.get('popular_videos')
# if popular_videos is None:
# print("Not cached")
# popular_videos=popular()
# cache.set('popular_videos',popular_videos,600)
#popular_channels = cache.get_or_set('popular_channels',popular_channels(),1000000)
popular_channels_list = popular_channels()
context = {
'id': user.id,
                'fullname': detail.firstname + " " + detail.lastname,
'email': detail.email,
'popular_videos': popular_videos,
'popular_channels':popular_channels_list,
}
return render(request, 'music/user.html', context)
else:
return render(request, 'music/login.html', {'error_message': "Incorrect Username,Password Combination"})
def user(request, id):
return render(request, "music/user.html", {'id': id})
def search(request):
query = request.POST["search"]
search_response = youtube.search().list(
q=query,
part="id,snippet",
maxResults=5
).execute()
videos = []
channels = []
# playlists = []
if 'nextPageToken' in search_response:
print(search_response['nextPageToken'])
#channels2,videos2=get_next_page.delay(search_response['nextPageToken']).get()
#videos2=get_next_page.delay(search_response['nextPageToken']).get()
print("got next page")
for search_result in search_response.get("items", []):
# print search_result
# if "snippet" in search_result and "thumbnails" in search_result["snippet"] and "default" in search_result["snippet"]["thumbnails"]:
# print search_result["snippet"]["thumbnails"]["default"]
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
elif search_result["id"]["kind"] == "youtube#channel":
ch = Channel()
get_channel_info(ch, search_result)
channels.append(ch)
return render(request, 'music/search.html', {'query': query, 'videos': videos, 'channels': channels})
def watch(request, id):
related_videos = related(id)
return render(request, 'music/watch.html', {'id': id, 'related_videos': related_videos})
# channel playlists can be obtained from playlist.list or using contentDetails in channel
# channel["contentDetails"]["relatedPlaylists"]["uploads"]
def channel(request,id):
search_response = youtube.playlists().list(
channelId=id,
part="id,snippet"
).execute()
playlists=[]
print("Channel Id : ",id)
for search_result in search_response.get("items",[]):
pl=Playlist()
if "id" in search_result:
pl.id=search_result["id"]
print("Playlist Id : ",pl.id)
pl.title=search_result["snippet"]["title"]
pl.channelId=search_result["snippet"]["channelId"]
pl.channelTitle=search_result["snippet"]["channelTitle"]
pl.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
pl.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
pl.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
playlists.append(pl)
context={
'channel_id':id,
'playlists':playlists,
}
return render(request,'music/channel.html',context)
def playlist(request,id):
search_response = youtube.playlistItems().list(
playlistId=id,
part="id,snippet"
).execute()
playlistItems=[]
for search_result in search_response.get("items",[]):
pli=PlayListItem()
pli.playlistId=search_result["id"]
pli.id=search_result["snippet"]["resourceId"]["videoId"]
pli.title=search_result["snippet"]["title"]
pli.description=search_result["snippet"]["description"]
pli.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
pli.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
pli.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
pli.channelTitle=search_result["snippet"]["channelTitle"]
playlistItems.append(pli)
context={'playlistItems':playlistItems}
print("playlist Id : ",id)
return render(request,'music/playlist.html',context)
def popular():
# youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
# developerKey=DEVELOPER_KEY)
print("Popular Request")
video_response = youtube.videos().list(
chart="mostPopular",
part='id,snippet,statistics,contentDetails',
maxResults=5,
videoCategoryId="10",
).execute()
videos = []
# print(video_response)
# Add each result to the list, and then display the list of matching videos.
for video_result in video_response.get("items", []):
v = Video()
if "id" in video_result:
v.id = video_result["id"]
get_info(v, video_result)
videos.append(v)
# print("Videos:\n", "\n".join(videos), "\n")
return videos
def popular_channels():
print("popular_channels request")
search_response = youtube.channels().list(
categoryId="GCTXVzaWM",
part="snippet,id,contentDetails",
maxResults=5
).execute()
channels=[]
for search_result in search_response.get("items",[]):
ch=Channel()
ch.id=search_result["id"]
ch.description=search_result["snippet"]["description"]
ch.title=search_result["snippet"]["title"]
ch.thumbnail_url=search_result["snippet"]["thumbnails"]["default"]["url"]
# ch.thumbnail_width=search_result["snippet"]["thumbnails"]["default"]["width"]
# ch.thumbnail_height=search_result["snippet"]["thumbnails"]["default"]["height"]
channels.append(ch)
return channels
def related(id):
search_response = youtube.search().list(
type="video",
relatedToVideoId=id,
part="id,snippet",
maxResults=5,
).execute()
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
return videos
def get_info(v, video_result):
# if "id" in video_result:
# v.id = video_result["id"]
if "snippet" in video_result:
if "title" in video_result["snippet"]:
v.title = video_result["snippet"]["title"]
if "description" in video_result["snippet"]:
v.description = video_result["snippet"]["description"]
if "thumbnails" in video_result["snippet"]:
if "default" in video_result["snippet"]["thumbnails"]:
if "url" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_url = video_result["snippet"]["thumbnails"]["default"]["url"]
# print(v.thumbnail_url)
if "width" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_width = video_result["snippet"]["thumbnails"]["default"]["width"]
if "height" in video_result["snippet"]["thumbnails"]["default"]:
v.thumbnail_height = video_result["snippet"]["thumbnails"]["default"]["height"]
if "channelTitle" in video_result["snippet"]:
v.channelTitle = video_result["snippet"]["channelTitle"]
if "contentDetails" in video_result:
if "duration" in video_result["contentDetails"]:
v.duration = video_result["contentDetails"]["duration"]
if "caption" in video_result["contentDetails"]:
v.caption = video_result["contentDetails"]["caption"]
if "statistics" in video_result:
if "viewCount" in video_result["statistics"]:
v.viewCount = video_result["statistics"]["viewCount"]
if "likeCount" in video_result["statistics"]:
v.likeCount = video_result["statistics"]["likeCount"]
# channel result in search
def get_channel_info(ch, search_result):
if "id" in search_result:
ch.id = search_result["id"]["channelId"]
if "snippet" in search_result:
if "channelTitle" in search_result["snippet"]:
ch.channelTitle = search_result["snippet"]["channelTitle"]
if "descritption" in search_result["snippet"]:
ch.description = search_result["snippet"]["description"]
if "thumbnails" in search_result["snippet"]:
if "default" in search_result["snippet"]["thumbnails"]:
if "url" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_url = search_result["snippet"]["thumbnails"]["default"]["url"]
if "width" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_width = search_result["snippet"]["thumbnails"]["default"]["width"]
if "height" in search_result["snippet"]["thumbnails"]["default"]:
ch.thumbnail_height = search_result["snippet"]["thumbnails"]["default"]["height"]
@task(name="get_next_page")
def get_next_page(token):
print("getting next page")
logger.info("getting next page")
search_response = youtube.search().list(
pageToken=token,
part="id,snippet",
maxResults=5
).execute()
videos = []
channels = []
# playlists = []
for search_result in search_response.get("items", []):
# print search_result
# if "snippet" in search_result and "thumbnails" in search_result["snippet"] and "default" in search_result["snippet"]["thumbnails"]:
# print search_result["snippet"]["thumbnails"]["default"]
if search_result["id"]["kind"] == "youtube#video":
v = Video()
if "id" in search_result and "videoId" in search_result["id"]:
v.id = search_result["id"]["videoId"]
get_info(v, search_result)
videos.append(v)
elif search_result["id"]["kind"] == "youtube#channel":
ch = Channel()
get_channel_info(ch, search_result)
channels.append(ch)
#return channels,videos
tu=(channels,videos)
return tu
@task(name="send_feedback_email_task")
def send_feedback_email_task(message):
"""sends an email when feedback form is filled successfully"""
logger.info("Sent feedback email")
#return send_feedback_email(email, message)
message="HI It is our DBMS Project"
print("Received : ",message)
return message
@task(name="sum_two_numbers")
def add(x, y):
return x + y
def get_popular_videos():
popular_videos=cache.get('popular_videos')
if popular_videos is None:
print("Not cached")
popular_videos=popular()
cache.set('popular_videos',popular_videos,120)
return popular_videos
|
nilq/baby-python
|
python
|
def BSDriver(LoadCase):
# BoundingSurface J2 with kinematic hardening
# Written by Pedro Arduino, Mar. 22 2019
# Copyright Arduino Computational Geomechanics Group
# Ported into Python/Jupyter Notebook by Justin Bonus, Jul. 2019
#
#
# LoadCase:
# 1 ... proportionally increasing strain
# 2 ... cyclic strain
# 3 ... proportionally increasing stress
# 4 ... cyclic stress
#
# ====== LOADING CASES ==================================================
import numpy as np
from collections import namedtuple
nPoints = 200
## Switch for LoadCases:
## Pseudo-switch created by using python dictionary to hold LoadCase functions
def case_one():
case_one.time = np.linspace(0,1,nPoints+1)
case_one.strain = np.array([ 0.05, -0.015, -0.015, 0.000, 0.000, 0.000 ]).reshape(6,1) * case_one.time
case_one.StressDriven = 0
return case_one
def case_two():
nCycles = 3
omega = 0.15
case_two.time = np.linspace(0,nCycles*2*np.pi/omega,nCycles*nPoints+1);
case_two.strain = np.array([ 0.00, -0.000, -0.000, 0.045, 0.000, 0.000 ]).reshape(6,1) * np.sin( omega*case_two.time )
case_two.StressDriven = 0
return case_two
def case_three():
case_three.time = np.linspace(0,1,nPoints+1)
case_three.stress = np.array([[0.100],
[0.000],
[0.000],
[0.000],
[0.000],
[0.000]])*case_three.time + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_three.time.shape )
case_three.StressDriven = 1
return case_three
def case_four():
nCycles = 3
omega = 0.15
case_four.time = np.linspace(0, nCycles*2*np.pi/omega, nCycles*nPoints+1)
case_four.stress = np.array([[0.000],
[0.000],
[0.000], #.01, .03, -.01, .05, 0, -.02
[0.050],
[0.000],
[0.000]])*np.sin( omega*case_four.time ) + 0.0*np.array([1,1,1,0,0,0]).reshape(6,1)*np.ones( case_four.time.shape )
case_four.StressDriven = 1
return case_four
case_switcher = {
1: case_one,
2: case_two,
3: case_three,
4: case_four
}
case = case_switcher.get(LoadCase, lambda: "Invalid LoadCase")
case() #Runs the LoadCase function. Creates: case.time, case.strain | case.stress, case.StressDriven
time, StressDriven = case.time, case.StressDriven
if StressDriven:
stress = case.stress
strain = np.zeros((6,1)) #initialize empty 6x1 strain numpy array for stress-driven scenario
else:
strain = case.strain
stress = np.zeros((6,1)) #initialize empty 6x1 stress numpy array for strain-driven scenario
Stress0 = np.zeros((6,1)) #Initialize first 'unloading' point
StrainDriven = int(not StressDriven)
# ========================================================================
# ---- MATERIAL PARAMETERS
    # Static Parameters
E = 20 #Elastic Modulus MPa
    v = 0.49 #Poisson's ratio, less than 0.5 to allow compressibility
G = E/(2*(1+v)) #Shear modulus
K = E/(3*(1-2*v)) #Bulk modulus
Kmod = 0 #Isotropic Hardening
Su = 0.061 #Yield stress in 1-D tension test MPa
hh = G #kinematic hardening parameter
mm = 1.0 #kinematic hardening parameter
beta = 0.5 #midpoint integration
RR = np.sqrt(8/3)*Su
    #namedtuple used to organize related variables, similar to a structure
static = namedtuple('StaticParam',['E','v','G','K','Kmod','Su','hh','mm','beta','RR'])
StaticParam = static(E,v,G,K,Kmod,Su,hh,mm,beta,RR)
# ========================================================================
# ---- INITIAL CONDITIONS
# Initialize the state variables
if StrainDriven:
IniStress = -0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
IniStrain = np.linalg.solve(GetCe(StaticParam), IniStress) #Check if GetCe compacts to nxn
elif StressDriven:
IniStress = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
IniStrain = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
#Structure for IniState (initial state parameters, static) and CurState (changing state parameters)
state = namedtuple('state', ['eP','alphaISO','Stress0', 'Kappa', 'Psi'])
eP = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
alphaISO = 0.0
Stress0 = 0.0*(np.array([1, 1, 1, 0, 0, 0]).reshape(6,1))
Kappa = 0.0
Psi = 0.0
IniState = state(eP, alphaISO, Stress0, Kappa, Psi)
# For first iteration
CurStress = IniStress
CurStrain = IniStrain
CurState = IniState
# Variables used for plotting
    alphaISO_plot, j2_plot, j2e_plot, stress_var_plot, stress_var2_plot = [], [], [], [], [] #Initialize list format
alphaISO_plot.append(0) #Python list allows for easy data addition
strain[:,0] = CurStrain.T - IniStrain.T
stress[:,0] = CurStress.T
j2_plot.append(0)
j2e_plot.append(0)
stress_var_plot.append(0)
Stress0[:,0] = CurStress.T
Iter = np.zeros(time.shape)
# ========================================================================
# ---- COMPUTATION CYCLES
if StrainDriven:
#StrainDriven
for i in range(1, (len(strain[0]) )):
NextStrain = strain[:,i] + IniStrain.T
dStrain = strain[:,i] - strain[:, i-1] #Driving variable
#Current BSRadialMap is a function, will be transformed into a class eventually
NextStress, NextState, NextCep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
# Update Stress, Strain, and State
CurStress = NextStress
CurState = NextState
# Variables created for plotting purposes
alphaISO_plot.append(CurState.alphaISO)
stress = np.append(stress, CurStress, 1)
j2_plot.append(GetJ2(CurStress))
stress_var_plot.append(np.sqrt(2*j2_plot[i])*np.sqrt(3/2)*np.sign(stress[0,i] - stress[1,i]))
stress_var2_plot.append((stress[0,i] - stress[1,i]))
Stress0 = np.append(Stress0, CurState.Stress0, 1)
elif StressDriven:
# StressDriven driver
# set tolerance value for iterative procedure(s)
TOLERANCE = 1e-10
for i in range(0, len(stress[0])-1):
# initialize strain epsilon_{n+1}^{(0)} = eps_{n} using the old state
# (this is the initial approximation for eps_{n+1}
if i == 0:
# special settings for initial values at t_1
NextStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
dStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
CurState = IniState
else:
NextStrain = CurStrain
dStrain = np.array([0,0,0,0,0,0]).reshape(6,1)
NextStress, NextState, Cep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
RR = stress[:, i].reshape(6,1) - NextStress
RR = RR.reshape(6,1)
RR0 = normS(RR)
# reset iteration counter
kk = 0
# iterate until convergence
while normS(RR)/RR0 > TOLERANCE:
# update strain from eps_{n+1}^{(k)} to eps_{n+1}^{(k+1)}
dStrain = np.linalg.solve(Cep, RR)
NextStrain = NextStrain + dStrain
# compute material response for estimated strain state
# NOTE: the state variables are taken at t_n
NextStress, NextState, Cep = BSRadialMap(dStrain, StaticParam, CurStress, CurState)
#print('NextStress:',NextStress)
#print('Stress0:',NextState.Stress0)
# check for equilibrium
RR = stress[:,i].reshape(6,1) - NextStress
RR = RR.reshape(6,1)
kk = kk + 1
                # emergency exit if the procedure does not converge
if kk > 3:
print('procedure slow to converge. Error : ', normS( RR )/RR0)
if kk > 20:
print('procedure did not converge. Error : ', normS( RR )/RR0)
print('YOUR TANGENT Cep IS WRONG', normS( RR )/RR0)
break
Iter[i] = kk
CurStress = NextStress
CurState = NextState
# Update State variables for next step
CurStress = NextStress
CurStrain = NextStrain
CurState = NextState
# Update variables for plotting purposes
strain = np.append(strain, CurStrain, 1)
alphaISO_plot.append(CurState.alphaISO)
j2_plot.append(GetJ2(CurStress))
stress_var_plot.append(np.sqrt(2*j2_plot[i])*np.sqrt(3/2)*np.sign(stress[3,i]))
Stress0 = np.append(Stress0, CurState.Stress0, 1)
DriverOutput = namedtuple('DriverOutput',['StaticParam','time','strain','stress','alphaISO','j2','stress_var','stress_var2', 'Stress0','Iter'])
DriverOutput = DriverOutput(StaticParam, time, strain, stress, alphaISO_plot, j2_plot, stress_var_plot, stress_var2_plot, Stress0, Iter)
return DriverOutput
# =========================================================================
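# Illustrative usage sketch (commented out; assumes the companion routines GetCe,
# BSRadialMap, GetJ2 and normS are defined elsewhere in the notebook, as BSDriver expects):
#
#   import matplotlib.pyplot as plt
#   out = BSDriver(2)                              # cyclic, strain-driven loading
#   plt.plot(out.strain[3, :], out.stress[3, :])   # shear stress-strain hysteresis loop
#   plt.show()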
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
from pandapower.control.controller.trafo_control import TrafoController
class ContinuousTapControl(TrafoController):
"""
Trafo Controller with local tap changer voltage control.
INPUT:
**net** (attrdict) - Pandapower struct
**tid** (int) - ID of the trafo that is controlled
        **vm_set_pu** (float) - OLTC target voltage magnitude at the controlled bus in pu
OPTIONAL:
        **tol** (float, 0.001) - Voltage tolerance band at the controlled bus in per unit (default: 0.001 pu = 0.1 %)
**side** (string, "lv") - Side of the transformer where the voltage is controlled
**trafo_type** (float, "2W") - Trafo type ("2W" or "3W")
**in_service** (bool, True) - Indicates if the controller is currently in_service
**check_tap_bounds** (bool, True) - In case of true the tap_bounds will be considered
**drop_same_existing_ctrl** (bool, False) - Indicates if already existing controllers of the same type and with the same matching parameters (e.g. at same element) should be dropped
"""
def __init__(self, net, tid, vm_set_pu, tol=1e-3, side="lv", trafotype="2W", in_service=True,
check_tap_bounds=True, level=0, order=0, drop_same_existing_ctrl=False,
matching_params=None, **kwargs):
if matching_params is None:
matching_params = {"tid": tid, 'trafotype': trafotype}
super().__init__(net, tid=tid, side=side, tol=tol, in_service=in_service,
trafotype=trafotype, level=level, order=order,
drop_same_existing_ctrl=drop_same_existing_ctrl,
matching_params=matching_params, **kwargs)
t = net[self.trafotable]
b = net.bus
if trafotype == "2W":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "lv":
self.t_nom = t.at[tid, "vn_lv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "lv_bus"], "vn_kv"]
elif side == "mv":
self.t_nom = t.at[tid, "vn_mv_kv"] / t.at[tid, "vn_hv_kv"] * \
b.at[net[self.trafotable].at[tid, "hv_bus"], "vn_kv"] / \
b.at[net[self.trafotable].at[tid, "mv_bus"], "vn_kv"]
self.check_tap_bounds = check_tap_bounds
self.vm_set_pu = vm_set_pu
self.trafotype = trafotype
self.tol = tol
def control_step(self, net):
"""
Implements one step of the ContinuousTapControl
"""
delta_vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"] - self.vm_set_pu
tc = delta_vm_pu / self.tap_step_percent * 100 / self.t_nom
self.tap_pos += tc * self.tap_side_coeff * self.tap_sign
if self.check_tap_bounds:
self.tap_pos = np.clip(self.tap_pos, self.tap_min, self.tap_max)
# WRITE TO NET
if net[self.trafotable].tap_pos.dtype != "float":
net[self.trafotable].tap_pos = net[self.trafotable].tap_pos.astype(float)
net[self.trafotable].at[self.tid, "tap_pos"] = self.tap_pos
def is_converged(self, net):
"""
        The ContinuousTapControl has converged when the relative deviation of the controlled bus
        voltage from the set point is smaller than the tolerance (tol), or when a tap limit is reached.
"""
if not net[self.trafotable].at[self.tid, 'in_service']:
return True
vm_pu = net.res_bus.at[self.controlled_bus, "vm_pu"]
self.tap_pos = net[self.trafotable].at[self.tid, 'tap_pos']
difference = 1 - self.vm_set_pu / vm_pu
if self.check_tap_bounds:
if self.tap_side_coeff * self.tap_sign == 1:
if vm_pu < self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu > self.vm_set_pu and self.tap_pos == self.tap_max:
return True
elif self.tap_side_coeff * self.tap_sign == -1:
if vm_pu > self.vm_set_pu and self.tap_pos == self.tap_min:
return True
elif vm_pu < self.vm_set_pu and self.tap_pos == self.tap_max:
return True
return abs(difference) < self.tol
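# Illustrative usage sketch (commented out; assumes a pandapower net that contains a
# 2-winding transformer with tap changer data, e.g. one of the pandapower.networks example grids):
#
#   import pandapower as pp
#   import pandapower.networks as pn
#   net = pn.simple_mv_open_ring_net()
#   ContinuousTapControl(net, tid=0, vm_set_pu=1.02)   # registers itself with the net
#   pp.runpp(net, run_control=True)                    # tap_pos is adjusted until converged
#   print(net.trafo.tap_pos)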
|
nilq/baby-python
|
python
|
import os
import pathlib
import shlex
import shutil
import subprocess
import sys
import unittest
from vtam.utils import pip_install_vtam_for_tests
from vtam.utils.PathManager import PathManager
class TestCommandExample(unittest.TestCase):
"""Will test main commands based on a complete test dataset"""
def setUp(self):
pip_install_vtam_for_tests()
self.test_path = PathManager.get_test_path()
self.outdir_path = os.path.join(self.test_path, 'outdir')
pathlib.Path(self.outdir_path).mkdir(exist_ok=True, parents=True)
def test_command_example(self):
cmd = "vtam example"
if sys.platform.startswith("win"):
args = cmd
else:
args = shlex.split(cmd)
result = subprocess.run(args=args, check=True, cwd=self.outdir_path)
self.assertEqual(result.returncode, 0)
def tearDown(self):
shutil.rmtree(self.outdir_path, ignore_errors=True)
|
nilq/baby-python
|
python
|
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.button import MDRectangleFlatButton, MDFlatButton
from kivy.lang import Builder
from kivymd.uix.dialog import MDDialog
import helpers
class DemoAPP(MDApp):
def build(self):
self.theme_cls.primary_palette = "Green"
scrn = Screen()
self.entr1 = Builder.load_string(helpers.HelpX)
btn = MDRectangleFlatButton(text='Show',
pos_hint={'center_x': 0.5, 'center_y': 0.5},
on_press =self.funct1,on_release =self.funct2)
scrn.add_widget(self.entr1 )
scrn.add_widget(btn)
return scrn
def funct1(self,obj):
print("On Press")
def funct2(self,obj):
print("Released")
self.dialog = MDDialog(title='Username check',
text="Please enter your Password", size_hint=(0.8, 1))
self.dialog.open()
DemoAPP().run()
|
nilq/baby-python
|
python
|
import csv
import logging
import os
from dataclasses import asdict
from typing import List, TypeVar
from analysis.src.python.data_collection.api.platform_objects import Object
class CsvWriter:
def __init__(self, result_dir: str, csv_file: str, field_names: List[str]):
os.makedirs(result_dir, exist_ok=True)
self.csv_path = os.path.join(result_dir, csv_file)
self.fieldnames = field_names
with open(self.csv_path, 'w+', newline='') as f:
writer = csv.DictWriter(f, fieldnames=field_names)
writer.writeheader()
def write_csv(self, data: dict):
with open(self.csv_path, 'a+', newline='', encoding='utf8') as f:
writer = csv.DictWriter(f, fieldnames=self.fieldnames)
writer.writerow({k: data[k] for k in self.fieldnames})
T = TypeVar('T', bound=Object)
def save_objects_to_csv(output_path: str, objects: List[T], obj_class: str):
if len(objects) == 0:
return
if not os.path.exists(output_path):
os.makedirs(output_path)
logging.info(f'Writing {len(objects)} of type {type(objects[0])} to csv: {output_path}/{obj_class}s.csv')
csv_writer = CsvWriter(output_path, f'{obj_class}s.csv', list(type(objects[0]).__annotations__.keys()))
for obj in objects:
csv_writer.write_csv(asdict(obj))
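# Minimal usage sketch (illustrative, not part of the original module): CsvWriter
# writes the header once and appends one row per call, keeping only the declared fields.
if __name__ == "__main__":
    writer = CsvWriter("example_out", "users.csv", ["id", "name"])
    writer.write_csv({"id": 1, "name": "Alice", "extra": "ignored"})
    writer.write_csv({"id": 2, "name": "Bob"})
    # example_out/users.csv now contains: id,name / 1,Alice / 2,Bob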
|
nilq/baby-python
|
python
|
from src.base.test_cases import TestCases
class PartEqualSubSumTestCases(TestCases):
def __init__(self):
super(PartEqualSubSumTestCases, self).__init__()
self.__add_test_case__("Example 1", ([1, 5, 11, 5]), (True))
self.__add_test_case__("Example 2", ([1, 2, 3, 5]), (False))
self.__add_test_case__("Test 3", ([100]), (False))
self.__add_test_case__("Test 4", ([2]), (False))
|
nilq/baby-python
|
python
|
PALAVRA = "EXEMPLO"
print(PALAVRA[0])
print(PALAVRA[2:5])
print(len(PALAVRA))
nova = PALAVRA + "s!!"
print(nova)
outra = "Novos" + nova
print(outra)
for i in range(len(PALAVRA)):
print(PALAVRA[i])
lista = [2,4,7]
lista [1] = 9
print(lista)
print("O" in PALAVRA)
print("o" in PALAVRA)
# I can also write:
print("O" in PALAVRA.lower())
palavra = PALAVRA.lower()
print(palavra)
|
nilq/baby-python
|
python
|
from discordbot.minigamesbot import MiniGamesBot
from discordbot.utils.private import DISCORD
bot = MiniGamesBot("?")
bot.run(DISCORD["TOKEN"])
|
nilq/baby-python
|
python
|
import numpy as np
import mistree as mist
def test_PlotHistMST_rotate():
pmst = mist.PlotHistMST()
pmst._get_rotate_colors()
assert pmst.rotate_colors == 1
pmst._get_rotate_linestyles()
assert pmst.rotate_linestyle == 1
def test_PlotHistMST_plot():
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
hmst = mist.HistMST()
hmst.setup()
mst_dict = hmst.get_hist(d, l, b, s)
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(plt_output='close')
def test_PlotHistMST_plot_usebox():
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
hmst = mist.HistMST()
hmst.setup()
mst_dict = hmst.get_hist(d, l, b, s)
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(usebox=False, plt_output='close')
def test_PlotHistMST_plot_contour_extra_option():
hmst = mist.HistMST()
hmst.setup()
hmst.start_group()
for i in range(0, 10):
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
mst_dict = hmst.get_hist(d, l, b, s)
mst_dict = hmst.end_group()
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(usebox=False, plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(xlabels=['a', 'b', 'c', 'd'], plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(usefraction=True, plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(usefraction=True, usemean=False, plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(mst_dict)
pmst.plot(units=r'^{\circ}', plt_output='close')
def test_PlotHistMST_plot_comparison():
x_lf, y_lf, z_lf = mist.get_levy_flight(5000)
x_alf, y_alf, z_alf = mist.get_adjusted_levy_flight(5000)
mst = mist.GetMST(x=x_lf, y=y_lf, z=z_lf)
d_lf, l_lf, b_lf, s_lf = mst.get_stats()
mst = mist.GetMST(x=x_alf, y=y_alf, z=z_alf)
d_alf, l_alf, b_alf, s_alf = mst.get_stats()
hmst = mist.HistMST()
hmst.setup(uselog=True)
hist_lf = hmst.get_hist(d_lf, l_lf, b_lf, s_lf)
x_alf, y_alf, z_alf = mist.get_adjusted_levy_flight(5000)
mst = mist.GetMST(x=x_alf, y=y_alf, z=z_alf)
d_alf, l_alf, b_alf, s_alf = mst.get_stats()
hist_alf = hmst.get_hist(d_alf, l_alf, b_alf, s_alf)
pmst = mist.PlotHistMST()
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf, label='Adjusted Levy Flight')
pmst.plot(usecomp=True, plt_output='close')
pmst = mist.PlotHistMST()
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf, label='Adjusted Levy Flight')
pmst.plot(usebox=False, usecomp=True, plt_output='close')
def test_PlotHistMST_plot_comparison_envelope():
x_lf, y_lf, z_lf = mist.get_levy_flight(5000)
mst = mist.GetMST(x=x_lf, y=y_lf, z=z_lf)
d_lf, l_lf, b_lf, s_lf = mst.get_stats()
hmst = mist.HistMST()
hmst.setup(uselog=True)
hist_lf = hmst.get_hist(d_lf, l_lf, b_lf, s_lf)
hmst.start_group()
for i in range(0, 10):
x_alf, y_alf, z_alf = mist.get_adjusted_levy_flight(5000)
mst = mist.GetMST(x=x_alf, y=y_alf, z=z_alf)
d_alf, l_alf, b_alf, s_alf = mst.get_stats()
_hist_alf = hmst.get_hist(d_alf, l_alf, b_alf, s_alf)
hist_alf_group = hmst.end_group()
pmst = mist.PlotHistMST()
pmst.read_mst(hist_lf, label='Levy Flight')
pmst.read_mst(hist_alf_group, label='Adjusted Levy Flight')
pmst.plot(usecomp=True, plt_output='close')
def test_PlotHistMST_read_mst():
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
hmst = mist.HistMST()
hmst.setup()
mst_hist = hmst.get_hist(d, l, b, s)
pmst = mist.PlotHistMST()
pmst.read_mst(mst_hist, color='dodgerblue', linewidth=1., linestyle=':', alpha=0.5,
label='check', alpha_envelope=0.5)
assert pmst.colors[0] == 'dodgerblue'
assert pmst.linewidths[0] == 1.
assert pmst.linestyles[0] == ':'
assert pmst.alphas[0] == 0.5
assert pmst.labels[0] == 'check'
assert pmst.alphas_envelope[0] == 0.5
assert pmst.need_envelopes[0] == False
assert pmst.use_sqrt_s == True
def test_PlotHistMST_read_mst_uselog():
x = np.random.random_sample(100)
y = np.random.random_sample(100)
mst = mist.GetMST(x=x, y=y)
d, l, b, s = mst.get_stats()
hmst = mist.HistMST()
hmst.setup(uselog=True)
mst_hist = hmst.get_hist(d, l, b, s)
pmst = mist.PlotHistMST()
pmst.read_mst(mst_hist, color='dodgerblue', linewidth=1., linestyle=':', alpha=0.5,
label='check', alpha_envelope=0.5)
assert pmst.colors[0] == 'dodgerblue'
assert pmst.linewidths[0] == 1.
assert pmst.linestyles[0] == ':'
assert pmst.alphas[0] == 0.5
assert pmst.labels[0] == 'check'
assert pmst.alphas_envelope[0] == 0.5
assert pmst.need_envelopes[0] == False
assert pmst.use_sqrt_s == True
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
import numpy as np
###################################################### see old version at the end (easier to understand, but less generalized and not using pca) ########################################################################
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
currents = np.load(join(pathToSimFolder, "currents.npy"))
currentsUncert = np.load(join(pathToSimFolder, "currentsUncert.npy"))
min_ = np.min(currents, axis=(1, 2))
max_ = np.max(currents, axis=(1, 2))
normedCurrents = ((currents.T - min_) / (max_ - min_)).T
samples = currents.shape[0]
print("samples: ", samples)
minFitness = 0.8
bins = 200
fitnessBins = 100
D = np.array(
[
currents[:, 0, 0] - currents[:, 0, 1],
currents[:, 0, 0] - currents[:, 1, 0],
currents[:, 0, 0] - currents[:, 1, 1],
]
)
N = D.shape[0]
C = np.cov(D)
M = np.linalg.eig(C)[1].T
# M = np.array([[1,-1,0],[0,0,-1],[0.5,0.5,-0.5]]) #<<<<<-------- define transformation matrix here and ignore PCA
###print new coordinates
for i in range(3):
print(
f"D{i+1} = {M[i,0]+M[i,1]+M[i,2]:5.2f} I_00 + {-M[i,0]:5.2f} I_01 + {-M[i,1]:5.2f} I_10 + {-M[i,2]:5.2f} I_11"
)
print("transformation matrix:\n", M)
Delta = M @ D
print("corrcoef matrix:\n", np.corrcoef(Delta))
# ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Delta_bins, Delta_counts = [], []
for i in range(N):
Delta_counts_, Delta_bins_ = np.histogram(Delta[i], bins=bins, density=True)
Delta_bins.append(Delta_bins_)
Delta_counts.append(Delta_counts_)
np.save(join(pathToSimFolder, f"Delta{i}_bins.npy"), Delta_bins_)
np.save(join(pathToSimFolder, f"Delta{i}_counts.npy"), Delta_counts_)
Delta_mesh = np.array(
np.meshgrid(
*[(Delta_bins[i][1:] + Delta_bins[i][:-1]) / 2 for i in range(N)], indexing="ij"
)
)
probabilities = np.array(
np.meshgrid(
*[(Delta_bins[i][1:] - Delta_bins[i][:-1]) * Delta_counts[i] for i in range(N)],
indexing="ij",
)
)
totProbab = np.prod(probabilities, axis=0)
D_mesh = np.einsum("ji...,i...->j...", np.linalg.inv(M), Delta_mesh)
diffs = np.zeros((bins, bins, bins, 2, 2, 2, 2))
diffs[:, :, :, 0, 0, 0, 1] = D_mesh[0]
diffs[:, :, :, 0, 0, 1, 0] = D_mesh[1]
diffs[:, :, :, 0, 0, 1, 1] = D_mesh[2]
diffs[:, :, :, 0, 1, 1, 0] = D_mesh[1] - D_mesh[0]
diffs[:, :, :, 0, 1, 1, 1] = D_mesh[2] - D_mesh[0]
diffs[:, :, :, 1, 0, 1, 1] = D_mesh[2] - D_mesh[1]
diffs[:, :, :, 0, 1, 0, 0] = -diffs[:, :, :, 0, 0, 0, 1]
diffs[:, :, :, 1, 0, 0, 0] = -diffs[:, :, :, 0, 0, 1, 0]
diffs[:, :, :, 1, 1, 0, 0] = -diffs[:, :, :, 0, 0, 1, 1]
diffs[:, :, :, 1, 0, 0, 1] = -diffs[:, :, :, 0, 1, 1, 0]
diffs[:, :, :, 1, 1, 0, 1] = -diffs[:, :, :, 0, 1, 1, 1]
diffs[:, :, :, 1, 1, 1, 0] = -diffs[:, :, :, 1, 0, 1, 1]
max_min = np.max(np.abs(diffs), axis=(3, 4, 5, 6))
I_normed = np.zeros((bins, bins, bins, 2, 2))
I_normed[:, :, :, 0, 0] = np.max(diffs[:, :, :, 0, 0, :, :], axis=(3, 4)) / max_min
I_normed[:, :, :, 0, 1] = np.max(diffs[:, :, :, 0, 1, :, :], axis=(3, 4)) / max_min
I_normed[:, :, :, 1, 0] = np.max(diffs[:, :, :, 1, 0, :, :], axis=(3, 4)) / max_min
I_normed[:, :, :, 1, 1] = np.max(diffs[:, :, :, 1, 1, :, :], axis=(3, 4)) / max_min
def getFitness(gate, normed_currents):
def gateFunc(in1, in2):
if gate == "AND":
return in1 & in2
if gate == "NAND":
return not (in1 & in2)
if gate == "OR":
return in1 | in2
if gate == "NOR":
return not (in1 | in2)
if gate == "XOR":
return in1 ^ in2
if gate == "XNOR":
return not (in1 ^ in2)
if len(normed_currents.shape) == 5:
return 1 - 0.25 * np.sum(
[
abs(
gateFunc(int(i / 2), i % 2)
- normed_currents[:, :, :, int(i / 2), i % 2]
)
for i in range(4)
],
axis=0,
)
elif len(normed_currents.shape) == 3:
return 1 - 0.25 * np.sum(
[
abs(gateFunc(int(i / 2), i % 2) - normed_currents[:, int(i / 2), i % 2])
for i in range(4)
],
axis=0,
)
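# Illustrative sanity check (commented out; uses only the definitions above): a response
# that exactly matches the XOR truth table has fitness 1 under getFitness.
#
#   perfect_xor = np.array([[[0.0, 1.0], [1.0, 0.0]]])   # shape (1, 2, 2): rows I_00,I_01 / I_10,I_11
#   print(getFitness("XOR", perfect_xor))                 # -> [1.]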
############################### delta distr ###############################
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
for i in range(N):
ax.hist(
Delta_bins[i][:-1],
Delta_bins[i],
weights=Delta_counts[i],
color=color(i, N),
histtype="step",
label=rf"$\scriptsize \Delta_{{ {i+1} }}$",
)
ax.set_xlabel(r"$\Delta_{i}$")
ax.set_ylabel(r"$P(\Delta_{i})$")
ax.legend()
ax.set_xlim(-0.02, 0.065)
plt.savefig(join(pathToSimFolder, f"deltaDistr.png"), bbox_inches="tight", dpi=300)
# plt.show()
plt.close(fig)
############################### fitness distr ###############################
gates = ["AND", "NAND", "OR", "NOR", "XOR", "XNOR"]
# gates = ["XOR"]
for gate in gates:
estimatedFitness = getFitness(gate, I_normed)
realFitness = getFitness(gate, normedCurrents)
    hitIndices = np.where(estimatedFitness > minFitness)
print(
f"{gate}: {np.sum(totProbab[hitIndices]):%} {np.sum(realFitness>minFitness)/samples:%}"
)
fitness_counts, fitness_bins = np.histogram(
estimatedFitness.flatten(),
weights=totProbab.flatten(),
bins=fitnessBins,
density=True,
)
realFitness_counts, realFitness_bins = np.histogram(
realFitness, bins=fitnessBins, density=True
)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.hist(
fitness_bins[:-1],
fitness_bins,
weights=fitness_counts,
color=color(0, 2),
histtype="step",
label=r"estimated",
)
ax.hist(
realFitness_bins[:-1],
realFitness_bins,
weights=realFitness_counts,
color=color(1, 2),
histtype="step",
label=r"real",
)
# ax.set_xlim(0.4,1)
# ax.set_ylim(0,1)
ax.set_xlabel(r"$f$")
ax.set_ylabel(r"$P(f)$")
ax.legend(loc="best")
plt.savefig(
join(pathToSimFolder, f"fitnessDistr_{gate}_pca.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.hist(
fitness_bins[:-1],
fitness_bins,
weights=fitness_counts,
color=color(0, 2),
histtype="step",
label=r"estimated",
)
ax.hist(
realFitness_bins[:-1],
realFitness_bins,
weights=realFitness_counts,
color=color(1, 2),
histtype="step",
label=r"real",
)
# ax.set_xlim(0.4,1)
ax.set_ylim(max(ax.get_ylim()[0], 1e-3), ax.get_ylim()[1])
ax.set_xlabel(r"$f$")
ax.set_ylabel(r"$P(f)$")
ax.legend(loc="best")
ax.set_yscale("log")
plt.savefig(
join(pathToSimFolder, f"fitnessDistr_{gate}_pca_log.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
### P(f>f_min)
f = np.linspace(0, 1, 200)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.plot(
f,
[np.sum(totProbab[np.where(estimatedFitness > fi)]) for fi in f],
color=color(0, 2),
label=r"estimated",
)
ax.plot(
f,
[np.sum(realFitness > fi) / samples for fi in f],
color=color(1, 2),
label=r"real",
)
np.save(
join(pathToSimFolder, f"{gate}_estimated_integrated_raw_fitness.npy"),
[np.sum(totProbab[np.where(estimatedFitness > fi)]) for fi in f],
)
np.save(
join(pathToSimFolder, f"{gate}_real_integrated_raw_fitness.npy"),
[np.sum(realFitness > fi) / samples for fi in f],
)
# ax.set_xlim(0.4,1)
ax.set_ylim(2e-4, None)
ax.set_xlabel(r"$f_\textrm{min}$")
ax.set_ylabel(r"$P(f > f_\textrm{min})$")
ax.grid(which="both")
ax.legend(loc="best")
ax.set_yscale("log")
plt.savefig(
join(pathToSimFolder, f"P_fmin_{gate}.png"), bbox_inches="tight", dpi=300
)
# plt.show()
plt.close(fig)
############################### raw fitness vs corrected fitness ###############################
rawFitness = np.load(join(pathToSimFolder, "fitness.npy"))
correctedFitness = rawFitness - 2 * np.load(join(pathToSimFolder, "fitnessUncert.npy"))
print(f"corrected fitness prob: {np.sum(correctedFitness>minFitness)/samples:%}")
rawFitness_counts, rawFitness_bins = np.histogram(
rawFitness, range=(0, 1), bins=fitnessBins, density=True
)
correctedFitness_counts, correctedFitness_bins = np.histogram(
correctedFitness, range=(0, 1), bins=fitnessBins, density=True
)
fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
ax.hist(
rawFitness_bins[:-1],
rawFitness_bins,
weights=rawFitness_counts,
color=color(0, 2),
histtype="step",
label=r"raw",
)
ax.hist(
correctedFitness_bins[:-1],
correctedFitness_bins,
weights=correctedFitness_counts,
color=color(1, 2),
histtype="step",
label=r"corrected",
)
ax.set_xlim(0, 1)
# ax.set_ylim(max(ax.get_ylim()[0],1e-3),ax.get_ylim()[1])
ax.set_xlabel(r"$f$")
ax.set_ylabel(r"$P(f)$")
ax.legend(loc="best")
ax.set_yscale("log")
plt.savefig(
join(pathToSimFolder, f"rawFitnes_vs_correctedFitness.png"),
bbox_inches="tight",
dpi=300,
)
# plt.show()
plt.close(fig)
###################################################### old version starts here ########################################################################
"""
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
import numpy as np
if len(argv)>1:
pathToSimFolder=argv[1]
else:
pathToSimFolder="../data/"
controlFitness = np.load(join(pathToSimFolder,"fitness.npy" ))
currents = np.load(join(pathToSimFolder,"currents.npy" ))
currentsUncert = np.load(join(pathToSimFolder,"currentsUncert.npy"))
N = currents.shape[0]
minFitness = 0.8
relUncertThres = 10000
bins = 50
print("################ XOR ################")
D_10_01 = currents[:,1,0]-currents[:,0,1]
D_11_00 = currents[:,1,1]-currents[:,0,0]
D_DIFF = (currents[:,1,1]+currents[:,0,0]-currents[:,0,1]-currents[:,1,0])/2
#relative uncerttainty
print("meanUncert D_10_01: ",np.mean(np.sqrt(currentsUncert[:,1,0]**2 + currentsUncert[:,0,1]**2)))
print("meanUncert D_11_00: ",np.mean(np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2)))
print("meanUncert D_DIFF: ",np.mean(0.5 * np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2 + currentsUncert[:,0,1]**2 + currentsUncert[:,1,0]**2)))
u_D_10_01 = np.sqrt(currentsUncert[:,1,0]**2 + currentsUncert[:,0,1]**2) / D_10_01
u_D_11_00 = np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2) / D_11_00
u_D_DIFF = 0.5 * np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2 + currentsUncert[:,0,1]**2 + currentsUncert[:,1,0]**2) / D_DIFF
valid = np.where(np.logical_and(np.logical_and(u_D_11_00 < relUncertThres, u_D_10_01 < relUncertThres), u_D_DIFF < relUncertThres) )
D_10_01 = D_10_01[valid]
D_11_00 = D_11_00[valid]
D_DIFF = D_DIFF [valid]
n=D_11_00.shape[0]
print(f"{n} == {n/N:.2%} valid")
r = np.corrcoef(D_10_01,D_11_00)[0,1]
print(f"D_10_01 vs D_11_00: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
r = np.corrcoef(D_10_01,D_DIFF)[0,1]
print(f"D_10_01 vs D_DIFF: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
r = np.corrcoef(D_11_00,D_DIFF)[0,1]
print(f"D_11_00 vs D_DIFF: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
# r = np.corrcoef(currents[:,1,1],currents[:,0,0])[0,1]
# print(f"currents[:,1,1] vs currents[:,0,0]: r = {r}, T(r) = {r * np.sqrt(N-2)/np.sqrt(1-r**2)}")
D0_counts, D0_bins = np.histogram(D_10_01, bins = bins, density = True)
D1_counts, D1_bins = np.histogram(D_11_00, bins = bins, density = True)
Dd_counts, Dd_bins = np.histogram(D_DIFF , bins = bins, density = True)
D0, D1, Dd = np.meshgrid((D0_bins[1:]+D0_bins[:-1])/2,
(D1_bins[1:]+D1_bins[:-1])/2,
(Dd_bins[1:]+Dd_bins[:-1])/2, indexing = "ij")
diffs = np.zeros((bins,bins,bins,2,2,2,2))
diffs[:,:,:,0,0,0,1] = Dd+(+D0-D1)/2
diffs[:,:,:,0,0,1,0] = Dd+(-D0-D1)/2
diffs[:,:,:,1,1,0,1] = Dd+(+D0+D1)/2
diffs[:,:,:,1,1,1,0] = Dd+(-D0+D1)/2
diffs[:,:,:,1,0,0,1] = D0
diffs[:,:,:,1,1,0,0] = D1
diffs[:,:,:,0,1,0,0] = -diffs[:,:,:,0,0,0,1]
diffs[:,:,:,1,0,0,0] = -diffs[:,:,:,0,0,1,0]
diffs[:,:,:,0,1,1,1] = -diffs[:,:,:,1,1,0,1]
diffs[:,:,:,1,0,1,1] = -diffs[:,:,:,1,1,1,0]
diffs[:,:,:,0,1,1,0] = -diffs[:,:,:,1,0,0,1]
diffs[:,:,:,0,0,1,1] = -diffs[:,:,:,1,1,0,0]
Delta = np.max(np.abs(diffs),axis=(3,4,5,6))
I_normed = np.zeros((bins,bins,bins,2,2))
I_normed[:,:,:,0,0] = np.max(diffs[:,:,:,0,0,:,:],axis=(3,4))/Delta
I_normed[:,:,:,0,1] = np.max(diffs[:,:,:,0,1,:,:],axis=(3,4))/Delta
I_normed[:,:,:,1,0] = np.max(diffs[:,:,:,1,0,:,:],axis=(3,4))/Delta
I_normed[:,:,:,1,1] = np.max(diffs[:,:,:,1,1,:,:],axis=(3,4))/Delta
fitness = 1-(I_normed[:,:,:,0,0] + 1-I_normed[:,:,:,0,1] + 1-I_normed[:,:,:,1,0] + I_normed[:,:,:,1,1])/4 #XOR
#calc probabilities
prob0, prob1, probd = np.meshgrid((D0_bins[1:]-D0_bins[:-1]) * D0_counts,
(D1_bins[1:]-D1_bins[:-1]) * D1_counts,
(Dd_bins[1:]-Dd_bins[:-1]) * Dd_counts, indexing = "ij")
totProb = prob0 * prob1 * probd
indices = np.where(fitness>minFitness)
#for i in range(indices[0].shape[0]):
# print(D0[indices[0][i],indices[1][i],indices[2][i]],D1[indices[0][i],indices[1][i],indices[2][i]],Dd[indices[0][i],indices[1][i],indices[2][i]])
print("prob:",np.sum(totProb[indices]))
print("newMean:",np.sum(totProb[indices]*fitness[indices])/np.sum(totProb[indices]))
fitnessBins = 50
fitness_counts , fitness_bins = np.histogram(fitness.flatten(), weights = totProb.flatten(), bins = fitnessBins, density = True)
realFitness_counts, realFitness_bins = np.histogram(controlFitness, bins = fitnessBins, density = True)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
ax.hist(fitness_bins[:-1] , fitness_bins , weights=fitness_counts , color = color(0,2), histtype = "step", label = r"estimated")
ax.hist(realFitness_bins[:-1], realFitness_bins, weights=realFitness_counts, color = color(1,2), histtype = "step", label = r"real")
# ax.set_xlim(0.4,1)
# ax.set_ylim(0.4,1)
ax.set_xlabel(r"$\mathcal{F}$")
ax.set_ylabel(r"$P(\mathcal{F})$")
ax.legend()
plt.savefig(join(pathToSimFolder,f"fitnessDistr_XOR.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
# im = ax.imshow(np.mean(fitness, axis = 2), cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
im = ax.imshow(fitness[:,:,2] , cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
# im = ax.imshow(D1[:,:,5] , cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
# ax.set_xlim(0.4,1)
# ax.set_ylim(0.4,1)
ax.set_xlabel(r"$\scriptsize \Delta_{2}^\textrm{X}$")
ax.set_ylabel(r"$\scriptsize \Delta_{1}^\textrm{X}$")
plt.colorbar(im)
plt.savefig(join(pathToSimFolder,f"mapXOR.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
ax.hist(D0_bins[:-1], D0_bins, weights=D0_counts, color = color(0,3), histtype = "step", label = r"$\scriptsize \Delta_{1}^\textrm{X}$")
ax.hist(D1_bins[:-1], D1_bins, weights=D1_counts, color = color(1,3), histtype = "step", label = r"$\scriptsize \Delta_{2}^\textrm{X}$")
ax.hist(Dd_bins[:-1], Dd_bins, weights=Dd_counts, color = color(2,3), histtype = "step", label = r"$\scriptsize \Delta_{3}^\textrm{X}$")
np.save(join(pathToSimFolder,"XOR_D0_bins.npy" ),D0_bins)
np.save(join(pathToSimFolder,"XOR_D0_counts.npy"),D0_counts)
np.save(join(pathToSimFolder,"XOR_D1_bins.npy" ),D1_bins)
np.save(join(pathToSimFolder,"XOR_D1_counts.npy"),D1_counts)
np.save(join(pathToSimFolder,"XOR_Dd_bins.npy" ),Dd_bins)
np.save(join(pathToSimFolder,"XOR_Dd_counts.npy"),Dd_counts)
ax.set_xlabel(r"$\Delta_{i}$")
ax.set_ylabel(r"$P(\Delta_{i})$")
ax.legend()
plt.savefig(join(pathToSimFolder,f"distXOR.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
print("################ AND ################")
D_00_01 = currents[:,0,0]-currents[:,0,1]
D_01_10 = currents[:,0,1]-currents[:,1,0]
D_DIFF = currents[:,1,1] -(currents[:,0,0]+currents[:,0,1]+currents[:,1,0])/3
#relative uncertainty
u_D_00_01 = np.sqrt(currentsUncert[:,0,0]**2 + currentsUncert[:,0,1]**2) / D_00_01
u_D_01_10 = np.sqrt(currentsUncert[:,0,1]**2 + currentsUncert[:,1,0]**2) / D_01_10
u_D_DIFF = np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2/9 + currentsUncert[:,0,1]**2/9 + currentsUncert[:,1,0]**2/9) / D_DIFF
print("meanUncert D_00_01: ",np.mean(np.sqrt(currentsUncert[:,0,0]**2 + currentsUncert[:,0,1]**2)))
print("meanUncert D_01_10: ",np.mean(np.sqrt(currentsUncert[:,0,1]**2 + currentsUncert[:,1,0]**2)))
print("meanUncert D_DIFF: " ,np.mean(np.sqrt(currentsUncert[:,1,1]**2 + currentsUncert[:,0,0]**2/9 + currentsUncert[:,0,1]**2/9 + currentsUncert[:,1,0]**2/9)))
valid = np.where(np.logical_and(np.logical_and(u_D_00_01 < relUncertThres, u_D_01_10 < relUncertThres), u_D_DIFF < relUncertThres) )
D_00_01 = D_00_01[valid]
D_01_10 = D_01_10[valid]
D_DIFF = D_DIFF [valid]
n=D_00_01.shape[0]
print(f"{n} == {n/N:.2%} valid")
r = np.corrcoef(D_00_01,D_01_10)[0,1]
print(f"D_00_01 vs D_01_10: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
r = np.corrcoef(D_00_01,D_DIFF)[0,1]
print(f"D_00_01 vs D_DIFF: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
r = np.corrcoef(D_01_10,D_DIFF)[0,1]
print(f"D_01_10 vs D_DIFF: r = {r}, T(r) = {r * np.sqrt(n-2)/np.sqrt(1-r**2)}")
# D0_counts, D0_bins = np.histogram(D_00_01, bins = bins, density = True)
# D1_counts, D1_bins = np.histogram(D_01_10, bins = bins, density = True)
# Dd_counts, Dd_bins = np.histogram(D_DIFF , bins = bins, density = True)
# D0, D1, Dd = np.meshgrid((D0_bins[1:]+D0_bins[:-1])/2,
# (D1_bins[1:]+D1_bins[:-1])/2,
# (Dd_bins[1:]+Dd_bins[:-1])/2, indexing = "ij")
# diffs = np.zeros((bins,bins,bins,2,2,2,2))
# diffs[:,:,:,0,0,0,1] = D0
# diffs[:,:,:,0,0,1,0] = D0 + D1
# diffs[:,:,:,0,0,1,1] = 1/3*D1 + 2/3*D0 - Dd
# diffs[:,:,:,0,1,1,0] = D1
# diffs[:,:,:,0,1,1,1] = 1/3*D1 - 1/3*D0 - Dd
# diffs[:,:,:,1,0,1,1] =-2/3*D1 - 1/3*D0 - Dd
# diffs[:,:,:,0,1,0,0] = -diffs[:,:,:,0,0,0,1]
# diffs[:,:,:,1,0,0,0] = -diffs[:,:,:,0,0,1,0]
# diffs[:,:,:,1,1,0,0] = -diffs[:,:,:,0,0,1,1]
# diffs[:,:,:,1,0,0,1] = -diffs[:,:,:,0,1,1,0]
# diffs[:,:,:,1,1,0,1] = -diffs[:,:,:,0,1,1,1]
# diffs[:,:,:,1,1,1,0] = -diffs[:,:,:,1,0,1,1]
# Delta = np.max(np.abs(diffs),axis=(3,4,5,6))
# I_normed = np.zeros((bins,bins,bins,2,2))
# I_normed[:,:,:,0,0] = np.max(diffs[:,:,:,0,0,:,:],axis=(3,4))/Delta
# I_normed[:,:,:,0,1] = np.max(diffs[:,:,:,0,1,:,:],axis=(3,4))/Delta
# I_normed[:,:,:,1,0] = np.max(diffs[:,:,:,1,0,:,:],axis=(3,4))/Delta
# I_normed[:,:,:,1,1] = np.max(diffs[:,:,:,1,1,:,:],axis=(3,4))/Delta
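# NOTE: the AND-specific recomputation above is commented out, so the fitness below reuses
# the binned D0/D1/Dd histograms and I_normed values from the XOR block.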
fitness = 1-(I_normed[:,:,:,0,0] + I_normed[:,:,:,0,1] + I_normed[:,:,:,1,0] + 1-I_normed[:,:,:,1,1])/4 #AND
#calc probabilities
prob0, prob1, probd = np.meshgrid((D0_bins[1:]-D0_bins[:-1]) * D0_counts,
(D1_bins[1:]-D1_bins[:-1]) * D1_counts,
(Dd_bins[1:]-Dd_bins[:-1]) * Dd_counts, indexing = "ij")
totProb = prob0 * prob1 * probd
indices = np.where(fitness>minFitness)
#for i in range(indices[0].shape[0]):
# print(D0[indices[0][i],indices[1][i],indices[2][i]],D1[indices[0][i],indices[1][i],indices[2][i]],Dd[indices[0][i],indices[1][i],indices[2][i]])
print("prob:",np.sum(totProb[indices]))
print("newMean:",np.sum(totProb[indices]*fitness[indices])/np.sum(totProb[indices]))
min_ = np.min(currents, axis = (1,2))
max_ = np.max(currents, axis = (1,2))
controlFitnessAND = 1 - 0.25 * ((currents[:,0,0]-min_)/(max_-min_) + (currents[:,1,0]-min_)/(max_-min_) + (currents[:,0,1]-min_)/(max_-min_) + 1 - (currents[:,1,1]-min_)/(max_-min_) )
fitnessBins = 50
fitness_counts , fitness_bins = np.histogram(fitness.flatten(), weights = totProb.flatten(), bins = fitnessBins, density = True)
realFitness_counts, realFitness_bins = np.histogram(controlFitnessAND, bins = fitnessBins, density = True)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
ax.hist(fitness_bins[:-1] , fitness_bins , weights=fitness_counts , color = color(0,2), histtype = "step", label = r"estimated")
ax.hist(realFitness_bins[:-1], realFitness_bins, weights=realFitness_counts, color = color(1,2), histtype = "step", label = r"real")
# ax.set_xlim(0.4,1)
# ax.set_ylim(0.4,1)
ax.set_xlabel(r"$\mathcal{F}$")
ax.set_ylabel(r"$P(\mathcal{F})$")
ax.legend(loc=2)
plt.savefig(join(pathToSimFolder,f"fitnessDistr_AND.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
# im = ax.imshow(np.mean(fitness, axis = 2), cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
im = ax.imshow(fitness[:,:,49] , cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
# im = ax.imshow(D1[:,:,5] , cmap = "gnuplot", extent = [ D1_bins[0], D1_bins[-1], D0_bins[-1], D0_bins[0]])
# ax.set_xlim(0.4,1)
# ax.set_ylim(0.4,1)
ax.set_xlabel(r"$\scriptsize \Delta_{2}^\textrm{A}$")
ax.set_ylabel(r"$\scriptsize \Delta_{1}^\textrm{A}$")
plt.colorbar(im)
plt.savefig(join(pathToSimFolder,f"mapAND.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
fig, ax=plt.subplots(1,1,figsize=(4.980614173228346,3.2))
ax.hist(D0_bins[:-1], D0_bins, weights=D0_counts, color = color(0,3), histtype = "step", label = r"$\scriptsize \Delta_{1}^\textrm{A}$")
ax.hist(D1_bins[:-1], D1_bins, weights=D1_counts, color = color(1,3), histtype = "step", label = r"$\scriptsize \Delta_{2}^\textrm{A}$")
ax.hist(Dd_bins[:-1], Dd_bins, weights=Dd_counts, color = color(2,3), histtype = "step", label = r"$\scriptsize \Delta_{3}^\textrm{A}$")
np.save(join(pathToSimFolder,"AND_D0_bins.npy" ),D0_bins)
np.save(join(pathToSimFolder,"AND_D0_counts.npy"),D0_counts)
np.save(join(pathToSimFolder,"AND_D1_bins.npy" ),D1_bins)
np.save(join(pathToSimFolder,"AND_D1_counts.npy"),D1_counts)
np.save(join(pathToSimFolder,"AND_Dd_bins.npy" ),Dd_bins)
np.save(join(pathToSimFolder,"AND_Dd_counts.npy"),Dd_counts)
ax.set_xlabel(r"$\Delta_{i}$")
ax.set_ylabel(r"$P(\Delta_{i})$")
ax.legend()
plt.savefig(join(pathToSimFolder,f"distAND.png"),bbox_inches="tight",dpi=300)
# plt.show()
plt.close(fig)
"""
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Contains a base class for a calltips mode.
"""
import logging
from pyqode.core.api import Mode, TextHelper
from pyqode.core.api.panel import Panel
from pyqode.qt import QtCore, QtWidgets
MAX_CALLTIP_WIDTH = 80
def _logger():
return logging.getLogger(__name__)
class CalltipsMode(Mode, QtCore.QObject):
"""A base class for calltips modes."""
tooltipDisplayRequested = QtCore.Signal(object, int)
tooltipHideRequested = QtCore.Signal()
def __init__(self):
Mode.__init__(self)
QtCore.QObject.__init__(self)
self.tooltipDisplayRequested.connect(self._display_tooltip)
self.tooltipHideRequested.connect(QtWidgets.QToolTip.hideText)
self.__requestCnt = 0
self._cached_style = None
def on_state_changed(self, state):
if state:
self.editor.key_released.connect(self._on_key_released)
else:
self.editor.key_released.disconnect(self._on_key_released)
def _on_key_released(self, event):
if (event.key() == QtCore.Qt.Key_ParenLeft or
event.key() == QtCore.Qt.Key_Comma):
tc = self.editor.textCursor()
line = tc.blockNumber()
col = tc.columnNumber()
fn = self.editor.file.path
encoding = self.editor.file.encoding
source = self.editor.toPlainText()
# jedi has a bug if the statement has a closing parenthesis
# remove it!
lines = source.splitlines()
try:
l = lines[line].rstrip()
except IndexError:
# at the beginning of the last line (empty)
return
if l.endswith(")"):
lines[line] = l[:-1]
source = "\n".join(lines)
self._request_calltip(source, line, col, fn, encoding)
elif (event.key() in [
QtCore.Qt.Key_ParenRight,
QtCore.Qt.Key_Return,
QtCore.Qt.Key_Left,
QtCore.Qt.Key_Right,
QtCore.Qt.Key_Up,
QtCore.Qt.Key_Down,
QtCore.Qt.Key_End,
QtCore.Qt.Key_Home,
QtCore.Qt.Key_PageDown,
QtCore.Qt.Key_PageUp,
QtCore.Qt.Key_Backspace, QtCore.Qt.Key_Delete]):
QtWidgets.QToolTip.hideText()
def _request_calltip(self, source, line, col, fn, encoding):
pass
def _on_results_available(self, results):
pass
def _is_last_chard_end_of_word(self):
try:
tc = TextHelper(self.editor).word_under_cursor()
tc.setPosition(tc.position())
tc.movePosition(tc.StartOfLine, tc.KeepAnchor)
l = tc.selectedText()
last_char = l[len(l) - 1]
seps = self.editor.word_separators
symbols = [",", " ", "("]
return last_char in seps and last_char not in symbols
except IndexError:
return False
def _tooltip_style(self):
if self._cached_style is not None:
return self._cached_style
pal = self.editor.palette()
qss = '''
white-space: pre;
background: {background_color};
color: {text_color};
font-family: {font_family};
font-size: {font_size};
'''.format(
background_color=pal.base().color().name(),
text_color=pal.text().color().name(),
font_family=self.editor.font_name,
font_size=self.editor.font_size,
)
try:
highlight = self.editor.syntax_highlighter.formats[
'keyword'].foreground().color().name()
except (AttributeError, KeyError):
highlight = self.editor.palette().highlightedText().color().name()
self._cached_style = qss, highlight
return self._cached_style
def _format_tooltip(self, name, params, current_param=None, doc=None):
lines = []
qss, highlight = self._tooltip_style()
html = '<div style="{}">{}('.format(qss, name)
line = '{}('.format(name)
for i, param in enumerate(params):
if len(line) + len(param) > MAX_CALLTIP_WIDTH:
lines.append(html)
html = line = ' ' * (len(name) + 1)
if i < len(params) - 1 and not param.endswith(','):
param += ", "
if param.endswith(','):
param += ' ' # pep8 calltip
if i == current_param:
html += "<span style='color:{};'>".format(highlight)
line += param
html += param
if i == current_param:
html += "</span>"
lines.append(html + ')</div>')
return '\n'.join(lines)
def _display_tooltip(self, call, col):
if not call or self._is_last_chard_end_of_word():
return
calltip = self._format_tooltip(
call['call.call_name'],
call['call.params'],
call['call.index'],
call['call.doc'],
)
rect = self.editor.cursorRect()
position = QtCore.QPoint(
(
rect.left() +
self.editor.panels.margin_size(Panel.Position.LEFT) -
(col - call['call.bracket_start'][1]) *
self.editor.fontMetrics().width('_')
),
(
rect.bottom() +
self.editor.panels.margin_size(Panel.Position.TOP)
)
)
position = self.editor.mapToGlobal(position)
QtWidgets.QToolTip.showText(position, calltip, self.editor)
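# Minimal subclass sketch (illustrative only; a real backend would query a completion
# engine such as jedi asynchronously, which is not part of this module):
# class MyCalltipsMode(CalltipsMode):
#     def _request_calltip(self, source, line, col, fn, encoding):
#         call = {'call.call_name': 'print', 'call.params': ['*args', 'sep=" "'],
#                 'call.index': 0, 'call.doc': '', 'call.bracket_start': (line, col)}
#         self.tooltipDisplayRequested.emit(call, col)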
|
nilq/baby-python
|
python
|
from .queryset import *
|
nilq/baby-python
|
python
|
import typing as t
def client_id() -> str:
"""Application (client) ID of app registration"""
return None
def client_secret() -> str:
"""Application secret"""
return None
def authority() -> str:
"""The authority URL"""
return 'https://login.microsoftonline.com/common'
def scope() -> t.Union[t.List[str], None]:
"""Requested scopes on user login"""
return None
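# Usage sketch (assumption: these hooks are read by an MSAL client elsewhere in the app):
# import msal
# app = msal.ConfidentialClientApplication(
#     client_id(), authority=authority(), client_credential=client_secret())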
|
nilq/baby-python
|
python
|
from PIL import Image
from pytesseract import pytesseract
import argparse
import xmltodict
import json
import cv2
import os
import requests
from puttext import puttext
from nltk.tokenize import sent_tokenize
import math
filename = '../upload/table1.png'
o_filename = '../upload/table2.png'
conf_data = pytesseract.run_tesseract(
filename,output_filename_base='test',lang='eng+hin',extension='xml', config='alto --oem 1')
f_hin = open("test.xml", "r")
# print(xmltodict.parse(f_hin.read()))
data = xmltodict.parse(f_hin.read())
blocks = data['alto']['Layout']['Page']['PrintSpace']['TextBlock']
for block in blocks:
textline = block['TextLine']
text = ''
height = 0
x = block['@HPOS']
y = block['@VPOS']
word_count = 0
no_lines = 0
line_height = 0
previous_position = 0
previous_position_x = 0
previous_position_y = 0
if isinstance(textline, list):
no_lines = len(textline)
print(no_lines)
# if line_height == 0:
line_height = int(block['@HEIGHT']) / len(textline)
for line in textline:
if line['String'] is not None:
words = line['String']
if height == 0:
height = line['@HEIGHT']
if isinstance(words, list):
for word in words:
if previous_position == 0:
previous_position = int(word['@HPOS']) + int(word['@WIDTH'])
previous_position_x = int(word['@HPOS'])
previous_position_y = int(word['@VPOS'])
text += word['@CONTENT'] + ' '
else:
print('diff'+str(int(word['@HPOS']) - previous_position))
if abs(int(word['@HPOS']) - previous_position) > 10 and abs(previous_position_y - int(word['@VPOS'])) < int(word['@HEIGHT']):
engarr = []
translation_text = ''
# if word_count == 0:
sent_text = sent_tokenize(text)
for sent in sent_text:
engarr.append({'src': sent, 'id': 1})
res = requests.post('http://52.40.71.62:3003/translator/translation_en', json=engarr)
dictFromServer = res.json()
if dictFromServer['response_body'] is not None:
for translation in dictFromServer['response_body']:
# print(translation)
translation_text += translation['tgt'] + ' '
puttext(int(height),previous_position_x,previous_position_y,translation_text,o_filename, len(translation_text.split(' ')), line_height)
text = word['@CONTENT']
else:
text += word['@CONTENT'] + ' '
previous_position = int(word['@HPOS']) + int(word['@WIDTH'])
previous_position_y = int(word['@VPOS'])
previous_position_x = int(word['@HPOS'])
else:
text += words['@CONTENT'] + ' '
# previous_position = int(words['@HPOS']) +int(words['@WIDTH'])
# previous_position_y = int(words['@VPOS'])
# previous_position_x = int(words['@HPOS'])
sent_text = sent_tokenize(text)
engarr = []
translation_text = ''
# if word_count == 0:
for sent in sent_text:
engarr.append({'src': sent, 'id': 1})
# translation_text += sent + ' '
# print(sent)
# print(len(translation_text.split(' ')))
# word_count = math.ceil(len(translation_text.split(' '))/no_lines)
# # print(word_count)
# puttext(int(height),int(x),int(y),translation_text,'../upload/1562311529_hin_0.jpg', int(word_count), line_height)
res = requests.post('http://52.40.71.62:3003/translator/translation_en', json=engarr)
dictFromServer = res.json()
if dictFromServer['response_body'] is not None:
for translation in dictFromServer['response_body']:
print(translation)
translation_text += translation['tgt'] + ' '
if word_count == 0:
word_count = math.ceil(len(translation_text.split(' '))/no_lines)
puttext(int(height),int(x),int(y),translation_text,o_filename, int(word_count), line_height)
else:
# if line_height == 0:
line_height = int(block['@HEIGHT'])
if textline['String'] is not None:
words = textline['String']
# if height == 0:
height = textline['@HEIGHT']
# if word_count == 0:
if isinstance(words, list):
for word in words:
text += word['@CONTENT'] + ' '
# previous_position = int(word['@HPOS']) + int(word['@WIDTH'])
# previous_position_y = int(word['@VPOS'])
# previous_position_x = int(word['@HPOS'])
else:
text += words['@CONTENT'] + ' '
# previous_position = int(words['@HPOS']) + int(words['@WIDTH'])
# previous_position_y = int(words['@VPOS'])
# previous_position_x = int(words['@HPOS'])
word_count = len(text.split(' '))
# print('1')
# print(text)
# puttext(int(height),int(x),int(y),text,'../upload/1562311529_hin_0.jpg', int(word_count), line_height)
engarr = []
engarr.append({'src': text, 'id': 1})
res = requests.post('http://52.40.71.62:3003/translator/translation_en', json=engarr)
dictFromServer = res.json()
if dictFromServer['response_body'] is not None:
for translation in dictFromServer['response_body']:
print(translation)
puttext(int(height),int(x),int(y),translation['tgt'],o_filename, int(word_count), line_height)
|
nilq/baby-python
|
python
|
#
# Loop transformation submodule that implements a combination of various loop transformations.
#
import sys
import orio.module.loop.submodule.submodule, transformation
import orio.module.loop.submodule.tile.tile
import orio.module.loop.submodule.permut.permut
import orio.module.loop.submodule.regtile.regtile
import orio.module.loop.submodule.unrolljam.unrolljam
import orio.module.loop.submodule.scalarreplace.scalarreplace
import orio.module.loop.submodule.boundreplace.boundreplace
import orio.module.loop.submodule.pragma.pragma
import orio.module.loop.submodule.arrcopy.arrcopy
import orio.module.loop.submodule.cuda.cuda
from orio.main.util.globals import *
#---------------------------------------------------------------------
class Composite(orio.module.loop.submodule.submodule.SubModule):
'''The composite loop transformation submodule.'''
def __init__(self, perf_params = None, transf_args = None, stmt = None, language='C', tinfo = None):
'''To instantiate a composite loop transformation submodule.'''
orio.module.loop.submodule.submodule.SubModule.__init__(self, perf_params, transf_args, stmt, language)
# transformation submodule.
self.tile_smod = orio.module.loop.submodule.tile.tile.Tile()
self.perm_smod = orio.module.loop.submodule.permut.permut.Permut()
self.regt_smod = orio.module.loop.submodule.regtile.regtile.RegTile()
self.ujam_smod = orio.module.loop.submodule.unrolljam.unrolljam.UnrollJam()
self.srep_smod = orio.module.loop.submodule.scalarreplace.scalarreplace.ScalarReplace()
self.brep_smod = orio.module.loop.submodule.boundreplace.boundreplace.BoundReplace()
self.prag_smod = orio.module.loop.submodule.pragma.pragma.Pragma()
self.acop_smod = orio.module.loop.submodule.arrcopy.arrcopy.ArrCopy()
self.cuda_smod = orio.module.loop.submodule.cuda.cuda.CUDA()
#-----------------------------------------------------------------
def __readTransfArgs(self, perf_params, transf_args):
'''Process the given transformation arguments'''
# all expected argument names
TILE = 'tile'
PERMUT = 'permut'
REGTILE = 'regtile'
UJAM = 'unrolljam'
SCALARREP = 'scalarreplace'
BOUNDREP = 'boundreplace'
PRAGMA = 'pragma'
OPENMP = 'openmp'
VECTOR = 'vector'
ARRCOPY = 'arrcopy'
CUDA = 'cuda'
# all expected transformation arguments
tiles = ([], None)
permuts = ([], None)
regtiles = (([],[]), None)
ujams = (([],[]), None)
scalarrep = (False, None)
boundrep = (False, None)
pragma = ([], None)
openmp = ((False, ''), None)
vector = ((False, ''), None)
arrcopy = ([], None)
cuda = ((None, False, False, None), None)
# iterate over all transformation arguments
for aname, rhs, line_no in transf_args:
# evaluate the RHS expression
try:
rhs = eval(rhs, perf_params)
except Exception, e:
err('orio.module.loop.submodule.composite.composite: %s: failed to evaluate the argument expression: %s\n --> %s: %s' %
(line_no, rhs,e.__class__.__name__, e))
# update transformation arguments
if aname == TILE:
tiles = (rhs, line_no)
elif aname == PERMUT:
permuts = (rhs, line_no)
elif aname == REGTILE:
regtiles = (rhs, line_no)
elif aname == UJAM:
ujams = (rhs, line_no)
elif aname == SCALARREP:
scalarrep = (rhs, line_no)
elif aname == BOUNDREP:
boundrep = (rhs, line_no)
elif aname == PRAGMA:
pragma = (rhs, line_no)
elif aname == OPENMP:
openmp = (rhs, line_no)
elif aname == VECTOR:
vector = (rhs, line_no)
elif aname == ARRCOPY:
arrcopy = (rhs, line_no)
elif aname == CUDA:
cuda = (rhs, line_no)
# unknown argument name
else:
err('orio.module.loop.submodule.composite.composite: %s: unrecognized transformation argument: "%s"' % (line_no, aname))
# check semantics of the transformation arguments
(tiles, permuts, regtiles, ujams, scalarrep, boundrep,
pragma, openmp, vector, arrcopy, cuda) = self.checkTransfArgs(tiles, permuts, regtiles, ujams,
scalarrep, boundrep, pragma,
openmp, vector, arrcopy, cuda)
# return information about the transformation arguments
return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)
#-----------------------------------------------------------------
def checkTransfArgs(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma,
openmp, vector, arrcopy, cuda):
'''Check the semantics of the given transformation arguments'''
# evaluate arguments for loop tiling
rhs, line_no = tiles
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: tile argument must be a list/tuple: %s' % (line_no, rhs))
targs = []
for e in rhs:
if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 3:
err(('orio.module.loop.submodule.composite.composite:%s: element of tile argument must be in the form of ' +
'(<loop-id>,<tsize>,<tindex>): %s') % (line_no, e))
loop_id, tsize, tindex = e
loop_id = self.__convertLoopId(loop_id, line_no)
tsize, tindex = self.tile_smod.checkTransfArgs((tsize, line_no), (tindex, line_no))
targs.append((loop_id, tsize, tindex))
tiles = targs
# evaluate arguments for loop permutation/interchange
rhs, line_no = permuts
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: permutation argument must be a list/tuple: %s' % (line_no, rhs))
for e in rhs:
seq, = self.perm_smod.checkTransfArgs((e, line_no))
permuts = rhs
# evaluate arguments for register tiling
rhs, line_no = regtiles
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: register-tiling argument must be a list/tuple: %s' % (line_no, rhs))
if len(rhs) != 2:
err(('orio.module.loop.submodule.composite.composite:%s: register-tiling argument must be in the form of ' +
'(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
loops, ufactors = rhs
loops, ufactors = self.regt_smod.checkTransfArgs((loops, line_no), (ufactors, line_no))
regtiles = (loops, ufactors)
# evaluate arguments for unroll/jamming
rhs, line_no = ujams
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: unroll/jam argument must be a list/tuple: %s' % (line_no, rhs))
if len(rhs) != 2:
err(('orio.module.loop.submodule.composite.composite:%s: unroll/jam argument must be in the form of ' +
'(<loop-ids>,<ufactors>): %s') % (line_no, rhs))
loops, ufactors = rhs
for lp,uf in zip(loops, ufactors):
self.ujam_smod.checkTransfArgs((uf, line_no), (False, line_no))
ujams = (loops, ufactors)
# evaluate arguments for scalar replacement
rhs, line_no = scalarrep
if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
scalarrep = (rhs, None, None)
else:
if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
err(('orio.module.loop.submodule.composite.composite:%s: scalar replacement argument must be in the form of ' +
'((True|False),<dtype>,<prefix>): %s') % (line_no, rhs))
do_scalarrep = rhs[0]
dtype = None
prefix = None
if len(rhs) >= 2:
dtype = rhs[1]
if len(rhs) >= 3:
prefix = rhs[2]
dtype, prefix = self.srep_smod.checkTransfArgs((dtype, line_no), (prefix, line_no))
scalarrep = (do_scalarrep, dtype, prefix)
# evaluate arguments for bound replacement
rhs, line_no = boundrep
if isinstance(rhs, bool) or rhs == 0 or rhs == 1:
boundrep = (rhs, None, None)
else:
if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) < 1 or
len(rhs) > 3 or (not isinstance(rhs[0], bool) and rhs[0] != 0 and rhs[0] != 1)):
err(('orio.module.loop.submodule.composite.composite:%s: bound replacement argument must be in the form of ' +
'((True|False),<lprefix>,<uprefix>): %s') % (line_no, rhs))
do_boundrep = rhs[0]
lprefix = None
uprefix = None
if len(rhs) >= 2:
lprefix = rhs[1]
if len(rhs) >= 3:
uprefix = rhs[2]
lprefix, uprefix = self.brep_smod.checkTransfArgs((lprefix, line_no), (uprefix, line_no))
boundrep = (do_boundrep, lprefix, uprefix)
# evaluate arguments for pragma directives
rhs, line_no = pragma
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: pragma argument must be a list/tuple: %s' % (line_no, rhs))
targs = []
for e in rhs:
if (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 2:
err(('orio.module.loop.submodule.composite.composite:%s: element of pragma directive argument must be in the form of ' +
'(<loop-id>,<pragma-strings>): %s') % (line_no, e))
loop_id, pragmas = e
loop_id = self.__convertLoopId(loop_id, line_no)
pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
targs.append((loop_id, pragmas))
pragma = targs
# evaluate arguments for openmp pragma directive
rhs, line_no = openmp
if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
not isinstance(rhs[0], bool)):
err(('orio.module.loop.submodule.composite.composite:%s: element of openmp pragma directive argument must be in the form of ' +
'((True|False),<pragma-strings>): %s') % (line_no, rhs))
do_openmp, pragmas = rhs
pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
openmp = do_openmp, pragmas
# evaluate arguments for vectorization pragma directive
rhs, line_no = vector
if ((not isinstance(rhs, list) and not isinstance(rhs, tuple)) or len(rhs) != 2 or
not isinstance(rhs[0], bool)):
err(('orio.module.loop.submodule.composite.composite:%s: element of vectorization pragma directive argument must be in ' +
'the form of ((True|False),<pragma-strings>): %s') % (line_no, rhs))
do_vector, pragmas = rhs
pragmas, = self.prag_smod.checkTransfArgs((pragmas, line_no))
vector = do_vector, pragmas
# evaluate arguments for array-copy optimization
rhs, line_no = arrcopy
if not isinstance(rhs, list) and not isinstance(rhs, tuple):
err('orio.module.loop.submodule.composite.composite: %s: array-copy argument must be a list/tuple: %s' % (line_no, rhs))
targs = []
for e in rhs:
if ((not isinstance(e, list) and not isinstance(e, tuple)) or len(e) > 5 or
len(e) < 3 or not isinstance(e[0], bool)):
err(('orio.module.loop.submodule.composite.composite:%s: element of array-copy argument must be in the form of ' +
'((True|False),<array-ref-str>,<dim-sizes>,<suffix>,<dtype>): %s') %
(line_no, e))
dtype = None
suffix = None
if len(e) == 3:
do_acopy, aref, dimsizes = e
elif len(e) == 4:
do_acopy, aref, dimsizes, suffix = e
else:
do_acopy, aref, dimsizes, suffix, dtype = e
(aref, suffix,
dtype, dimsizes)= self.acop_smod.checkTransfArgs((aref, line_no), (suffix, line_no),
(dtype, line_no), (dimsizes, line_no))
targs.append((do_acopy, aref, suffix, dtype, dimsizes))
arrcopy = targs
# evaluate arguments for cuda
rhs, line_no = cuda
if not isinstance(rhs, tuple):
err('orio.module.loop.submodule.cuda.cuda: %s: cuda argument must be a tuple: %s' % (line_no, rhs))
if len(rhs) != 4:
err(('orio.module.loop.submodule.cuda.cuda:%s: cuda argument must be in the form of ' +
'(<threadCount>,<cacheBlocks>,<pinHostMem>,<streamCount>): %s') % (line_no, rhs))
cuda = rhs
# return information about the transformation arguments
return (tiles, permuts, regtiles, ujams, scalarrep, boundrep, pragma, openmp, vector, arrcopy, cuda)
#-----------------------------------------------------------------
def applyTransf(self, tiles, permuts, regtiles, ujams, scalarrep, boundrep,
pragma, openmp, vector, arrcopy, cuda, stmt):
'''To apply a sequence of transformations'''
# perform the composite transformations
t = transformation.Transformation(tiles, permuts, regtiles, ujams, scalarrep,
boundrep, pragma, openmp, vector, arrcopy, cuda, self.stmt)
transformed_stmt = t.transform()
# return the transformed statement
return transformed_stmt
#-----------------------------------------------------------------
def __convertLoopId(self, lid, line_no):
'''
Convert the loop ID to a list: [True/False, id1, id2, id3, ...].
The 'True' boolean value indicates that at least one of the loop ID must exist in the
statement body. A 'False' value means that it is OK if no loop IDs exist in the statement
body.
The sequence of IDs imply that "apply optimizations on id1 (if exist), if not, apply
optimizations on id2 (if exist), and so on and so forth".
'''
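# Illustrative mappings implied by the logic below:
#   'i'          -> [True, 'i']
#   ('i', 'j')   -> [True, 'i', 'j']    (tuple: at least one ID must exist)
#   ['i', 'j']   -> [False, 'i', 'j']   (list: IDs may be absent)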
# check if the loop ID is well-formed
if isinstance(lid, str):
pass
elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
for i in lid:
if not isinstance(i, str):
err('orio.module.loop.submodule.composite.composite: %s: loop ID must be a string: %s' % (line_no, i))
else:
err('orio.module.loop.submodule.composite.composite: %s: invalid loop ID representation: %s' % (line_no, lid))
# create the loop ID abstraction
lids = []
if isinstance(lid, str):
lids.append(True)
lids.append(lid)
elif (isinstance(lid, tuple) or isinstance(lid, list)) and len(lid) > 0:
lids.append(isinstance(lid, tuple))
lids.extend(lid)
else:
err('orio.module.loop.submodule.composite.composite internal error: '+
'incorrect representation of the loop IDs', doexit=True)
return lids
#-----------------------------------------------------------------
def transform(self):
'''To apply various loop transformations'''
# debugging info
#debug("perf_params=" + str(self.perf_params), self,level=6)
# read all transformation arguments
args_info = self.__readTransfArgs(self.perf_params, self.transf_args)
(tiles, permuts, regtiles, ujams, scalarrep,
boundrep, pragma, openmp, vector, arrcopy, cuda) = args_info
# perform all transformations
try:
transformed_stmt = self.applyTransf(tiles, permuts, regtiles, ujams, scalarrep, boundrep,
pragma, openmp, vector, arrcopy, cuda, self.stmt)
except Exception, e:
err('orio.module.loop.submodule.composite.composite : error transforming "%s"\n --> %s:%s' %
(self.stmt, e.__class__, e))
# return the transformed statement
return transformed_stmt
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-04-23 11:07
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('shop', '0002_product_image_back'),
]
operations = [
migrations.CreateModel(
name='BtcInvoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invoice_id', models.CharField(max_length=15)),
('address', models.CharField(max_length=100, null=True)),
('price_in_usd', models.DecimalField(decimal_places=8, max_digits=15)),
('price_in_btc', models.DecimalField(decimal_places=8, max_digits=15)),
('added_time', models.DateTimeField(default=datetime.datetime.now)),
('order', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Order')),
],
options={
'get_latest_by': 'added_time',
},
),
migrations.RunSQL(
"INSERT INTO btc_payment_btcinvoice (invoice_id, address, price_in_usd, price_in_btc, added_time) VALUES (9999, '1Pjc6NVTqFWQa6bj8HE1n3mU5nBgCty8WN', 99, 0.99, '2018-04-20 16:20:20');"
),
migrations.CreateModel(
name='BtcInvoicePayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('transaction_hash', models.CharField(max_length=150, null=True)),
('value', models.PositiveIntegerField(help_text='must be divided by 100000000')),
('invoice', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='btc_payment.BtcInvoice')),
],
),
migrations.CreateModel(
name='BtcPendingInvoicePayment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('invoice_id', models.CharField(max_length=15)),
('transaction_hash', models.CharField(max_length=150, null=True)),
('value', models.PositiveIntegerField(help_text='must be divided by 100000000')),
],
),
]
|
nilq/baby-python
|
python
|
from collections.abc import Iterator, Iterable
import string
from . import reader
class Record:
"""
Simple namespace object that makes the fields of a GTF record available. Subclassed
to create records specific to exons, transcripts, and genes
"""
__slots__ = ['_fields', '_attribute']
_del_letters = string.ascii_letters
_del_non_letters = ''.join(
set(string.printable).difference(string.ascii_letters))
def __init__(self, record):
"""
:param str record: input record from file
"""
fields = record.strip(';\n').split('\t')
self._fields = fields[:8]
self._attribute = {
key: value.strip('"') for (key, value) in
[field.split() for field in fields[8].split('; ')]
}
def __repr__(self):
return '<Record: %s>' % self.__str__()
def __bytes__(self):
return self.__str__().encode()
def __str__(self):
return '\t'.join(self._fields) + self._format_attribute() + '\n'
def __hash__(self) -> int:
"""hash the record string"""
return hash(self.__str__())
def _format_attribute(self):
return ' '.join('%s "%s";' % (k, v) for k, v in self._attribute.items())
@property
def seqname(self):
return self._fields[0]
@property
def chromosome(self):
return self._fields[0] # synonym for seqname
@property
def source(self):
return self._fields[1]
@property
def feature(self):
return self._fields[2]
@property
def start(self):
return int(self._fields[3])
@property
def end(self):
return int(self._fields[4])
@property
def score(self):
return self._fields[5]
@property
def strand(self):
return self._fields[6]
@property
def frame(self):
return self._fields[7]
@property
def size(self):
size = self.end - self.start
if size < 0:
raise ValueError('invalid record: negative size %d (start > end)' % size)
else:
return size
def get_attribute(self, key):
"""
access an item from the attribute field of a GTF file.
:param key: item to access
:return str: value of item
"""
try:
return self._attribute[key]
except KeyError:
return None
def set_attribute(self, key, value):
"""
:param str key: attribute name
:param str value: attribute value
"""
self._attribute[key] = value
def __eq__(self, other):
"""equivalent to testing if start, end, chrom and strand are the same."""
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
class Reader(reader.Reader):
"""
Subclass of reader.Reader; returns a Reader with several specialized iterator
methods.
:method __iter__: Iterator over all non-header records in gtf; yields Record objects.
:method iter_genes: Iterator over all genes in gtf; yields Gene objects.
"""
def __init__(self, files_='-', mode='r', header_comment_char='#'):
"""
:param list|str files_: file or list of files to be read. Defaults to sys.stdin
:param mode: open mode. Default 'r' will return string objects. Change to 'rb' to
return bytes. Not currently supported.
:param str|bytes header_comment_char: character that marks headers, to be removed.
"""
super().__init__(files_, mode, header_comment_char) # different default args
def __iter__(self):
for line in super().__iter__():
yield Record(line)
def filter(self, retain_types):
"""
iterate over a gtf file, returning only records whose feature type is in
retain_types.
:param Iterable retain_types: a set of record feature types to retain
:return Iterator:
"""
retain_types = set(retain_types)
for record in self:
if record.feature in retain_types:
yield record
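# Usage sketch (illustrative; assumes a GTF file at 'annotation.gtf'):
# reader = Reader('annotation.gtf')
# for record in reader.filter({'exon'}):
#     print(record.chromosome, record.start, record.end, record.get_attribute('gene_id'))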
|
nilq/baby-python
|
python
|
import pdb
def add_default_args(parser, root_dir, rand_seed=None, possible_model_names=None):
# tng, test, val check intervals
parser.add_argument('--eval_test_set', dest='eval_test_set', action='store_true', help='true = run test set also')
parser.add_argument('--check_val_every_n_epoch', default=1, type=int, help='check val every n epochs')
parser.opt_list('--accumulate_grad_batches', default=1, type=int, tunable=False,
help='accumulates gradients k times before applying update. Simulates huge batch size')
parser.add_argument('--max_nb_epochs', default=200, type=int, help='cap epochs')
parser.add_argument('--min_nb_epochs', default=2, type=int, help='min epochs')
parser.add_argument('--train_percent_check', default=1.0, type=float, help='how much of tng set to check')
parser.add_argument('--val_percent_check', default=1.0, type=float, help='how much of val set to check')
parser.add_argument('--test_percent_check', default=1.0, type=float, help='how much of test set to check')
parser.add_argument('--val_check_interval', default=0.95, type=float, help='how much within 1 epoch to check val')
parser.add_argument('--log_save_interval', default=100, type=int, help='how many batches between log saves')
parser.add_argument('--add_log_row_interval', default=100, type=int, help='add log every k batches')
# early stopping
parser.add_argument('--disable_early_stop', dest='enable_early_stop', action='store_false')
parser.add_argument('--early_stop_metric', default='val_acc', type=str)
parser.add_argument('--early_stop_mode', default='min', type=str)
parser.add_argument('--early_stop_patience', default=3, type=int, help='number of epochs until stop')
# gradient handling
parser.add_argument('--gradient_clip', default=-1, type=int)
parser.add_argument('--track_grad_norm', default=-1, type=int, help='if > 0, will track this grad norm')
# model saving
parser.add_argument('--model_save_path', default=root_dir + '/model_weights')
parser.add_argument('--model_save_monitor_value', default='val_acc')
parser.add_argument('--model_save_monitor_mode', default='max')
# model paths
parser.add_argument('--model_load_weights_path', default=None, type=str)
if possible_model_names is not None:
parser.add_argument('--model_name', default='', help=','.join(possible_model_names))
# test_tube settings
parser.add_argument('-en', '--tt_name', default='pt_test')
parser.add_argument('-td', '--tt_description', default='pytorch lightning test')
parser.add_argument('--tt_save_path', default=root_dir + '/test_tube_logs', help='logging dir')
parser.add_argument('--enable_single_run', dest='single_run', action='store_true')
parser.add_argument('--nb_hopt_trials', default=1, type=int)
parser.add_argument('--log_stdout', dest='log_stdout', action='store_true')
# GPU
parser.add_argument('--gpus', default=None, type=str)
parser.add_argument('--single_run_gpu', dest='single_run_gpu', action='store_true')
parser.add_argument('--default_tensor_type', default='torch.cuda.FloatTensor', type=str)
parser.add_argument('--use_amp', dest='use_amp', action='store_true')
parser.add_argument('--check_grad_nans', dest='check_grad_nans', action='store_true')
parser.add_argument('--amp_level', default='O2',type=str)
# run on hpc
parser.add_argument('--on_cluster', dest='on_cluster', action='store_true')
# FAST training
# use these settings to make sure network has no bugs without running a full dataset
parser.add_argument('--fast_dev_run', dest='fast_dev_run', default=False, action='store_true', help='runs validation after 1 tng step')
parser.add_argument('--enable_tqdm', dest='enable_tqdm', default=False, action='store_true', help='false removes the prog bar')
parser.add_argument('--overfit', default=-1, type=float, help='%% of dataset to use with this option. float, or -1 for none')
# debug args
if rand_seed is not None:
parser.add_argument('--random_seed', default=rand_seed, type=int)
parser.add_argument('--interactive', dest='interactive', action='store_true', help='runs on gpu without cluster')
parser.add_argument('--debug', dest='debug', action='store_true', help='enables/disables test tube')
parser.add_argument('--local', dest='local', action='store_true', help='enables local tng')
# optimizer
parser.add_argument('--lr_scheduler_milestones', default=None, type=str)
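# Usage sketch (assumption: parser is a test_tube HyperOptArgumentParser, which provides
# the opt_list method used above):
# from test_tube import HyperOptArgumentParser
# parser = HyperOptArgumentParser(strategy='random_search')
# add_default_args(parser, root_dir='/tmp/experiment', rand_seed=1234)
# hparams = parser.parse_args()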
|
nilq/baby-python
|
python
|
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""Lab1: generate layout on physical grid
1. Copy this file to working directory
2. For GDS export, prepare layermap file
"""
import laygo
import numpy as np
import yaml
#import logging;logging.basicConfig(level=logging.DEBUG)
import os.path
if os.path.isfile("laygo_config.yaml"):
with open("laygo_config.yaml", 'r') as stream:
techdict = yaml.safe_load(stream)
tech = techdict['tech_lib']
metal = techdict['metal_layers']
pin = techdict['pin_layers']
text = techdict['text_layer']
prbnd = techdict['prboundary_layer']
res = techdict['physical_resolution']
print(tech + " loaded successfully")
else:
print("no config file exists. loading default settings")
tech = "freePDK45"
metal = [['metal0', 'donotuse'],
['metal1', 'drawing'],
['metal2', 'drawing'],
['metal3', 'drawing'],
['metal4', 'drawing'],
['metal5', 'drawing'],
['metal6', 'drawing'],
['metal7', 'drawing'],
['metal8', 'drawing'],
['metal9', 'drawing'],
]
pin = [['text', 'drawing'],
['metal1', 'pin'],
['metal2', 'pin'],
['metal3', 'pin'],
['metal4', 'pin'],
['metal5', 'pin'],
['metal6', 'pin'],
['metal7', 'pin'],
['metal8', 'pin'],
['metal9', 'pin'],
]
text = ['text', 'drawing']
prbnd = ['prBoundary', 'drawing']
res=0.0025
workinglib = 'laygo_working'
utemplib = tech+'_microtemplates_dense'
laygen = laygo.GridLayoutGenerator(physical_res=res)
laygen.layers['metal'] = metal
laygen.layers['pin'] = pin
laygen.layers['prbnd'] = prbnd
if tech=='laygo10n': #fake technology
laygen.use_phantom = True
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
#laygen.templates.display()
#laygen.grids.display()
mycell = '_generate_example_3'
pg = 'placement_basic' #placement grid
laygen.add_cell(mycell)
laygen.sel_cell(mycell) #select the cell to work on
in0=laygen.place(None, 'nmos4_fast_left', pg, xy=np.array([0,0]))
#placement on grid
laygen.place(None, 'nmos4_fast_boundary', pg, xy=np.array([2,0]))
laygen.place(None, 'nmos4_fast_center_nf2', pg, xy=np.array([3,0]))
in1=laygen.place(None, 'nmos4_fast_center_nf2', pg, xy=np.array([5,0]))
#abutted placement on grid
in2=laygen.relplace(None, 'nmos4_fast_center_nf1_right', pg, in1.name)
in3=laygen.relplace(None, 'nmos4_fast_boundary', pg, in2.name)
in4=laygen.relplace(None, 'nmos4_fast_tap', pg, in3.name)
in5=laygen.relplace(None, 'nmos4_fast_right', pg, in4.name)
ip0=laygen.relplace(None, 'pmos4_fast_left', pg, in0.name, direction='top', transform='MX')
ip1=laygen.relplace(None, 'pmos4_fast_boundary', pg, ip0.name, transform='MX')
ip2=laygen.relplace(None, 'pmos4_fast_center_nf2', pg, ip1.name, transform='MX')
ip3=laygen.relplace(None, 'pmos4_fast_center_nf2', pg, ip2.name, transform='MX')
ip4=laygen.relplace(None, 'pmos4_fast_center_nf1_right', pg, ip3.name, transform='MX')
ip5=laygen.relplace(None, 'pmos4_fast_boundary', pg, ip4.name, transform='MX')
ip6=laygen.relplace(None, 'pmos4_fast_tap', pg, ip5.name, transform='MX')
ip7=laygen.relplace(None, 'pmos4_fast_right', pg, ip6.name, transform='MX')
#route on grid
laygen.route(None, metal[2], xy0=np.array([4,3]), xy1=np.array([6,3]), gridname0='route_M1_M2_mos')
#route on grid with reference objects
laygen.route(None, metal[2], xy0=np.array([1,3]), xy1=np.array([2,3]), gridname0='route_M1_M2_mos',
refinstname0=ip1.name, transform0='MX', refinstname1=ip1.name, transform1='MX')
#route on grid, pin reference
laygen.route(None, metal[1], xy0=np.array([0,0]), xy1=np.array([0,0]), gridname0='route_M1_M2_mos',
refinstname0=in1.name, refpinname0='G0', refinstname1=ip3.name, refpinname1='G0')
#via placement on grid
laygen.via(None, np.array([4,3]), gridname='route_M1_M2_mos')
#via placement on grid with offset
laygen.via(None, np.array([1,3]), gridname='route_M1_M2_mos', offset=np.array([0.45, 0.96]), transform='MX')
#via placement on grid with pin reference
laygen.via(None, np.array([0,0]), refinstname=ip3.name, refpinname='G0', gridname='route_M1_M2_mos')
laygen.display()
#bag export, if bag does not exist, gds export
import imp
try:
imp.find_module('bag')
import bag
prj = bag.BagProject()
laygen.sel_cell(mycell)
laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
laygen.sel_cell(mycell) # cell selection
laygen.export_GDS('output.gds', layermapfile=tech+".layermap") # change layermapfile
|
nilq/baby-python
|
python
|
from .models import db
from .utils import update_menus
from diff_match_patch import diff_match_patch
from flask import Markup, abort, g, redirect, render_template, request, send_file, url_for
from flask_security.core import current_user
import os
def render(page, path, version):
file, mimetype, fragment = page.subtree.open(path, version)
if not file:
abort(404)
if not fragment:
return send_file(file, mimetype=mimetype)
html = file.read()
return render_template('subtree.html', **{
'mode': 'render',
'page': page,
'path': path,
'title': page.title,
'content': Markup(html.decode('utf8')),
'version': version,
'mimetype': page.subtree._mimetype_from_path(path) or mimetype,
})
def raw(page, path, version):
file, mimetype, fragment = page.subtree.open(path, version=version, raw=True)
if not mimetype.startswith('text/'):
return send_file(file, mimetype=mimetype)
return render_template('subtree.html', **{
'mode': 'raw',
'page': page,
'path': path,
'title': page.title,
'content': file.read().decode('utf8'),
'version': version,
'mimetype': mimetype,
})
def edit(page, path, version):
file, mimetype, fragment = page.subtree.open(path, version=None, raw=True)
if not mimetype.startswith('text/'):
return send_file(file, mimetype=mimetype)
return render_template('subtree.html', **{
'mode': 'edit',
'page': page,
'path': path,
'title': page.title,
'content': file.read().decode('utf8'),
'version': version,
'mimetype': mimetype,
})
def patch(page, path):
file, mimetype, fragment = page.subtree.open(path, version=None, raw=True)
if not mimetype.startswith('text/'):
return abort(400)
dmp = diff_match_patch()
patch = dmp.patch_fromText(request.form.get('patch'))
text, _ = dmp.patch_apply(patch, file.read().decode('utf8'))
page.subtree.write(path, text.encode('utf8'))
return {
'is_draft': page.subtree.diff(None, None, 'HEAD'),
}
def subtree(page, path, version):
request_version = version
if version == None and not current_user.any_role('admin', 'editor'):
version = 'HEAD'
if page.subtree.isdir(path, version):
index = page.subtree.find_index(path, version)
if index:
path = index
elif request.method == 'GET':
if current_user.any_role('admin', 'editor'):
return render_template('subtree.html', **{
'mode': 'missing_index',
'page': page,
'path': path,
'title': page.title,
'version': version,
})
abort(404)
if request.method == 'POST' and current_user.any_role('admin', 'editor'):
action = request.form.get('action')
if action == 'create':
type_ = request.form.get('type')
if type_ == 'html':
filename = 'index.html'
elif type_ == 'markdown':
filename = 'index.md'
else:
filename = 'index.txt'
if not page.subtree.exists(path, filename):
page.subtree.write(os.path.join(path, filename), b'')
return {}
elif action == 'delete':
page.subtree.delete()
db.session.delete(page)
db.session.commit()
elif action == 'patch':
title = request.form.get('title')
if title is not None:
if not title.strip():
title = 'Untitled'
page.title = title
db.session.add(page)
db.session.commit()
update_menus()
return patch(page, path)
elif action == 'restore':
page.subtree.revert(version)
return {}
elif action == 'commit':
page.subtree.commit()
return {}
if request.args.get('raw'):
return raw(page, path, version)
elif request.args.get('edit') != None:
return edit(page, path, version)
return render(page, path, version)
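# Routing sketch (illustrative; the real URL rules and Page lookup live elsewhere):
# @app.route('/<path:path>', methods=['GET', 'POST'])
# def page_view(path):
#     page = ...  # resolve the Page object for this path
#     return subtree(page, path, version=request.args.get('version'))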
|
nilq/baby-python
|
python
|
import numpy as np
import pandas
import pickle
import json
import shutil
import warnings
from typing import Union, List
import os
from sfaira.consts import OCS
from sfaira.data import load_store
from sfaira.data.dataloaders import Universe
from sfaira.estimators import EstimatorKerasEmbedding
from sfaira.ui import ModelZoo
from sfaira.versions.metadata import CelltypeUniverse, OntologyCl
def _tp(yhat, ytrue):
"""
Class wise true positive count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * ytrue, axis=0)
def _fp(yhat, ytrue):
"""
Class wise false positive count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * (1. - ytrue), axis=0)
def _tn(yhat, ytrue):
"""
Class wise true negative count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat < np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * (1. - ytrue), axis=0)
def _fn(yhat, ytrue):
"""
Class wise false negative count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat < np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * ytrue, axis=0)
def accuracy(yhat, ytrue):
"""
Class wise accuracy.
:param yhat:
:param ytrue:
:return:
"""
return (_tp(yhat, ytrue) + _tn(yhat, ytrue)) / yhat.shape[0]
def f1(yhat, ytrue):
"""
Class wise F1.
:param yhat:
:param ytrue:
:return:
"""
precision = _tp(yhat, ytrue) / (_tp(yhat, ytrue) + _fp(yhat, ytrue))
recall = _tp(yhat, ytrue) / (_tp(yhat, ytrue) + _fn(yhat, ytrue))
return 2 * 1 / (1 / precision + 1 / recall)
def auc_roc(yhat, ytrue):
"""
Class wise AUC ROC.
:param yhat:
:param ytrue:
:return:
"""
import sklearn
auc_roc = np.array([
sklearn.metrics.roc_auc_score(y_true=ytrue[:, i], y_score=yhat[:, i])
for i in range(ytrue.shape[1])
])
return auc_roc
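# Quick sanity check (illustrative): with perfect one-hot predictions every class-wise
# metric above evaluates to 1.
# yhat  = np.array([[0.9, 0.1], [0.2, 0.8]])
# ytrue = np.array([[1.0, 0.0], [0.0, 1.0]])
# accuracy(yhat, ytrue)  # -> array([1., 1.])
# f1(yhat, ytrue)        # -> array([1., 1.])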
class GridsearchContainer:
histories: Union[None, dict]
evals: Union[None, dict]
run_ids: Union[None, list]
gs_keys: Union[None, dict]
summary_tab: Union[None, pandas.DataFrame]
cv: bool
source_path: dict
model_id_len: Union[None, int]
def __init__(
self,
source_path: dict,
cv: bool
):
self.histories = None
self.evals = None
self.run_ids = None
self.gs_keys = None
self.cv = cv
self.source_path = source_path
self.summary_tab = None
def load_gs(
self,
gs_ids: List[str]
):
"""
Loads all relevant data of a grid search.
:param gs_ids:
:return:
"""
res_dirs = [
os.path.join(self.source_path[x], x, "results", "")
for x in gs_ids
]
run_ids = [
np.unique([
x.split("_evaluation.pickle")[0]
for x in os.listdir(indir)
if "_evaluation.pickle" in x
])
for i, indir in enumerate(res_dirs)
]
histories = {}
evals = {}
hyperpars = {}
model_hyperpars = {}
run_ids_proc = []
gs_keys = []
gs_dirs = []
for i, indir in enumerate(res_dirs):
for x in run_ids[i]:
fn_history = os.path.join(indir, f"{x}_history.pickle")
if os.path.isfile(fn_history):
with open(fn_history, 'rb') as f:
histories[x] = pickle.load(f)
else:
print(f"file {x}_history.pickle not found")
fn_eval = os.path.join(indir, f"{x}_evaluation.pickle")
if os.path.isfile(fn_eval):
with open(fn_eval, 'rb') as f:
evals[x] = pickle.load(f)
else:
print(f"file {x}_evaluation.pickle not found")
fn_hp = os.path.join(indir, f"{x}_hyperparam.pickle")
if os.path.isfile(fn_hp):
with open(fn_hp, 'rb') as f:
hyperpars[x] = pickle.load(f)
else:
print(f"file {x}_hyperparam.pickle not found")
fn_mhp = os.path.join(indir, f"{x}_model_hyperparam.pickle")
if os.path.isfile(fn_mhp):
with open(fn_mhp, 'rb') as f:
model_hyperpars[x] = pickle.load(f)
else:
print(f"file {x}_model_hyperparam.pickle not found")
run_ids_proc.append(x)
gs_keys.append(os.path.normpath(indir).split(os.path.sep)[-2])
gs_dirs.append(indir)
self.run_ids = run_ids_proc
self.gs_keys = dict(zip(run_ids_proc, gs_keys))
self.gs_dirs = dict(zip(run_ids_proc, gs_dirs))
self.evals = evals
self.hyperpars = hyperpars
self.model_hyperpars = model_hyperpars
self.histories = histories
def load_y(
self,
hat_or_true: str,
run_id: str
):
fn = os.path.join(self.gs_dirs[run_id], f"{run_id}_y{hat_or_true}.npy")
return np.load(fn)
def best_model_by_partition(
self,
partition_select: str,
metric_select: str,
cv_mode: str = "mean",
subset: dict = {},
return_run_only: bool = False,
grouping: list = ["organ", "model_type"]
):
"""
:param partition_select:
:param metric_select:
:param cv_mode:
:param subset:
:param return_run_only:
:param grouping:
:return:
"""
model_ids = []
run_ids = []
for id, df in self.summary_tab.groupby(grouping):
if df.shape[0] > 0:
model_id_temp, run_id_temp, cv_id_temp = self.get_best_model_ids(
tab=df,
partition_select=partition_select,
metric_select=metric_select,
subset=subset,
cv_mode=cv_mode
)
model_ids.append(model_id_temp)
run_ids.append(run_id_temp)
if return_run_only:
return self.summary_tab.loc[[x in run_ids for x in self.summary_tab["run"].values], :]
else:
return self.summary_tab.loc[[x in model_ids for x in self.summary_tab["model_gs_id"].values], :]
def get_best_model_ids(
self,
tab,
metric_select: str,
partition_select: str,
subset: dict = {},
cv_mode: str = "mean"
):
"""
:param tab:
:param metric_select:
:param partition_select:
:param subset:
:param cv_mode:
:return:
"""
for k, v in subset.items():
tab = tab.loc[tab[k].values == v, :]
if metric_select.endswith('accuracy') \
or metric_select.endswith('acc_agg') \
or metric_select.endswith('f1') \
or metric_select.endswith('tpr'):
ascending = False
if cv_mode == "min":
raise Warning("selected cv_mode min with metric_id %s, likely not intended" % metric_select)
elif metric_select.endswith('loss') \
or metric_select.endswith('mse') \
or metric_select.endswith('negll') \
or metric_select.endswith('custom_cce_agg') \
or metric_select.endswith('fpr'):
ascending = True
if cv_mode == "max":
raise Warning("selected cv_mode max with metric_id %s, likely not intended" % metric_select)
else:
raise ValueError("measure %s not recognized" % metric_select)
if partition_select not in ["test", "val", "train"]:
raise ValueError("partition %s not recognised" % partition_select)
metric_select = f"{partition_select}_{metric_select}"
if cv_mode.lower() == "mean":
best_model = tab.groupby("run", as_index=False)[metric_select].mean(). \
sort_values([metric_select], ascending=ascending)
elif cv_mode.lower() == "median":
best_model = tab.groupby("run", as_index=False)[metric_select].median(). \
sort_values([metric_select], ascending=ascending)
elif cv_mode.lower() == "max":
best_model = tab.groupby("run", as_index=False)[metric_select].max(). \
sort_values([metric_select], ascending=ascending)
elif cv_mode.lower() == "min":
best_model = tab.groupby("run", as_index=False)[metric_select].min(). \
sort_values([metric_select], ascending=ascending)
else:
raise ValueError("cv_mode %s not recognized" % cv_mode)
best_run_id = best_model['run'].values[0] if best_model.shape[0] > 0 else None
best_cv = tab[tab["run"] == best_run_id]. \
sort_values([metric_select], ascending=ascending)['cv'].values[0] if best_run_id is not None \
else None
best_model_id = tab[tab["run"] == best_run_id]. \
sort_values([metric_select], ascending=ascending)['model_gs_id'].values[0] if best_run_id is not None \
else None
return best_model_id, best_run_id, best_cv
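    # Illustrative call (hedged: the metric name and the subset below are placeholders,
    # not taken from the surrounding code), selecting the run with the lowest mean
    # validation loss:
    #
    #     model_id, run_id, cv = self.get_best_model_ids(
    #         tab=self.summary_tab,
    #         metric_select="loss",        # ends with "loss" -> sorted ascending
    #         partition_select="val",      # metric column becomes "val_loss"
    #         cv_mode="mean",
    #         subset={"organ": "lung"},    # hypothetical filter on the summary table
    #     )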
@property
def cv_keys(self) -> List[str]:
"""
Returns keys of cross-validation used in dictionaries in this class.
:return: list of string keys
"""
return np.unique(self.summary_tab["cv"].values).tolist()
def save_best_weight(
self,
path: str,
partition: str = "val",
metric: str = "loss",
subset: dict = {}
):
"""
Copies weight file from best hyperparameter setting from grid search directory to zoo directory with cleaned
file name.
:param path: Target file to save to. This is intended to be the zoo directory ready for upload.
:param partition:
:param metric:
:param subset:
:return:
"""
assert not self.cv, "not implemented for CV yet"
model_id, _, _ = self.get_best_model_ids(
tab=self.summary_tab,
partition_select=partition,
metric_select=metric,
cv_mode="mean",
subset=subset,
)
shutil.copyfile(
os.path.join(self.gs_dirs[model_id], self.gs_keys[model_id], "results", f"{model_id}_weights.h5"),
os.path.join(path, f"{model_id}_weights.h5")
)
def plot_completions(
self,
groupby=["depth", "width", "lr", "dropout", "l1", "l2"],
height_fig=7,
width_fig=7
):
"""
Plot number of completed grid search points by category.
:param groupby:
:param height_fig:
:param width_fig:
:return:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if self.summary_tab is None:
self.create_summary_tab()
sns_tab = self.summary_tab.copy()
# Build figure.
organs = np.unique(sns_tab["organ"].values)
model_types = np.unique(sns_tab["model_type"].values)
hm = np.zeros((len(organs), len(model_types)))
for i, m in enumerate(model_types):
for j, o in enumerate(organs):
n_by_gridpoint = sns_tab.loc[
np.logical_and(
sns_tab["model_type"].values == m,
sns_tab["organ"].values == o
), :].groupby(groupby).size().values
                # Assume that the largest number of successful completions per grid point is the maximum (i.e. all completed):
hm[j, i] = np.sum(n_by_gridpoint == np.max(n_by_gridpoint)) if len(n_by_gridpoint) > 0 else 0
sns_data_heatmap = pandas.DataFrame(
hm, index=organs, columns=model_types
)
fig, axs = plt.subplots(1, 1, figsize=(height_fig, width_fig))
with sns.axes_style("dark"):
axs = sns.heatmap(
sns_data_heatmap,
annot=True, fmt=".2f",
ax=axs,
xticklabels=True, yticklabels=True,
cbar_kws={'label': 'n'}
)
return fig, axs
def plot_best_model_by_hyperparam(
self,
metric_select: str,
param_hue='lr',
partition_select: str = "val",
partition_show: str = "test",
subset: dict = {},
param_x: Union[tuple, list] = ('lr', 'depth', 'width', 'dropout', 'l1', 'l2'),
show_swarm: bool = False,
panel_width: float = 4.,
panel_height: float = 2.
):
"""
        Produces boxplots of the selected metric across all hyper-parameter choices, by organ.
        :param metric_select: Metric to plot.
        :param param_hue: Hyper-parameter for hue-axis partition.
        :param partition_select: Partition ("train", "val" or "test") used to select the best models.
        :param partition_show: Partition ("train", "val" or "test") whose metric values are shown.
        :param subset: Subset of the summary table to restrict the model selection to.
        :param param_x: Hyper-parameters considered for the x-axis partition.
        :param show_swarm: Whether to overlay a swarm plot on each box plot.
        :param panel_width:
        :param panel_height:
        :return:
"""
import seaborn as sns
import matplotlib.pyplot as plt
params = [param for param in param_x if len(np.unique(self.summary_tab[param])) > 1 and param != param_hue]
organs = np.unique(self.summary_tab["organ"].values)
fig, ax = plt.subplots(
nrows=len(organs), ncols=len(params),
figsize=(panel_width * len(params), panel_height * len(organs))
)
if len(organs) == 1:
ax = np.expand_dims(ax, axis=0)
for j, param in enumerate(params):
summary_table_param = self.best_model_by_partition(
partition_select=partition_select,
metric_select=metric_select,
cv_mode="mean",
subset=subset,
return_run_only=False,
grouping=["organ", param, param_hue]
)
            summary_table_param = summary_table_param.sort_values([param, param_hue])
for i, organ in enumerate(organs):
summary_table = summary_table_param.loc[summary_table_param["organ"].values == organ, :]
# Plot each metric:
ycol = f"{partition_show}_{metric_select}"
if len(organs) == 1 and len(params) == 1:
ax = np.array([ax])
sns.boxplot(
x=param, hue=param_hue, y=ycol,
data=summary_table, ax=ax[i, j] if len(ax.shape) == 2 else ax[i] if len(ax.shape) == 1 else ax
)
if show_swarm:
sns.swarmplot(
x=param, hue=param_hue, y=ycol,
data=summary_table, ax=ax[i, j] if len(ax.shape) == 2 else ax[i] if len(ax.shape) == 1 else ax
)
if j == 0:
if len(ax.shape) == 2:
ax[i, j].set_ylabel(organ + "\n" + ycol)
else:
ax[i].set_ylabel(organ + "\n" + ycol)
return fig, ax
def plot_training_history(
self,
metric_select: str,
metric_show: str,
partition_select: str = "val",
subset: dict = {},
cv_key: Union[str, None] = None,
log_loss: bool = False
):
"""
        Plot training and validation loss during training, and the learning-rate reduction, for each organ.
        The partitions shown are train and val because these are the only ones recorded during training.
        :param metric_select: Metric to select the best model by.
        :param metric_show: Metric to show as a function of training progress, together with loss and learning rate.
        :param partition_select: Partition ("train", "val" or "test") of the data to select the fit by.
        :param subset: Subset of the summary table to restrict the model selection to.
        :param cv_key: Index of cross-validation to plot training history for.
        :param log_loss:
        :return:
"""
import seaborn as sns
import matplotlib.pyplot as plt
panel_width = 5
panel_height = 3
organs = np.unique(self.summary_tab["organ"].values)
fig, ax = plt.subplots(
nrows=len(organs), ncols=3,
figsize=(panel_width * 3, panel_height * len(organs))
)
if len(organs) == 1:
ax = np.expand_dims(ax, axis=0)
for i, organ in enumerate(organs):
model_gs_id, _, _ = self.get_best_model_ids(
tab=self.summary_tab,
partition_select=partition_select,
metric_select=metric_select,
cv_mode="mean",
subset=dict(list(subset.items()) + [("organ", organ)]),
)
if cv_key is None:
sns_data = []
for run in list(np.unique(
self.summary_tab.loc[self.summary_tab["model_gs_id"].values == model_gs_id, "run"].values
)):
sns_data_temp = pandas.DataFrame(self.histories[run])
sns_data_temp["epoch"] = np.arange(0, sns_data_temp.shape[0])
sns_data_temp["cv"] = run[-1]
sns_data.append(sns_data_temp)
sns_data = pandas.concat(sns_data, axis=0)
else:
cv = cv_key
sns_data = pandas.DataFrame(self.histories[f"{model_gs_id}_{cv}"])
sns_data["epoch"] = np.arange(0, sns_data.shape[0])
sns_data["cv"] = cv
# loss
sns_data_loss = pandas.concat([pandas.DataFrame({
"epoch": sns_data["epoch"].values,
"cv": sns_data["cv"].values,
"loss": np.log(sns_data[x].values) if log_loss else sns_data[x].values,
"partition": x
}) for i, x in enumerate(["loss", "val_loss"])])
sns.lineplot(
x="epoch", y="loss", style="partition", hue="cv",
data=sns_data_loss, ax=ax[i, 0]
)
ax[i, 0].set_ylabel(organ + "\nloss")
ax[i, 0].legend_.remove()
# metric
if metric_show not in sns_data.columns:
raise ValueError(f"metric {metric_show} not found in {sns_data.columns}")
sns_data_metric = pandas.concat([pandas.DataFrame({
"epoch": sns_data["epoch"].values,
"cv": sns_data["cv"].values,
metric_show: sns_data[metric_show].values,
"partition": x
}) for i, x in enumerate([metric_show, f"val_{metric_show}"])])
sns.lineplot(
x="epoch", y=metric_show, style="partition", hue="cv",
data=sns_data_metric, ax=ax[i, 1]
)
ax[i, 1].set_ylabel(organ + "\n" + metric_show)
ax[i, 1].legend_.remove()
# lr
sns_data_lr = pandas.DataFrame({
"epoch": sns_data["epoch"].values,
"cv": sns_data["cv"].values,
"lr": np.log(sns_data["lr"].values) / np.log(10)
})
sns.lineplot(
x="epoch", y="lr", hue="cv",
data=sns_data_lr, ax=ax[i, 2]
)
ax[i, 2].set_ylabel("log10 learning rate")
ax[i, 2].legend_.remove()
return fig, ax
def write_best_hyparam(
self,
write_path,
subset: dict = {},
partition: str = "test",
metric: str = "custom_negll",
cvs: Union[None, List[int]] = None
):
best_model_id = self.get_best_model_ids(
tab=self.summary_tab,
subset=subset,
partition_select=partition,
metric_select=metric,
)[0]
if best_model_id is not None:
if cvs is None:
file_path_base = os.path.join(
self.gs_dirs[best_model_id],
self.gs_keys[best_model_id],
'results',
best_model_id,
)
else:
file_path_base = os.path.join(
self.gs_dirs[f"{best_model_id}_cv{cvs[0]}"],
f"{best_model_id}_cv{cvs[0]}",
)
# Read model hyperparameter
with open(f"{file_path_base}_model_hyperparam.pickle", 'rb') as file:
hyparam_model = pickle.load(file)
# Read optimizer hyperparameter
with open(f"{file_path_base}_hyperparam.pickle", 'rb') as file:
hyparam_optim = pickle.load(file)
# Write both hyperparameter dicts
with open(os.path.join(write_path, f"{best_model_id[:-12]}_best_hyperparam.txt"), 'w') as file:
file.write(json.dumps({"model": hyparam_model, "optimizer": hyparam_optim}))
return
class SummarizeGridsearchCelltype(GridsearchContainer):
loss_idx: int
acc_idx: int
def __init__(
self,
source_path: dict,
cv: bool,
model_id_len: int = 3
):
super(SummarizeGridsearchCelltype, self).__init__(
source_path=source_path,
cv=cv
)
self.model_id_len = model_id_len
def load_ontology_names(
self,
run_id: str
):
"""
Loads ontology ids from a specific model of a previously loaded grid search.
:param run_id:
:return:
"""
fn = os.path.join(self.gs_dirs[run_id], f"{run_id}_ontology_names.pickle")
if not os.path.isfile(fn):
raise FileNotFoundError(f"file {run_id}_ontology_names.pickle not found")
with open(fn, 'rb') as f:
ids = pickle.load(f)
return ids
def create_summary_tab(self):
metrics = list(self.evals.values())[0]['val'].keys()
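        # Run id layout assumed by the splits below (an inference from this parser,
        # not documented in the original code), with the default model_id_len == 3:
        #   "<a>_<organism>-<organ>-<model_type>-<version>_<b>_<depth>_<width>_<lr>_<dropout>_<l1>_<l2>[_cv<k>]"
        # i.e. the first three underscore-separated tokens form the model id, and the
        # second token encodes organism/organ/model_type/version separated by dashes.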
self.summary_tab = pandas.DataFrame(dict(
list({
"depth": [id_i.split("_")[self.model_id_len + 0] for id_i in self.run_ids], # noqa: E241
"width": [id_i.split("_")[self.model_id_len + 1] for id_i in self.run_ids], # noqa: E241
"lr": [id_i.split("_")[self.model_id_len + 2] for id_i in self.run_ids], # noqa: E241
"dropout": [id_i.split("_")[self.model_id_len + 3] for id_i in self.run_ids], # noqa: E241
"l1": [id_i.split("_")[self.model_id_len + 4] for id_i in self.run_ids], # noqa: E241
"l2": [id_i.split("_")[self.model_id_len + 5] for id_i in self.run_ids], # noqa: E241
"cv": [id_i.split("_")[-1] if self.cv else "1" for id_i in self.run_ids], # noqa: E241
"model": ["_".join(id_i.split("_")[:self.model_id_len]) for id_i in self.run_ids], # noqa: E241
"organism": [id_i.split("_")[1].split("-")[0] for id_i in self.run_ids], # noqa: E241
"organ": [id_i.split("_")[1].split("-")[1] for id_i in self.run_ids], # noqa: E241
"model_type": [ # noqa: E241
"linear" if (id_i.split("_")[1].split("-")[2] == "mlp" and
id_i.split("_")[1].split("-")[3].split(".")[1] == "0")
else id_i.split("_")[1].split("-")[2]
for id_i in self.run_ids
],
"version": [id_i.split("_")[1].split("-")[3] for id_i in self.run_ids], # noqa: E241
"model_gs_id": ["_".join(id_i.split("_")[:(self.model_id_len + 6)]) for id_i in self.run_ids], # noqa: E241
"run": self.run_ids, # noqa: E241
}.items()) +
list(dict([("train_" + m, [self.evals[x]["train"][m] for x in self.run_ids]) for m in metrics]).items()) +
list(dict([("test_" + m, [self.evals[x]["test"][m] for x in self.run_ids]) for m in metrics]).items()) +
list(dict([("val_" + m, [self.evals[x]["val"][m] for x in self.run_ids]) for m in metrics]).items()) +
list(dict([("all_" + m, [self.evals[x]["all"][m] for x in self.run_ids]) for m in metrics]).items())
))
if self.summary_tab.shape[0] == 0:
raise ValueError("summary_tab was empty")
def best_model_celltype(
self,
subset: dict = {},
partition: str = "val",
metric: str = "loss",
cvs: Union[None, List[int]] = None
):
model_id, _, _ = self.get_best_model_ids(
tab=self.summary_tab,
partition_select=partition,
metric_select=metric,
cv_mode="mean",
subset=subset,
)
if model_id is not None:
if cvs is not None:
fns = [
os.path.join(self.gs_dirs[f"{model_id}_cv{x}"], self.gs_keys[f"{model_id}_cv{x}"], "results", f"{model_id}_cv{x}")
for x in cvs
]
else:
fns = [os.path.join(self.gs_dirs[model_id], self.gs_keys[model_id], "results", model_id)]
covar = [pandas.read_csv(f"{x}_covar.csv") for x in fns]
return model_id, covar
else:
return None, [None]
def plot_best(
self,
rename_levels=[],
partition_select: str = "val",
partition_show: str = "test",
metric_select: str = "acc",
metric_show: str = "acc",
collapse_cv: str = "max",
vmin=None,
vmax=None,
height_fig=7,
width_fig=7
):
"""
Plot accuracy or other metric heatmap by organ and model type.
:param rename_levels:
:param partition_select:
:param partition_show:
:param metric_select:
:param metric_show:
:param collapse_cv:
:param vmin:
:param vmax:
:param height_fig:
:param width_fig:
:return:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if self.summary_tab is None:
self.create_summary_tab()
# Choose the best over categories based on mean loss in CV.
# Keep variation across CV.
sns_tab = self.best_model_by_partition(
partition_select=partition_select,
metric_select=metric_select,
return_run_only=False,
grouping=["organ", "model_type"]
)
for rename_level in rename_levels:
levels_new = sns_tab[rename_level[0]].values
levels_new[levels_new == rename_level[1]] = rename_level[2]
sns_tab[rename_level[0]] = levels_new
# Build figure.
organs = np.unique(sns_tab["organ"].values)
model_types = np.unique(sns_tab["model_type"].values)
hm = np.zeros((len(organs), len(model_types))) + np.nan
mask = np.isnan(hm)
for i, m in enumerate(model_types):
for j, o in enumerate(organs):
data_temp = sns_tab.loc[
np.logical_and(
sns_tab["model_type"].values == m,
sns_tab["organ"].values == o
), f"{partition_show}_{metric_show}"
]
if data_temp.shape[0] > 0:
if self.cv:
if collapse_cv == "mean":
hm[j, i] = np.mean(data_temp.values)
elif collapse_cv == "median":
hm[j, i] = np.median(data_temp.values)
elif collapse_cv == "max":
hm[j, i] = np.max(data_temp.values)
elif collapse_cv == "min":
hm[j, i] = np.min(data_temp.values)
else:
raise ValueError(f"collapse_cv {collapse_cv} not recognized")
mask[j, i] = False
else:
hm[j, i] = data_temp.values[0]
mask[j, i] = False
if vmin is not None:
hm = np.maximum(hm, np.asarray(vmin))
if vmax is not None:
            hm = np.minimum(hm, np.asarray(vmax))
sns_data_heatmap = pandas.DataFrame(
hm, index=organs, columns=model_types
)
fig, axs = plt.subplots(1, 1, figsize=(height_fig, width_fig))
with sns.axes_style("dark"):
axs = sns.heatmap(
sns_data_heatmap, # mask=mask,
annot=True, fmt=".2f",
ax=axs, vmin=0, vmax=1,
xticklabels=True, yticklabels=True,
cbar_kws={'label': f"{partition_show}_{metric_show}"},
cmap=None
)
return fig, axs, sns_data_heatmap
def plot_best_classwise_heatmap(
self,
organ: str,
organism: str,
datapath: str,
store_format: str,
targetpath: str,
configpath: str,
partition_select: str = "val",
metric_select: str = "custom_cce_agg",
metric_show: str = "f1",
collapse_cv: str = "mean",
min_cells: int = 10,
height_fig: int = 7,
width_fig: int = 7
):
"""
Plot evaluation metric heatmap for specified organ by cell classes and model types.
:param organ: Organ to plot in heatmap.
:param organism: Species that the gridsearch was run on
:param datapath: Path to the local sfaira data repository
:param store_format:
:param targetpath:
:param configpath:
:param partition_select: Based on which partition to select the best model
- train
- val
- test
- all
:param metric_select: Based on which metric to select the best model
- loss
- accuracy
- custom_cce_agg
- acc_agg
- f1
- tpr
- fpr
:param metric_show: Which classwise metric to plot.
- accuracy
- f1
:param collapse_cv: How to collapse over the single cv runs.
        :param min_cells: Minimum number of cells of a type that must be present in the whole dataset for that class to be included in the plot.
:param height_fig: Figure height.
:param width_fig: Figure width.
:return: fig, axs, sns_data_heatmap
"""
import matplotlib.pyplot as plt
import seaborn as sns
def f1(yhat, ytrue):
"""
Class wise F1.
:param yhat:
:param ytrue:
:return:
"""
def _tp(yhat, ytrue):
"""
Class wise true positive count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * ytrue, axis=0)
def _fp(yhat, ytrue):
"""
Class wise false positive count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat == np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * (1. - ytrue), axis=0)
def _fn(yhat, ytrue):
"""
Class wise false negative count.
:param yhat:
:param ytrue:
:return:
"""
yhat_true = np.asarray(yhat < np.max(yhat, axis=1, keepdims=True), dtype="float32")
return np.sum(yhat_true * ytrue, axis=0)
precision = _tp(yhat, ytrue) / (_tp(yhat, ytrue) + _fp(yhat, ytrue))
recall = _tp(yhat, ytrue) / (_tp(yhat, ytrue) + _fn(yhat, ytrue))
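            # F1 is the harmonic mean of precision and recall, computed per class:
            # 2 / (1/P + 1/R) == 2 * P * R / (P + R).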
return 2 * 1 / (1 / precision + 1 / recall)
if self.summary_tab is None:
self.create_summary_tab()
# Choose the best over categories based on mean loss in CV.
# Keep variation across CV.
sns_tab = self.best_model_by_partition(
partition_select=partition_select,
metric_select=metric_select,
return_run_only=False,
grouping=["organ", "model_type"]
)
sns_tab = sns_tab[sns_tab['organ'] == organ]
store = load_store(cache_path=datapath, store_format=store_format)
store.load_config(configpath)
store.subset(attr_key="id", values=[k for k in store.indices.keys()
if 'cell_ontology_class' in store.adata_by_key[k].obs.columns])
store.subset(attr_key="cellontology_class", excluded_values=[
store._adata_ids_sfaira.unknown_metadata_identifier,
store._adata_ids_sfaira.not_a_cell_celltype_identifier,
])
cu = CelltypeUniverse(
cl=OntologyCl(branch="v2021-02-01"),
uberon=OCS.organ,
)
cu.load_target_universe(targetpath)
cell_counts = store.obs['cell_ontology_class'].value_counts().to_dict()
celltypelist = list(cell_counts.keys()).copy()
leaves = cu.onto_cl.convert_to_name(cu.onto_cl.leaves)
for k in celltypelist:
leafnodes = cu.onto_cl.convert_to_name(cu.onto_cl.map_to_leaves(node=k, return_type="ids", include_self=True))
# Replace count on intermediate nodes with counts over leaves
if k not in leaves:
for leaf in leaves:
if leaf not in cell_counts.keys():
cell_counts[leaf] = 0
cell_counts[leaf] += 1. / len(leafnodes)
del cell_counts[k]
# Compute class-wise metrics
vals = []
for i, run_id in enumerate(sns_tab["run"].values):
yhat = self.load_y(hat_or_true='hat', run_id=run_id)
ytrue = self.load_y(hat_or_true='true', run_id=run_id)
if metric_show == "acc":
m = accuracy(yhat, ytrue)
elif metric_show == "f1":
m = f1(yhat, ytrue)
else:
raise ValueError(f"did not recognize metric_show {metric_show}")
vals.append(m)
sns_tab[f"{metric_show}_classwise"] = vals
# Build figure.
model_types = sns_tab["model_type"].unique()
model_types.sort()
classes = self.load_ontology_names(run_id=sns_tab["run"].values[0])
hm = np.zeros((len(classes), len(model_types))) + np.nan
# mask = np.isnan(hm)
for i, m in enumerate(model_types):
data_temp = np.vstack(sns_tab.loc[sns_tab["model_type"].values == m, f"{metric_show}_classwise"].values)
if data_temp.shape[0] > 0:
if self.cv:
if collapse_cv == "mean":
hm[:, i] = np.nanmean(data_temp, axis=0)
elif collapse_cv == "median":
hm[:, i] = np.nanmedian(data_temp, axis=0)
elif collapse_cv == "max":
hm[:, i] = np.nanmax(data_temp, axis=0)
elif collapse_cv == "min":
hm[:, i] = np.nanmin(data_temp, axis=0)
else:
raise ValueError(f"collapse_cv {collapse_cv} not recognized")
else:
                    hm[:, i] = data_temp[0]
n_cells = []
for c in classes:
if c in cell_counts.keys():
n_cells.append(np.round(cell_counts[c]))
else:
warnings.warn(f"Celltype {c} from cell ontology not found in {organism} {organ} dataset")
n_cells.append(np.nan)
n_cells = np.array(n_cells)[:, None]
sns_data_heatmap = pandas.DataFrame(
np.hstack((n_cells, hm)),
index=classes,
columns=['Number of cells in whole dataset'] + list(model_types)
)
sns_data_heatmap = sns_data_heatmap[sns_data_heatmap['Number of cells in whole dataset'] >= min_cells]
mask = np.zeros(sns_data_heatmap.shape).astype(bool)
mask[:, 0] = True
with sns.axes_style("dark"):
fig, axs = plt.subplots(1, 1, figsize=(width_fig, height_fig))
axs = sns.heatmap(
sns_data_heatmap, mask=mask,
annot=True, fmt=".2f",
ax=axs, vmin=0, vmax=1,
xticklabels=True, yticklabels=True,
cbar_kws={'label': f"test_{metric_show}"},
cmap=None
)
axs = sns.heatmap(
data=sns_data_heatmap, mask=~mask,
annot=True, fmt=".0f",
ax=axs, alpha=0,
xticklabels=True, yticklabels=True,
annot_kws={"color": "black"},
cbar=False
)
return fig, axs, sns_data_heatmap
def plot_best_classwise_scatter(
self,
organ: str,
organism: str,
datapath: str,
store_format: str,
targetpath: str,
configpath: str,
partition_select: str = "val",
metric_select: str = "custom_cce_agg",
metric_show: str = "f1",
collapse_cv: str = "mean",
min_cells: int = 10,
height_fig: int = 7,
width_fig: int = 7,
annotate_thres_ncells: int = 1000,
annotate_thres_f1: float = 0.5,
):
"""
Plot evaluation metric scatterplot for specified organ by cell classes and model types.
:param organ: Organ to plot in heatmap.
:param organism: Organism that the gridsearch was run on
:param datapath: Path to the local sfaira data repository
:param store_format:
:param targetpath:
:param configpath:
:param partition_select: Based on which partition to select the best model
- train
- val
- test
- all
:param metric_select: Based on which metric to select the best model
- loss
- accuracy
- custom_cce_agg
- acc_agg
- f1
- tpr
- fpr
:param metric_show: Which classwise metric to plot.
- accuracy
- f1
:param collapse_cv: How to collapse over the single cv runs.
        :param min_cells: Minimum number of cells of a type that must be present in the whole dataset for that class to be included in the plot.
:param height_fig: Figure height.
:param width_fig: Figure width.
:param annotate_thres_ncells:
:param annotate_thres_f1:
:return: fig, axs, sns_data_scatter
"""
import matplotlib.pyplot as plt
import seaborn as sns
if self.summary_tab is None:
self.create_summary_tab()
# Choose the best over categories based on mean loss in CV.
# Keep variation across CV.
sns_tab = self.best_model_by_partition(
partition_select=partition_select,
metric_select=metric_select,
return_run_only=False,
grouping=["organ", "model_type"]
)
sns_tab = sns_tab[sns_tab['organ'] == organ]
store = load_store(cache_path=datapath, store_format=store_format)
store.load_config(configpath)
store.subset(attr_key="id", values=[k for k in store.indices.keys()
if 'cell_ontology_id' in store.adata_by_key[k].obs.columns])
store.subset(attr_key="cellontology_class", excluded_values=[
store._adata_ids_sfaira.unknown_metadata_identifier,
store._adata_ids_sfaira.not_a_cell_celltype_identifier,
])
cu = CelltypeUniverse(
cl=OntologyCl(branch="v2021-02-01"),
uberon=OCS.organ,
)
cu.load_target_universe(targetpath)
cell_counts = store.obs['cell_ontology_class'].value_counts().to_dict()
celltypelist = list(cell_counts.keys()).copy()
leaves = cu.onto_cl.convert_to_name(cu.onto_cl.leaves)
for k in celltypelist:
leafnodes = cu.onto_cl.convert_to_name(cu.onto_cl.map_to_leaves(node=k, return_type="ids", include_self=True))
# Replace count on intermediate nodes with counts over leaves
if k not in leaves:
for leaf in leaves:
if leaf not in cell_counts.keys():
cell_counts[leaf] = 0
cell_counts[leaf] += 1. / len(leafnodes)
del cell_counts[k]
# Compute class-wise metrics
vals = []
for i, run_id in enumerate(sns_tab["run"].values):
yhat = self.load_y(hat_or_true='hat', run_id=run_id)
ytrue = self.load_y(hat_or_true='true', run_id=run_id)
if metric_show == "acc":
m = accuracy(yhat, ytrue)
elif metric_show == "f1":
m = f1(yhat, ytrue)
else:
raise ValueError("did not recognize metric_show %s" % metric_show)
vals.append(m)
sns_tab[f"{metric_show}_classwise"] = vals
# Build figure.
model_types = sns_tab["model_type"].unique()
classes = self.load_ontology_names(run_id=sns_tab["run"].values[0])
hm = np.zeros((len(classes), len(model_types))) + np.nan
# mask = np.isnan(hm)
for i, m in enumerate(model_types):
data_temp = np.vstack(sns_tab.loc[sns_tab["model_type"].values == m, f"{metric_show}_classwise"].values)
if data_temp.shape[0] > 0:
if self.cv:
if collapse_cv == "mean":
hm[:, i] = np.nanmean(data_temp, axis=0)
elif collapse_cv == "median":
hm[:, i] = np.nanmedian(data_temp, axis=0)
elif collapse_cv == "max":
hm[:, i] = np.nanmax(data_temp, axis=0)
elif collapse_cv == "min":
hm[:, i] = np.nanmin(data_temp, axis=0)
else:
raise ValueError(f"collapse_cv {collapse_cv} not recognized")
else:
                    hm[:, i] = data_temp[0]
n_cells = []
for c in classes:
if c in cell_counts.keys():
n_cells.append(np.round(cell_counts[c]))
else:
warnings.warn(f"Celltype {c} from cell cu not found in {organism} {organ} dataset")
n_cells.append(np.nan)
n_cells = np.array(n_cells)[:, None]
sns_data_scatter = pandas.DataFrame(
np.hstack((n_cells, hm)),
index=classes,
columns=['Number of cells in whole dataset'] + list(model_types)
)
sns_data_scatter = sns_data_scatter[sns_data_scatter['Number of cells in whole dataset'] >= min_cells]
sns_data_scatter = pandas.melt(sns_data_scatter,
id_vars=['Number of cells in whole dataset'],
value_vars=list(model_types),
var_name='Model type',
value_name='Classwise f1 score',
ignore_index=False
)
with sns.axes_style("dark"):
fig, axs = plt.subplots(1, 1, figsize=(width_fig, height_fig))
axs = sns.scatterplot(x='Number of cells in whole dataset',
y='Classwise f1 score',
style='Model type',
alpha=0.8,
data=sns_data_scatter,
ax=axs
)
for line in range(0, sns_data_scatter.shape[0]):
if (sns_data_scatter['Number of cells in whole dataset'][line] > annotate_thres_ncells) \
and (sns_data_scatter['Classwise f1 score'][line] > annotate_thres_f1):
axs.text(sns_data_scatter['Number of cells in whole dataset'][line] + 100,
sns_data_scatter['Classwise f1 score'][line],
sns_data_scatter.index[line],
horizontalalignment='left',
size='medium',
color='black',
weight='semibold'
)
return fig, axs, sns_data_scatter
class SummarizeGridsearchEmbedding(GridsearchContainer):
loss_idx: int
mse_idx: int
def __init__(
self,
source_path: dict,
cv: bool,
loss_idx: int = 0,
mse_idx: int = 1,
model_id_len: int = 3
):
super(SummarizeGridsearchEmbedding, self).__init__(
source_path=source_path,
cv=cv
)
self.loss_idx = loss_idx
self.mse_idx = mse_idx
self.model_id_len = model_id_len
def create_summary_tab(self):
metrics = list(self.evals.values())[0]['val'].keys()
self.summary_tab = pandas.DataFrame(dict(
list({
"depth": [id_i.split("_")[self.model_id_len + 0] for id_i in self.run_ids], # noqa: E241
"width": [id_i.split("_")[self.model_id_len + 1] for id_i in self.run_ids], # noqa: E241
"lr": [id_i.split("_")[self.model_id_len + 2] for id_i in self.run_ids], # noqa: E241
"dropout": [id_i.split("_")[self.model_id_len + 3] for id_i in self.run_ids], # noqa: E241
"l1": [id_i.split("_")[self.model_id_len + 4] for id_i in self.run_ids], # noqa: E241
"l2": [id_i.split("_")[self.model_id_len + 5] for id_i in self.run_ids], # noqa: E241
"cv": [id_i.split("_")[-1] if self.cv else "1" for id_i in self.run_ids], # noqa: E241
"model": ["_".join(id_i.split("_")[:self.model_id_len]) for id_i in self.run_ids], # noqa: E241
"organism": [id_i.split("_")[1].split("-")[0] for id_i in self.run_ids], # noqa: E241
"organ": [id_i.split("_")[1].split("-")[1] for id_i in self.run_ids], # noqa: E241
"model_type": [id_i.split("_")[1].split("-")[2] for id_i in self.run_ids], # noqa: E241
"version": [id_i.split("_")[1].split("-")[3] for id_i in self.run_ids], # noqa: E241
"model_gs_id": ["_".join(id_i.split("_")[:(self.model_id_len + 6)]) for id_i in self.run_ids], # noqa: E241
"run": self.run_ids, # noqa: E241
}.items()) +
list(dict([("train_" + m, [self.evals[x]["train"][m] if m in self.evals[x]["train"].keys() else
self.evals[x]["train"]['neg_ll_' + m] for x in self.run_ids])
for m in metrics]).items()) +
list(dict([("test_" + m, [self.evals[x]["test"][m] if m in self.evals[x]["test"].keys() else
self.evals[x]["test"]['neg_ll_' + m] for x in self.run_ids])
for m in metrics]).items()) +
list(dict([("val_" + m, [self.evals[x]["val"][m] if m in self.evals[x]["val"].keys()
else self.evals[x]["val"]['neg_ll_' + m] for x in self.run_ids])
for m in metrics]).items()) +
list(dict([("all_" + m, [self.evals[x]["all"][m] if m in self.evals[x]["all"].keys()
else self.evals[x]["all"]['neg_ll_' + m] for x in self.run_ids])
for m in metrics]).items())
))
# TODO: Hacky solution to make sure metrics are called the same in VAE and other models
rename_dict = {
"train_neg_ll_custom_mse": "train_custom_mse",
"train_neg_ll_custom_negll": "train_custom_negll",
"test_neg_ll_custom_mse": "test_custom_mse",
"test_neg_ll_custom_negll": "test_custom_negll",
"val_neg_ll_custom_mse": "val_custom_mse",
"val_neg_ll_custom_negll": "val_custom_negll",
"all_neg_ll_custom_mse": "all_custom_mse",
"all_neg_ll_custom_negll": "all_custom_negll",
}
self.summary_tab = self.summary_tab.rename(columns=rename_dict)
if self.summary_tab.shape[0] == 0:
raise ValueError("summary_tab was empty")
def best_model_embedding(
self,
subset: dict = {},
partition: str = "val",
metric: str = "loss",
cvs: Union[None, List[int]] = None
):
model_id, _, _ = self.get_best_model_ids(
tab=self.summary_tab,
partition_select=partition,
metric_select=metric,
cv_mode="mean",
subset=subset,
)
if model_id is not None:
if cvs is not None:
fns = [
os.path.join(self.gs_dirs[f"{model_id}_cv{x}"], f"{model_id}_cv{x}")
for x in cvs
]
else:
fns = [os.path.join(self.gs_dirs[model_id], self.gs_keys[model_id], "results", model_id)]
embedding = [np.load(f"{x}_embedding.npy") for x in fns]
covar = [pandas.read_csv(f"{x}_covar.csv") for x in fns]
return model_id, embedding, covar
else:
return None, [None], [None]
def plot_best(
self,
rename_levels=[],
partition_select: str = "val",
partition_show: str = "test",
metric_select: str = "ll",
metric_show: str = "ll",
collapse_cv: str = "min",
vmin=None,
vmax=None,
height_fig=7,
width_fig=7
):
"""
:param rename_levels:
:param partition_select:
:param partition_show:
:param metric_select:
:param metric_show:
:param collapse_cv:
:param vmin:
:param vmax:
:param height_fig:
:param width_fig:
:return:
"""
import matplotlib.pyplot as plt
import seaborn as sns
if self.summary_tab is None:
self.create_summary_tab()
# Choose the best over categories based on mean loss in CV.
# Keep variation across CV.
sns_tab = self.best_model_by_partition(
partition_select=partition_select,
metric_select=metric_select,
return_run_only=False,
grouping=["organ", "model_type"]
)
for rename_level in rename_levels:
levels_new = sns_tab[rename_level[0]].values
levels_new[levels_new == rename_level[1]] = rename_level[2]
sns_tab[rename_level[0]] = levels_new
# Build figure.
organs = np.unique(sns_tab["organ"].values)
model_types = np.unique(sns_tab["model_type"].values)
hm = np.zeros((len(organs), len(model_types))) + np.nan
mask = np.isnan(hm)
for i, m in enumerate(model_types):
for j, o in enumerate(organs):
data_temp = sns_tab.loc[
np.logical_and(
sns_tab["model_type"].values == m,
sns_tab["organ"].values == o
), f"{partition_show}_{metric_show}"
]
if data_temp.shape[0] > 0:
if self.cv:
if collapse_cv == "mean":
hm[j, i] = np.mean(data_temp.values)
elif collapse_cv == "median":
hm[j, i] = np.median(data_temp.values)
elif collapse_cv == "max":
hm[j, i] = np.max(data_temp.values)
elif collapse_cv == "min":
hm[j, i] = np.min(data_temp.values)
else:
raise ValueError("collapse_cv % s not recognized" % collapse_cv)
else:
hm[j, i] = data_temp.values[0]
mask[j, i] = False
if vmin is not None:
hm = np.maximum(hm, np.asarray(vmin))
if vmax is not None:
            hm = np.minimum(hm, np.asarray(vmax))
sns_data_heatmap = pandas.DataFrame(
hm, index=organs, columns=model_types
)
fig, axs = plt.subplots(1, 1, figsize=(height_fig, width_fig))
with sns.axes_style("dark"):
axs = sns.heatmap(
sns_data_heatmap,
annot=True, fmt=".2f",
ax=axs,
xticklabels=True, yticklabels=True,
cbar_kws={'label': f"{partition_show}_{metric_show}"}
)
return fig, axs, sns_data_heatmap
def get_gradients_by_celltype(
self,
model_organ: str,
data_organ: str,
organism: Union[str, None],
genome: Union[str, None, dict],
model_type: Union[str, List[str]],
metric_select: str,
data_source: str,
datapath,
gene_type: str = "protein_coding",
configpath: Union[None, str] = None,
store_format: Union[None, str] = None,
test_data=True,
partition_select: str = "val",
ignore_cache=False,
min_cells=10,
):
"""
Compute gradients across latent units with respect to input features for each cell type.
        :param model_organ:
        :param data_organ:
        :param organism:
        :param genome:
        :param model_type:
        :param metric_select:
        :param data_source:
        :param datapath:
        :param gene_type:
        :param configpath:
        :param store_format:
        :param test_data:
        :param partition_select:
        :param ignore_cache:
        :param min_cells:
:return: (cell types, input features) cumulative gradients
"""
model_id, run_id, _ = self.get_best_model_ids(
tab=self.summary_tab,
metric_select=metric_select,
partition_select=partition_select,
subset={
"organ": model_organ,
"model_type": model_type,
}
)
resultspath = self.gs_dirs[run_id]
if os.path.isfile(os.path.join(resultspath, f'{model_id}_grads.pickle')) and not ignore_cache:
print('Load gradients from cached file...')
with open(os.path.join(resultspath, f'{model_id}_grads.pickle'), 'rb') as f:
gradients_raw = pickle.load(f)
else:
print('Compute gradients (1/3): load data')
# load data
if data_source == "store":
if genome is not None:
warnings.warn("Using data_source='store', the provided genome will be ignored")
store = load_store(cache_path=datapath, store_format=store_format)
store.load_config(configpath)
store.subset(attr_key="id", values=[k for k in store.indices.keys()
if 'cell_ontology_id' in store.adata_by_key[k].obs.columns])
store.subset(attr_key="cellontology_class", excluded_values=[
store._adata_ids_sfaira.unknown_metadata_identifier,
store._adata_ids_sfaira.not_a_cell_celltype_identifier,
])
adatas = store.adata_sliced
# Load into memory:
for k in adatas.keys():
adatas[k] = adatas[k].to_memory()
adata = adatas[list(adatas.keys())[0]]
                if len(adatas.keys()) > 1:
adata = adata.concatenate(*[adatas[k] for k in list(adatas.keys())[1:]])
elif data_source == "universe":
if configpath is not None or store_format is not None:
warnings.warn("Using data_source='universe', the provided configpath and store_format will be ignored")
u = Universe(data_path=datapath)
if organism is not None:
u.subset("organism", organism)
if data_organ is not None:
u.subset("organ", data_organ)
u.load(allow_caching=False)
u.streamline_features(match_to_reference=genome, subset_genes_to_type=gene_type)
u.streamline_metadata()
adata = u.adata
else:
raise ValueError("data_source has to be 'universe' or 'store'")
print('Compute gradients (2/3): load embedding')
zoo = ModelZoo()
zoo.model_id = "_".join(model_id.split("_")[:3])
embedding = EstimatorKerasEmbedding(
data=adata,
model_dir="",
model_id=model_id,
model_topology=zoo.topology_container
)
embedding.init_model()
embedding.model.training_model.load_weights(os.path.join(resultspath, f'{model_id}_weights.h5'))
# compute gradients
print('Compute gradients (3/3): cumulate gradients')
gradients_raw = embedding.compute_gradients_input(test_data=test_data, batch_size=256, per_celltype=True)
with open(os.path.join(resultspath, f'{model_id}_grads.pickle'), 'wb') as f:
pickle.dump(gradients_raw, f, pickle.HIGHEST_PROTOCOL)
print('Gradients saved to cache file!')
        # filter out cell types with fewer than min_cells cells
filtered_grads = {}
celltypes = []
for celltype in gradients_raw['gradients'].keys():
if gradients_raw['counts'][celltype] > min_cells:
filtered_grads.update({celltype: gradients_raw['gradients'][celltype]})
celltypes.append(celltype)
return np.concatenate([
np.mean(a, axis=0, keepdims=True)
for a in list(filtered_grads.values())
], axis=0), celltypes
def plot_gradient_distr(
self,
model_organ: str,
data_organ: str,
model_type: Union[str, List[str]],
metric_select: str,
datapath: str,
data_source: str,
organism: Union[str, None] = None,
genome: Union[str, None] = None,
configpath: Union[None, str] = None,
store_format: Union[None, str] = None,
test_data=True,
gene_type: str = "protein_coding",
partition_select: str = "val",
normalize=True,
remove_inactive=True,
min_cells=10,
bw=0.02,
xlim=None,
by_type=True,
height_fig=7,
width_fig=7,
hist=False,
ignore_cache=False,
save=None,
):
import seaborn as sns
import matplotlib.pyplot as plt
if by_type and isinstance(model_type, list):
raise ValueError("cannot plot by type and by model")
if isinstance(model_type, str):
model_type = [model_type]
if self.summary_tab is None:
self.create_summary_tab()
# best model for each organ and model_type
avg_grads = {}
celltypes = {}
for modelt in model_type:
avg_grads[modelt], celltypes[modelt] = self.get_gradients_by_celltype(
model_organ=model_organ,
data_organ=data_organ,
organism=organism,
model_type=modelt,
metric_select=metric_select,
genome=genome,
gene_type=gene_type,
data_source=data_source,
datapath=datapath,
configpath=configpath,
store_format=store_format,
test_data=test_data,
partition_select=partition_select,
ignore_cache=ignore_cache,
min_cells=min_cells,
)
if normalize:
avg_grads[modelt] = np.abs(avg_grads[modelt])
avg_grads[modelt] = (avg_grads[modelt] - np.min(avg_grads[modelt], axis=1, keepdims=True)) /\
np.maximum(np.max(avg_grads[modelt], axis=1, keepdims=True) -
np.min(avg_grads[modelt], axis=1, keepdims=True), 1e-8)
fig, axs = plt.subplots(1, 1, figsize=(width_fig, height_fig))
if len(avg_grads.values()) == 1:
threshold = np.mean(list(avg_grads.values())[0]) * 0.05
avg_grads_mask = np.mean(list(avg_grads.values())[0], axis=0) > threshold
active_grads = list(avg_grads.values())[0][:, avg_grads_mask]
plt.axvline(threshold, color='k', linestyle='dashed', linewidth=1,
label="active gene threshold"
)
plt.axvline(np.mean(active_grads), color='k', linestyle='solid', linewidth=1,
label="average gradient\nof active genes")
print('number of active inputs: ', active_grads.shape[1])
for k, v in avg_grads.items():
if by_type:
v_mask = np.mean(v, axis=0) > threshold
for i, x in enumerate(v):
if remove_inactive:
x = x[v_mask]
if not hist:
sns.kdeplot(x, bw_method=bw, ax=axs)
else:
if remove_inactive:
threshold = np.mean(v) * 0.05
v_mask = np.mean(v, axis=0) > threshold
v = v[:, v_mask]
if not hist:
sns.kdeplot(np.asarray(v).flatten(), bw_method=bw, label=k, ax=axs)
if xlim is not None:
axs.set_xlim(xlim)
plt.legend(loc="best")
plt.xlabel(r'$\rm{mean}_{i=1,...,D} \frac{\partial z_i}{\partial x}$')
if hist:
plt.ylabel('# genes')
plt.tight_layout()
if save is not None:
plt.savefig(save)
plt.show()
def plot_gradient_cor(
self,
model_organ: str,
data_organ: str,
model_type: Union[str, List[str]],
metric_select: str,
datapath: str,
data_source: str,
organism: Union[str, None] = None,
genome: Union[str, None] = None,
configpath: Union[None, str] = None,
store_format: Union[None, str] = None,
test_data=True,
gene_type: str = "protein_coding",
partition_select: str = "val",
height_fig=7,
width_fig=7,
ignore_cache=False,
min_cells=10,
by_type=True,
vmin=0.,
vmax=1.,
save=None,
):
"""
Plot correlation heatmap of gradient vectors accumulated on input features between cell types or models.
:param model_organ:
:param data_organ:
:param model_type:
:param metric_select:
:param datapath:
:param configpath:
:param store_format:
:param test_data:
:param partition_select:
:param height_fig:
:param width_fig:
:param ignore_cache:
:param min_cells:
:param by_type:
:param vmin:
:param vmax:
:param save:
:return:
"""
import seaborn as sns
import matplotlib.pyplot as plt
if by_type and isinstance(model_type, list):
raise ValueError("cannot plot by type and by model")
if isinstance(model_type, str):
model_type = [model_type]
if self.summary_tab is None:
self.create_summary_tab()
# best model for each organ and model_type
avg_grads = {}
celltypes = {}
for modelt in model_type:
avg_grads[modelt], celltypes[modelt] = self.get_gradients_by_celltype(
model_organ=model_organ,
data_organ=data_organ,
organism=organism,
model_type=modelt,
metric_select=metric_select,
genome=genome,
gene_type=gene_type,
data_source=data_source,
datapath=datapath,
configpath=configpath,
store_format=store_format,
test_data=test_data,
partition_select=partition_select,
ignore_cache=ignore_cache,
min_cells=min_cells,
)
fig, axs = plt.subplots(1, 1, figsize=(width_fig, height_fig))
if by_type:
v = avg_grads[model_type[0]]
celltypes_coord = celltypes[model_type[0]]
cormat = pandas.DataFrame(
np.corrcoef(v),
index=celltypes_coord,
columns=celltypes_coord
)
sns.heatmap(cormat, vmin=vmin, vmax=vmax, ax=axs)
else:
pass
plt.tight_layout()
if save is not None:
plt.savefig(save)
plt.show()
def plot_npc(
self,
organ,
topology_version,
cvs=None
):
"""
        Plots the cumulative explained variance ratio over the ordered principal components of the latent space.
        If an embedding file is found that contains z, z_mean, z_var (e.g. the output of the predict_variational() function),
        z is used rather than z_mean.
"""
import matplotlib.pyplot as plt
if self.summary_tab is None:
self.create_summary_tab()
models = np.unique(self.summary_tab["model_type"]).tolist()
self.summary_tab["topology"] = [x.split("_")[5] for x in self.summary_tab["model_gs_id"].values]
with plt.style.context("seaborn-whitegrid"):
plt.figure(figsize=(12, 6))
for model in models:
model_id, embedding, covar = self.best_model_embedding(
subset={"model_type": model, "organ": organ, "topology": topology_version},
partition="val",
metric="loss",
cvs=cvs,
)
if len(embedding[0].shape) == 3:
z = embedding[0][0] # in case of three-dimensional VAE embedding (z, z_mean, z_var), use z
else:
z = embedding[0]
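                # The eigenvalues of the latent covariance are the variances along the
                # principal components; normalising by their sum gives the explained
                # variance ratio, which is accumulated below to read off how many
                # components are needed for 95% / 99% of the variance.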
cov = np.cov(z.T)
eig_vals, eig_vecs = np.linalg.eig(cov)
eig_sum = sum(eig_vals)
var_exp = [(i / eig_sum) for i in sorted(eig_vals, reverse=True)]
cum_var_exp = np.cumsum([0] + var_exp)
plt.step(range(0, eig_vals.shape[0] + 1), cum_var_exp, where="post", linewidth=3,
label="%s cumulative explained variance (95%%: %s / 99%%: %s)" % (model, np.sum(cum_var_exp < .95), np.sum(cum_var_exp < .99)))
plt.yticks([0.0, .25, .50, .75, .95, .99])
plt.ylabel("Explained variance ratio", fontsize=16)
plt.xlabel("Principal components", fontsize=16)
plt.legend(loc="best", fontsize=16, frameon=True)
plt.tight_layout()
plt.show()
def plot_active_latent_units(
self,
organ,
topology_version,
cvs=None
):
"""
        Plots latent unit activity measured by the empirical variance of the expected latent space.
        See: https://arxiv.org/abs/1509.00519
        If an embedding file is found that contains z, z_mean, z_var (e.g. the output of the predict_variational() function),
        z is used rather than z_mean.
"""
colors = ['red', 'blue', 'green', 'cyan', 'magenta', 'yellow', 'darkgreen', 'lime', 'navy', 'royalblue',
'pink', 'peru']
def active_latent_units_mask(z):
var_x = np.diagonal(np.cov(z.T))
min_var_x = 0.01
active_units_mask = var_x > min_var_x
return active_units_mask
import matplotlib.pyplot as plt
if self.summary_tab is None:
self.create_summary_tab()
models = np.unique(self.summary_tab["model_type"]).tolist()
self.summary_tab["topology"] = [x.split("_")[5] for x in self.summary_tab["model_gs_id"].values]
with plt.style.context("seaborn-whitegrid"):
plt.figure(figsize=(12, 6))
plt.axhline(np.log(0.01), color="k", linestyle='dashed', linewidth=2, label="active unit threshold")
for i, model in enumerate(models):
model_id, embedding, covar = self.best_model_embedding(
subset={"model_type": model, "organ": organ, "topology": topology_version},
partition="val",
metric="loss",
cvs=cvs,
)
if len(embedding[0].shape) == 3:
z = embedding[0][0] # in case of three-dimensional VAE embedding (z, z_mean, z_var), use z
else:
z = embedding[0]
latent_dim = z.shape[1]
var = np.sort(np.diagonal(np.cov(z.T)))[::-1]
log_var = np.log(var)
active_units = np.log(var[active_latent_units_mask(z)])
plt.plot(range(1, log_var.shape[0] + 1), log_var, color=colors[i], alpha=1.0, linewidth=3,
label="%s active units: %i" % (model, len(active_units)))
# to plot vertical lines
log_var_cut = var.copy()
log_var_cut[~active_latent_units_mask(z)] = 0
log_var_cut = np.log(log_var_cut)
num_active = np.argmax(log_var_cut)
if num_active > 0:
plt.vlines(num_active, ymin=-.15, ymax=0.15, color=colors[i], linestyle='solid', linewidth=3)
if model == "vaevamp":
z1, z2 = np.split(np.log(np.diagonal(np.cov(z.T))), 2)
plt.plot(range(1, int(latent_dim / 2) + 1), np.sort(z2)[::-1], color=colors[i], alpha=1.0,
label=r"%s $z_2$ active units: %i" % (model, len(z2[z2 > np.log(0.01)])),
linestyle='dashed', linewidth=3)
plt.plot(range(1, int(latent_dim / 2) + 1), np.sort(z1)[::-1], color=colors[i], alpha=1.0,
label=r"%s $z_1$ active units: %i" % (model, len(z1[z1 > np.log(0.01)])),
linestyle='dotted', linewidth=3)
plt.xlabel(r'Latent unit $i$', fontsize=16)
plt.ylabel(r'$\log\,{(A_{\bf z})}_i$', fontsize=16)
plt.title(r"Latent unit activity", fontsize=16)
plt.legend(loc="upper right", frameon=True, fontsize=12)
plt.tight_layout()
plt.show()
"""
Defines the possible rewards for rollout.
Can be augmented for more complex policies using a similar scheme as the Rollout or UCT policies.
Is defined through the CLI in the Tree script.
"""
class RolloutRewards(object):
"""
    Defines the penalty and reward for the rollout if it's in the chassis.
"""
def __init__(self, penalty, full_state_reward):
self.penalty = penalty
self.full_state_reward = full_state_reward
def __repr__(self):
"""Reward representation is its values"""
return("Penalty is {} and full state reward is {}".format(self.penalty, self.full_state_reward))
Basic_Rollout_Reward = RolloutRewards(penalty = -1, full_state_reward = 2)
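# Illustrative sketch (not part of the original module): an alternative reward scheme
# can be defined the same way and passed wherever Basic_Rollout_Reward is used, e.g.
# from the CLI layer in the Tree script mentioned above. The values are placeholders.
Strict_Rollout_Reward = RolloutRewards(penalty=-5, full_state_reward=10)
# repr(Strict_Rollout_Reward) -> "Penalty is -5 and full state reward is 10"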
"""
sentry.models.release
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.db import models
from django.utils import timezone
from jsonfield import JSONField
from sentry.db.models import FlexibleForeignKey, Model, sane_repr
class Release(Model):
"""
A release is generally created when a new version is pushed into a
production state.
"""
project = FlexibleForeignKey('sentry.Project')
version = models.CharField(max_length=64)
# ref might be the branch name being released
ref = models.CharField(max_length=64, null=True, blank=True)
url = models.URLField(null=True, blank=True)
date_added = models.DateTimeField(default=timezone.now)
date_started = models.DateTimeField(null=True, blank=True)
date_released = models.DateTimeField(null=True, blank=True)
# arbitrary data recorded with the release
data = JSONField(default={})
class Meta:
app_label = 'sentry'
db_table = 'sentry_release'
unique_together = (('project', 'version'),)
__repr__ = sane_repr('project_id', 'version')
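# Illustrative usage sketch (hedged: assumes a configured Django project and an
# existing sentry Project instance bound to `project`; not part of this module):
#
#     release, created = Release.objects.get_or_create(
#         project=project,
#         version="a1b2c3d",  # e.g. a git SHA or a semver string, max 64 chars
#         defaults={"ref": "main", "url": "https://example.com/releases/a1b2c3d"},
#     )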
import os
from setuptools import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name="restful-gpio",
version="0.1.0",
license="MIT",
author="ykaragol",
python_requires='>3.4.0',
packages=["gpio"],
install_requires=required
)
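# Typical usage sketch (an assumption, not stated in this file): run from the project
# root, where requirements.txt sits next to setup.py:
#   pip install .             # installs the "gpio" package plus the pinned requirements
#   python setup.py sdist     # builds a source distribution of restful-gpio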
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TargetInfo import TargetInfo
class AlipayMerchantServiceconsultBatchqueryModel(object):
def __init__(self):
self._begin_time = None
self._end_time = None
self._page_num = None
self._page_size = None
self._status = None
self._target_infos = None
@property
def begin_time(self):
return self._begin_time
@begin_time.setter
def begin_time(self, value):
self._begin_time = value
@property
def end_time(self):
return self._end_time
@end_time.setter
def end_time(self, value):
self._end_time = value
@property
def page_num(self):
return self._page_num
@page_num.setter
def page_num(self, value):
self._page_num = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def target_infos(self):
return self._target_infos
@target_infos.setter
def target_infos(self, value):
if isinstance(value, list):
self._target_infos = list()
for i in value:
if isinstance(i, TargetInfo):
self._target_infos.append(i)
else:
self._target_infos.append(TargetInfo.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.begin_time:
if hasattr(self.begin_time, 'to_alipay_dict'):
params['begin_time'] = self.begin_time.to_alipay_dict()
else:
params['begin_time'] = self.begin_time
if self.end_time:
if hasattr(self.end_time, 'to_alipay_dict'):
params['end_time'] = self.end_time.to_alipay_dict()
else:
params['end_time'] = self.end_time
if self.page_num:
if hasattr(self.page_num, 'to_alipay_dict'):
params['page_num'] = self.page_num.to_alipay_dict()
else:
params['page_num'] = self.page_num
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.target_infos:
if isinstance(self.target_infos, list):
for i in range(0, len(self.target_infos)):
element = self.target_infos[i]
if hasattr(element, 'to_alipay_dict'):
self.target_infos[i] = element.to_alipay_dict()
if hasattr(self.target_infos, 'to_alipay_dict'):
params['target_infos'] = self.target_infos.to_alipay_dict()
else:
params['target_infos'] = self.target_infos
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMerchantServiceconsultBatchqueryModel()
if 'begin_time' in d:
o.begin_time = d['begin_time']
if 'end_time' in d:
o.end_time = d['end_time']
if 'page_num' in d:
o.page_num = d['page_num']
if 'page_size' in d:
o.page_size = d['page_size']
if 'status' in d:
o.status = d['status']
if 'target_infos' in d:
o.target_infos = d['target_infos']
return o
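# Illustrative round trip (hedged: the field values are placeholders, not real API
# data, and this block is not part of the generated SDK module):
if __name__ == "__main__":
    _model = AlipayMerchantServiceconsultBatchqueryModel()
    _model.begin_time = "2021-01-01 00:00:00"
    _model.end_time = "2021-01-31 23:59:59"
    _model.page_num = 1
    _model.page_size = 20
    _params = _model.to_alipay_dict()
    _rebuilt = AlipayMerchantServiceconsultBatchqueryModel.from_alipay_dict(_params)
    assert _rebuilt.page_size == 20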
import math
from typing import List, Optional
from PyQt5 import QtGui
from PyQt5.QtCore import QCoreApplication, Qt
from PyQt5.QtWidgets import (QApplication, QDesktopWidget, QMainWindow,
QShortcut)
from sqlalchemy.orm.exc import NoResultFound
from src.error.invalid_reference import InvalidReferenceError
from src.models.verse import Verse
from src.ui.advanced_search import AdvancedSearchWindow
from src.ui.main.control import MainWindowControl
from src.ui.main.dialogs.about_dialog import AboutDialog
from src.ui.main.dialogs.installing_version_progress_dialog import \
InstallingVersionProgressDialog
from src.ui.main.view_model import MainViewModel
from src.ui.main.widgets.history_widget import HistoryWidget
from src.ui.main.widgets.search_bar_widget import SearchBarWidget
from src.ui.main.window import Ui_MainWindow
from src.ui.projector import ProjectorWindow
from src.ui.projector_settings import ProjectorSettingsWindow
from src.ui.remote_control import RemoteControlWindow
from src.ui.theme_settings import ThemeSettingsWindow
from src.widgets.chapter_widget import ChapterWidget
class MainWindow(QMainWindow, Ui_MainWindow):
__view_model: MainViewModel
chapter_widget: ChapterWidget
history_widget: HistoryWidget
progress_dialog: Optional[InstallingVersionProgressDialog] = None
main_window_control: MainWindowControl
def __init__(self, parent=None):
super().__init__(parent)
super().setupUi(self)
self.setWindowIcon(QtGui.QIcon('icon.ico'))
self.__view_model = MainViewModel()
self.main_window_control = MainWindowControl(self)
self.projector_settings_window = ProjectorSettingsWindow()
self.theme_settings_window = ThemeSettingsWindow()
self.projector_window = ProjectorWindow()
self.advanced_search_window = AdvancedSearchWindow()
self.remote_control_window = RemoteControlWindow(
main_window_control=self.main_window_control)
self.about_dialog = AboutDialog()
self.search_bar_widget = SearchBarWidget(
versions=self.__view_model.versions,
search_callable=self.search,
project_callable=self.project,
)
self.chapter_widget = ChapterWidget(
list_widget=self.chapter_list_widget)
self.history_widget = HistoryWidget(
list_widget=self.history_list_widget)
screen = QDesktopWidget().screenGeometry(2)
self.projector_window.move(screen.left(), screen.top())
self.header_container.addWidget(self.search_bar_widget)
self.configure_events()
self.configure_hot_keys()
def configure_events(self):
self.__view_model.on_change_current_verse(self.on_change_current_verse)
self.action_export_history.triggered.connect(self.export_history)
self.action_projector_settings.triggered.connect(
self.show_projector_settings)
self.action_theme_settings.triggered.connect(self.show_themes)
self.action_about.triggered.connect(self.show_about)
self.action_advanced_search.triggered.connect(
self.show_advanced_search)
self.action_quit.triggered.connect(self.close)
self.action_install_version.triggered.connect(self.install_version)
self.action_remote.triggered.connect(self.show_remote_control)
self.advanced_search_window.verse_clicked.connect(
self.on_verse_clicked_advanced_search)
self.chapter_widget.verse_clicked.connect(self.on_chapter_verse_click)
self.history_widget.reference_clicked.connect(
self.on_history_verse_click)
self.search_bar_widget.update_clicked.connect(
self.update_projector_text)
self.search_bar_widget.change_version.connect(
self.on_change_current_version)
def on_verse_clicked_advanced_search(self, verse: Verse):
self.__view_model.current_verse = verse
self.__view_model.update_current_chapter(verse)
self.update_chapter()
self.select_current_verse_in_chapter()
self.chapter_widget.scroll_to_verse(verse)
def on_change_current_version(self, version: str):
self.__view_model.current_version = version
verse = self.__view_model.current_verse
if verse is not None:
self.search(str(verse.reference))
def on_change_current_verse(self, verse: Verse):
self.preview_text_edit.setText(f"{verse.text} ({verse.reference})")
self.update_projector_text()
def on_history_verse_click(self, verse: Verse):
self.__view_model.current_verse = verse
self.__view_model.update_current_chapter(verse)
self.update_chapter()
self.select_current_verse_in_chapter()
self.chapter_widget.scroll_to_verse(verse)
def on_chapter_verse_click(self, verse: Verse):
self.__view_model.current_verse = verse
self.select_current_verse_in_chapter()
def select_current_verse_in_chapter(self):
self.chapter_widget.select_verse(self.__view_model.current_verse)
def install_version(self):
def on_update_progress(progress: int):
if self.progress_dialog is None:
self.progress_dialog = InstallingVersionProgressDialog(self)
self.progress_dialog.setValue(0)
self.progress_dialog.show()
self.progress_dialog.setValue(math.ceil(progress*100))
QCoreApplication.processEvents()
if progress == 1:
self.progress_dialog.hide()
self.progress_dialog = None
self.__view_model.install_version(on_update_progress)
self.__view_model.update_versions()
self.search_bar_widget.set_versions(self.__view_model.versions)
def export_history(self):
self.__view_model.export_history(self.history_widget.history)
def show_about(self):
self.about_dialog.show()
def show_projector_settings(self):
self.projector_settings_window.show()
def show_themes(self):
self.theme_settings_window.show()
def show_advanced_search(self):
self.advanced_search_window.show()
def show_remote_control(self):
self.remote_control_window.show()
def configure_hot_keys(self):
hot_keys = [
(Qt.Key_PageUp, self.next_verse, self),
(Qt.Key_PageDown, self.previous_verse, self),
(Qt.Key_F4, self.search_input_request_focus, self),
(Qt.Key_F5, self.project, self),
(Qt.Key_F6, self.update_projector_text, self),
(Qt.Key_PageUp, self.next_verse, self.projector_window),
(Qt.Key_PageDown, self.previous_verse, self.projector_window),
(Qt.Key_Escape, self.close_projector, self),
]
for hot_key, action, window in hot_keys:
QShortcut(hot_key, window).activated.connect(action)
def search_input_request_focus(self):
self.search_bar_widget.search_input_request_focus()
def closeEvent(self, a0: QtGui.QCloseEvent) -> None:
self.remote_control_window.close()
self.__view_model.application.quit()
def previous_verse(self):
try:
verse = self.__view_model.previous_verse()
self.select_current_verse_in_chapter()
self.chapter_widget.scroll_to_verse(verse)
except Exception:
self.preview_text_edit.setText('Verso não encontrado')
def next_verse(self):
try:
verse = self.__view_model.next_verse()
self.select_current_verse_in_chapter()
self.chapter_widget.scroll_to_verse(verse)
except Exception:
self.preview_text_edit.setText('Verso não encontrado')
def set_occurrences(self, verses: List[Verse]):
model = QtGui.QStandardItemModel()
for verse in verses:
item = QtGui.QStandardItem()
item.setText(f"{verse.text} ({verse.reference})")
model.appendRow(item)
self.occurrences_list_view.setModel(model)
self.occurrences_label.setText(f'Ocorrências: {len(verses)}')
def update_projector_text(self):
self.projector_window.text = self.preview_text_edit.toPlainText()
def close_projector(self):
self.projector_window.close()
def project(self):
screen = QApplication.screens()[-1]
self.projector_window.show()
self.projector_window.windowHandle().setScreen(screen)
self.projector_window.showFullScreen()
def update_chapter(self):
self.chapter_widget.chapter = self.__view_model.current_chapter
def search(self, search_text: str):
try:
verse = self.__view_model.search(search_text)
self.history_widget.add_verse(verse)
self.update_chapter()
self.select_current_verse_in_chapter()
self.chapter_widget.scroll_to_verse(verse)
self.search_bar_widget.set_text(str(verse.reference))
except InvalidReferenceError:
self.preview_text_edit.setText('Referência bíblica inválida')
except NoResultFound:
self.preview_text_edit.setText('Texto não encontrado')
from gi.repository import Gtk
from gi.repository import Gio
from gi.repository import Pango
from internationalize import _
import os
def create_function_str(name, *args):
new_func = name + "("
for i, arg in enumerate(args):
if i != 0:
new_func += ", "
new_func += repr(arg)
new_func += ")"
return new_func
def gtk_is_container(widget):
try:
widget.get_children()
return True
    except AttributeError:
return False
def gtk_set_value(widget, value):
if widget.get_name() == "GtkSwitch":
widget.set_active(bool(value))
else:
widget.set_value(value)
def gtk_set_value_by_name(name, value, root):
widget = gtk_get_widget_by_name(name, root)
gtk_set_value(widget, value)
def gtk_get_value(widget):
if widget.get_name() == "GtkSwitch":
return widget.get_active()
else:
return widget.get_value()
def gtk_get_value_by_name(name, root):
    widget = gtk_get_widget_by_name(name, root)
    return gtk_get_value(widget)
def gtk_get_widget_name(widget):
return Gtk.Buildable.get_name(widget)
def gtk_get_widget_by_name(name, root):
if name == gtk_get_widget_name(root):
return root
if gtk_is_container(root):
result = None
for child in root.get_children():
widget = gtk_get_widget_by_name(name, child)
if widget:
result = widget
return result
else:
return None
def reparent(what, where):
if what.get_parent():
what.reparent(where)
else:
where.add(what)
def reparent_paned(what, where, func):
if what.get_parent():
what.reparent(where)
else:
func(what, 0, 0)
def get_gtk_version():
return Gtk.get_major_version() + 0.01 * Gtk.get_minor_version()
def filicide(parent):
for child in parent.get_children():
parent.remove(child)
def gtk_get_builder_object(path, name, handlers=False):
if os.path.isfile(path):
builder = Gtk.Builder()
builder.add_from_file(path)
if handlers:
builder.connect_signals(handlers)
return builder.get_object(name)
else:
return False
def gtk_translate_widget(widget):
widget_type = widget.get_name()
if widget_type == "GtkButton":
widget.set_label(_(widget.get_label()))
elif widget_type == "GtkLabel":
widget.set_label(_(widget.get_label()))
elif gtk_is_container(widget):
for child in widget.get_children():
gtk_translate_widget(child)
def import_from_builder(destination, path, name, handlers=False):
"""
    path - path to the .glade file
    name - id of the container from which the widgets will be extracted
    (see the illustrative usage sketch after this function)
"""
temp_object = gtk_get_builder_object(path, name, handlers)
if temp_object:
for child in temp_object.get_children():
gtk_translate_widget(child)
reparent(child, destination)
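# Illustrative usage sketch (not part of the original project): the file path,
# container id, and handler name below are hypothetical examples of how
# import_from_builder might be called to pull widgets out of a Glade file and
# reparent them into an existing box.
#
#   content_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
#   import_from_builder(content_box, "ui/settings.glade", "settings_root",
#                       handlers={"on_apply_clicked": lambda *args: None})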
def gtk_set_margin(self, all=False, top=False, bottom=False, right=False, left=False):
if all:
self.set_margin_left(all)
self.set_margin_right(all)
self.set_margin_top(all)
self.set_margin_bottom(all)
if top:
self.set_margin_top(top)
if bottom:
self.set_margin_bottom(bottom)
if right:
self.set_margin_right(right)
if left:
self.set_margin_left(left)
def gtk_add_css_class(widget, class_name):
widget.get_style_context().add_class(class_name)
def gtk_remove_css_class(widget, class_name):
widget.get_style_context().remove_class(class_name)
def gtk_add_css(css):
from gi.repository import Gdk
style_provider = Gtk.CssProvider()
    if isinstance(css, str):
        # Gtk.CssProvider.load_from_data expects bytes under Python 3
        css = css.encode("utf-8")
    style_provider.load_from_data(css)
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
def list_folders(path, sort):
folders = os.listdir(path)
if sort:
folders.sort()
if sort == "reversed":
folders.reverse()
return folders
def get_folders(path, sort=False):
folders = list_folders(path, sort)
return filter(lambda x: os.path.isdir(path + "/" + x), folders)
def get_files(path, ext_filter=None, sort=False):
folders = list_folders(path, sort)
return filter(lambda x: not os.path.isdir(path + "/" + x) and (not ext_filter or x.endswith(ext_filter)), folders)
def get_icon_image(name):
icon = Gio.ThemedIcon(name=name)
return Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
class ToggleBox(Gtk.Box):
def __init__(self):
Gtk.Box.__init__(self, spacing=0)
self.get_style_context().add_class("inline-toolbar")
self.handlers = []
self.tbuttons = []
self.count = 0
self.user_callback = None
def add_toggler(self, tbutton):
if tbutton.get_name() != "GtkToggleButton":
raise Exception("ToggleBox items have to be ToggleButtons")
self.pack_start(tbutton, True, True, 0)
self.tbuttons.append(tbutton)
handler = tbutton.connect("toggled", self.do_callback, self.count)
self.handlers.append(handler)
self.count += 1
def set_callback(self, callback):
self.user_callback = callback
def do_callback(self, button, new_id):
for id, tbutton in enumerate(self.tbuttons):
tbutton.handler_block(self.handlers[id])
if id == new_id:
tbutton.set_active(True)
self._do_user_callback(id)
else:
tbutton.set_active(False)
tbutton.handler_unblock(self.handlers[id])
def set_active(self, new_active_id):
if new_active_id < len(self.tbuttons):
self.do_callback(None, new_active_id)
else:
            raise KeyError("There is no ToggleButton with id: {} in this ToggleBox".format(new_active_id))
def _do_user_callback(self, id):
if self.user_callback:
self.user_callback(id)
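# Illustrative usage sketch (hypothetical labels, not part of the original code):
# ToggleBox keeps exactly one of its ToggleButtons active at a time and passes the
# index of the newly active button to the callback registered with set_callback.
#
#   mode_box = ToggleBox()
#   for label in ("Edit", "Preview"):
#       mode_box.add_toggler(Gtk.ToggleButton(label=label))
#   mode_box.set_callback(lambda active_id: print("active toggler:", active_id))
#   mode_box.set_active(0)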
class IconicButton(Gtk.Button):
def __init__(self, icon_name, tooltip=None):
Gtk.Button.__init__(self)
# or add?
self.set_image(get_icon_image(icon_name))
if tooltip:
self.set_tooltip_text(tooltip)
def is_file(path, ext_filter=None):
import os.path
return os.path.isfile(path) and (not ext_filter or path.endswith(ext_filter))
def is_dir(path):
import os.path
return os.path.isdir(path)
def file_get_contents(path):
import os.path
if os.path.isfile(path):
with open(path, "r") as f:
return f.read()
else:
        print("no file:", path)
return ""
def file_put_contents(path, text=""):
with open(path, "w") as f:
f.write(text)
def help_me(waste=''):
import webbrowser, internationalize
path = "documentation/user_guide.html"
lang = internationalize.CURRENT_LANG
if is_dir("documentation/" + lang):
path = "documentation/" + lang + "/user_guide.html"
webbrowser.open('file://' + os.path.realpath(path), new=1, autoraise=True)
def set_tabs(text_view, font_description, number_of_spaces):
layout = Pango.Layout(text_view.get_pango_context())
layout.set_text(" "*number_of_spaces, number_of_spaces)
layout.set_font_description(font_description)
real_tab_width = layout.get_pixel_size()[0]
del layout
tabs = Pango.TabArray.new(1, True)
tabs.set_tab(0, Pango.TabAlign.LEFT, real_tab_width)
text_view.set_tabs(tabs)
# -*- coding: utf-8 -*-
#
# Copyright 2011-2018 Matt Austin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals
import re
from django.conf import settings
from django.template import Library, Node, NodeList, TemplateSyntaxError
from django.utils.encoding import smart_str
from thummer.utils import get_thumbnail
register = Library()
kw_pat = re.compile(r'^(?P<key>[\w]+)=(?P<value>.+)$')
class ThummerNodeBase(Node):
"""
A Node that renders safely
"""
nodelist_empty = NodeList()
def render(self, context):
try:
return self._render(context)
except Exception:
if settings.DEBUG:
raise
# TODO: Log error
return self.nodelist_empty.render(context)
def _render(self, context):
        raise NotImplementedError()
@register.tag('thummer')
class ThummerNode(ThummerNodeBase):
child_nodelists = ('nodelist_url', 'nodelist_empty')
error_msg = ('Syntax error. Expected: ``thummer url geometry '
'[key1=val1 key2=val2...] as var``')
def __init__(self, parser, token):
bits = token.split_contents()
if len(bits) < 5 or bits[-2] != 'as':
raise TemplateSyntaxError(self.error_msg)
self.url = parser.compile_filter(bits[1])
self.geometry = parser.compile_filter(bits[2])
self.options = []
for bit in bits[3:-2]:
m = kw_pat.match(bit)
if not m:
raise TemplateSyntaxError(self.error_msg)
key = smart_str(m.group('key'))
expr = parser.compile_filter(m.group('value'))
self.options.append((key, expr))
self.as_var = bits[-1]
self.nodelist_url = parser.parse(('empty', 'endthummer',))
if parser.next_token().contents == 'empty':
self.nodelist_empty = parser.parse(('endthummer',))
parser.delete_first_token()
def _render(self, context):
url = self.url.resolve(context)
geometry = self.geometry.resolve(context)
options = {}
for key, expr in self.options:
noresolve = {'True': True, 'False': False, 'None': None}
value = noresolve.get('{}'.format(expr), expr.resolve(context))
if key == 'options':
options.update(value)
else:
options[key] = value
if url:
thumbnail = get_thumbnail(url, geometry, **options)
else:
return self.nodelist_empty.render(context)
context.push()
context[self.as_var] = thumbnail
output = self.nodelist_url.render(context)
context.pop()
return output
def __iter__(self):
for node in self.nodelist_url:
yield node
for node in self.nodelist_empty:
yield node
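# Illustrative template usage (shown here as a comment; the URL variable, geometry
# string, and the thumbnail attribute accessed are hypothetical examples of the
# syntax described by error_msg above):
#
#   {% thummer page.url "200x150" as thumb %}
#       <img src="{{ thumb.url }}">
#   {% empty %}
#       <img src="/static/placeholder.png">
#   {% endthummer %}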