hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4f0d8ae46d830b24cfa63aab36f1e2b1e69133 | 44,433 | py | Python | pylayers/simul/simultraj.py | lidiaxp/newPyLayer | 4caa020b0cff1c043a63bf18b71a40aa478b7737 | [
"MIT"
] | 1 | 2021-10-02T01:02:04.000Z | 2021-10-02T01:02:04.000Z | pylayers/simul/simultraj.py | buguen/pylayers | 65dd798c30370ae64edfceaf37bf6c47a92330d2 | [
"MIT"
] | null | null | null | pylayers/simul/simultraj.py | buguen/pylayers | 65dd798c30370ae64edfceaf37bf6c47a92330d2 | [
"MIT"
] | 3 | 2017-12-04T23:18:20.000Z | 2021-02-20T03:42:21.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
"""
Run simulation with full human trajectory
.. currentmodule:: pylayers.simul.simultraj
Simul class
===========
.. autosummary::
:toctree: generated/
Run simulation and data exploitation
------------------------------------
.. autosummary::
:toctree: generated/
Simul.__init__
Simul.run
Simul._gen_net
Simul.evaldeter
Simul.evalstat
Simul.show
Loading and Saving
------------------
.. autosummary::
:toctree: generated/
Simul._saveh5
Simul._loadh5
Simul.savepd
Simul.loadpd
Extraction
----------
.. autosummary::
:toctree: generated/
Simul.get_link
Simul.get_value
Miscellaneous
------------
.. autosummary::
:toctree: generated/
Simul.settime
Simul.replace_data
Simul.check_exist
Simul.get_df_from_link
Simul.update_pos
See Also
--------
pylayers.simul.link
"""
import doctest
import numpy as np
import copy
import matplotlib.pylab as plt
import pylayers.util.pyutil as pyu
import pylayers.signal.waveform as wvf
from pylayers.signal.device import Device
# Handle Layout
from pylayers.gis.layout import Layout
# Handle VectChannel and ScalChannel
from pylayers.antprop import antenna
from pylayers.network.network import Network
from pylayers.simul.link import *
from pylayers.measures.cormoran import *
# Handle directory hierarchy
from pylayers.util.project import *
# Handle UWB measurements
import pylayers.mobility.trajectory as tr
from pylayers.mobility.ban.body import *
from pylayers.antprop.statModel import *
import pandas as pd
import csv
class Simul(PyLayers):
"""
Link oriented simulation
A simulation requires :
+ A Layout
+ A Person
+ A Trajectory
or a CorSer instance
Members
-------
dpersons : dictionnary of persons (agent)
dap : dictionnary of access points
Methods
-------
load_simul : load configuration file
load_Corser : load a Corser file
_gen_net : generate network and asociated links
show : show layout and network
evaldeter : run simulation over time
"""
def __init__(self, source='simulnet_TA-Office.h5', verbose=False):
    """ object constructor

    Parameters
    ----------
    source : string | CorSer
        h5 trajectory file (default : simulnet_TA-Office.h5)
        or a pylayers CorSer measurement instance
    verbose : boolean

    Notes
    -----
    Previously computed link results are reloaded from
    simultraj_<filetraj>.h5 when that file exists.
    """
    self.verbose = verbose
    self.cfield = []
    self.dpersons = {}
    self.dap = {}
    self.Nag = 0
    self.Nap = 0
    # ray-tracing cutoff default. It was formerly assigned only inside
    # the CorSer branch, which raised NameError below whenever source
    # was a plain trajectory filename.
    cutoff = 2
    # source analysis : either a trajectory file name or a CorSer object
    if isinstance(source, str):
        self.filetraj = source
        self.load_simul(source)
        self.source = 'simul'
    elif 'pylayers' in source.__module__:
        self.filetraj = source._filename
        self.load_CorSer(source)
        self.source = 'CorSer'
    # generate the Network
    # (the wireless standard and frequency are fixed in this function)
    self._gen_net()
    # initialize Stochastic Link
    self.SL = SLink()
    # initialize Deterministic Link
    self.DL = DLink(L=self.L, verbose=self.verbose)
    self.DL.cutoff = cutoff
    self.filename = 'simultraj_' + self.filetraj + '.h5'
    # results are persisted row by row in a pandas HDF store
    # (see savepd / loadpd) rather than kept in a growing DataFrame
    self._filecsv = self.filename.split('.')[0] + '.csv'
    # which link families are evaluated by default
    self.todo = {'OB': True,
                 'B2B': True,
                 'B2I': True,
                 'I2I': False}
    filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
    if os.path.exists(filenameh5):
        self.loadpd()
    # position every body/device at the start of the trajectory
    self.settime(0.)
def __repr__(self):
    """ textual summary of the simulation configuration """
    parts = [
        'Simul trajectories class',
        '------------------------',
        '',
        'Used layout: ' + self.L.filename,
        'Number of Agents: ' + str(self.Nag),
        'Number of Access Points: ' + str(self.Nap),
        'Link to be evaluated: ' + str(self.todo),
        'tmin: ' + str(self._tmin),
        'tmax: ' + str(self._tmax),
        '',
        'self.N :',
        self.N.__repr__(),
        'CURRENT TIME: ' + str(self.ctime),
    ]
    # one trailing newline, exactly as the concatenation-based version
    return '\n'.join(parts) + '\n'
def load_simul(self, source):
    """ load a simultraj configuration file

    Parameters
    ----------
    source : string
        name of the h5 trajectory file produced by a simulnet run

    Raises
    ------
    AttributeError
        if the trajectory file does not exist
    """
    self.filetraj = source
    if not os.path.isfile(source):
        # fixed message: the original concatenation lacked spaces and
        # misspelled "running"
        raise AttributeError('Trajectory file ' + source +
                             ' has not been found. Please make sure you '
                             'have run a simulnet simulation before '
                             'running simultraj.')
    # get the trajectory
    traj = tr.Trajectories()
    traj.loadh5(self.filetraj)
    # get the layout
    self.L = Layout(traj.Lfilename)
    for ut, t in enumerate(traj):
        if t.typ == 'ag':
            # an agent carries a body description loaded from <name>.ini
            person = Body(t.name + '.ini')
            tt = t.time()
            self.dpersons.update({t.name: person})
            # NOTE(review): overwritten for every agent; assumes all
            # agent trajectories share the same time base — confirm
            self._tmin = tt[0]
            self._tmax = tt[-1]
            self.time = tt
        else:
            # access point : static position and a default antenna
            pos = np.array([t.x[0], t.y[0], t.z[0]])
            self.dap.update({t.ID: {'pos': pos,
                                    'ant': antenna.Antenna(),
                                    'name': t.name
                                    }
                             })
    self.ctime = np.nan
    self.Nag = len(self.dpersons.keys())
    self.Nap = len(self.dap.keys())
    self.traj = traj
def load_CorSer(self, source):
    """ load a CorSer measurement object for simulation

    Parameters
    ----------
    source : CorSer
        CorSer instance whose bodies (source.B), layout (source.L)
        and infrastructure nodes (source.din) seed the simulation
    """
    # normalize source.B to a list of Body instances
    if isinstance(source.B, Body):
        B = [source.B]
    elif isinstance(source.B, list):
        B = source.B
    elif isinstance(source.B, dict):
        B = source.B.values()
    else:
        raise AttributeError('CorSer.B must be a list or a Body')
    self.L = source.L
    self.traj = tr.Trajectories()
    self.traj.Lfilename = self.L._filename
    for b in B:
        self.dpersons.update({b.name: b})
        # NOTE(review): overwritten for every body; assumes all bodies
        # share the same time base — confirm
        self._tmin = b.time[0]
        self._tmax = b.time[-1]
        self.time = b.time
        self.traj.append(b.traj)
    # infrastructure nodes are keyed 'TECHNO:ID' in source.din
    for ap in source.din:
        techno, ID = ap.split(':')
        # map measurement technology tags to device names
        if techno == 'HKB':
            techno = 'hikob'
        if techno == 'TCR':
            techno = 'tcr'
        if techno == 'BS':
            techno = 'bespoon'
        self.dap.update({ap: {'pos': source.din[ap]['p'],
                              'ant': source.din[ap]['ant'],
                              'T': source.din[ap]['T'],
                              'name': techno
                              }
                         })
    self.ctime = np.nan
    self.Nag = len(B)
    self.Nap = len(source.din)
    self.corser = source
def _gen_net(self):
    """ generate Network and associated links

    Notes
    -----
    Creates self.N : Network object populated with the devices carried
    by each person and the access-point devices.

    See Also
    --------
    pylayers.network.network
    """
    #
    # Create Network
    #
    N = Network()
    #
    # get devices on bodies : one Device per (person, dev) pair
    #
    for p in self.dpersons:
        D = []
        for dev in self.dpersons[p].dev:
            aDev = Device(self.dpersons[p].dev[dev]['name'], ID=dev)
            D.append(aDev)
            D[-1].ant['A1']['name'] = self.dpersons[p].dev[dev]['file']
            D[-1].ant['antenna'] = self.dpersons[p].dev[dev]['ant']
        # all devices of a person belong to the same group
        N.add_devices(D, grp=p)
    #
    # get access point devices
    #
    for ap in self.dap:
        D = Device(self.dap[ap]['name'], ID=ap)
        D.ant['antenna'] = self.dap[ap]['ant']
        N.add_devices(D, grp='ap', p=self.dap[ap]['pos'])
        # NOTE(review): self.dap entries created by load_simul have no
        # 'T' key (only load_CorSer sets it) — confirm for simul sources
        N.update_orient(ap, self.dap[ap]['T'], now=0.)
    # create Network : internally runs
    #   _get_wstd / _get_grp / _connect / _init_PN
    N.create()
    self.N = N
def show(self):
    """ display the layout together with the current network

    Returns
    -------
    (fig, ax) : matplotlib figure and axes
    """
    fig, ax = self.L.showGs()
    # draw the network on top of the layout and reuse its return value
    return self.N.show(fig=fig, ax=ax)
def evaldeter(self, na, nb, wstd, fmod='force', nf=10, fGHz=[], **kwargs):
    """ deterministic evaluation of a link

    Parameters
    ----------
    na : string
        node a id in self.N (Network)
    nb : string
        node b id in self.N (Network)
    wstd : string
        wireless standard used for communication between na and nb
    fmod : string ('center'|'band'|'force')
        frequency evaluation mode
        center : single frequency (center frequency of a channel)
        band : nf points over the whole band
        force : use fGHz directly
    nf : int
        number of frequency points (if fmod == 'band')
    fGHz : array-like
        explicit frequency axis in GHz (if fmod == 'force')
    **kwargs :
        forwarded to DLink.eval

    Returns
    -------
    (a, t)
        a : ndarray
            alpha_k
        t : ndarray
            tau_k

    See Also
    --------
    pylayers.simul.link.DLink
    """
    # todo in network :
    # take into consideration the position and rotation of the antenna,
    # not of the device
    node_a = self.N.node[na]
    node_b = self.N.node[nb]
    self.DL.Aa = node_a['ant']['antenna']
    self.DL.a = node_a['p']
    self.DL.Ta = node_a['T']
    self.DL.Ab = node_b['ant']['antenna']
    self.DL.b = node_b['p']
    self.DL.Tb = node_b['T']
    # frequency axis selection from the chosen standard / explicit axis
    if fmod == 'center':
        self.DL.fGHz = node_a['wstd'][wstd]['fcghz']
    elif fmod == 'band':
        fminGHz = node_a['wstd'][wstd]['fbminghz']
        fmaxGHz = node_a['wstd'][wstd]['fbmaxghz']
        self.DL.fGHz = np.linspace(fminGHz, fmaxGHz, nf)
    elif fmod == 'force':
        assert len(fGHz) > 0, "fGHz has not been defined"
        self.DL.fGHz = fGHz
    a, t = self.DL.eval(**kwargs)
    return a, t
def evalstat(self, na, nb):
    """ statistical (on-body) evaluation of a link

    Parameters
    ----------
    na : string
        node a id in self.N (Network)
    nb : string
        node b id in self.N (Network)

    Returns
    -------
    (ak, tk, eng)
        ak : ndarray
            alpha_k
        tk : ndarray
            tau_k
        eng : float
            engagement
    """
    pa = self.N.node[na]['p']
    pb = self.N.node[nb]['p']
    # recover the device ids and the carrying person's name from the
    # node identifier, whose format depends on the data source
    if self.source == 'simul':
        dida, name = na.split('_')
        didb, name = nb.split('_')
    elif self.source == 'CorSer':
        # assumes both devices are on the same body (name is
        # overwritten by the second devmapper call)
        bpa, absolutedida, dida, name, technoa = self.corser.devmapper(na)
        bpb, absolutedidb, didb, name, technob = self.corser.devmapper(nb)
    ak, tk, eng = self.SL.onbody(self.dpersons[name], dida, didb, pa, pb)
    return ak, tk, eng
def settime(self, t):
    """ set the current simulation time and refresh the scene

    Parameters
    ----------
    t : float
        time (s)
    """
    # work on a disposable copy of the trajectory : update_pos may
    # resample it for the duration of a run
    self._traj = copy.copy(self.traj)
    self.ctime = t
    self.update_pos(t)
def run(self, **kwargs):
    """ run the link evaluation along a trajectory

    Parameters
    ----------
    OB : boolean
        perform on-body statistical link evaluation
    B2B : boolean
        perform body to body deterministic link evaluation
    B2I : boolean
        perform body to infrastructure deterministic link evaluation
    I2I : boolean
        perform infrastructure to infrastructure deterministic link eval.
    links : dict
        dictionary of links to be evaluated
        (key is wstd, value is a list of links ; if {}, all links)
    wstd : list
        list of wstd to be evaluated (if [], all wstd)
    t : np.array
        timestamps to be evaluated, or (tmin, tmax, N) when btr is True
    btr : boolean
        evaluate times in bit-reversed order over (tmin, tmax, 2**N)
    replace_data : boolean (True)
        if True, reference ids of already simulated links are replaced
        by the new simulation ids
    fmod : string
        frequency mode forwarded to evaldeter
    fGHz : np.array
        frequency in GHz

    Examples
    --------

    >>> from pylayers.simul.simultraj import *
    >>> from pylayers.measures.cormoran import *
    >>> C=CorSer(layout=True)
    >>> S=Simul(C,verbose=True)
    >>> link={'ieee802154':[]}
    >>> link['ieee802154'].append(S.N.links['ieee802154'][0])
    >>> lt = [0,0.2,0.3,0.4,0.5]
    >>> S.run(links=link,t=lt)
    """
    defaults = {'OB': True,
                'B2B': True,
                'B2I': True,
                'I2I': False,
                'links': {},
                'wstd': [],
                't': np.array([]),
                'btr': True,
                'DLkwargs': {},
                'replace_data': True,
                'fmod': 'force',
                'fGHz': np.array([2.45])
                }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    DLkwargs = kwargs.pop('DLkwargs')
    links = kwargs.pop('links')
    wstd = kwargs.pop('wstd')
    OB = kwargs.pop('OB')
    B2B = kwargs.pop('B2B')
    B2I = kwargs.pop('B2I')
    I2I = kwargs.pop('I2I')
    fmod = kwargs.pop('fmod')
    self.fGHz = kwargs.pop('fGHz')
    self.todo.update({'OB': OB, 'B2B': B2B, 'B2I': B2I, 'I2I': I2I})
    # check links attribute
    if links == {}:
        links = self.N.links
    elif not isinstance(links, dict):
        raise AttributeError('links is {wstd:[list of links]}, see self.N.links')
    for k in links.keys():
        # boolean mask of requested links present in the network.
        # (the former test compared a python list to False and used
        # "is False", so invalid links were never detected)
        checkl = np.array([l in self.N.links[k] for l in links[k]])
        if not checkl.all():
            uwrong = np.where(~checkl)[0]
            raise AttributeError(str(np.array(links[k])[uwrong])
                                 + ' links does not exist in Network')
    # only evaluate the standards actually requested through links
    wstd = list(links.keys())
    checkw = np.array([w in self.N.wstd.keys() for w in wstd])
    if not checkw.all():
        uwrong = np.where(~checkw)[0]
        raise AttributeError(str(np.array(wstd)[uwrong])
                             + ' wstd are not in Network')
    # force the time attribute to an ndarray.
    # (the former code left lt unbound when t was already an ndarray)
    if isinstance(kwargs['t'], np.ndarray):
        lt = kwargs['t']
    elif isinstance(kwargs['t'], list):
        lt = np.array(kwargs['t'])
    elif isinstance(kwargs['t'], (int, float)):
        lt = np.array([kwargs['t']])
    else:
        lt = kwargs['t']
    # check the requested range against the trajectory span
    if kwargs['btr']:
        if (lt[0] < self._tmin) or (lt[1] > self._tmax):
            raise AttributeError('Requested time range not available')
    # build the time axis and the iteration order
    if kwargs['btr']:
        # bit-reversed ordering refines the time axis progressively
        tmin = lt[0]
        tmax = lt[1]
        Nt = int(2 ** lt[2])
        ta = np.linspace(tmin, tmax, Nt)
        it = np.hstack((np.r_[0], np.r_[pyu.bitreverse(Nt, int(lt[2]))]))
    else:
        ta = kwargs['t']
        it = range(len(ta))
    #
    # Nested loops : time / wireless standard / links
    #
    for ks, ut in enumerate(it):
        t = ta[ut]
        self.ctime = t
        # update the spatial configuration of the scene for time t
        self.update_pos(t)
        for w in wstd:
            for na, nb, typ in links[w]:
                # skip link families disabled in self.todo
                if not self.todo[typ]:
                    continue
                if self.verbose:
                    print('-' * 30)
                    print('time: %s / %s  time idx: %s / %s / %s'
                          % (t, lt[-1], ut, len(ta), ks))
                    print('processing: %s <-> %s wstd: %s' % (na, nb, w))
                    print('-' * 30)
                eng = 0
                # deterministic link evaluation na <-> nb over wstd w
                self.evaldeter(na, nb, w,
                               applywav=False,
                               fmod=fmod,
                               fGHz=self.fGHz,
                               **DLkwargs)
                self._ak = self.DL.H.ak
                self._tk = self.DL.H.tk
                # fixed-width (40 char) identifier of the aktk group
                aktk_id = (str(ut) + '_' + na + '_' + nb + '_' + w).ljust(40)
                # 'nf' replaces the former 'fstep' column name which did
                # not match the data dict (the column was silently NaN)
                df = pd.DataFrame({'id_a': na,
                                   'id_b': nb,
                                   'x_a': self.N.node[na]['p'][0],
                                   'y_a': self.N.node[na]['p'][1],
                                   'z_a': self.N.node[na]['p'][2],
                                   'x_b': self.N.node[nb]['p'][0],
                                   'y_b': self.N.node[nb]['p'][1],
                                   'z_b': self.N.node[nb]['p'][2],
                                   'd': self.N.edge[na][nb]['d'],
                                   'eng': eng,
                                   'typ': typ,
                                   'wstd': w,
                                   'fcghz': self.N.node[na]['wstd'][w]['fcghz'],
                                   'fbminghz': self.fGHz[0],
                                   'fbmaxghz': self.fGHz[-1],
                                   'nf': len(self.fGHz),
                                   'aktk_id': aktk_id,
                                   'sig_id': self.DL.dexist['sig']['grpname'],
                                   'ray_id': self.DL.dexist['ray']['grpname'],
                                   'Ct_id': self.DL.dexist['Ct']['grpname'],
                                   'H_id': self.DL.dexist['H']['grpname'],
                                   },
                                  columns=['id_a', 'id_b',
                                           'x_a', 'y_a', 'z_a',
                                           'x_b', 'y_b', 'z_b',
                                           'd', 'eng', 'typ',
                                           'wstd', 'fcghz',
                                           'fbminghz', 'fbmaxghz', 'nf',
                                           'aktk_id',
                                           'sig_id', 'ray_id', 'Ct_id', 'H_id'
                                           ],
                                  index=[t])
                self.savepd(df)
def replace_data(self, df):
    """ overwrite the stored row matching df

    The rows of self.data matching df on (index, id_a, id_b, wstd)
    are replaced by the values of df.

    Parameters
    ----------
    df : pd.DataFrame
        single-row dataframe with the same columns as self.data
    """
    self.data[(self.data.index == df.index) &
              (self.data['id_a'] == df['id_a'].values[0]) &
              (self.data['id_b'] == df['id_b'].values[0]) &
              (self.data['wstd'] == df['wstd'].values[0])] = df.values
def check_exist(self, df):
    """ check whether a dataframe row already exists in self.data

    Parameters
    ----------
    df : pd.DataFrame
        single-row dataframe

    Returns
    -------
    boolean
        True if a matching row already exists, False otherwise
    """
    # nothing stored yet : nothing can match
    if len(self.data.index) == 0:
        return False
    # a row matches when index, both node ids and the standard agree
    mask = ((self.data.index == df.index) &
            (self.data['id_a'] == df['id_a'].values[0]) &
            (self.data['id_b'] == df['id_b'].values[0]) &
            (self.data['wstd'] == df['wstd'].values[0]))
    return len(self.data[mask]) != 0
def savepd(self, df):
    """ append one row of simulation results to the HDF store

    Parameters
    ----------
    df : pd.DataFrame
        single time-indexed row of link results

    Notes
    -----
    Rows accumulate under the 'df' key of simultraj_<filetraj>.h5 ;
    loadpd reads them back.
    """
    filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
    store = pd.HDFStore(filenameh5)
    # try/finally : do not leave the store open if append fails
    try:
        store.append('df', df)
    finally:
        store.close()
def loadpd(self):
    """ load data from previous simulations

    Fills self.data with the time-indexed dataframe stored under the
    'df' key of simultraj_<filetraj>.h5.
    """
    filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
    store = pd.HDFStore(filenameh5)
    # try/finally : the store was formerly never closed (handle leak)
    try:
        self.data = store.get('df')
    finally:
        store.close()
    self.data.index.name = 't'
    # sort_index replaces the long-removed DataFrame.sort()
    self.data = self.data.sort_index()
def get_sim_time(self, t):
    """ snap t to the closest simulated timestamp not greater than t

    Parameters
    ----------
    t : float | list | np.ndarray

    Returns
    -------
    np.ndarray of snapped timestamps
    """
    if isinstance(t, (list, np.ndarray)):
        # vectorize over the sequence ; each recursive call returns a
        # one-element array, [:, 0] keeps the scalar values
        return np.array([self.get_sim_time(tt) for tt in t])[:, 0]
    idx = np.where(self.time <= t)[0][-1]
    return np.array([self.time[idx]])
def get_df_from_link(self, id_a, id_b, wstd=''):
    """ return the rows of self.data for a specific link

    Parameters
    ----------
    id_a : str
        node id a
    id_b : str
        node id b
    wstd : str
        optional wireless standard filter

    Returns
    -------
    pd.DataFrame
        rows of self.data matching the link (and standard if given)
    """
    if wstd == '':
        return self.data[(self.data['id_a'] == id_a) &
                         (self.data['id_b'] == id_b)]
    # parentheses around the wstd comparison are required : '&' binds
    # tighter than '==', so the unparenthesized original computed
    # (mask & self.data['wstd']) == wstd instead of filtering on wstd
    return self.data[(self.data['id_a'] == id_a) &
                     (self.data['id_b'] == id_b) &
                     (self.data['wstd'] == wstd)]
def update_pos(self, t):
    """ update positions of devices and bodies for a given time

    Parameters
    ----------
    t : float
        time value (s)
    """
    # only move bodies when at least one body-related link family
    # (on-body, body-to-body, body-to-infrastructure) is enabled
    if ((self.todo['OB']) or (self.todo['B2B']) or (self.todo['B2I'])):
        nodeid = []
        pos = []
        devlist = []
        orient = []
        for up, person in enumerate(self.dpersons.values()):
            # place the body on its trajectory at time t
            # (cs=True also updates the device coordinate systems)
            person.settopos(self._traj[up], t=t, cs=True)
            name = person.name
            dev = person.dev.keys()
            devlist.extend(dev)
            # device positions (dcs) and orientations (acs)
            pos.extend([person.dcs[d][:, 0] for d in dev])
            orient.extend([person.acs[d] for d in dev])
        # TODO !!!!!!!!!!!!!!!!!!!!
        # in a future version, the network update must also update
        # the antenna position in the device coordinate system
        self.N.update_pos(devlist, pos, now=t)
        self.N.update_orient(devlist, orient, now=t)
        self.N.update_dis()
def get_value(self, **kwargs):
    """ retrieve output parameters at specific time(s)

    Parameters
    ----------
    typ : list
        parameters to be retrieved among
        ('R' | 'C' | 'H' | 'ak' | 'tk' | 'rss')
    links : dict
        dictionary of links to be evaluated
        (key is wstd, value is a list of links)
    t : int or np.array
        timestamp(s) to be evaluated
    angles : bool
        if True express the channel C in the global frame

    Returns
    -------
    output : dict
        output[link_key]['t'], ['ak'], ... ;
        timestamps with no stored data are listed under
        output[link_key]['time_to_simul']
    """
    defaults = {'t': 0,
                'typ': ['ak'],
                'links': {},
                'wstd': [],
                'angles': False
                }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    # allocate an empty dictionary for the selected outputs
    output = {}
    # t can be a scalar or a sequence : snap to simulated timestamps
    t = kwargs['t']
    t = self.get_sim_time(t)
    dt = self.time[1] - self.time[0]
    # flatten the {wstd: [links]} dict into a flat list of links
    plinks = kwargs['links']
    links = []
    if isinstance(plinks, dict):
        for l in plinks.keys():
            links.extend(plinks[l])
    if len(links) == 0:
        raise AttributeError('Please give valid links to get values')
    # for each requested time step
    for tt in t:
        # for each requested link
        for link in links:
            linkname = link[0] + '-' + link[1]
            if not output.has_key(linkname):
                output[linkname] = {}
            if not output[linkname].has_key('t'):
                output[linkname]['t'] = []
            # restrict the global dataframe to the specific link ...
            df = self.get_df_from_link(link[0], link[1])
            # ... and to rows within +/- dt of the requested time
            df = df[(df.index > tt - dt) & (df.index <= tt + dt)]
            if len(df) != 0:
                output[linkname]['t'].append(tt)
                if len(df) > 1:
                    # more than one stored row in the time window
                    print 'Warning possible issue in self.get_value'
                # keep the most recent matching row
                line = df.iloc[-1]
                # ak / tk / rss are all extracted from the H group
                if 'ak' in kwargs['typ'] or 'tk' in kwargs['typ'] or 'rss' in kwargs['typ']:
                    H_id = line['H_id'].decode('utf8')
                    # parse the group identifier
                    lid = H_id.split('_')
                    self.DL.load(self.DL.H, H_id)
                    if 'ak' in kwargs['typ']:
                        if not output[linkname].has_key('ak'):
                            output[linkname]['ak'] = []
                        output[linkname]['ak'].append(copy.deepcopy(self.DL.H.ak))
                    if 'tk' in kwargs['typ']:
                        if not output[linkname].has_key('tk'):
                            output[linkname]['tk'] = []
                        output[linkname]['tk'].append(copy.deepcopy(self.DL.H.tk))
                    if 'rss' in kwargs['typ']:
                        if not output[linkname].has_key('rss'):
                            output[linkname]['rss'] = []
                        output[linkname]['rss'].append(copy.deepcopy(self.DL.H.rssi()))
                if 'R' in kwargs['typ']:
                    if not output[linkname].has_key('R'):
                        output[linkname]['R'] = []
                    ray_id = line['ray_id']
                    self.DL.load(self.DL.R, ray_id)
                    output[linkname]['R'].append(copy.deepcopy(self.DL.R))
                if 'C' in kwargs['typ']:
                    if not output[linkname].has_key('C'):
                        output[linkname]['C'] = []
                    Ct_id = line['Ct_id']
                    self.DL.load(self.DL.C, Ct_id)
                    # optionally express the channel in the global frame
                    if kwargs['angles']:
                        self.DL.C.islocal = False
                        self.DL.C.locbas(Tt=self.DL.Ta, Tr=self.DL.Tb)
                    # T channel
                    output[linkname]['C'].append(copy.deepcopy(self.DL.C))
                if 'H' in kwargs['typ']:
                    if not output[linkname].has_key('H'):
                        output[linkname]['H'] = []
                    H_id = line['H_id']
                    lid = H_id.split('_')
                    self.DL.load(self.DL.H, H_id)
                    output[linkname]['H'].append(copy.deepcopy(self.DL.H))
            # no stored data for this timestamp : remember it
            else:
                if not output[linkname].has_key('time_to_simul'):
                    output[linkname]['time_to_simul'] = []
                output[linkname]['time_to_simul'].append(tt)
    # report the timestamps that still need to be simulated
    for l in output.keys():
        if output[l].has_key('time_to_simul'):
            print 'link', l, 'require simulation for timestamps', output[l]['time_to_simul']
    return(output)
def get_link(self, **kwargs):
    """ retrieve a DLink at a specific time from a simultraj

    Parameters
    ----------
    typ : list
        parameters to be loaded into the link (ak | tk | R | C | H)
    links : dict
        dictionary of links to be evaluated
        (key is wstd, value is a list of links)
    t : int or np.array
        timestamp(s) to be evaluated
    angles : bool
        if True express the channel C in the global frame

    Returns
    -------
    DL : DLink
        the simulation DLink, repositioned and loaded with the
        requested groups (note : a single shared DLink is returned,
        configured for the last processed time/link)

    Examples
    --------

    >>> from pylayers.simul.simultraj import *
    >>> from pylayers.measures.cormoran import *
    >>> C=CorSer(serie=6,day=11,layout=True)
    >>> S = Simul(C,verbose=False)
    >>> DL = S.get_link(typ=['R','C','H'])
    """
    defaults = {'t': 0,
                'typ': ['ak'],
                'links': {},
                'wstd': [],
                'angles': False
                }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    output = {}
    # t can be a scalar or a sequence : snap to simulated timestamps
    t = kwargs['t']
    t = self.get_sim_time(t)
    dt = self.time[1] - self.time[0]
    # flatten the {wstd: [links]} dict into a flat list of links
    plinks = kwargs['links']
    links = []
    if isinstance(plinks, dict):
        for l in plinks.keys():
            links.extend(plinks[l])
    if len(links) == 0:
        raise AttributeError('Please give valid links to get values')
    # for each requested time step
    for tt in t:
        # for each requested link
        for link in links:
            linkname = link[0] + '-' + link[1]
            if not output.has_key(linkname):
                output[linkname] = {}
            if not output[linkname].has_key('t'):
                output[linkname]['t'] = []
            # restrict the global dataframe to the specific link ...
            df = self.get_df_from_link(link[0], link[1])
            # ... and to rows within +/- dt of the requested time
            df = df[(df.index > tt - dt) & (df.index <= tt + dt)]
            if len(df) != 0:
                output[linkname]['t'].append(tt)
                if len(df) > 1:
                    print 'Warning possible issue in self.get_link'
                # keep the most recent matching row
                line = df.iloc[-1]
                # reposition bodies/devices at the requested time and
                # mirror the link geometry into the shared DLink
                self.update_pos(t=tt)
                self.DL.a = self.N.node[link[0]]['p']
                self.DL.b = self.N.node[link[1]]['p']
                self.DL.Ta = self.N.node[link[0]]['T']
                self.DL.Tb = self.N.node[link[1]]['T']
                if 'R' in kwargs['typ']:
                    ray_id = line['ray_id']
                    self.DL.load(self.DL.R, ray_id)
                if 'C' in kwargs['typ']:
                    Ct_id = line['Ct_id']
                    self.DL.load(self.DL.C, Ct_id)
                    # optionally express the channel in the global frame
                    if kwargs['angles']:
                        self.DL.C.islocal = False
                        self.DL.C.locbas(Tt=self.DL.Ta, Tr=self.DL.Tb)
                if 'H' in kwargs['typ']:
                    H_id = line['H_id']
                    self.DL.load(self.DL.H, H_id)
    return(self.DL)
def _show3(self, **kwargs):
    """ 3D show using Mayavi

    Parameters
    ----------
    t : float
        time index
    link : list
        [id_a, id_b]
        id_a : node id a
        id_b : node id b
    lay : bool
        show layout
    net : bool
        show network
    body : bool
        show bodies
    rays : bool
        show rays
    ant : bool
        show antenna patterns on bodies
    """
    defaults = {'t': 0,
                'link': [],
                'wstd': [],
                'lay': True,
                'net': True,
                'body': True,
                'rays': True,
                'ant': False
                }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    link = kwargs['link']
    self.update_pos(kwargs['t'])
    if len(self.data) != 0:
        df = self.data[self.data.index == pd.to_datetime(kwargs['t'])]
        # an empty selection means this time was never simulated.
        # (the original test was inverted and raised on valid times)
        if len(df) == 0:
            raise AttributeError('invalid time')
        if link == []:
            # default : pick a stored link
            # NOTE(review): index <= epoch looks odd for selecting the
            # first row — confirm against the stored index dtype
            line = df[df.index <= pd.to_datetime(0)]
            link = [line['id_a'].values[0], line['id_b'].values[0]]
        else:
            # get info of the corresponding timestamp
            line = df[(df['id_a'] == link[0]) & (df['id_b'] == link[1])]
            if len(line) == 0:
                # try the reversed orientation of the link
                line = df[(df['id_b'] == link[0]) & (df['id_a'] == link[1])]
            if len(line) == 0:
                raise AttributeError('invalid link')
        rayid = line['ray_id'].values[0]
        self.DL.a = self.N.node[link[0]]['p']
        self.DL.b = self.N.node[link[1]]['p']
        self.DL.Ta = self.N.node[link[0]]['T']
        self.DL.Tb = self.N.node[link[1]]['T']
        self.DL.load(self.DL.R, rayid)
        self.DL._show3(newfig=False,
                       lay=kwargs['lay'],
                       rays=kwargs['rays'],
                       ant=False)
    else:
        # nothing simulated yet : only show the layout
        self.DL._show3(newfig=False,
                       lay=True,
                       rays=False,
                       ant=False)
    if kwargs['net']:
        self.N._show3(newfig=False)
    if kwargs['body']:
        for p in self.dpersons:
            self.dpersons[p]._show3(newfig=False,
                                    topos=True,
                                    pattern=kwargs['ant'])
# def _saveh5_init(self):
# """ initialization of the h5py file
# """
# filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
# import ipdb
# try:
# f5 = h5py.File(filenameh5, 'w')
# f5.create_dataset('time', shape=self.time.shape, data=self.time)
# f5.close()
# except:
# f5.close()
# raise NameError('simultra.saveinit: \
# issue when writting h5py file')
def _saveh5(self, ut, ida, idb, wstd):
    """ save alpha_k / tau_k of a link in h5py format

    Parameters
    ----------
    ut : int
        time index in self.time
    ida : string
        node a index
    idb : string
        node b index
    wstd : string
        wireless standard of the link

    Notes
    -----
    Dataset organisation :

    simultraj_<trajectory_filename.h5>.h5
        |time                       # range of simulation time
        |/<tidx_ida_idb_wstd>/      # one group per (time, link, wstd)
            |alphak                 # alpha_k values
            |tauk                   # tau_k values

    If the group already exists nothing is written.

    See Also
    --------
    pylayers.simul.links
    """
    filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
    grpname = str(ut) + '_' + ida + '_' + idb + '_' + wstd
    # open outside the try block : the former bare-except handler
    # referenced fh5 even when h5py.File itself had failed, and it
    # replaced the real error with an opaque NameError
    fh5 = h5py.File(filenameh5, 'a')
    try:
        if grpname not in fh5.keys():
            fh5.create_group(grpname)
            f = fh5[grpname]
            f.create_dataset('alphak',
                             shape=self._ak.shape,
                             maxshape=(None),
                             data=self._ak)
            f.create_dataset('tauk',
                             shape=self._tk.shape,
                             maxshape=(None),
                             data=self._tk)
    finally:
        # always release the file handle
        fh5.close()
def _loadh5(self, grpname):
    """ load alpha_k / tau_k of a link from the h5py file

    Parameters
    ----------
    grpname : string
        group name, found in the aktk_id column of self.data

    Returns
    -------
    (ak, tk)
        ak : ndarray
            alpha_k
        tk : ndarray
            tau_k

    Raises
    ------
    NameError
        if grpname is not present in the file
    """
    filenameh5 = pyu.getlong(self.filename, pstruc['DIRLNK'])
    # open outside the try block and close in finally : the former
    # bare-except handler hid the real read error behind a NameError
    fh5 = h5py.File(filenameh5, 'r')
    try:
        if grpname not in fh5.keys():
            raise NameError(grpname + ' cannot be reached in ' + self.filename)
        f = fh5[grpname]
        ak = f['alphak'][:]
        tk = f['tauk'][:]
    finally:
        fh5.close()
    return ak, tk
def tocsv(self, ut, ida, idb, wstd, init=False):
    """ append the last simulated row of self.data to the csv file

    Parameters
    ----------
    ut, ida, idb, wstd :
        time index, node ids and wireless standard (kept for interface
        compatibility ; the row written is always self.data.iloc[-1])
    init : bool
        if True, also write the header line first
    """
    filecsv = pyu.getlong(self._filecsv, pstruc['DIRLNK'])
    last = self.data.iloc[-1]
    with open(filecsv, 'a') as csvfile:
        writer = csv.writer(csvfile, delimiter=';',
                            quoting=csv.QUOTE_MINIMAL)
        if init:
            # header : dataframe columns followed by the ak/tk columns
            header = [k for k in last.keys()] + ['ak', 'tk']
            writer.writerow(header)
        # data row : values followed by stringified alpha_k / tau_k
        row = [v for v in last.values]
        row.append(str(self._ak.tolist()))
        row.append(str(self._tk.tolist()))
        writer.writerow(row)
if __name__ == "__main__":
    # plt.ion()
    # run the embedded doctests when executed as a script
    doctest.testmod()
| 31.246835 | 97 | 0.442464 |
333f09156b00998a66ed6822a29a89de16b2d5e6 | 1,688 | py | Python | ivi/agilent/agilentDSO7034B.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | ivi/agilent/agilentDSO7034B.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | ivi/agilent/agilentDSO7034B.py | edupo/python-ivi | 8105d8064503725dde781f0378d75db58defaecb | [
"MIT"
] | null | null | null | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent7000B import *
class agilentDSO7034B(agilent7000B):
    """Agilent InfiniiVision DSO7034B IVI oscilloscope driver."""

    def __init__(self, *args, **kwargs):
        # Set the default instrument id before the base-class __init__ runs.
        self.__dict__.setdefault('_instrument_id', 'DSO7034B')

        super(agilentDSO7034B, self).__init__(*args, **kwargs)

        # DSO7034B capabilities: 4 analog channels, no digital (MSO)
        # channels, 350 MHz bandwidth.
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = (self._analog_channel_count
                               + self._digital_channel_count)
        self._bandwidth = 350e6

        self._init_channels()
| 37.511111 | 86 | 0.755332 |
3cead7a98f2147f10a9ba3dac2ff90eeb53e0262 | 7,947 | py | Python | ckeditor_uploader/views.py | guettli/django-ckeditor | 8473ac661c02a9dcd27063eb81e6b1bccbbd36a7 | [
"BSD-3-Clause"
] | 1 | 2021-05-08T08:40:55.000Z | 2021-05-08T08:40:55.000Z | ckeditor_uploader/views.py | guettli/django-ckeditor | 8473ac661c02a9dcd27063eb81e6b1bccbbd36a7 | [
"BSD-3-Clause"
] | 7 | 2021-03-30T13:52:56.000Z | 2022-03-12T00:38:56.000Z | ckeditor_uploader/views.py | guettli/django-ckeditor | 8473ac661c02a9dcd27063eb81e6b1bccbbd36a7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, unicode_literals
import inspect
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.utils.html import escape
from django.utils.module_loading import import_string
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from ckeditor_uploader import utils
from ckeditor_uploader.backends import registry
from ckeditor_uploader.forms import SearchForm
from ckeditor_uploader.utils import storage
from .utils import is_valid_image_extension
def _get_user_path(user):
    """Return the per-user sub-path for uploads.

    When the ``CKEDITOR_RESTRICT_BY_USER`` setting is truthy it names the
    user attribute (or method) the path is derived from; if that lookup
    fails (e.g. the setting is simply ``True``), the user's
    ``get_username`` is used instead.  Returns an empty string when no
    restriction is configured.
    """
    restrict_by = getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False)
    if not restrict_by:
        return ''
    try:
        prop = getattr(user, restrict_by)
    except (AttributeError, TypeError):
        # Setting was not a usable attribute name; fall back to username.
        prop = getattr(user, 'get_username')
    return str(prop() if callable(prop) else prop)
def get_upload_filename(upload_name, request):
    """Build the storage path for an uploaded file and reserve a free name.

    The path is assembled from ``CKEDITOR_UPLOAD_PATH``, an optional
    per-user directory and an optional ``YYYY/MM/DD`` directory; the final
    name is passed through ``storage.get_available_name`` to avoid clashes.
    When ``CKEDITOR_FILENAME_GENERATOR`` is configured it transforms the
    name (and slugification is skipped); legacy one-argument generators are
    still supported but emit a deprecation warning.
    """
    user_path = _get_user_path(request.user)

    # Generate date based path to put uploaded file.
    # If CKEDITOR_RESTRICT_BY_DATE is True upload file to date specific path.
    if getattr(settings, 'CKEDITOR_RESTRICT_BY_DATE', True):
        date_path = datetime.now().strftime('%Y/%m/%d')
    else:
        date_path = ''

    # Complete upload path (upload_path + date_path).
    upload_path = os.path.join(
        settings.CKEDITOR_UPLOAD_PATH, user_path, date_path
    )

    # Slugify only when no custom generator is configured; the generator is
    # expected to take full ownership of the final name.
    if (getattr(settings, 'CKEDITOR_UPLOAD_SLUGIFY_FILENAME', True) and
            not hasattr(settings, 'CKEDITOR_FILENAME_GENERATOR')):
        upload_name = utils.slugify_filename(upload_name)

    if hasattr(settings, 'CKEDITOR_FILENAME_GENERATOR'):
        generator = import_string(settings.CKEDITOR_FILENAME_GENERATOR)

        # Does the generator accept a request argument?
        try:
            inspect.getcallargs(generator, upload_name, request)
        except TypeError:
            # Does the generator accept only an upload_name argument?
            try:
                inspect.getcallargs(generator, upload_name)
            except TypeError:
                # Generator signature is unusable: warn and keep the name
                # unchanged.
                warnings.warn(
                    "Update %s() to accept the arguments `filename, request`."
                    % settings.CKEDITOR_FILENAME_GENERATOR
                )
            else:
                warnings.warn(
                    "Update %s() to accept a second `request` argument."
                    % settings.CKEDITOR_FILENAME_GENERATOR,
                    PendingDeprecationWarning
                )
                upload_name = generator(upload_name)
        else:
            upload_name = generator(upload_name, request)

    return storage.get_available_name(
        os.path.join(upload_path, upload_name)
    )
class ImageUploadView(generic.View):
    # Only POST is accepted; other verbs are rejected by Django's dispatch.
    http_method_names = ['post']

    def post(self, request, **kwargs):
        """
        Uploads a file and send back its URL to CKEditor.

        When ``CKEditorFuncNum`` is present in the query string the
        response is a JavaScript snippet that invokes CKEditor's callback;
        otherwise a JSON payload with the uploaded file's URL is returned.
        """
        uploaded_file = request.FILES['upload']

        backend = registry.get_backend()

        ck_func_num = request.GET.get('CKEditorFuncNum')
        if ck_func_num:
            # Escape the callback id before echoing it into the JS response.
            ck_func_num = escape(ck_func_num)

        filewrapper = backend(storage, uploaded_file)
        allow_nonimages = getattr(settings, 'CKEDITOR_ALLOW_NONIMAGE_FILES', True)
        # Throws an error when an non-image file are uploaded.
        if not filewrapper.is_image and not allow_nonimages:
            return HttpResponse("""
            <script type='text/javascript'>
            window.parent.CKEDITOR.tools.callFunction({0}, '', 'Invalid file type.');
            </script>""".format(ck_func_num))

        filepath = get_upload_filename(uploaded_file.name, request)

        saved_path = filewrapper.save_as(filepath)

        url = utils.get_media_url(saved_path)

        if ck_func_num:
            # Respond with Javascript sending ckeditor upload url.
            return HttpResponse("""
            <script type='text/javascript'>
                window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
            </script>""".format(ck_func_num, url))
        else:
            _, filename = os.path.split(saved_path)
            retdata = {'url': url, 'uploaded': '1',
                       'fileName': filename}

            return JsonResponse(retdata)
upload = csrf_exempt(ImageUploadView.as_view())
def get_image_files(user=None, path=''):
    """Yield the full storage path of every file under the upload directory.

    Walks the storage backend recursively starting at ``path`` (relative to
    ``CKEDITOR_UPLOAD_PATH``).  Non-superusers are confined to their own
    user directory when ``CKEDITOR_RESTRICT_BY_USER`` is enabled.
    Thumbnail files (``*_thumb``) and hidden entries are skipped.
    """
    if user and not user.is_superuser:
        user_prefix = _get_user_path(user)
    else:
        user_prefix = ''
    browse_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_prefix, path)

    try:
        directories, files = storage.listdir(browse_path)
    except (NotImplementedError, OSError):
        # Backend cannot list directories, or the path does not exist.
        return

    for name in files:
        hidden = os.path.basename(name).startswith('.')
        is_thumb = os.path.splitext(name)[0].endswith('_thumb')
        if hidden or is_thumb:
            continue
        yield os.path.join(browse_path, name)

    for directory in directories:
        if directory.startswith('.'):
            continue
        subpath = os.path.join(path, directory)
        for element in get_image_files(user=user, path=subpath):
            yield element
def get_files_browse_urls(user=None):
    """Return the per-file dicts consumed by the browse template.

    Each entry carries the media URL (``src``), a thumbnail URL (``thumb``),
    whether the file is an image, and a display name truncated to 20
    characters when a thumbnailing image backend is configured.
    """
    entries = []
    has_backend = bool(getattr(settings, 'CKEDITOR_IMAGE_BACKEND', None))
    for filename in get_image_files(user=user):
        src = utils.get_media_url(filename)
        visible_filename = os.path.split(filename)[1]
        if has_backend:
            if is_valid_image_extension(src):
                thumb = utils.get_media_url(utils.get_thumb_filename(filename))
            else:
                thumb = utils.get_icon_filename(filename)
            if len(visible_filename) > 20:
                visible_filename = visible_filename[0:19] + '...'
        else:
            # No image backend: reuse the file itself as its thumbnail.
            thumb = src
        entries.append({
            'thumb': thumb,
            'src': src,
            'is_image': is_valid_image_extension(src),
            'visible_filename': visible_filename,
        })
    return entries
def browse(request):
    """Render the file-browse page, optionally filtered by a search query."""
    files = get_files_browse_urls(request.user)

    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            query = form.cleaned_data.get('q', '').lower()
            files = [f for f in files
                     if query in f['visible_filename'].lower()]
    else:
        form = SearchForm()

    dir_list = sorted({os.path.dirname(f['src']) for f in files},
                      reverse=True)

    # Windows drops Thumbs.db files into browsed folders - hide them.
    if os.name == 'nt':
        files = [f for f in files if os.path.basename(f['src']) != 'Thumbs.db']

    context = {
        'show_dirs': getattr(settings, 'CKEDITOR_BROWSE_SHOW_DIRS', False),
        'dirs': dir_list,
        'files': files,
        'form': form
    }
    return render(request, 'ckeditor/browse.html', context)
| 34.107296 | 106 | 0.6415 |
362c43bb655db03fdb83f1ba446a168ca9f95c9f | 8,220 | py | Python | sdk/python/pulumi_azure_nextgen/web/v20200601/get_web_app_host_name_binding.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/web/v20200601/get_web_app_host_name_binding.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/web/v20200601/get_web_app_host_name_binding.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetWebAppHostNameBindingResult',
'AwaitableGetWebAppHostNameBindingResult',
'get_web_app_host_name_binding',
]
@pulumi.output_type
class GetWebAppHostNameBindingResult:
    """
    A hostname binding object.
    """
    def __init__(__self__, azure_resource_name=None, azure_resource_type=None, custom_host_name_dns_record_type=None, domain_id=None, host_name_type=None, id=None, kind=None, name=None, site_name=None, ssl_state=None, thumbprint=None, type=None, virtual_ip=None):
        # Validate and store every field in declaration order; each value,
        # when truthy, must be a str (same per-field check as the code
        # generator's expanded form).
        for _key, _value in (
                ('azure_resource_name', azure_resource_name),
                ('azure_resource_type', azure_resource_type),
                ('custom_host_name_dns_record_type', custom_host_name_dns_record_type),
                ('domain_id', domain_id),
                ('host_name_type', host_name_type),
                ('id', id),
                ('kind', kind),
                ('name', name),
                ('site_name', site_name),
                ('ssl_state', ssl_state),
                ('thumbprint', thumbprint),
                ('type', type),
                ('virtual_ip', virtual_ip)):
            if _value and not isinstance(_value, str):
                raise TypeError("Expected argument '%s' to be a str" % _key)
            pulumi.set(__self__, _key, _value)

    @property
    @pulumi.getter(name="azureResourceName")
    def azure_resource_name(self) -> Optional[str]:
        """Azure resource name."""
        return pulumi.get(self, "azure_resource_name")

    @property
    @pulumi.getter(name="azureResourceType")
    def azure_resource_type(self) -> Optional[str]:
        """Azure resource type."""
        return pulumi.get(self, "azure_resource_type")

    @property
    @pulumi.getter(name="customHostNameDnsRecordType")
    def custom_host_name_dns_record_type(self) -> Optional[str]:
        """Custom DNS record type."""
        return pulumi.get(self, "custom_host_name_dns_record_type")

    @property
    @pulumi.getter(name="domainId")
    def domain_id(self) -> Optional[str]:
        """Fully qualified ARM domain resource URI."""
        return pulumi.get(self, "domain_id")

    @property
    @pulumi.getter(name="hostNameType")
    def host_name_type(self) -> Optional[str]:
        """Hostname type."""
        return pulumi.get(self, "host_name_type")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Resource Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """Kind of resource."""
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource Name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="siteName")
    def site_name(self) -> Optional[str]:
        """App Service app name."""
        return pulumi.get(self, "site_name")

    @property
    @pulumi.getter(name="sslState")
    def ssl_state(self) -> Optional[str]:
        """SSL type"""
        return pulumi.get(self, "ssl_state")

    @property
    @pulumi.getter
    def thumbprint(self) -> Optional[str]:
        """SSL certificate thumbprint"""
        return pulumi.get(self, "thumbprint")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualIP")
    def virtual_ip(self) -> str:
        """Virtual IP address assigned to the hostname if IP based SSL is enabled."""
        return pulumi.get(self, "virtual_ip")
class AwaitableGetWebAppHostNameBindingResult(GetWebAppHostNameBindingResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable yield turns this method into a generator, which is
        # what makes instances awaitable; the already-resolved result is
        # returned immediately.
        if False:
            yield self
        return GetWebAppHostNameBindingResult(**{
            'azure_resource_name': self.azure_resource_name,
            'azure_resource_type': self.azure_resource_type,
            'custom_host_name_dns_record_type': self.custom_host_name_dns_record_type,
            'domain_id': self.domain_id,
            'host_name_type': self.host_name_type,
            'id': self.id,
            'kind': self.kind,
            'name': self.name,
            'site_name': self.site_name,
            'ssl_state': self.ssl_state,
            'thumbprint': self.thumbprint,
            'type': self.type,
            'virtual_ip': self.virtual_ip})
def get_web_app_host_name_binding(host_name: Optional[str] = None,
                                  name: Optional[str] = None,
                                  resource_group_name: Optional[str] = None,
                                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppHostNameBindingResult:
    """
    A hostname binding object.


    :param str host_name: Hostname in the hostname binding.
    :param str name: Name of the app.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    """
    __args__ = {
        'hostName': host_name,
        'name': name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20200601:getWebAppHostNameBinding', __args__, opts=opts, typ=GetWebAppHostNameBindingResult).value

    # Re-wrap the invoke output in the awaitable result type.
    attrs = ('azure_resource_name', 'azure_resource_type',
             'custom_host_name_dns_record_type', 'domain_id',
             'host_name_type', 'id', 'kind', 'name', 'site_name',
             'ssl_state', 'thumbprint', 'type', 'virtual_ip')
    return AwaitableGetWebAppHostNameBindingResult(
        **{attr: getattr(__ret__, attr) for attr in attrs})
| 36.533333 | 263 | 0.64854 |
610438725983c526498b49051353f05ce0081aba | 12,565 | py | Python | src/chisel/Combiner.py | raphael-group/chisel | 817b32e61f3e5e470e79a8cada03c72f94fc94f8 | [
"BSD-3-Clause"
] | 28 | 2019-10-15T16:50:28.000Z | 2022-03-07T08:43:06.000Z | src/chisel/Combiner.py | raphael-group/chisel | 817b32e61f3e5e470e79a8cada03c72f94fc94f8 | [
"BSD-3-Clause"
] | 19 | 2019-11-08T18:46:34.000Z | 2021-04-06T17:44:51.000Z | src/chisel/Combiner.py | raphael-group/chisel | 817b32e61f3e5e470e79a8cada03c72f94fc94f8 | [
"BSD-3-Clause"
] | 6 | 2019-12-02T16:48:21.000Z | 2021-07-26T19:33:44.000Z | #!/usr/bin/env python2.7
import os, sys
import argparse
import bisect
import math
import multiprocessing as mp
from multiprocessing import Lock, Value, Pool
from collections import defaultdict
from collections import Counter
from collections import deque
from functools import reduce
import numpy as np
import scipy.stats
from Utils import *
def parse_args(args):
    """Parse and validate command-line arguments.

    Returns a plain dict of validated options; the human-readable block
    size ("50kb", "1Mb", plain integer) is converted to a base-pair count,
    with 0 mapped to None (haplotype blocking disabled).
    Raises ValueError on any invalid or missing input.
    """
    description = "Compute RDR from barcoded single-cell sequencing data."
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("-r","--rdr", required=True, type=str, help="RDR file")
    parser.add_argument("-b","--baf", required=True, type=str, help="BAF file")
    parser.add_argument("-j","--jobs", required=False, type=int, default=0, help="Number of parallele jobs to use (default: equal to number of available processors)")
    parser.add_argument("-k", "--blocksize", required=False, type=str, default="50kb", help="Size of the haplotype blocks (default: 50kb, use 0 to disable)")
    parser.add_argument("-a", "--significance", required=False, type=float, default=0.05, help="Significance level to estimate the maximum shift error for BAF = 0.5 (default: 0.05)")
    parser.add_argument("-q", "--restarts", required=False, type=int, default=100, help="Number of restarts for EM (default: 100)")
    parser.add_argument("-t", "--bootstrap", required=False, type=int, default=100, help="Number of bootastrapping points to estimate maximum shift error (default: 100)")
    parser.add_argument("-e", "--maxerror", required=False, type=float, default=None, help="Maximum shift error for identification of BAF = 0.5 (default: not used, when specified the value is used instead of estimation)")
    parser.add_argument("-E", "--minerror", required=False, type=float, default=0.001, help="Minimum shift error for identification of BAF = 0.5 (default: 0.001)")
    parser.add_argument("-l", "--listofcells", required=False, type=str, default=None, help="List of cells to include (default: None)")
    parser.add_argument("-s", "--seed", required=False, type=int, default=None, help="Random seed for replication (default: None)")
    args = parser.parse_args(args)

    # Input files must exist before any further processing.
    if not os.path.isfile(args.rdr):
        raise ValueError("RDR file does not exist!")
    if not os.path.isfile(args.baf):
        raise ValueError("BAF file does not exist!")
    # jobs == 0 means "use every available processor".
    if args.jobs == 0:
        args.jobs = mp.cpu_count()
    if args.jobs < 1:
        raise ValueError("The number of jobs must be positive!")
    if args.restarts < 1:
        raise ValueError("The number of restarts must be positive!")
    if args.bootstrap < 1:
        raise ValueError("The number of bootstrapping points must be positive!")
    # NOTE(review): this check validates --significance but the message
    # says "maxerror" - likely a copy-paste slip in the error text.
    if not 0.0 <= args.significance <= 1.0:
        raise ValueError("The maxerror must be in [0, 1]!")
    if args.maxerror is not None and not 0.0 <= args.maxerror <= 0.5:
        raise ValueError("The maxerror must be in [0, 0.5]!")
    if args.minerror is not None and not 0.0 <= args.minerror <= 0.5:
        raise ValueError("The minerror must be in [0, 0.5]!")
    if args.listofcells is not None and not os.path.isfile(args.listofcells):
        raise ValueError("The list of cells does not exist!")
    if args.seed and args.seed < 1:
        raise ValueError("The random seed must be positive!")

    # Convert the human-readable block size into base pairs; suffixes are
    # case-sensitive ("kb" and "Mb" only).
    blocksize = 0
    try:
        if args.blocksize[-2:] == "kb":
            blocksize = int(args.blocksize[:-2]) * 1000
        elif args.blocksize[-2:] == "Mb":
            blocksize = int(args.blocksize[:-2]) * 1000000
        else:
            blocksize = int(args.blocksize)
    except:
        raise ValueError("Size must be a number, optionally ending with either \"kb\" or \"Mb\"!")
    # A zero block size disables haplotype blocking downstream.
    if blocksize == 0:
        blocksize = None

    return {
        'rdr' : args.rdr,
        'baf' : args.baf,
        'j' : args.jobs,
        'blocksize' : blocksize,
        'restarts' : args.restarts,
        'bootstrap' : args.bootstrap,
        'significance' : args.significance,
        'maxerror' : args.maxerror,
        'minerror' : args.minerror,
        'listofcells' : args.listofcells,
        'seed' : args.seed
    }
def main(args=None, stdout_file=None):
    """Entry point: combine RDR and BAF inputs and print one line per
    (chromosome, bin, cell) with read counts, RDR and BAF values.

    Output goes to ``stdout_file`` when given, otherwise to stdout.
    (This module is Python 2 code - note the `print` statement below.)
    """
    log('Parsing and checking arguments')
    args = parse_args(args)
    log('\n'.join(['Arguments:'] + ['\t{} : {}'.format(a, args[a]) for a in args]), level='INFO')
    cells = None
    if args['listofcells'] is not None:
        log('Read list of cells')
        cells = read_listofcells(args['listofcells'])
    log('Reading RDR')
    rdr = read_rdr(args['rdr'], cells=cells)
    log('Reading BAF')
    cA, cB, bulk = read_baf(args['baf'], cells=cells)
    log('Combining')
    rb = combo(rdr, cA, cB, bulk, args)
    # NOTE(review): the output file is closed only on the normal path; an
    # exception while printing would leak the handle - consider try/finally.
    if stdout_file is not None:
        stdout_f = open(stdout_file, 'w')
    log('Printing combined RDR and BAF')
    # BAF = B / (A + B), defaulting to 0.5 when no reads are available.
    baf = (lambda A, B : (float(B) / float(A+B)) if A+B > 0 else 0.5)
    # Iterate deterministically: chromosomes, then bins by start, then cells.
    gen = ((c, b, e) for c in sorted(rb, key=orderchrs) for b in sorted(rb[c], key=(lambda x : x[0])) for e in sorted(rb[c][b]))
    for c, b, e in gen:
        formb = (lambda c, b, e : [rb[c][b][e]['BAF'][0], rb[c][b][e]['BAF'][1], baf(*rb[c][b][e]['BAF'])])
        formr = (lambda c, b, e : [rb[c][b][e]['normalcount'], rb[c][b][e]['readcount'], rb[c][b][e]['RDR']])
        line = '\t'.join(map(str, [c, b[0], b[1], e] + formr(c, b, e) + formb(c, b, e)))
        if stdout_file is not None:
            stdout_f.write(line + '\n')
        else:
            print line
    if stdout_file is not None:
        stdout_f.close()
def read_rdr(f, cells):
    """Parse a RDR file into {chromosome: {(start, end): {cell: record}}}.

    Every non-comment line must carry seven whitespace-separated fields:
    chromosome, start, end, cell barcode, normal count, read count, RDR.
    Barcodes absent from ``cells`` are skipped (``cells is None`` keeps
    every barcode); duplicated (bin, barcode) pairs trigger an assertion.
    """
    rdr = defaultdict(lambda: defaultdict(dict))
    with open(f, 'r') as source:
        for line in (l for l in source if l[0] != '#' and len(l) > 1):
            fields = line.strip().split()
            if cells is not None and fields[3] not in cells:
                continue
            chrom = fields[0]
            bin_key = (int(fields[1]), int(fields[2]))
            cell = fields[3]
            assert len(fields) == 7 and cell not in rdr[chrom][bin_key]
            rdr[chrom][bin_key][cell] = {
                'normalcount': int(fields[-3]),
                'readcount': int(fields[-2]),
                'RDR': float(fields[-1]),
            }
    return {chrom: dict(bins) for chrom, bins in rdr.items()}
def read_baf(f, cells):
    """Parse a BAF file of per-SNP allele counts.

    Each non-comment line carries: chromosome, position, cell barcode,
    A-allele count, B-allele count.  SNP observations with zero total
    count, or from barcodes outside ``cells`` (when given), are dropped.

    Returns a triple ``(countA, countB, bulk)`` where the first two map
    {chromosome: {position: {cell: count}}} and ``bulk`` maps
    {chromosome: {position: (sum_A, sum_B)}} pooled over all kept cells.
    """
    count_a = defaultdict(lambda: defaultdict(dict))
    count_b = defaultdict(lambda: defaultdict(dict))
    bulk = defaultdict(lambda: defaultdict(lambda: (0, 0)))
    parse = lambda chrom, pos, cell, a, b: (chrom, int(pos), cell, int(a), int(b))
    with open(f, 'r') as source:
        for line in (l for l in source if l[0] != '#' and len(l) > 1):
            chrom, pos, cell, a_count, b_count = parse(*line.strip().split())
            if (a_count + b_count) > 0 and (cells is None or cell in cells):
                assert cell not in count_a[chrom][pos] and cell not in count_b[chrom][pos]
                count_a[chrom][pos][cell] = a_count
                count_b[chrom][pos][cell] = b_count
                previous = bulk[chrom][pos]
                bulk[chrom][pos] = (previous[0] + a_count, previous[1] + b_count)
    to_plain = lambda table: {chrom: dict(table[chrom]) for chrom in table}
    return to_plain(count_a), to_plain(count_b), to_plain(bulk)
def read_listofcells(f):
    """Return the set of cell barcodes listed in the file.

    Only the first whitespace-separated token of each non-comment,
    non-empty line is kept.
    """
    cells = set()
    with open(f, 'r') as source:
        for line in source:
            if line[0] == '#' or len(line) <= 1:
                continue
            cells.add(line.strip().split()[0])
    return cells
def combo(rdr, cA, cB, bulk, args):
    """Run combine() on every (chromosome, bin) in a process pool and
    attach the per-cell A/B allele counts ('BAF') to each RDR record.

    Each job carries its own RNG seed (drawn from the seeded global RNG)
    so results are reproducible for a fixed --seed.  Cells with no SNP
    data in a bin get the sentinel counts (0, 0).
    """
    np.random.seed(args['seed'])
    rb = defaultdict(lambda : dict())
    # Universe of cells observed anywhere in the RDR table.
    cells = set(e for c in rdr for b in rdr[c] for e in rdr[c][b])
    jobs = ((c, b, np.random.randint(1000)) for c in rdr for b in rdr[c])
    njobs = sum(len(rdr[c].keys()) for c in rdr)
    # SNP positions per chromosome, sorted for the bisect lookups in combine().
    snps = {c : sorted(cA[c].keys()) if c in cA else [] for c in rdr}
    bar = ProgressBar(total=njobs, length=40, verbose=False)
    # Workers receive the shared tables through init(), which publishes
    # them as module globals read by combine().
    initargs = (snps, cA, cB, bulk, args)
    pool = Pool(processes=min(args['j'], njobs), initializer=init, initargs=initargs)
    counts = (lambda c, b, e : rdr[c][b][e].items())
    for c, b, A, B in pool.imap_unordered(combine, jobs):
        rb[c][b] = {e : dict(counts(c, b, e) + [('BAF', (A[e], B[e]) if e in A else (0, 0))]) for e in cells}
        bar.progress(advance=True, msg="Combined bin {}:{}-{}".format(c, b[0], b[1]))
    return rb
def init(_snps, _cA, _cB, _bulk, args):
    """Pool initializer: publish the shared read-only tables and tuning
    parameters as module globals, where combine() expects to find them.
    """
    global snps, cA, cB, bulk, blocksize, restarts, boot, alpha, maxerror, minerror
    snps = _snps
    cA = _cA
    cB = _cB
    bulk = _bulk
    blocksize = args['blocksize']
    restarts = args['restarts']
    boot = args['bootstrap']
    alpha = args['significance']
    maxerror = args['maxerror']
    minerror = args['minerror']
def combine(job):
    """Worker: aggregate per-cell A/B allele counts over all SNPs in one bin.

    ``job`` is ``(chromosome, (start, end), seed)``.  When haplotype
    blocking is enabled, SNPs are grouped into fixed-size blocks, an EM fit
    of the pooled (bulk) counts estimates the block BAF ``beta``, and each
    SNP's A/B counts are optionally swapped so the minor allele is
    consistent across the bin; when ``beta`` is not distinguishable from
    0.5 the blocks are swapped at random instead.

    Returns ``(chromosome, bin, A, B)`` where A and B map each cell to its
    summed allele counts (Counters merged with inupdate).
    """
    c, b, seed = job
    # Per-job seeding keeps the random choices reproducible per bin.
    np.random.seed(seed)
    # Select the (sorted) SNP positions that fall inside this bin.
    L = bisect.bisect_left(snps[c], b[0])
    R = bisect.bisect_right(snps[c], b[1])
    if L >= R:
        # No SNPs in the bin: empty count maps.
        return c, b, dict(), dict()
    snpsLR = snps[c][L:R]
    assert all(snpsLR[x - 1] < snpsLR[x] if x > 0 else True for x, o in enumerate(snpsLR))
    if blocksize:
        que = deque(snpsLR)
        assert sorted(snpsLR) == list(que) and b[0] <= que[0] and que[-1] <= b[1]
        # omap: SNP position -> block start; blocks: block start -> pooled (A, B).
        omap = {}
        blocks = {}
        for bk in range(b[0], b[1]+1, blocksize):
            block = (0, 0)
            while que and bk <= que[0] < bk + blocksize:
                o = que.popleft()
                block = (block[0] + bulk[c][o][0], block[1] + bulk[c][o][1])
                omap[o] = bk
            if sum(block) > 0:
                blocks[bk] = block
        # Every mapped SNP must belong to a non-empty block (read_baf drops
        # zero-count SNPs, so pooled block totals are positive).
        assert set(omap.values()) == set(blocks.keys())
        assert set(snpsLR) == set(omap.keys())
        allblocks = blocks.values()
        nis = [sum(block) for block in allblocks]
        # Randomize which allele is counted per block before the EM fit.
        xis = [block[0] if np.random.random() < 0.5 else block[1] for block in allblocks]
        # Best beta over multiple random EM restarts (by log-likelihood).
        beta = max((EM(ns=nis, xs=xis, start=np.random.randint(low=1, high=49)/100.0) for r in xrange(restarts)), key=(lambda x : x[1]))[0]
        # Threshold below which beta cannot be told apart from 0.5.
        if maxerror is None:
            thres = max(minerror, est_error(ns=nis, significance=alpha, restarts=restarts, bootstrap=boot))
        else:
            thres = maxerror
        if thres <= abs(beta - 0.5): # <= 0.25:
            # beta is significantly shifted: orient every SNP so the same
            # (globally minor) allele is counted as A within the bin.
            minel = (lambda p : 0 if p[0] < p[1] else 1)
            sumpairs = (lambda I : reduce((lambda x, y : (x[0] + y[0], x[1] + y[1])), I))
            if minel(sumpairs(allblocks)) == 0:
                swap = {o : False if blocks[omap[o]][0] < blocks[omap[o]][1] else True for o in snpsLR}
            else:
                swap = {o : False if blocks[omap[o]][1] < blocks[omap[o]][0] else True for o in snpsLR}
        else:
            # beta indistinguishable from 0.5: swap whole blocks at random.
            bkswap = {bk : False if np.random.random() < 0.5 else True for bk in blocks}
            swap = {o : True if bkswap[omap[o]] else False for o in snpsLR}
    else:
        # Blocking disabled: keep every SNP's original orientation.
        swap = {o : False for o in snpsLR}
    # Merge the (possibly swapped) per-cell counts over all SNPs in the bin.
    A = reduce(inupdate, (Counter(cA[c][o] if not swap[o] else cB[c][o]) for o in snpsLR))
    B = reduce(inupdate, (Counter(cB[c][o] if not swap[o] else cA[c][o]) for o in snpsLR))
    return c, b, A, B
def EM(ns, xs, start, tol=10**-6):
    """Fit a mirrored binomial mixture Bin(n, beta) / Bin(n, 1 - beta) by EM.

    ``ns`` are per-block total counts, ``xs`` the observed counts of one
    allele, ``start`` the initial beta in (0, 1).  Returns the tuple
    ``(beta, log_likelihood)``; degenerate input where every block is
    fully skewed (x == 0 or x == n) short-circuits to ``(0.0, 0.0)``.
    """
    totals = np.array(ns)
    alts = np.array(xs)
    assert totals.size == alts.size and 0 < start < 1 and np.all(totals >= alts)
    if np.all(np.logical_or(totals == alts, alts == 0)):
        return 0.0, 0.0
    beta = start
    prev = None
    while prev is None or abs(prev - beta) >= tol:
        prev = beta
        assert 0 + tol < beta < 1 - tol, (beta, totals, alts, start)
        # E-step: posterior responsibility of the beta-component per block;
        # exponent is clipped to keep exp() finite.
        logodds = (totals - 2*alts) * np.log(beta) + (2*alts - totals) * np.log(1.0 - beta)
        odds = np.exp(np.clip(a=logodds, a_min=-100, a_max=100))
        resp = np.reciprocal(1 + odds)
        # M-step: closed-form update of beta.
        beta = float(np.sum(totals * (1 - resp) + alts * (2*resp - 1))) / float(np.sum(totals))
        assert 0 + tol < beta < 1 - tol, (beta, totals, alts, start)
    lpmf = scipy.stats.binom.logpmf
    loglh = float(np.sum(resp * lpmf(k=alts, n=totals, p=beta) + (1 - resp) * lpmf(k=alts, n=totals, p=1 - beta)))
    return beta, loglh
def est_error(ns, significance=0.05, restarts=50, bootstrap=100):
    """Estimate, by parametric bootstrap, the maximum shift |0.5 - beta|
    that EM produces on truly balanced data (p = 0.5) with the given block
    totals ``ns``.  The ``significance`` fraction of most-extreme bootstrap
    estimates is discarded before taking the largest remaining shift.

    Uses the process-global RNG state, so results depend on prior seeding.
    """
    nis = np.array(ns)
    # NOTE(review): N is computed but never used.
    N = nis.size
    # Random EM starting points in (0.01, 0.48).
    genstart = (lambda : np.random.randint(low=1, high=49)/100.0)
    # Best-likelihood beta over several EM restarts for one simulated draw.
    runEM = (lambda xis : max((EM(ns=nis, xs=xis, start=genstart()) for x in xrange(restarts)), key=(lambda x : x[1]))[0])
    # Fold beta onto [0, 0.5] so the shift from 0.5 is one-sided.
    mirror = (lambda b : min(b, 1 - b))
    genneu = scipy.stats.binom.rvs
    betas = sorted(mirror(runEM(genneu(n=nis, p=0.5, size=len(nis)))) for x in xrange(bootstrap))
    # Trim the lowest `significance` fraction (most-shifted estimates).
    betas = betas[int(round(len(betas) * significance)):]
    return 0.5 - betas[0]
# Entry point: run the combiner when invoked as a script.
if __name__ == '__main__':
    main()
| 42.738095 | 221 | 0.574294 |
546c8bae56c95be6f0287231bc2273f0982963a8 | 2,316 | py | Python | cls_python/config_loader.py | amryfitra/cls_python | 06f73624222e7c2d496b20b75f353f284ba2d47e | [
"MIT"
] | null | null | null | cls_python/config_loader.py | amryfitra/cls_python | 06f73624222e7c2d496b20b75f353f284ba2d47e | [
"MIT"
] | null | null | null | cls_python/config_loader.py | amryfitra/cls_python | 06f73624222e7c2d496b20b75f353f284ba2d47e | [
"MIT"
] | null | null | null | __author__ = 'Owner'
import os
from configparser import ConfigParser
from datetime import datetime
def config_assert(config_item):
    """Assert that *config_item* exists on disk.

    Raises AssertionError naming the missing file.  Like any assert, the
    check disappears when Python runs with ``-O``.
    """
    message = "{} file does not exist in cls_config folder".format(config_item)
    assert os.path.exists(config_item), message
class ClsConfig(object):
    """Loads the CLS configuration bundle from ``<root>/cls_config``.

    Exposes the paths of the required companion files (cameras, logging)
    and the parsed sections of ``cls_python.ini`` as attributes.
    """

    def __init__(self, root_folder):
        self.root_folder_path = root_folder
        self.config_folder_path = os.path.join(self.root_folder_path, "cls_config")
        assert os.path.exists(self.config_folder_path), \
            "config_folder ({}) does not exist".format(self.config_folder_path)

        # Required companion files inside the config folder.
        self.cls_config_path = self._set_config_item("cls_python.ini")
        self.logging_config_path = self._set_config_item("logging.json")
        self.pl_cam_config_path = self._set_config_item("PLCamera.xml")
        self.nopl_cam_config_path = self._set_config_item("NOPLCamera.xml")
        self.day_id_cam_config_path = self._set_config_item("DayIDCamera.xml")
        self.night_id_cam_config_path = self._set_config_item("NightIDCamera.xml")

        # Parse the INI file and surface each section as an attribute.
        self.parser = self.read_config()
        for section in ("MAIN", "ID", "PL", "NOPL",
                        "TEMPERATURE", "ILLUMINATION", "HUMIDITY"):
            setattr(self, section, self.parser[section])

    def _set_config_item(self, suffix_path):
        """Return the absolute path of *suffix_path*, asserting it exists."""
        item = os.path.join(self.config_folder_path, suffix_path)
        assert os.path.exists(item), "{} file does not exist in cls_config folder".format(item)
        return item

    def read_config(self):
        """Parse ``cls_python.ini`` and return the ConfigParser."""
        parser = ConfigParser()
        parser.read(self.cls_config_path)
        return parser

    def get_day_period(self):
        """Classify the current wall-clock hour as 'daytime' or 'nighttime'
        using the MAIN section's daytime_limit/nighttime_limit hours."""
        hour = datetime.now().hour
        day_limit = self.MAIN.getint("daytime_limit")
        night_limit = self.MAIN.getint("nighttime_limit")
        if hour > day_limit or hour < night_limit:
            return "nighttime"
        return "daytime"

    def get_id_cam_config_path(self):
        """Return the ID-camera XML path matching the current day period."""
        if self.get_day_period() == "nighttime":
            return self.night_id_cam_config_path
        return self.day_id_cam_config_path
| 36.1875 | 105 | 0.683506 |
e0fcf233cc1c62f8f699d0ab76026a8287f5f216 | 180 | py | Python | color_config.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | null | null | null | color_config.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | null | null | null | color_config.py | c0dejump/CredzCheckr | cac325b50464cf2eb853540be3efdf993c903691 | [
"MIT"
] | 1 | 2021-11-28T18:01:33.000Z | 2021-11-28T18:01:33.000Z | found = "\033[32m|-- \033[0m"
action_found = "\033[32m└──── \033[0m"
not_found = "\033[33m|-- \033[0m"
action_not_found = "\033[33m└──── \033[0m"
INFO = "\033[34m|-- \033[0m"
| 30 | 43 | 0.561111 |
ded571be722f086daebcbefb6e7db209626946fc | 2,074 | py | Python | 07_week/ex7_4_1.py | WoojaeJang/AppliedOptimization-Gurobi | 067e4e5a0391de74f673f935b0ba765b037a4149 | [
"AFL-1.1"
] | null | null | null | 07_week/ex7_4_1.py | WoojaeJang/AppliedOptimization-Gurobi | 067e4e5a0391de74f673f935b0ba765b037a4149 | [
"AFL-1.1"
] | null | null | null | 07_week/ex7_4_1.py | WoojaeJang/AppliedOptimization-Gurobi | 067e4e5a0391de74f673f935b0ba765b037a4149 | [
"AFL-1.1"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 12:11:06 2021
@author: woojae-macbook13
"""
from gurobipy import*
try :
m = Model('ex7_4_1')
Z = LinExpr()
X = m.addVars(10, 10, vtype = GRB.BINARY, name= 'X')
S = m.addVars(10, 10, 10, vtype = GRB.BINARY, name = 'S')
RANK = m.addVars(10, vtype = GRB.INTEGER, name = 'RANK')
C = [[0, 2, 2, 3, 3, 5, 5, 5, 4, 4],
[4, 0, 3, 3, 4, 3, 2, 3, 2, 2],
[4, 3 ,0, 3, 5, 4, 3, 2, 4, 4],
[3, 3, 3, 0, 5, 6, 3, 4, 4, 3],
[3, 2, 1, 1, 0, 1, 4, 4, 5, 3],
[1, 3, 2, 0, 5, 0, 5, 4, 1, 4],
[1, 4, 3, 3, 2, 1, 0, 2, 1, 3],
[1, 3, 4, 2, 2, 2, 4, 0, 4, 2],
[2, 4, 2, 2, 1, 5, 5, 2, 0, 4],
[2, 4, 2, 3, 3, 2, 3, 4, 2, 0]]
NITEM = 10
# 목적함수
Z = 0
for i in range(NITEM) :
for j in range(NITEM) :
if (i < j) :
Z += C[i][j]*X[i,j]
elif (i > j) :
Z += C[i][j]*(1-X[j,i])
# 제약조건
for i in range(NITEM) :
for j in range(NITEM) :
if(i < j) :
c0 = X[i,j] + X[j,i] == 1
m.addConstr(c0, 'c0'+str(i*10)+str(j))
for i in range(NITEM) :
for j in range(NITEM) :
for k in range(NITEM) :
if (i < j and j < k) :
c1 = X[i,j] + X[j,k] - X[i,k] + S[i,j,k] == 1
m.addConstr(c1, 'c1'+str(i*10)+str(j*10)+str(k))
# 랭크
for i in range(NITEM) :
tempC = RANK[i]
for j in range(NITEM) :
if (i < j) :
tempC += (-1)*(1-X[i,j])
elif (j < i) :
tempC += (-1)*X[j,i]
m.addConstr(tempC == 1, 'R_'+str(i))
m.setObjective(Z, GRB.MAXIMIZE)
m.optimize()
for v in m.getVars() :
if v.x != 0:
print(v.varName, ':', v.x)
print('Z : ', m.objVal)
except GurobiError() :
print('Error reported')
| 25.925 | 68 | 0.374638 |
db069bf3c9131f93d73ecd6a57f71dd23fe4d5b9 | 2,972 | py | Python | colour/utilities/metrics.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 1,380 | 2015-01-10T12:30:33.000Z | 2022-03-30T10:19:57.000Z | colour/utilities/metrics.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 638 | 2015-01-02T10:49:05.000Z | 2022-03-29T10:16:22.000Z | colour/utilities/metrics.py | rift-labs-developer/colour | 15112dbe824aab0f21447e0db4a046a28a06f43a | [
"BSD-3-Clause"
] | 250 | 2015-01-21T15:27:19.000Z | 2022-03-30T10:23:58.000Z | # -*- coding: utf-8 -*-
"""
Metrics
=======
Defines various metrics:
- :func:`colour.utilities.metric_mse`
- :func:`colour.utilities.metric_psnr`
References
----------
- :cite:`Wikipedia2003c` : Wikipedia. (2003). Mean squared error. Retrieved
March 5, 2018, from https://en.wikipedia.org/wiki/Mean_squared_error
- :cite:`Wikipedia2004` : Wikipedia. (2004). Peak signal-to-noise ratio.
Retrieved March 5, 2018, from
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
"""
import numpy as np
from colour.utilities import as_float_array
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['metric_mse', 'metric_psnr']
def metric_mse(a, b, axis=None):
    """
    Compute the mean squared error (MSE), also known as the mean squared
    deviation (MSD), between the given *array_like* variables :math:`a`
    and :math:`b`.

    Parameters
    ----------
    a : array_like
        :math:`a` variable.
    b : array_like
        :math:`b` variable.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the mean is computed. By default the mean
        of the flattened squared-difference array is returned; a tuple of
        ints averages over multiple axes.

    Returns
    -------
    float
        Mean squared error (MSE).

    References
    ----------
    :cite:`Wikipedia2003c`

    Examples
    --------
    >>> a = np.array([0.48222001, 0.31654775, 0.22070353])
    >>> b = a * 0.9
    >>> metric_mse(a, b)  # doctest: +ELLIPSIS
    0.0012714...
    """

    difference = as_float_array(a) - as_float_array(b)

    return np.mean(difference ** 2, axis=axis)
def metric_psnr(a, b, max_a=1, axis=None):
    """
    Compute the peak signal-to-noise ratio (PSNR), in decibels, between the
    given *array_like* variables :math:`a` and :math:`b`.

    Parameters
    ----------
    a : array_like
        :math:`a` variable.
    b : array_like
        :math:`b` variable.
    max_a : numeric, optional
        Maximum possible pixel value of the :math:`a` variable.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the underlying mean squared error is
        computed; by default over the flattened arrays.

    Returns
    -------
    float
        Peak signal-to-noise ratio (PSNR).

    References
    ----------
    :cite:`Wikipedia2004`

    Examples
    --------
    >>> a = np.array([0.48222001, 0.31654775, 0.22070353])
    >>> b = a * 0.9
    >>> metric_psnr(a, b)  # doctest: +ELLIPSIS
    28.9568515...
    """

    mse = metric_mse(a, b, axis)

    return 10 * np.log10(max_a ** 2 / mse)
| 27.518519 | 78 | 0.625841 |
2e0fe376973d56b5ff22a5b515124a20c1c86fb3 | 15,947 | py | Python | openfermioncirq/gates/four_qubit_gates_test.py | oscarhiggott/OpenFermion-Cirq | dc7a6b3320c2f671fec444993a60be72ad0b54b8 | [
"Apache-2.0"
] | 1 | 2019-04-02T09:16:33.000Z | 2019-04-02T09:16:33.000Z | openfermioncirq/gates/four_qubit_gates_test.py | bryano/OpenFermion-Cirq | 1d931a8d73eb3300ff402e46f0b2858a283485ef | [
"Apache-2.0"
] | null | null | null | openfermioncirq/gates/four_qubit_gates_test.py | bryano/OpenFermion-Cirq | 1d931a8d73eb3300ff402e46f0b2858a283485ef | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy
import pytest
import scipy
import cirq
import openfermion
import openfermioncirq as ofc
from openfermioncirq.gates.four_qubit_gates import (
state_swap_eigen_component)
def test_state_swap_eigen_component_args():
    """Invalid inputs to state_swap_eigen_component raise TypeError/ValueError."""
    # non-string first state argument
    with pytest.raises(TypeError):
        state_swap_eigen_component(0, '12', 1)
    # the two state strings are identical
    with pytest.raises(ValueError):
        state_swap_eigen_component('01', '01', 1)
    # sign of 0 (neither +1 nor -1)
    with pytest.raises(ValueError):
        state_swap_eigen_component('01', '10', 0)
    # state strings of different lengths
    with pytest.raises(ValueError):
        state_swap_eigen_component('01', '100', 1)
    # state strings that are not bitstrings
    with pytest.raises(ValueError):
        state_swap_eigen_component('01', 'ab', 1)
@pytest.mark.parametrize('index_pair,n_qubits', [
    ((0, 1), 2),
    ((0, 3), 2),
])
def test_state_swap_eigen_component(index_pair, n_qubits):
    """The swap eigencomponent for basis states |i>, |j> has 0.5 on the
    diagonal at (i, i) and (j, j), sign * 0.5 at (i, j) and (j, i), and
    zeros everywhere else."""
    # binary-string labels of the two computational basis states
    state_pair = tuple(format(i, '0' + str(n_qubits) + 'b') for i in index_pair)
    i, j = index_pair
    dim = 2 ** n_qubits
    for sign in (-1, 1):
        actual_component = state_swap_eigen_component(
            state_pair[0], state_pair[1], sign)
        expected_component = numpy.zeros((dim, dim))
        expected_component[i, i] = expected_component[j, j] = 0.5
        expected_component[i, j] = expected_component[j, i] = sign * 0.5
        assert numpy.allclose(actual_component, expected_component)
def test_double_excitation_init_with_multiple_args_fails():
    """Passing both ``exponent`` and ``duration`` is ambiguous and rejected."""
    with pytest.raises(ValueError):
        _ = ofc.DoubleExcitationGate(exponent=1.0, duration=numpy.pi/2)
def test_double_excitation_eq():
    """Gates whose exponents differ by a multiple of 2 compare equal, and the
    rads/degs/duration constructors fall into the matching equivalence
    class."""
    eq = cirq.testing.EqualsTester()
    # exponent 1.5 is equivalent to -0.5 (period 2)
    eq.add_equality_group(
        ofc.DoubleExcitationGate(exponent=1.5),
        ofc.DoubleExcitationGate(exponent=-0.5),
        ofc.DoubleExcitationGate(rads=-0.5 * numpy.pi),
        ofc.DoubleExcitationGate(degs=-90),
        ofc.DoubleExcitationGate(duration=-0.5 * numpy.pi / 2))
    # exponent 0.5 is equivalent to -1.5
    eq.add_equality_group(
        ofc.DoubleExcitationGate(exponent=0.5),
        ofc.DoubleExcitationGate(exponent=-1.5),
        ofc.DoubleExcitationGate(rads=0.5 * numpy.pi),
        ofc.DoubleExcitationGate(degs=90),
        ofc.DoubleExcitationGate(duration=-1.5 * numpy.pi / 2))
    eq.make_equality_group(lambda: ofc.DoubleExcitationGate(exponent=0.0))
    eq.make_equality_group(lambda: ofc.DoubleExcitationGate(exponent=0.75))
def test_double_excitation_consistency():
    """DoubleExcitation passes the shared protocol-consistency checks."""
    ofc.testing.assert_implements_consistent_protocols(
        ofc.DoubleExcitation)
def test_combined_double_excitation_consistency():
    """A default CombinedDoubleExcitationGate passes the protocol checks."""
    ofc.testing.assert_implements_consistent_protocols(
        ofc.CombinedDoubleExcitationGate())
@pytest.mark.parametrize('weights', numpy.random.rand(10, 3))
def test_weights_and_exponent(weights):
    """Scaling the weights by 1/exponent while setting that exponent yields
    (approximately) equal gates, each reporting exponent == 1."""
    # numpy.linspace(-1, 1, 8) contains no zero, so the division below is safe
    exponents = numpy.linspace(-1, 1, 8)
    gates = tuple(
        ofc.CombinedDoubleExcitationGate(weights / exponent,
                                         exponent=exponent)
        for exponent in exponents)
    # every pair of gates represents the same operation
    for g1 in gates:
        for g2 in gates:
            assert cirq.approx_eq(g1, g2, atol=1e-100)
    for i, (gate, exponent) in enumerate(zip(gates, exponents)):
        assert gate.exponent == 1
        # NOTE(review): exponents[-0] is exponents[0], so the first iteration
        # reuses the first exponent rather than the last — confirm intended
        new_exponent = exponents[-i]
        new_gate = gate._with_exponent(new_exponent)
        assert new_gate.exponent == new_exponent
double_excitation_simulator_test_cases = [
(ofc.DoubleExcitation, 1.0,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, 1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, -1.0,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, 1, 1, -1, 1, 1, 1, 1,
1, 1, 1, 1, -1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0.5,
numpy.array([1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0]) / numpy.sqrt(8),
numpy.array([1, 1, 1, 0, 1, 1, 1, 1,
0, 0, 0, 0, 1j, 0, 0, 0]) / numpy.sqrt(8),
5e-6),
(ofc.DoubleExcitation, -0.5,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1j, -1, -1, 1, 1,
1, 1, 1, 1, 1j, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, -1. / 7,
numpy.array([1, 1j, -1j, -1, 1, 1j, -1j, -1,
1, 1j, -1j, -1, 1, 1j, -1j, -1]) / 4.,
numpy.array([1, 1j, -1j,
-numpy.cos(numpy.pi / 7) - 1j * numpy.sin(numpy.pi / 7),
1, 1j, -1j, -1, 1, 1j, -1j, -1,
numpy.cos(numpy.pi / 7) + 1j * numpy.sin(numpy.pi / 7),
1j, -1j, -1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 7. / 3,
numpy.array([0, 0, 0, 2,
(1 + 1j) / numpy.sqrt(2), (1 - 1j) / numpy.sqrt(2),
-(1 + 1j) / numpy.sqrt(2), -1,
1, 1j, -1j, -1,
1, 1j, -1j, -1]) / 4.,
numpy.array([0, 0, 0, 1 + 1j * numpy.sqrt(3) / 2,
(1 + 1j) / numpy.sqrt(2), (1 - 1j) / numpy.sqrt(2),
-(1 + 1j) / numpy.sqrt(2), -1,
1, 1j, -1j, -1,
0.5 + 1j * numpy.sqrt(3), 1j, -1j, -1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
5e-6),
(ofc.DoubleExcitation, 0.25,
numpy.array([1, 0, 0, -2, 0, 0, 0, 0,
0, 0, 0, 0, 3, 0, 0, 1]) / numpy.sqrt(15),
numpy.array([1, 0, 0, +3j / numpy.sqrt(2) - numpy.sqrt(2),
0, 0, 0, 0,
0, 0, 0, 0,
3 / numpy.sqrt(2) - 1j * numpy.sqrt(2), 0, 0, 1]) /
numpy.sqrt(15),
5e-6)
]
combined_double_excitation_simulator_test_cases = [
(ofc.CombinedDoubleExcitationGate((0, 0, 0)), 1.,
numpy.ones(16) / 4.,
numpy.ones(16) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0.2, -0.1, 0.7)), 0.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0.2, -0.1, 0.7)), 0.3,
numpy.array([1, -1, -1, -1, -1, -1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1]) / 4.,
numpy.array([1, -1, -1, -numpy.exp(-numpy.pi * 0.105j),
-1, -numpy.exp(-numpy.pi * 0.585j),
numpy.exp(numpy.pi * 0.03j), 1,
1, numpy.exp(numpy.pi * 0.03j),
numpy.exp(-numpy.pi * 0.585j), 1,
numpy.exp(-numpy.pi * 0.105j), 1, 1, 1]) / 4.,
5e-6),
(ofc.CombinedDoubleExcitationGate((1. / 3, 0, 0)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 1., 0,
0, 1., 0, 0, 0, 0, 0, 0]) / numpy.sqrt(2),
numpy.array([0, 0, 0, 0, 0, 0, 1., 0,
0, 1., 0, 0, 0, 0, 0, 0]) / numpy.sqrt(2),
5e-6),
(ofc.CombinedDoubleExcitationGate((0, -2. / 3, 0)), 1.,
numpy.array([1., 1., 0, 0, 0, 1., 0, 0,
0, 0., -1., 0, 0, 0, 0, 0]) / 2.,
numpy.array([1., 1., 0, 0, 0, -numpy.exp(4j * numpy.pi / 3), 0, 0,
0, 0., -numpy.exp(1j * numpy.pi / 3), 0, 0, 0, 0, 0]
) / 2.,
5e-6),
(ofc.CombinedDoubleExcitationGate((0, 0, 1)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1., 0, 0, 0]),
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]),
5e-6),
(ofc.CombinedDoubleExcitationGate((0, 0, 0.5)), 1.,
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]),
numpy.array([0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1j, 0, 0, 0]) / numpy.sqrt(2),
5e-6),
(ofc.CombinedDoubleExcitationGate((0.5, -1./3, 1.)), 1.,
numpy.array([0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 1, 0, 1, 0, 0, 0]) / numpy.sqrt(3),
numpy.array([0, 0, 0, 1j, 0, -1j / 2., 1 / numpy.sqrt(2), 0,
0, 1j / numpy.sqrt(2), numpy.sqrt(3) / 2, 0, 0, 0, 0, 0]
) / numpy.sqrt(3),
5e-6),
]
@pytest.mark.parametrize(
    'gate, exponent, initial_state, correct_state, atol',
    double_excitation_simulator_test_cases +
    combined_double_excitation_simulator_test_cases)
def test_four_qubit_rotation_gates_on_simulator(
        gate, exponent, initial_state, correct_state, atol):
    """Applying gate**exponent to initial_state reproduces correct_state up
    to global phase, for every tabulated simulator test case."""
    a, b, c, d = cirq.LineQubit.range(4)
    circuit = cirq.Circuit.from_ops(gate(a, b, c, d)**exponent)
    result = circuit.apply_unitary_effect_to_state(initial_state)
    cirq.testing.assert_allclose_up_to_global_phase(
        result, correct_state, atol=atol)
def test_double_excitation_gate_text_diagrams():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
c = cirq.NamedQubit('c')
d = cirq.NamedQubit('d')
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, b, c, d))
cirq.testing.assert_has_diagram(circuit, """
a: ───⇅───
│
b: ───⇅───
│
c: ───⇵───
│
d: ───⇵───
""")
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, b, c, d)**-0.5)
cirq.testing.assert_has_diagram(circuit, """
a: ───⇅────────
│
b: ───⇅────────
│
c: ───⇵────────
│
d: ───⇵^-0.5───
""")
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, c, b, d)**0.2)
cirq.testing.assert_has_diagram(circuit, """
a: ───⇅───────
│
b: ───⇵───────
│
c: ───⇅───────
│
d: ───⇵^0.2───
""")
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(d, b, a, c)**0.7)
cirq.testing.assert_has_diagram(circuit, """
a: ───⇵───────
│
b: ───⇅───────
│
c: ───⇵───────
│
d: ───⇅^0.7───
""")
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(d, b, a, c)**2.3)
cirq.testing.assert_has_diagram(circuit, """
a: ───⇵───────
│
b: ───⇅───────
│
c: ───⇵───────
│
d: ───⇅^0.3───
""")
def test_double_excitation_gate_text_diagrams_no_unicode():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
c = cirq.NamedQubit('c')
d = cirq.NamedQubit('d')
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, b, c, d))
cirq.testing.assert_has_diagram(circuit, """
a: ---/\ \/---
|
b: ---/\ \/---
|
c: ---\/ /\---
|
d: ---\/ /\---
""", use_unicode_characters=False)
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, b, c, d)**-0.5)
cirq.testing.assert_has_diagram(circuit, """
a: ---/\ \/--------
|
b: ---/\ \/--------
|
c: ---\/ /\--------
|
d: ---\/ /\^-0.5---
""", use_unicode_characters=False)
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(a, c, b, d)**0.2)
cirq.testing.assert_has_diagram(circuit, """
a: ---/\ \/-------
|
b: ---\/ /\-------
|
c: ---/\ \/-------
|
d: ---\/ /\^0.2---
""", use_unicode_characters=False)
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(d, b, a, c)**0.7)
cirq.testing.assert_has_diagram(circuit, """
a: ---\/ /\-------
|
b: ---/\ \/-------
|
c: ---\/ /\-------
|
d: ---/\ \/^0.7---
""", use_unicode_characters=False)
circuit = cirq.Circuit.from_ops(
ofc.DoubleExcitation(d, b, a, c)**2.3)
cirq.testing.assert_has_diagram(circuit, """
a: ---\/ /\-------
|
b: ---/\ \/-------
|
c: ---\/ /\-------
|
d: ---/\ \/^0.3---
""", use_unicode_characters=False)
@pytest.mark.parametrize('exponent', [1.0, 0.5, 0.25, 0.1, 0.0, -0.5])
def test_double_excitation_matches_fermionic_evolution(exponent):
    """DoubleExcitation**exponent agrees (up to global phase) with
    exp(-1j * pi * exponent * H) for the hermitian double-excitation
    operator H = 3^ 2^ 1 0 + h.c."""
    gate = ofc.DoubleExcitation ** exponent
    op = openfermion.FermionOperator('3^ 2^ 1 0')
    # make the operator hermitian before exponentiating
    op += openfermion.hermitian_conjugated(op)
    matrix_op = openfermion.get_sparse_operator(op)
    time_evol_op = scipy.linalg.expm(-1j * matrix_op * exponent * numpy.pi)
    time_evol_op = time_evol_op.todense()
    cirq.testing.assert_allclose_up_to_global_phase(
        cirq.unitary(gate), time_evol_op, atol=1e-7)
def test_combined_double_excitation_init_with_multiple_args_fails():
    """Passing both ``exponent`` and ``duration`` is ambiguous and rejected."""
    with pytest.raises(ValueError):
        _ = ofc.CombinedDoubleExcitationGate(
            (1,1,1), exponent=1.0, duration=numpy.pi/2)
def test_combined_double_excitation_eq():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
ofc.CombinedDoubleExcitationGate((1.2, 0.4, -0.4), exponent=0.5),
ofc.CombinedDoubleExcitationGate((0.3, 0.1, -0.1), exponent=2),
ofc.CombinedDoubleExcitationGate((-0.6, -0.2, 0.2), exponent=-1),
ofc.CombinedDoubleExcitationGate((0.6, 0.2, 3.8)),
ofc.CombinedDoubleExcitationGate(
(1.2, 0.4, -0.4), rads=0.5 * numpy.pi),
ofc.CombinedDoubleExcitationGate((1.2, 0.4, -0.4), degs=90),
ofc.CombinedDoubleExcitationGate(
(1.2, 0.4, -0.4), duration=0.5 * numpy.pi / 2)
)
eq.add_equality_group(
ofc.CombinedDoubleExcitationGate((-0.6, 0.0, 0.3), exponent=0.5),
ofc.CombinedDoubleExcitationGate((-0.6, 0.0, 0.3),
rads=0.5 * numpy.pi),
ofc.CombinedDoubleExcitationGate((-0.6, 0.0, 0.3), degs=90))
eq.make_equality_group(
lambda: ofc.CombinedDoubleExcitationGate(
(0.1, -0.3, 0.0), exponent=0.0))
eq.make_equality_group(
lambda: ofc.CombinedDoubleExcitationGate(
(1., -1., 0.5), exponent=0.75))
def test_combined_double_excitation_gate_text_diagram():
gate = ofc.CombinedDoubleExcitationGate((1,1,1))
qubits = cirq.LineQubit.range(6)
circuit = cirq.Circuit.from_ops(
[gate(*qubits[:4]), gate(*qubits[-4:])])
actual_text_diagram = circuit.to_text_diagram()
expected_text_diagram = """
0: ───⇊⇈────────
│
1: ───⇊⇈────────
│
2: ───⇊⇈───⇊⇈───
│ │
3: ───⇊⇈───⇊⇈───
│
4: ────────⇊⇈───
│
5: ────────⇊⇈───
""".strip()
assert actual_text_diagram == expected_text_diagram
actual_text_diagram = circuit.to_text_diagram(use_unicode_characters=False)
expected_text_diagram = """
0: ---a*a*aa------------
|
1: ---a*a*aa------------
|
2: ---a*a*aa---a*a*aa---
| |
3: ---a*a*aa---a*a*aa---
|
4: ------------a*a*aa---
|
5: ------------a*a*aa---
""".strip()
assert actual_text_diagram == expected_text_diagram
test_weights = [1.0, 0.5, 0.25, 0.1, 0.0, -0.5]
@pytest.mark.parametrize('weights', itertools.chain(
itertools.product(test_weights, repeat=3),
numpy.random.rand(10, 3)
))
def test_combined_double_excitation_decompose(weights):
cirq.testing.assert_decompose_is_consistent_with_unitary(
ofc.CombinedDoubleExcitationGate(weights))
| 33.714588 | 80 | 0.520223 |
91a75f3a1b793867f97d8c4eb0e9f5c6e9084e21 | 1,381 | py | Python | cms/signals/apphook.py | devyntk/django-cms | f889a30e94f268394ae9abf32c032239d0a9be55 | [
"BSD-3-Clause"
] | 5,659 | 2015-01-01T02:42:30.000Z | 2020-10-07T02:38:29.000Z | cms/signals/apphook.py | devyntk/django-cms | f889a30e94f268394ae9abf32c032239d0a9be55 | [
"BSD-3-Clause"
] | 3,264 | 2015-01-02T10:11:48.000Z | 2020-10-08T13:15:07.000Z | cms/signals/apphook.py | devyntk/django-cms | f889a30e94f268394ae9abf32c032239d0a9be55 | [
"BSD-3-Clause"
] | 2,132 | 2015-01-01T11:28:21.000Z | 2020-10-06T09:09:11.000Z | import logging
import sys
from django.core.management import color_style
from django.core.signals import request_finished
from django.urls import clear_url_caches
from cms.utils.apphook_reload import mark_urlconf_as_changed
logger = logging.getLogger(__name__)
DISPATCH_UID = 'cms-restart'
def trigger_server_restart(**kwargs):
    """
    Signal receiver that flags the current URLconf as stale so that it is
    rebuilt before serving the next request.
    """
    mark_urlconf_as_changed()
def set_restart_trigger():
    """Arrange for ``trigger_restart`` to fire when the current request ends."""
    request_finished.connect(trigger_restart, dispatch_uid=DISPATCH_UID)
def trigger_restart(**kwargs):
    """
    One-shot ``request_finished`` receiver: detach itself, then broadcast
    ``cms.signals.urls_need_reloading``.
    """
    from cms.signals import urls_need_reloading
    # disconnect first so the signal is emitted only once per trigger
    request_finished.disconnect(trigger_restart, dispatch_uid=DISPATCH_UID)
    urls_need_reloading.send(sender=None)
def debug_server_restart(**kwargs):
    """
    Reload the cms url configuration in place when running under a
    development server, and remind the developer to restart if needed.
    """
    from cms.appresolver import clear_app_resolvers
    if 'runserver' in sys.argv or 'server' in sys.argv:
        clear_app_resolvers()
        clear_url_caches()
        import cms.urls
        try:
            # Python 2: ``reload`` is a builtin.
            reload(cms.urls)
        except NameError:
            # Python 3: use importlib.reload; the previously used ``imp``
            # module is deprecated since 3.4 and removed in 3.12.
            from importlib import reload
            reload(cms.urls)
        if 'test' not in sys.argv:
            msg = 'Application url changed and urls_need_reloading signal fired. ' \
                  'Please reload the urls.py or restart the server.\n'
            styles = color_style()
            msg = styles.NOTICE(msg)
            sys.stderr.write(msg)
| 27.62 | 80 | 0.712527 |
33b98e568739f0baf281dd9e2a3b4cc1e28de79a | 2,318 | py | Python | anime_downloader/sites/animeflix.py | nulligor/anime-downloader | ea7956f42f292f0416baa71225241b4e457f7098 | [
"Unlicense"
] | 1 | 2020-01-20T21:59:10.000Z | 2020-01-20T21:59:10.000Z | anime_downloader/sites/animeflix.py | nulligor/anime-downloader | ea7956f42f292f0416baa71225241b4e457f7098 | [
"Unlicense"
] | null | null | null | anime_downloader/sites/animeflix.py | nulligor/anime-downloader | ea7956f42f292f0416baa71225241b4e457f7098 | [
"Unlicense"
] | null | null | null | from anime_downloader.sites.anime import Anime, AnimeEpisode, SearchResult
from anime_downloader.sites import helpers
class AnimeFlix(Anime, sitename='animeflix'):
    """
    Site :https://animeflix.io/

    Scrapes search results, the per-anime episode list and the title
    through animeflix's JSON api endpoints.
    """
    sitename = 'animeflix'
    search_url = 'https://www.animeflix.io/api/search'
    anime_url = 'https://www.animeflix.io/shows'
    episodeList_url = 'https://www.animeflix.io/api/anime-schema'
    meta_url = 'https://animeflix.io/api/anime/detail'
    QUALITIES = ['360p', '480p', '720p', '1080p']

    @classmethod
    def search(cls, query):
        """Return a list of SearchResult for the given query string."""
        search_results = helpers.get(cls.search_url,
                                     params={'q': query}).json()
        return [
            SearchResult(
                title=result['title'],
                url=f'{cls.anime_url}/{result["slug"]}',
            )
            for result in search_results.get('data', [])
        ]

    def _get_slug(self):
        """The slug is the last path component of the anime page url."""
        return self.url.strip('/').split('/')[-1]

    def _scrape_episodes(self):
        """Return the list of episode page urls for this anime."""
        self.slug = self._get_slug()
        episodes = helpers.get(self.episodeList_url,
                               params={'slug': self.slug}).json()
        return [self.anime_url + episode['url'] for episode in episodes['episodes']]

    def _scrape_metadata(self):
        """Fill in ``self.title`` from the detail endpoint.

        Previously this read ``self.slug``, which was only set as a side
        effect of ``_scrape_episodes``; compute the slug independently so
        the call order no longer matters.
        """
        self.slug = self._get_slug()
        meta = helpers.get(self.meta_url,
                           params={'slug': self.slug}).json()
        self.title = meta['data']['title']
class AnimeFlixEpisode(AnimeEpisode, sitename='animeflix'):
    """Episode scraper for animeflix.io."""
    episodeId_url = 'https://animeflix.io/api/episode'
    stream_url = 'https://animeflix.io/api/videos?episode_id'
    anime_url = 'https://www.animeflix.io/shows'

    def _get_sources(self):
        """Return ``[(extractor_name, stream_url)]`` for this episode."""
        # the slug is the second-to-last path component of the episode url
        episode = helpers.get(
            self.episodeId_url,
            params={'episode_num': self.ep_no,
                    'slug': self.url.strip('/').split('/')[-2]}).json()
        # renamed from ``id`` so the builtin is not shadowed
        episode_id = episode['data']['current']['id']
        download_link = helpers.get(
            f'{self.stream_url}={episode_id}').json()[0]['file']
        return [('no_extractor', download_link)]
| 39.965517 | 120 | 0.542709 |
d466cae53fcfd255d7331e5ebef5ff49dacaf7e5 | 1,188 | py | Python | photologue/migrations/0010_auto_20160105_1307.py | erdnaxe/django-photologue | 97768fef566cb19eb3c8454d58d9abf2e7b9f2b6 | [
"BSD-3-Clause"
] | 364 | 2015-01-03T00:06:55.000Z | 2019-03-10T20:00:11.000Z | photologue/migrations/0010_auto_20160105_1307.py | erdnaxe/django-photologue | 97768fef566cb19eb3c8454d58d9abf2e7b9f2b6 | [
"BSD-3-Clause"
] | 79 | 2015-01-03T03:35:36.000Z | 2019-03-13T20:05:55.000Z | photologue/migrations/0010_auto_20160105_1307.py | erdnaxe/django-photologue | 97768fef566cb19eb3c8454d58d9abf2e7b9f2b6 | [
"BSD-3-Clause"
] | 135 | 2015-01-09T01:36:11.000Z | 2019-03-12T02:54:59.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-05 13:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9) migration: redefines the ``title`` and
    ``slug`` fields of ``Gallery`` and ``Photo`` with ``max_length=250``
    and ``unique=True``. Avoid hand-editing the field definitions; make
    model changes and regenerate instead."""
    dependencies = [
        ('photologue', '0009_auto_20160102_0904'),
    ]
    operations = [
        migrations.AlterField(
            model_name='gallery',
            name='slug',
            field=models.SlugField(help_text='A "slug" is a unique URL-friendly title for an object.', max_length=250, unique=True, verbose_name='title slug'),
        ),
        migrations.AlterField(
            model_name='gallery',
            name='title',
            field=models.CharField(max_length=250, unique=True, verbose_name='title'),
        ),
        migrations.AlterField(
            model_name='photo',
            name='slug',
            field=models.SlugField(help_text='A "slug" is a unique URL-friendly title for an object.', max_length=250, unique=True, verbose_name='slug'),
        ),
        migrations.AlterField(
            model_name='photo',
            name='title',
            field=models.CharField(max_length=250, unique=True, verbose_name='title'),
        ),
    ]
| 33 | 159 | 0.607744 |
210df301a6027d8b97cc7d77ef786ef3180df153 | 33,153 | py | Python | apps/xgds_subsea_app/importer/eventLogCsvImporter.py | xgds/xgds_subsea | 43b6d97330afacae60989d9bf767fef1d5f3fd5e | [
"Apache-2.0"
] | 2 | 2019-01-08T04:37:10.000Z | 2019-06-17T19:34:34.000Z | apps/xgds_subsea_app/importer/eventLogCsvImporter.py | xgds/xgds_subsea | 43b6d97330afacae60989d9bf767fef1d5f3fd5e | [
"Apache-2.0"
] | null | null | null | apps/xgds_subsea_app/importer/eventLogCsvImporter.py | xgds/xgds_subsea | 43b6d97330afacae60989d9bf767fef1d5f3fd5e | [
"Apache-2.0"
] | null | null | null | # __BEGIN_LICENSE__
# Copyright (c) 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The xGDS platform is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# __END_LICENSE__
import json
import traceback
import re
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils import timezone
from django.contrib.contenttypes.models import ContentType
from geocamUtil.UserUtil import getUserByUsername, getUserByNames, create_user
from geocamUtil.models import SiteFrame
from xgds_core.models import Condition, ConditionHistory, ConditionStatus
from xgds_core.importer import csvImporter
from xgds_core.flightUtils import lookup_flight
from xgds_notes2.models import LocatedNote, HierarchichalTag, TaggedNote, Role, Location
from xgds_map_server.models import Place
from xgds_sample.models import Sample, SampleType, Label
def safe_delete_key(dictionary, key):
    """
    Delete ``key`` from ``dictionary`` if present, ignoring a missing key.

    ``dict.pop`` with a default replaces the previous ``try/del`` with a
    bare ``except``, which silently swallowed unrelated errors as well.

    :param dictionary: the dictionary to mutate
    :param key: the key to delete
    :return: the (mutated) dictionary
    """
    dictionary.pop(key, None)
    return dictionary
def clean_key_value(dictionary):
    """
    Return the (key, cleaned value) pair from a one-entry dictionary.

    Underscores in the value are replaced with spaces and characters that
    cannot be stored are dropped.  The previous implementation used the
    Python-2-only ``dict.keys()[0]`` and ``unicode(...)`` constructs, which
    raise TypeError/NameError under Python 3.

    :param dictionary: should have exactly one entry
    :return: (None, None) if the input is empty or not a dict,
        (key, None) if the value is the string 'NaN',
        otherwise (key, cleaned value string)
    """
    if not dictionary or not isinstance(dictionary, dict):
        return None, None
    key = next(iter(dictionary))
    value_string = dictionary[key]
    if value_string == 'NaN':
        return key, None
    value_string = value_string.replace('_', ' ')
    # drop evil/bad characters that cannot be stored (matches the old
    # py2 ``unicode(value, errors='ignore')`` which used ascii decoding)
    if isinstance(value_string, bytes):
        value_string = value_string.decode('ascii', 'ignore')
    else:
        value_string = value_string.encode('ascii', 'ignore').decode('ascii')
    return key, value_string
def clean_append(part_1, part_2):
    """
    Concatenate two values while tolerating falsy operands.

    :param part_1: first part, may be None/empty
    :param part_2: second part, may be None/empty
    :return: the concatenation when both are truthy, otherwise whichever
        part is truthy (or ``part_2`` when neither is)
    """
    if part_1 and part_2:
        return part_1 + part_2
    return part_1 or part_2
def remove_empty_keys(row):
    """
    Strip the placeholder entries that the csv reader can leave behind:
    the ``None`` key and the empty-string key.

    :param row: the row dictionary, mutated in place
    :return: the same dictionary
    """
    for junk_key in (None, ''):
        row.pop(junk_key, None)
    return row
def append_key_value(content, key, value):
    """
    Append ``key: value`` as a new line to ``content``.

    :param content: the existing text, may be None/empty
    :param key: the label for the new line
    :param value: the value for the new line
    :return: the extended content; unchanged when key or value is falsy
    """
    if not (key and value):
        return content
    line = '%s: %s' % (key, value)
    if content:
        return '%s\n%s' % (content, line)
    return line
def add_notes_tag(row, value):
    """
    Tag ``row`` according to the first recognized keyword in ``value``.

    Keyword checks run in a fixed order ('Microbiology' before 'Biology',
    which is a substring of it), matching case-sensitively.

    :param row: the row dict whose ``tag`` list is extended
    :param value: free-text field scanned for known keywords
    :return: True if a tag was added, False if nothing matched,
        the string "NO VALUE" when ``value`` is empty
    """
    if not value:
        return "NO VALUE"
    if 'Trash' in value:
        add_tag(row, 'Trash')
        return True
    if 'Microbiology' in value:
        add_tag(row, 'Microbiology')
        return True
    if 'Operations' in value:
        add_tag(row, 'Operations')
        return True
    if 'Biology' in value:
        add_tag(row, 'Biology')
        return True
    if 'Geology' in value:
        add_tag(row, 'Geology')
        return True
    if 'Geochemistry' in value:
        add_tag(row, 'Geochemistry')
        return True
    if 'T-ROV' in value:
        add_tag(row, 'TempProbe')
        return True
    if 'T-IGT' in value:
        add_tag(row, 'TempIGT')
        return True
    if 'T-SUPR' in value:
        # tag the specific probe when identifiable, plus the generic tag
        if 'SUPRa' in value:
            add_tag(row, 'TempSUPRa')
        elif 'SUPRb' in value:
            add_tag(row, 'TempSUPRb')
        add_tag(row, 'TempSUPR')
        return True
    if 'Laser Mapping' in value:
        add_tag(row, 'LaserMapping')
        return True
    if 'Other' in value:
        add_tag(row, 'Other')
        return True
    return False
def add_sample_type_tag(row, value):
    """
    Tag ``row`` with the sampler type encoded in the sample-type field.

    Matching is case-insensitive and only the first matching branch fires.

    :param row: the row dict whose ``tag`` list is extended
    :param value: free-text sample type field
    :return: True when a branch matched (NOTE(review): a 'supr' value with
        no recognized sub-type still returns True without adding any tag —
        confirm this is intended), False if nothing matched, the string
        "NO VALUE" when ``value`` is empty
    """
    if not value:
        return "NO VALUE"
    lower_value = value.lower()
    if 'supr' in lower_value:
        # identify which SUPR container/port was used
        if 'bag' in lower_value:
            add_tag(row, 'SUPR-BAG')
        elif 'filter' in lower_value:
            add_tag(row, 'SUPR-FILTER')
        elif 'tube' in lower_value:
            add_tag(row, 'SUPR-TUBE')
        elif '1' in lower_value:
            add_tag(row, 'SUPR-1')
        elif '2' in lower_value:
            add_tag(row, 'SUPR-2')
    elif 'igt' in lower_value:
        add_tag(row, 'IGT')
    elif 'rovg' in lower_value:
        add_tag(row, 'ROVGrab')
    elif 'rovpc' in lower_value:
        add_tag(row, 'PushCore')
    elif 'niskin' in lower_value:
        add_tag(row, 'Niskin')
    elif 'scoop' in lower_value:
        add_tag(row, 'Scoop')
    elif 'mat' in lower_value:
        add_tag(row, 'MicrobialMat')
    elif 'colonizer' in lower_value:
        add_tag(row, 'MicrobialColonizer')
        # colonizers and markers also record whether they went in or came out
        if 'deployed' in lower_value:
            add_tag(row, 'Deployed')
        elif 'recovered' in lower_value:
            add_tag(row, 'Recovered')
    elif 'marker' in lower_value:
        add_tag(row, 'Marker')
        if 'deployed' in lower_value:
            add_tag(row, 'Deployed')
        elif 'recovered' in lower_value:
            add_tag(row, 'Recovered')
    else:
        return False
    return True
def add_divestatus_tag(row, value):
    """
    Tag ``row`` with the dive status embedded in ``value``.

    :param row: the row dict whose ``tag`` list is extended
    :param value: free-text dive status field (matched case-sensitively)
    :return: True if a status tag was added, False if nothing matched,
        the string "NO VALUE" when ``value`` is empty
    """
    if not value:
        return "NO VALUE"
    status_tags = (('onbottom', 'OnBottom'),
                   ('offbottom', 'OffBottom'),
                   ('inwater', 'InWater'),
                   ('ondeck', 'OnDeck'))
    for keyword, tag in status_tags:
        if keyword in value:
            add_tag(row, tag)
            return True
    return False
def add_audiovideo_rating_tag(row, value):
    """
    Tag ``row`` with an audio/video quality rating tag (Rating0..Rating5).

    :param row: the row dict whose ``tag`` list is extended
    :param value: rating field; must render exactly as an integer 0-5
        (non-canonical forms such as '05', '+5' or ' 5' are rejected)
    :return: True if a rating tag was added, False if the value is not a
        valid rating, the string "NO VALUE" when ``value`` is empty
    """
    if not value:
        return "NO VALUE"
    try:
        int_val = int(value)
    except (TypeError, ValueError):
        # non-numeric input; the previous bare ``except`` also hid
        # unrelated errors
        return False
    # reject non-canonical integer renderings
    if str(int_val) != str(value):
        return False
    if 0 <= int_val <= 5:
        add_tag(row, 'Rating' + str(int_val))
        return True
    return False
def add_timing_or_data_tag(row, value):
    """
    Tag ``row`` with a timing keyword (start/stop/pause/resume/end) or a
    data-summary keyword (min/max/average) found in ``value``.

    Keywords are matched case-insensitively, in a fixed order; 'end' is
    checked before 'start' and 'restart' maps to Resume rather than Start.

    :param row: the row dict whose ``tag`` list is extended
    :param value: free-text field scanned for keywords
    :return: True if a tag was added, False if nothing matched,
        the string "NO VALUE" when ``value`` is empty
    """
    if not value:
        return "NO VALUE"
    lower_value = value.lower()
    for keyword, tag in (('min', 'Min'),
                         ('max', 'Max'),
                         ('avg', 'Average'),
                         ('average', 'Average'),
                         ('end', 'End')):
        if keyword in lower_value:
            add_tag(row, tag)
            return True
    if 'start' in lower_value:
        # 'restart' counts as resuming, not starting over
        add_tag(row, 'Resume' if 'restart' in lower_value else 'Start')
        return True
    for keyword, tag in (('pause', 'Pause'),
                         ('resume', 'Resume'),
                         ('stop', 'Stop')):
        if keyword in lower_value:
            add_tag(row, tag)
            return True
    return False
def add_tag(row, tag_key, capitalize=False):
    """
    Append ``tag_key`` to the row's ``tag`` list, creating the list on
    demand and skipping duplicates.

    :param row: the row dict to update
    :param tag_key: the tag string to add; falsy values are ignored
    :param capitalize: True to initial-cap the tag before adding it
    :return: the updated row
    """
    if not tag_key:
        # returning the row (previously None) keeps the documented
        # ":return: the updated row" contract even for empty keys
        return row
    tags = row.setdefault('tag', [])
    if capitalize:
        tag_key = tag_key.capitalize()
    if tag_key not in tags:
        tags.append(tag_key)
    return row
class EventLogCsvImporter(csvImporter.CsvImporter):
"""
Utilities for loading event log files from files such as <cruise>/processed/eventlog/by-dive/all_eventlog_<DIVE>.txt
This will create notes with references to the correct users, roles, locations and tags.
It will also do sample creation, as samples are recorded through the event log.
"""
def __init__(self, yaml_file_path, csv_file_path, vehicle_name=None, flight_name=None, timezone_name='UTC',
defaults=None, force=False, replace=False, skip_bad=False):
self.datalogger_user = getUserByUsername('datalogger')
self.navigator_user = getUserByUsername('navigator')
self.scf_user = getUserByUsername('scicommfellow')
self.herc_user = getUserByUsername('herc')
self.importer_user = getUserByUsername('importer')
self.roles = {'NAVIGATOR': Role.objects.get(value='NAVIGATOR'),
'SCF': Role.objects.get(value='SCIENCE_COMMUNICATION_FELLOW'),
'DATA_LOGGER': Role.objects.get(value='DATA_LOGGER')}
self.ship_location = Location.objects.get(value='SHIP')
self.sample_content_type = ContentType.objects.get_for_model(Sample)
self.condition_started = ConditionStatus.objects.get(value='started')
self.condition_completed = ConditionStatus.objects.get(value='completed')
super(EventLogCsvImporter, self).__init__(yaml_file_path, csv_file_path, vehicle_name, flight_name,
timezone_name, defaults, force, replace, skip_bad)
def check_data_exists(self, row):
"""
See if there is already identical data
:param row: typically the first row
:return: True if it already exists, false otherwise
"""
row = self.update_row(row)
row_copy = row.copy()
safe_delete_key(row, 'tag')
if 'condition_data' in row_copy:
safe_delete_key(row_copy, 'condition_data')
safe_delete_key(row_copy, 'condition_history_data')
if row:
result = LocatedNote.objects.filter(**row_copy)
return result.exists()
return False
def update_row(self, row):
"""
Update the row from the self.config
:param row: the loaded row
:return: the updated row, with timestamps and defaults
"""
result = super(EventLogCsvImporter, self).update_row(row)
result = self.clean_site(result)
result = self.clean_author(result)
result = self.clean_key_values(result)
if result:
result = remove_empty_keys(result)
result['location'] = self.ship_location
return result
def clean_site(self, row):
"""
Updates the row based on the site
:param row:
:return:
"""
if 'site' in row:
key, site_string = clean_key_value(row['site'])
if site_string:
site_string = site_string.replace('_',' ')
try:
place = Place.objects.get(name=site_string)
except:
# create a new place
place = Place(name=site_string, creator=self.importer_user,
creation_time=timezone.now(),
region=SiteFrame.objects.get(pk=settings.XGDS_CURRENT_SITEFRAME_ID))
Place.add_root(instance=place)
row['place'] = place
safe_delete_key(row, 'site')
return row
def clean_author(self, row):
    """
    Updates the row by looking up the correct author id by the name.
    Also figure out the role based on the author. Defaults to DATA_LOGGER.

    :param row: the row dict being cleaned
    :return: the updated row, with 'author' and 'role' set and
             'author_name' removed
    """
    if 'author_name' in row:
        author_name = row['author_name']
        lower_name = author_name.lower()
        # Well-known pseudo-authors map to fixed users/roles.
        if lower_name == 'nav' or lower_name == 'navigator' or lower_name == 'navigation':
            row['role'] = self.roles['NAVIGATOR']
            row['author'] = self.navigator_user
        elif lower_name == 'default_scf_user':
            row['role'] = self.roles['SCF']
            row['author'] = self.scf_user
        else:
            row['role'] = self.roles['DATA_LOGGER']
            # Real users are named "first_last"; look them up, creating
            # the account if needed.
            splits = author_name.split('_')
            if len(splits) == 2:
                try:
                    row['author'] = getUserByNames(splits[0], splits[1])
                    if not row['author']:
                        row['author'] = create_user(splits[0], splits[1])
                except:
                    # TODO This happend for NA100 due to errors in cruise-record.xml
                    print 'COULD NOT FIND USER FOR %s' % author_name
                    print 'creating new user'
                    row['author'] = create_user(splits[0], splits[1])
            # Fallback when the name did not parse or lookup failed.
            if 'author' not in row:
                row['author'] = self.datalogger_user
        if 'author_name' in row:
            safe_delete_key(row, 'author_name')
    else:
        print "*** THIS ROW HAS NO AUTHOR FIELD **"
        print row
    return row
def clean_flight(self, row, vehicle_name=None):
    """
    Updates the row by looking up the correct flight id by the name.
    Hardcoding to Hercules vehicle when the vehicle cannot be determined.

    :param row: the dict of the row we are working on
    :param vehicle_name: the name of the vehicle; when None it is derived
                         from row['vehicle_name'] or self.vehicle
    :return: the updated row, with 'flight' set and the raw name keys removed
    """
    if not vehicle_name and 'vehicle_name' in row:
        key, rvn = clean_key_value(row['vehicle_name'])
        if not rvn:
            print 'NULL VEHICLE, DEFAULTING TO HERCULES %s' % rvn
        else:
            # Only 'Argus' and Hercules variants are valid vehicle names.
            if rvn == 'Argus':
                vehicle_name = rvn
            elif 'Herc' in rvn:
                vehicle_name = 'Hercules'
            else:
                print 'INVALID VEHICLE, DEFAULTING TO HERCULES %s' % rvn
    if not vehicle_name:
        # Fall back to the importer's configured vehicle.
        vehicle_name = self.vehicle.name
    safe_delete_key(row, 'vehicle_name')
    if 'group_flight_name' in row:
        # Flights are named "<group_flight>_<vehicle>".
        if 'flight' not in row or row['flight'] is None:
            if row['group_flight_name'] != 'NaN':
                flight_name = row['group_flight_name'] + '_' + vehicle_name
                row['flight'] = lookup_flight(flight_name)
                if row['flight']:
                    print ('looked up flight %d %s' % (row['flight'].id, row['flight'].name))
        safe_delete_key(row, 'group_flight_name')
    return row
def append_key_value_to_content(self, row, key, content):
    """
    If *key* is present in *row*, clean its raw "k:v" string and append the
    pair to *content*; otherwise return *content* unchanged.

    :param row: the row dict
    :param key: the row key holding a raw key/value string
    :param content: the content string built so far (may be None)
    :return: the (possibly extended) content string
    """
    if key not in row:
        return content
    inner_key, value = clean_key_value(row[key])
    return append_key_value(content, inner_key, value)
def clean_key_values(self, row):
    """
    Cleans the key/value pairs of an event-log row based on its event_type.
    This includes setting up the note content, tags, flight, and — for
    SAMPLE / DIVESTATUS events — the synthesized condition/sample dicts.

    :param row: the row dict being cleaned
    :return: the updated row, with all raw key_value_* / event_type /
             task_type keys removed
    """
    # key_value_1 is always present; 2 and 3 are optional.
    key_1, value_1 = clean_key_value(row['key_value_1'])
    key_2 = None
    value_2 = None
    if 'key_value_2' in row:
        key_2, value_2 = clean_key_value(row['key_value_2'])
    key_3 = None
    value_3 = None
    if 'key_value_3' in row:
        key_3, value_3 = clean_key_value(row['key_value_3'])
    event_type = row['event_type']
    if event_type == 'NOTES':
        row['content'] = value_2
        if value_3:
            td_tag_added = add_timing_or_data_tag(row, value_3)
            if td_tag_added and 'Max' in row['tag']:
                # the max value is stored in the marker field but it is not really a marker
                prefix = '%s\n' % value_3
            else:
                prefix = '%s: %s\n' % (key_3, value_3)
            row['content'] = clean_append(prefix, row['content'])
        sample_type_tag_added = add_sample_type_tag(row, value_1)
        notes_tag_added = add_notes_tag(row, value_1)
        if not sample_type_tag_added and not notes_tag_added:
            # Unknown tag: keep the raw pair in the content so it is not lost.
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_1, str(row))
            row['content'] = '%s\n%s: %s' % (row['content'], key_1, value_1)
    elif event_type == 'SAMPLE':
        tag_added = add_sample_type_tag(row, value_2)
        sample_data = self.populate_sample_data(row, value_1, value_3)
        if sample_data:
            row['sample_data'] = sample_data
        row['content'] = '%s: %s\n%s' % (key_1, value_1, value_3)
        add_tag(row, 'Sample')
        if not tag_added:
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_2, str(row))
            row['content'] = '%s\n%s: %s' % (row['content'], key_2, value_2)
        # A sample event also records a completed condition named after it.
        condition, condition_history = self.populate_condition_data(row, value_1.replace(' ', '-'),
                                                                    self.condition_completed)
        row['condition_data'] = condition
        row['condition_history_data'] = condition_history
    elif event_type == 'DIVESTATUS':
        row['content'] = value_1
        add_tag(row, 'DiveStatus')
        tag_added = add_divestatus_tag(row, value_1)
        if not tag_added:
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_1, str(row))
            row['content'] = '%s\n%s: %s' % (row['content'], key_1, value_1)
        # OnBottom / InWater open a condition; everything else completes one.
        last_tag = row['tag'][-1]
        status = self.condition_completed
        if last_tag == 'OnBottom' or last_tag == 'InWater':
            status = self.condition_started
        condition, condition_history = self.populate_condition_data(row, last_tag, status)
        row['condition_data'] = condition
        row['condition_history_data'] = condition_history
    elif event_type == 'OBJECTIVE':
        row['content'] = value_1
        add_tag(row, 'Objective')
    elif event_type == 'ENGEVENT':
        row['content'] = value_3
        add_tag(row, 'Engineering')
        tag_added = add_tag(row, value_1, capitalize=True)
        if not tag_added:
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_1, str(row))
            row['content'] = '%s\n%s: %s' % (row['content'], key_1, value_1)
        tag_added = add_tag(row, value_2, capitalize=True)
        if not tag_added:
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_2, str(row))
            row['content'] = '%s\n%s: %s' % (row['content'], key_2, value_2)
    elif event_type == 'AUDIOVIDEO':
        if value_3:
            # Recordings that are not shared may belong to the Argus vehicle.
            if 'Herc/Argus' not in value_3:
                if 'argus' in value_3.lower():
                    row = self.clean_flight(row, 'Argus')
        key_4, value_4 = clean_key_value(row['key_value_4'])
        row['content'] = '%s: %s\n %s' % (key_2, value_2, value_1)
        add_tag(row, 'AudioVideo')
        tag_added = add_audiovideo_rating_tag(row, value_4)
        if not tag_added:
            print 'MATCHING TAG NOT FOUND FOR %s IN %s' % (value_4, str(row))
            row['content'] = '%s\nRATING: %s' % (row['content'], value_4)
    elif event_type == 'DATA':
        content = append_key_value(None, key_1, value_1)
        content = append_key_value(content, key_2, value_2)
        content = append_key_value(content, key_3, value_3)
        # NOTE(review): append_key_value_to_content returns the extended
        # string, but the return values below are discarded — the pairs from
        # key_value_4..8 apparently never reach the stored content.  Confirm
        # whether these should be "content = self.append_key_value_to_content(...)".
        self.append_key_value_to_content(row, 'key_value_4', content)
        self.append_key_value_to_content(row, 'key_value_5', content)
        self.append_key_value_to_content(row, 'key_value_6', content)
        self.append_key_value_to_content(row, 'key_value_7', content)
        self.append_key_value_to_content(row, 'key_value_8', content)
        if row['task_type'] == 'MULTIBEAMLINE':
            add_tag(row, 'MultibeamLine')
        elif row['task_type'] == 'PROFILES':
            add_tag(row, 'Profiles')
        else:
            print 'UNKOWN DATA TASK TYPE %s: %s' % (row['task_type'], str(row))
        row['content'] = content
    else:
        print '*** UNKONWN EVENT TYPE ** %s' % event_type
    row = self.clean_flight(row)
    if 'condition_data' in row:
        # The condition must point at the flight resolved above.
        if 'flight' in row:
            row['condition_data']['flight'] = row['flight']
    if 'tag' in row and not row['tag']:
        safe_delete_key(row, 'tag')
    # Strip all raw CSV keys that were folded into content/tags above.
    safe_delete_key(row, 'key_value_1')
    safe_delete_key(row, 'key_value_2')
    safe_delete_key(row, 'key_value_3')
    safe_delete_key(row, 'key_value_4')
    safe_delete_key(row, 'key_value_5')
    safe_delete_key(row, 'key_value_6')
    safe_delete_key(row, 'key_value_7')
    safe_delete_key(row, 'key_value_8')
    safe_delete_key(row, 'event_type')
    safe_delete_key(row, 'task_type')
    return row
def populate_condition_data(self, row, name, status):
    """
    Build the metadata dicts for storing a Condition with its
    ConditionHistory for specific events (DiveStatus, Sample).

    :param row: the cleaned row (must carry 'event_time' and 'content')
    :param name: name for the condition
    :param status: ConditionStatus for the history entry
    :return: (condition_data, condition_history_data) tuple of dicts
    """
    event_time = row['event_time']
    condition_data = {
        'source': 'Event Log Import',
        'start_time': event_time,
        'end_time': event_time,
        'name': name,
    }
    # Attach the flight only when the row actually resolved one.
    flight = row.get('flight')
    if flight is not None:
        condition_data['flight'] = flight
    condition_history_data = {
        'source_time': event_time,
        'creation_time': timezone.now(),
        'status': status,
        'jsonData': json.dumps({"content": "%s" % row["content"]}),
    }
    return condition_data, condition_history_data
def populate_sample_data(self, row, name, description):
    """
    Since samples are created in the event log and we have already parsed the
    information, build a dictionary with the information needed to create the
    Sample here.

    :param row: the cleaned row (supplies tag, place, author, event_time)
    :param name: the raw sample name from the log
    :param description: free-text description for the sample
    :return: the dictionary of sample data, or None when the sample exists
             and replace was not requested
    """
    found_sample = None
    # we have found cases where samples had bad names with underscores or space
    name = name.replace(' ', '-')
    name = name.replace('_', '-')
    try:
        found_sample = Sample.objects.get(name=name)
        if not self.replace:
            # we want to continue
            print 'Sample already exists and replace not specified %s' % name
            return None
    except ObjectDoesNotExist:
        pass
    sample_type = None
    if 'tag' in row and row['tag']:
        # The first tag (set by clean_key_values) doubles as the sample type.
        try:
            sample_type = SampleType.objects.get(value=row['tag'][0])
        except:
            print 'sample type %s NOT FOUND' % row['tag'][0]
    else:
        print 'SAVING SAMPLE WITH NO TYPE %s' % name
    right_now = timezone.now()
    place = None
    if 'place' in row:
        place = row['place']
    sample_data = {'name': name,
                   'sample_type': sample_type,
                   'place': place,
                   'creator': row['author'],
                   'collector': self.herc_user,
                   'collection_time': row['event_time'],
                   'collection_timezone': settings.TIME_ZONE,
                   'modification_time': right_now,
                   'description': description,
                   }
    if not found_sample:
        # Derive the numeric label from the "NA<cruise>-<number>" name.
        # NOTE(review): if the name does not match, name_match is None and
        # .groups() raises AttributeError — confirm names always match.
        name_match = re.search('NA(\d*)[-|\s](\d*)', name)
        sample_label_string = ""
        sample_label_number = None
        for n in name_match.groups():
            sample_label_string += n
        if sample_label_string:
            sample_label_number = int(sample_label_string)
        sample_data['sample_label_number'] = sample_label_number
    else:
        # Reuse the existing sample's label number.
        sample_data['sample_label_number'] = found_sample.label.number
    return sample_data
def build_models(self, row):
    """
    Build and persist the models based on the cleaned up row: optionally a
    Sample, optionally a Condition + ConditionHistory pair, and always the
    LocatedNote itself.

    :param row: the cleaned row dict (None rows are skipped)
    :return: list of created models of varying types, or None for an
             empty row
    :raises Exception: re-raised after logging, with the row appended to
             the message
    """
    if not row:
        return None
    the_model = LocatedNote
    new_models = []
    new_note_tags = None
    # Create the note and the tags. Because the tags cannot be created until the note exists,
    # we have to do this one at a time.
    try:
        has_tag = False
        found_position_id = None
        sample = None
        if 'tag' in row:
            # Tags are M2M: hold them aside until the note exists.
            has_tag = True
            new_note_tags = row['tag']
            safe_delete_key(row, 'tag')
        if 'sample_data' in row:
            # This is a sample; create it and set the generic foreign key.
            sample_data = row['sample_data']
            safe_delete_key(row, 'sample_data')
            label, created = Label.objects.get_or_create(number=sample_data['sample_label_number'])
            safe_delete_key(sample_data, 'sample_label_number')
            sample_data['label'] = label
            sample_data['flight'] = row['flight']
            if created:
                sample_data['creation_time'] = sample_data['modification_time']
            # assume the time of the sample will not change; look up position here
            sample_data = csvImporter.lookup_position(sample_data, timestamp_key='collection_time',
                                                      position_id_key='track_position_id',
                                                      retries=3)
            if 'track_position_id' in sample_data:
                # Share the sample's position with the note.
                found_position_id = sample_data['track_position_id']
                row['position_id'] = found_position_id
                row['position_found'] = True
            else:
                row['position_found'] = False
            try:
                sample = Sample.objects.create(**sample_data)
                new_models.append(sample)
            except Exception as e:
                # This sample already existed, update it instead.
                sample_filter = Sample.objects.filter(name=sample_data['name'], label=label)
                sample_filter.update(**sample_data)
                sample = sample_filter[0]
            # set the generic foreign key on the note
            if sample:
                row['content_type'] = self.sample_content_type
                row['object_id'] = sample.pk
        if 'condition_data' in row:
            # this has a condition.
            # If it was not a sample, see if it already exists.
            skip = False
            if not sample:
                found_conditions = Condition.objects.filter(flight=row['flight'],
                                                            name=row['condition_data']['name'])
                if found_conditions:
                    skip = True
            if not skip:
                condition = Condition.objects.create(**row['condition_data'])
                new_models.append(condition)
                condition_history_data = row['condition_history_data']
                condition_history_data['condition'] = condition
                condition_history = ConditionHistory.objects.create(**condition_history_data)
                new_models.append(condition_history)
                # update prior condition to complete it if it is a dive status
                update = False
                filter_name = None
                if condition.name == 'OnDeck':
                    filter_name = 'InWater'
                elif condition.name == 'OffBottom':
                    filter_name = 'OnBottom'
                if filter_name:
                    found_conditions = Condition.objects.filter(flight=condition.flight, name=filter_name)
                    if found_conditions:
                        last_condition = found_conditions.last()
                        last_condition_history = last_condition.getHistory().last()
                        if last_condition_history.status != self.condition_completed:
                            condition_history_data['condition'] = last_condition
                            update = True
                if update:
                    # Close out the still-open prior condition.
                    condition_history_data['status'] = self.condition_completed
                    condition_history2 = ConditionHistory.objects.create(**condition_history_data)
                    new_models.append(condition_history2)
            safe_delete_key(row, 'condition_data')
            safe_delete_key(row, 'condition_history_data')
        if not found_position_id:
            # No sample position was found: look up the note position itself.
            row = csvImporter.lookup_position(row, timestamp_key='event_time',
                                              position_id_key='position_id',
                                              position_found_key='position_found',
                                              retries=3)
        # this should really never happen!!!
        if 'author' not in row or not row['author']:
            print('NO AUTHOR IN ROW, defaulting to datalogger')
            print(row)
            row['author'] = self.datalogger_user
        if self.replace:
            new_note, note_created = the_model.objects.update_or_create(**row)
        else:
            new_note = the_model.objects.create(**row)
        if has_tag:
            new_note.tags.clear()
            new_note.tags.add(*new_note_tags)
        new_models.append(new_note)
    except Exception as e:
        traceback.print_exc()
        print new_note_tags
        print row
        e.message = str(row) + e.message
        raise e
    return new_models
def load_csv(self):
    """
    Load the CSV file according to the self.configuration, and store the values
    in the database using the Django ORM.
    Warning: the model's save method will not be called as we are using bulk_create.

    :return: the newly created models, which may be an empty list
    """
    new_models = []
    try:
        self.reset_csv()
        for row in self.csv_reader:
            row = self.update_row(row)
            if row:
                models = self.build_models(row)
                if models:
                    new_models.extend(models)
        # NOTE(review): 'row' is the loop variable — if the CSV is empty this
        # raises NameError.  Confirm intended placement (once, with the final
        # row) vs per-row inside the loop.
        self.handle_last_row(row)
    finally:
        self.csv_file.close()
    return new_models
def update_stored_data(self, the_model, rows):
    """
    Search for matching stored data based on each row (by the default time
    field, plus flight when set), and update it in place.

    :param the_model: the model we are working with
    :param rows: the cleaned up rows we are working with
    :return: None; rows without exactly one match are skipped with an error
    """
    for row in rows:
        # Match on the configured time field (and flight, if known); exactly
        # one hit is required for a safe update.
        filter_dict = {self.config['timefield_default']: row[self.config['timefield_default']]}
        if self.flight:
            filter_dict['flight'] = self.flight
        found = the_model.objects.filter(**filter_dict)
        if found.count() != 1:
            print "ERROR: DID NOT FIND MATCH FOR %s" % str(row[self.config['timefield_default']])
        else:
            item = found[0]
            has_tag = False
            if 'tag' in row:
                # Tags are M2M and set separately after the field updates.
                has_tag = True
                new_note_tags = row['tag']
                safe_delete_key(row, 'tag')
            for key, value in row.iteritems():
                setattr(item, key, value)
            item.tags.clear()
            if has_tag:
                item.tags.add(*new_note_tags)
            print 'UPDATED: %s ' % str(item)
            item.save()
| 38.505226 | 120 | 0.562181 |
319be12b9184c25fa448a784c5c58dea56793303 | 4,593 | py | Python | workers.py | big-skim-milk/Quetzal | 806d379fe4815f642e75b0efebf5262d7ab8af8d | [
"MIT"
] | 2 | 2020-03-05T16:22:57.000Z | 2020-03-05T20:32:40.000Z | workers.py | big-skim-milk/Quetzal | 806d379fe4815f642e75b0efebf5262d7ab8af8d | [
"MIT"
] | null | null | null | workers.py | big-skim-milk/Quetzal | 806d379fe4815f642e75b0efebf5262d7ab8af8d | [
"MIT"
] | null | null | null | import sys
from PyQt5 import QtCore
from pathlib import Path
from subprocess import (run, CalledProcessError)
from quetzal import (getProjects, getDirs, writeProjects, writeDirs, loggedIn)
class Updater(QtCore.QRunnable):
    """Runs `npx corvid pull` over a list of project directories, one worker
    per project, reporting progress through UpdateSignals.

    NOTE(review): this class has several evident defects — see inline notes.
    """

    def __init__(self, _mode, do_debug=False, override=False):
        super(Updater, self).__init__()
        # NOTE(review): self.signals is used here before it is assigned two
        # lines below — this branch raises AttributeError, not the intended
        # "must be logged in" signal.
        if not loggedIn():
            return self.signals.finished.emit('You must be logged in!')
        self.signals = UpdateSignals()
        self.threads = ["main"]
        self.workers = ["none"]
        self.current_count = 1
        self.debug_flag = 'corvid'
        if do_debug:
            # NOTE(review): unary plus on a str raises TypeError; presumably
            # meant `self.debug_flag = 'corvid-debug'` (or `+= '-debug'`).
            self.debug_flag = + 'corvid-debug'
        if override:
            self.override_flag = '--override'
        else:
            self.override_flag = '--move'
        self.args = ['npx', self.debug_flag, 'pull', self.override_flag]
        if _mode == 'all':
            self.to_update = [p['abs_dir'] for p in getProjects()]
        elif _mode == 'stop':
            # NOTE(review): self.workers still holds the placeholder string
            # "none" here, which has no .thread attribute.
            for worker in self.workers:
                worker.thread.stop()
        else:
            self.to_update = _mode
        try:
            # NOTE(review): `thread_range` is not defined anywhere in this
            # module — this always raises NameError (not IndexError, so the
            # except below does not catch it).  Presumably len(self.to_update).
            for _p_ in range(thread_range):
                self.threads.append(QtCore.QThread())
                self.workers.append(UpdateWorker())
                self.workers[_p_].moveToThread(self.threads[_p_])
        except IndexError:
            print(len(self.to_update))
        self.iterate(self.current_count)

    def iterate(self, _ix_):
        """Kick off the worker for index _ix_ and chain the next iteration."""
        try:
            is_done = self.workers[_ix_].startUpdate(
                self.to_update[_ix_], self.args)
        except AttributeError:
            self.loop(1)
        if _ix_ < len(self.to_update) - 1:
            # NOTE(review): startUpdate returns None (it emits a signal), so
            # is_done.npx_output here would raise AttributeError.
            is_done.npx_output.connect(self.loop)
        else:
            self.signals.finished.emit('success')

    def loop(self, exit_code):
        """Advance to the next project, emitting error/current/progress."""
        if exit_code != 0:
            self.signals.error.emit('error')
        self.signals.current.emit(self.to_update[self.current_count])
        self.current_count += 1
        self.iterate(self.current_count)
        self.signals.progress.emit(self.current_count, len(self.to_update))
class UpdateWorker(QtCore.QObject):
    """Executes the npx command in a project directory and reports the exit
    status through the npx_output signal."""

    # Emitted with the subprocess result: 0/None on success, 1 on failure.
    npx_output = QtCore.pyqtSignal(int)

    @QtCore.pyqtSlot(int)
    def startUpdate(self, _path, args):
        """Run *args* with cwd=_path and emit the outcome on npx_output."""
        try:
            now_updating = run(args, cwd=_path, capture_output=True)
            # NOTE(review): check_returncode() returns None on success (it
            # raises CalledProcessError on failure), so this emits None on an
            # int signal — presumably now_updating.returncode was intended.
            self.npx_output.emit(now_updating.check_returncode())
        except CalledProcessError:
            self.npx_output.emit(1)
class UpdateSignals(QtCore.QObject):
    """Signals emitted by Updater during a batch update run."""

    # (completed_count, total_count) after each project finishes.
    progress = QtCore.pyqtSignal(int, int)
    # Final outcome message ('success' or an error string).
    finished = QtCore.pyqtSignal(str)
    # Path of the project currently being updated.
    current = QtCore.pyqtSignal(str)
    # Emitted when an individual project update fails.
    error = QtCore.pyqtSignal(str)
class FileWatcher(QtCore.QFileSystemWatcher, QtCore.QRunnable):
    """Watches the configured project directories and persists the watched
    file/dir lists whenever the filesystem reports a change."""

    def __init__(self):
        super(FileWatcher, self).__init__()
        # Seed the watcher with every known project directory.
        self.watched_dirs = [p for p in getDirs().values()]
        self.addPaths(self.watched_dirs)
        self.signals = FileWatcherSignals()
        self.fileChanged.connect(self.handleFileChange)
        self.directoryChanged.connect(self.handleDirChange)

    def addWatched(self, _path):
        """Add a path (str) or list of paths to the watch list.

        NOTE(review): there is no return/elif after the success branches, so
        failure is emitted unconditionally — even after a successful add.
        """
        if isinstance(_path, str):
            self.addPath(_path)
            self.signals.success.emit()
        if isinstance(_path, list):
            self.addPaths(_path)
            self.signals.success.emit()
        self.signals.failure.emit(f'{_path} not valid type')

    def rmWatched(self, _path):
        """Remove a path (str) or list of paths from the watch list.

        NOTE(review): same fall-through as addWatched — failure is always
        emitted.
        """
        if isinstance(_path, str):
            self.removePath(_path)
            self.signals.success.emit()
        if isinstance(_path, list):
            self.removePaths(_path)
            self.signals.success.emit()
        self.signals.failure.emit(f'{_path} not valid type')

    # def queryWatched(self, _pattern):
    #     self.signals.result.emit(
    #         [d for d in self.directories() if _pattern in d])

    # def queryInstanced(self, _path):
    #     _instance = [f for f in self.files() if f == _path][0]
    #     if not _instance:
    #         _closest = [f for f in self.files() if f in _path][0]
    #         self.signals.result.emit(_closest)
    #     self.signals.result.emit(_instance)

    def handleFileChange(self):
        """Persist the current watched-file list and notify listeners."""
        writeProjects(self.files())
        self.signals.filechange.emit()

    def handleDirChange(self):
        """Persist the current watched-directory list and notify listeners."""
        writeDirs(self.directories())
        self.signals.dirchange.emit()
class FileWatcherSignals(QtCore.QObject):
    """Signals emitted by FileWatcher."""

    # A watched directory changed on disk.
    dirchange = QtCore.pyqtSignal()
    # An add/remove request received an unsupported argument type.
    failure = QtCore.pyqtSignal(str)
    # A watched file changed on disk.
    filechange = QtCore.pyqtSignal()
    # result = QtCore.pyqtSignal(list)
    # An add/remove request succeeded.
    success = QtCore.pyqtSignal()
| 32.574468 | 78 | 0.618768 |
112eaab2a35c0e968b5acf8e4ae93ab2320acdfc | 201 | py | Python | creators/utils.py | samuelviveiros/connector | 85f7a93f15ef3d01b648a64422fbfdea0191de76 | [
"MIT"
] | null | null | null | creators/utils.py | samuelviveiros/connector | 85f7a93f15ef3d01b648a64422fbfdea0191de76 | [
"MIT"
] | 13 | 2021-11-23T00:19:34.000Z | 2021-11-29T21:05:57.000Z | creators/utils.py | samuelviveiros/connector | 85f7a93f15ef3d01b648a64422fbfdea0191de76 | [
"MIT"
] | null | null | null | from django.utils.deprecation import MiddlewareMixin
class DisableCSRFMiddleware(MiddlewareMixin):
def process_request(self, request):
setattr(request, '_dont_enforce_csrf_checks', True)
| 28.714286 | 59 | 0.79602 |
6cf8a5f6cb539a1886d52cf968780bcc47896568 | 543 | py | Python | 0401-0500/0491-Increasing Subsequences/0491-Increasing Subsequences.py | jiadaizhao/LeetCode | 4ddea0a532fe7c5d053ffbd6870174ec99fc2d60 | [
"MIT"
] | 49 | 2018-05-05T02:53:10.000Z | 2022-03-30T12:08:09.000Z | 0401-0500/0491-Increasing Subsequences/0491-Increasing Subsequences.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 11 | 2017-12-15T22:31:44.000Z | 2020-10-02T12:42:49.000Z | 0401-0500/0491-Increasing Subsequences/0491-Increasing Subsequences.py | jolly-fellow/LeetCode | ab20b3ec137ed05fad1edda1c30db04ab355486f | [
"MIT"
] | 28 | 2017-12-05T10:56:51.000Z | 2022-01-26T18:18:27.000Z | class Solution:
def findSubsequences(self, nums: List[int]) -> List[List[int]]:
path = []
result = []
def dfs(start):
if len(path) >= 2:
result.append(path[:])
used = set()
for i in range(start, len(nums)):
if nums[i] not in used and (not path or nums[i] >= path[-1]):
path.append(nums[i])
used.add(nums[i])
dfs(i + 1)
path.pop()
dfs(0)
return result
| 31.941176 | 77 | 0.41989 |
a57b636cfd3e48e22eee09f67c82044ce44f07c0 | 9,818 | py | Python | kucoin_futures/user/user.py | codewc/kucoin-futures-python-sdk | 327be4873873651ea562c91b2848148387110326 | [
"MIT"
] | 25 | 2020-12-18T05:06:34.000Z | 2022-02-23T10:14:31.000Z | kucoin_futures/user/user.py | codewc/kucoin-futures-python-sdk | 327be4873873651ea562c91b2848148387110326 | [
"MIT"
] | 13 | 2020-12-28T20:57:29.000Z | 2022-03-22T07:21:38.000Z | kucoin_futures/user/user.py | codewc/kucoin-futures-python-sdk | 327be4873873651ea562c91b2848148387110326 | [
"MIT"
] | 18 | 2020-12-01T07:27:56.000Z | 2022-03-24T13:24:49.000Z | from kucoin_futures.base_request.base_request import KucoinFuturesBaseRestApi
class UserData(KucoinFuturesBaseRestApi):
    """Account / funding endpoints of the KuCoin Futures REST API.

    Every method is a thin wrapper: it assembles the query or body parameters
    and delegates the HTTP call to ``self._request`` on the base class.
    The return values are the parsed JSON payloads documented at
    https://docs.kumex.com.
    """

    def transfer_kucoin_account(self, amount, bizNo=''):
        """Transfer funds out to the KuCoin main account (v1 endpoint).

        https://docs.kumex.com/#transfer-funds-to-kucoin-main-account

        :param amount: amount to transfer out
        :param bizNo: unique idempotency id; auto-generated from
            ``return_unique_id`` (first 23 chars) when empty
        :return: dict such as ``{"applyId": "..."}``
        """
        payload = {
            'amount': amount,
            # Generate an idempotency key when the caller did not supply one.
            'bizNo': bizNo if bizNo else self.return_unique_id[0:23],
        }
        return self._request('POST', '/api/v1/transfer-out', params=payload)

    def transfer_kucoin_account_v2(self, amount, bizNo=''):
        """Transfer funds out to the KuCoin main account (v2 endpoint).

        https://docs.kumex.com/#transfer-funds-to-kucoin-main-account-2

        :param amount: amount to transfer out
        :param bizNo: unique idempotency id; auto-generated when empty
        :return: dict such as ``{"applyId": "..."}``
        """
        payload = {
            'amount': amount,
            'bizNo': bizNo if bizNo else self.return_unique_id[0:23],
        }
        return self._request('POST', '/api/v2/transfer-out', params=payload)

    def get_Transfer_history(self, **kwargs):
        """List transfer-out request records (paginated).

        https://docs.kumex.com/#get-transfer-out-request-records-2

        :param kwargs: optional filters: status, startAt, endAt,
            currentPage, pageSize, ...
        :return: paginated dict with ``items`` of transfer records
        """
        return self._request('GET', '/api/v1/transfer-list', params=dict(kwargs))

    def cancel_Transfer_out(self, applyId):
        """Cancel a pending transfer-out request.

        https://docs.kumex.com/#cancel-transfer-out-request

        :param applyId: transfer-out request id to cancel
        :return: dict such as ``{"applyId": "..."}``
        """
        url = '/api/v1/cancel/transfer-out?applyId={}'.format(applyId)
        return self._request('DELETE', url)

    def get_withdrawal_quota(self, currency):
        """Get the withdrawal limits and available amounts for a currency.

        https://docs.kumex.com/#get-withdrawal-limit

        :param currency: currency code, e.g. ``'XBT'``
        :return: dict of quota fields (limitAmount, availableAmount, ...)
        """
        return self._request('GET', '/api/v1/withdrawals/quotas',
                             params={'currency': currency})

    def sand_withdrawal(self, currency, address, amount, **kwargs):
        """Submit a withdrawal request.

        https://docs.kumex.com/#withdraw-funds
        (Method name kept as-is for backward compatibility — note the typo.)

        :param currency: currency code, only XBT is currently supported
        :param address: withdrawal address
        :param amount: withdrawal amount
        :param kwargs: optional fields: isInner, remark
        :return: dict such as ``{"withdrawalId": "..."}``
        """
        payload = {
            'currency': currency,
            'address': address,
            'amount': amount,
        }
        payload.update(kwargs)
        return self._request('POST', '/api/v1/withdrawals', params=payload)

    def get_withdrawal_list(self, **kwargs):
        """List withdrawal records (paginated).

        https://docs.kumex.com/#get-withdrawal-list

        :param kwargs: optional filters: currentPage, pageSize, ...
        :return: paginated dict with ``items`` of withdrawal records
        """
        return self._request('GET', '/api/v1/withdrawal-list', params=dict(kwargs))

    def cancel_withdrawal(self, withdrawalId):
        """Cancel a pending withdrawal.

        https://docs.kumex.com/#cancel-withdrawal

        :param withdrawalId: withdrawal id (path parameter)
        :return: dict such as ``{'address': '', 'memo': ''}``
        """
        url = '/api/v1/withdrawals/{withdrawalId}'.format(withdrawalId=withdrawalId)
        return self._request('DELETE', url)

    def get_deposit_address(self, currency):
        """Get the deposit address for a currency.

        https://docs.kumex.com/#get-deposit-address

        :param currency: currency code, e.g. ``'XBT'``
        :return: dict with the deposit address details
        """
        return self._request('GET', '/api/v1/deposit-address',
                             params={'currency': currency})

    def get_deposit_list(self, **kwargs):
        """List deposit records (paginated).

        https://docs.kumex.com/#get-deposits-list

        :param kwargs: optional filters: currentPage, pageSize, ...
        :return: paginated dict with ``items`` of deposit records
        """
        return self._request('GET', '/api/v1/deposit-list', params=dict(kwargs))

    def get_account_overview(self, currency='XBT'):
        """Get the account overview (equity, margins, balances).

        https://docs.kumex.com/#get-account-overview

        :param currency: currency code, defaults to ``'XBT'``
        :return: dict of account fields (accountEquity, marginBalance, ...)
        """
        return self._request('GET', '/api/v1/account-overview',
                             params={'currency': currency})

    def get_transaction_history(self, **kwargs):
        """List account transaction history entries.

        https://docs.kumex.com/#get-transaction-history

        :param kwargs: optional filters: startAt, endAt, type, offset,
            maxCount
        :return: dict with ``hasMore`` flag and ``dataList`` entries
        """
        return self._request('GET', '/api/v1/transaction-history',
                             params=dict(kwargs))
| 38.202335 | 131 | 0.561723 |
38e539bd71a908160d1b3e466b575e548c467f2a | 536 | py | Python | visualisation_tsne.py | YoungGunsHackathon/MyiMatcher | a5238e64d608cab38dd60cf9e1d135efd0b3554e | [
"Unlicense"
] | 1 | 2018-02-17T10:11:09.000Z | 2018-02-17T10:11:09.000Z | visualisation_tsne.py | YoungGunsHackathon/MyiMatcher | a5238e64d608cab38dd60cf9e1d135efd0b3554e | [
"Unlicense"
] | null | null | null | visualisation_tsne.py | YoungGunsHackathon/MyiMatcher | a5238e64d608cab38dd60cf9e1d135efd0b3554e | [
"Unlicense"
] | null | null | null | import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans  # NOTE(review): unused import — kept to avoid changing module side effects

# Load the feature matrix and per-point labels saved by the training step.
X = np.load('X.npy')
note = np.load('Y.npy')

# Project the features down to 2-D for plotting.
X_embedded = TSNE(n_components=2).fit_transform(X)

fig, ax = plt.subplots(figsize=(15, 15))
fig.suptitle("t-SNE dimensionality reduction", fontsize=16)

# Plot all points with a single vectorized scatter call instead of one
# call (and one artist) per point — same output, O(1) artists.
ax.scatter(X_embedded[:, 0], X_embedded[:, 1], color='blue')

# Annotate each point with its label.
for i, txt in enumerate(note):
    ax.annotate(txt, (X_embedded[i][0], X_embedded[i][1]))

fig.savefig('tsne.png')
aa5f565b136e39b2afc05c3643ab81725b2385e7 | 105,305 | py | Python | cli/polyaxon/client/run.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/client/run.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | 1 | 2022-01-24T11:26:47.000Z | 2022-03-18T23:17:58.000Z | cli/polyaxon/client/run.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import uuid
from collections.abc import Mapping
from datetime import datetime
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from urllib.parse import urlparse
import ujson
from marshmallow import EXCLUDE
from urllib3.exceptions import HTTPError
import polyaxon_sdk
from polyaxon import settings
from polyaxon.api import K8S_V1_LOCATION, STREAMS_V1_LOCATION
from polyaxon.cli.errors import handle_cli_error
from polyaxon.client.client import PolyaxonClient
from polyaxon.client.decorators import client_handler, get_global_or_inline_config
from polyaxon.constants.metadata import META_COPY_ARTIFACTS
from polyaxon.containers.names import MAIN_CONTAINER_NAMES
from polyaxon.contexts import paths as ctx_paths
from polyaxon.env_vars.getters import (
get_artifacts_store_name,
get_project_error_message,
get_project_or_local,
get_run_info,
get_run_or_local,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.lifecycle import (
LifeCycle,
V1ProjectFeature,
V1StatusCondition,
V1Statuses,
)
from polyaxon.logger import logger
from polyaxon.managers.ignore import IgnoreConfigManager
from polyaxon.polyaxonfile import check_polyaxonfile
from polyaxon.polyflow import V1Matrix, V1Operation, V1RunKind
from polyaxon.schemas.types import V1ArtifactsType
from polyaxon.stores.polyaxon_store import PolyaxonStore
from polyaxon.utils.code_reference import get_code_reference
from polyaxon.utils.date_utils import file_modified_since
from polyaxon.utils.formatting import Printer
from polyaxon.utils.fqn_utils import get_entity_full_name, to_fqn_name
from polyaxon.utils.hashing import hash_dir, hash_file, hash_value
from polyaxon.utils.http_utils import absolute_uri
from polyaxon.utils.list_utils import to_list
from polyaxon.utils.path_utils import (
check_or_create_path,
delete_path,
get_base_filename,
get_dirs_under_path,
get_files_in_path_context,
)
from polyaxon.utils.query_params import get_logs_params, get_query_params
from polyaxon.utils.tz_utils import now
from polyaxon.utils.urls_utils import get_proxy_run_url
from polyaxon.utils.validation import validate_tags
from polyaxon_sdk.rest import ApiException
from traceml.artifacts import V1ArtifactKind, V1RunArtifact
from traceml.events import V1Events
from traceml.logging.streamer import get_logs_streamer
class RunClient:
"""RunClient is a client to communicate with Polyaxon runs endpoints.
If no values are passed to this class,
Polyaxon will try to resolve the owner, project, and run uuid from the environment:
* If you have a configured CLI, Polyaxon will use the configuration of the cli.
* If you have a cached run using the CLI,
the client will default to that cached run unless you override the values.
* If you use this client in the context of a job or a service managed by Polyaxon,
a configuration will be available to resolve the values based on that run.
If you intend to create a new run instance or to list runs,
only the `owner` and `project` parameters are required.
You can always access the `self.client` to execute more APIs.
Properties:
project: str.
owner: str.
run_uuid: str.
run_data: V1Run.
status: str.
namespace: str.
client: [PolyaxonClient](/docs/core/python-library/polyaxon-client/)
Args:
owner: str, optional, the owner is the username or
the organization name owning this project.
project: str, optional, project name owning the run(s).
run_uuid: str, optional, run uuid.
client: [PolyaxonClient](/docs/core/python-library/polyaxon-client/), optional,
an instance of a configured client, if not passed,
a new instance will be created based on the available environment.
is_offline: bool, optional,
To trigger the offline mode manually instead of depending on `POLYAXON_IS_OFFLINE`.
no_op: bool, optional,
To set the NO_OP mode manually instead of depending on `POLYAXON_NO_OP`.
Raises:
PolyaxonClientException: If no owner and/or project are passed and Polyaxon cannot
resolve the values from the environment.
"""
    @client_handler(check_no_op=True)
    def __init__(
        self,
        owner: str = None,
        project: str = None,
        run_uuid: str = None,
        client: PolyaxonClient = None,
        is_offline: bool = None,
        no_op: bool = None,
    ):
        # Resolve the offline/no-op flags from (in order): the inline argument,
        # the passed client's config, then the global settings.
        self._is_offline = get_global_or_inline_config(
            config_key="is_offline", config_value=is_offline, client=client
        )
        self._no_op = get_global_or_inline_config(
            config_key="no_op", config_value=no_op, client=client
        )
        if self._no_op:
            # No-op mode: skip all resolution; the instance stays unconfigured.
            return
        try:
            # Accept "owner/project" style input or fall back to the cached
            # local project configuration.
            owner, project = get_project_or_local(
                get_entity_full_name(owner=owner, entity=project)
            )
        except PolyaxonClientException:
            pass
        if project is None:
            if settings.CLIENT_CONFIG.is_managed:
                # Running inside a Polyaxon-managed context: derive everything
                # from the in-cluster run info.
                owner, project, _run_uuid = get_run_info()
                run_uuid = run_uuid or _run_uuid
            elif not self._is_offline:
                raise PolyaxonClientException(
                    "Please provide a valid project, "
                    "or make sure this operation is managed by Polyaxon."
                )
        error_message = get_project_error_message(owner, project)
        if error_message and not self._is_offline:
            raise PolyaxonClientException(error_message)
        self._client = client
        self._owner = owner
        self._project = project
        # Offline mode generates a local uuid when none is provided; online mode
        # may resolve a cached run from the CLI context.
        self._run_uuid = (
            get_run_or_local(run_uuid)
            if not self._is_offline
            else run_uuid or uuid.uuid4().hex
        )
        # Outside a managed context the run defaults to a job runtime.
        default_runtime = (
            V1RunKind.JOB
            if self._is_offline or not settings.CLIENT_CONFIG.is_managed
            else None
        )
        self._run_data = polyaxon_sdk.V1Run(
            owner=self._owner,
            project=self._project,
            uuid=self._run_uuid,
            kind=default_runtime,
            runtime=default_runtime,
            is_managed=False if self._is_offline else None,
        )
        self._namespace = None          # lazily resolved k8s namespace
        self._results = {}
        self._artifacts_lineage = {}    # name -> V1RunArtifact cache
        self._default_filename_sanitize_paths = []
        self._last_update = None        # (second-granularity time, count) throttle state
        self._store = None              # lazily created PolyaxonStore
def _set_is_offline(
self,
client: PolyaxonClient = None,
is_offline: bool = None,
):
if is_offline is not None:
return is_offline
if client and client.config and client.config.is_offline is not None:
return client.config.is_offline
return settings.CLIENT_CONFIG.is_offline
def _set_no_op(
self,
client: PolyaxonClient = None,
no_op: bool = None,
):
if no_op is not None:
return no_op
if client and client.config and client.config.no_op is not None:
return client.config.no_op
return settings.CLIENT_CONFIG.no_op
@property
def client(self):
if self._client:
return self._client
self._client = PolyaxonClient()
return self._client
@property
def store(self):
if self._store:
return self._store
self._store = PolyaxonStore(client=self)
return self._store
    @property
    def status(self) -> str:
        # Last known status; updated from API responses and `log_status`.
        return self._run_data.status
@property
def settings(self) -> Optional[polyaxon_sdk.V1RunSettings]:
if not self.run_data:
return None
if self.run_data.settings and isinstance(self.run_data.settings, Mapping):
self._run_data.settings = polyaxon_sdk.V1RunSettings(
**self.run_data.settings
)
return self.run_data.settings
@property
def namespace(self) -> str:
if self._namespace:
return self._namespace
if self.settings and self.settings.namespace:
self._namespace = self.settings.namespace
else:
self._namespace = self.get_namespace()
return self._namespace
    @property
    def owner(self) -> str:
        # Username or organization owning the project.
        return self._owner
    def set_owner(self, owner: str):
        """Override the owner used by subsequent API calls."""
        self._owner = owner
    @property
    def project(self) -> str:
        # Name of the project owning the run(s).
        return self._project
    def set_project(self, project: str):
        """Override the project used by subsequent API calls."""
        self._project = project
    @property
    def run_uuid(self) -> str:
        # Uuid of the run this client is bound to.
        return self._run_uuid
    def set_run_uuid(self, run_uuid: str):
        """Rebind this client to a different run uuid."""
        self._run_uuid = run_uuid
    @property
    def run_data(self):
        # Cached V1Run model; refreshed via `refresh_data`.
        return self._run_data
    @property
    def artifacts_lineage(self):
        # Cached mapping of artifact name -> V1RunArtifact
        # (populated by `refresh_data(load_artifacts_lineage=True)`).
        return self._artifacts_lineage
@client_handler(check_no_op=True)
def get_inputs(self) -> Dict:
"""Gets the run's inputs.
Returns:
dict, all the run inputs/params.
"""
if not self._run_data.inputs:
self.refresh_data()
return self._run_data.inputs
@client_handler(check_no_op=True)
def get_outputs(self) -> Dict:
"""Gets the run's outputs.
Returns:
dict, all the run outputs/metrics.
"""
if not self._run_data.inputs:
self.refresh_data()
return self._run_data.outputs
@client_handler(check_no_op=True, check_offline=True)
def refresh_data(
self, load_artifacts_lineage: bool = False, load_conditions: bool = False
):
"""Fetches the run data from the api."""
self._run_data = self.client.runs_v1.get_run(
self.owner, self.project, self.run_uuid
)
if load_conditions:
_, conditions = self.get_statuses()
self._run_data.status_conditions = conditions
if load_artifacts_lineage:
lineages = self.get_artifacts_lineage(limit=1000).results
self._artifacts_lineage = {l.name: l for l in lineages}
def _throttle_updates(self) -> bool:
current_time = now().replace(microsecond=0)
last_time, updates = self._last_update or (current_time, 0)
if current_time == last_time and updates > 2:
return True
self._last_update = (current_time, updates + 1)
return False
def _update(
self, data: Union[Dict, polyaxon_sdk.V1Run], async_req: bool = True
) -> polyaxon_sdk.V1Run:
if self._is_offline:
return self.run_data
response = self.client.runs_v1.patch_run(
owner=self.owner,
project=self.project,
run_uuid=self.run_uuid,
body=data,
async_req=async_req,
)
if not async_req:
self._run_data = response
return response
@client_handler(check_no_op=True)
def update(
self, data: Union[Dict, polyaxon_sdk.V1Run], async_req: bool = False
) -> polyaxon_sdk.V1Run:
"""Updates a run based on the data passed.
[Run API](/docs/api/#operation/PatchRun)
Args:
data: Dict or V1Run, required.
async_req: bool, optional, default: False, execute request asynchronously.
Returns:
V1Run, run instance from the response.
"""
if self._is_offline:
for k in data:
setattr(self._run_data, k, getattr(data, k, None))
return self._update(data=data, async_req=async_req)
@client_handler(check_no_op=True)
def transfer(self, to_project: str, async_req: bool = False):
"""Transfers the run to a project under the same owner/organization.
[Run API](/docs/api/#operation/TransferRun)
Args:
to_project: str, required, the destination project to transfer the run to.
async_req: bool, optional, default: False, execute request asynchronously.
"""
def _update_run():
self._project = to_project
self._run_data._project = to_project
if self._is_offline:
_update_run()
return
self.client.runs_v1.transfer_run(
owner=self.owner,
project=self.project,
run_uuid=self.run_uuid,
body={"project": to_project},
async_req=async_req,
)
_update_run()
def _create(
self, data: Union[Dict, polyaxon_sdk.V1OperationBody], async_req: bool = False
) -> polyaxon_sdk.V1Run:
response = self.client.runs_v1.create_run(
owner=self.owner,
project=self.project,
body=data,
async_req=async_req,
)
if not async_req:
self._run_data = response
self._run_uuid = self._run_data.uuid
self._run_data.status = V1Statuses.CREATED
self._namespace = None
self._results = {}
self._artifacts_lineage = {}
return response
    @client_handler(check_no_op=True)
    def create(
        self,
        name: str = None,
        description: str = None,
        tags: Union[str, Sequence[str]] = None,
        content: Union[str, Dict, V1Operation] = None,
        is_managed: bool = True,
        pending: Optional[str] = None,
        meta_info: Optional[Dict] = None,
    ) -> polyaxon_sdk.V1Run:
        """Creates a new run based on the data passed.

        N.B. Create methods are only useful if you want to create a run programmatically,
        if you run a component/operation from the CLI/UI an instance will be created automatically.

        This is a generic create function, you can check other methods for creating runs:
          * from yaml: `create_from_polyaxonfile`
          * from url: `create_from_url`
          * from hub: `create_from_hub`

        > **Note**: If the `content` param is not passed, the run will be marked as non-managed.

        [Run API](/docs/api/#operation/CreateRun)

        Args:
            name: str, optional, it will override the name in the operation if provided.
            description: str, optional,
                it will override the description in the operation if provided.
            tags: str or List[str], optional, list of tags,
                it will override the tags in the operation if provided.
            content: str or Dict or V1Operation, optional.
            is_managed: bool, flag to create a managed run.
            pending: str, to specify if the run is pending approval (requires human validation) or pending upload. # noqa
            meta_info: dict, meta info to create the run with.

        Returns:
            V1Run, run instance from the response.
        """
        tags = validate_tags(tags, validate_yaml=True)
        if self._is_offline:
            # Offline mode: no API call; populate the local run mirror only.
            # Note that `content`/`is_managed`/`pending` are not persisted here.
            self._run_data.name = name
            self._run_data.description = description
            self._run_data.tags = tags
            self._run_data.owner = self._owner
            self._run_data.project = self._project
            if not self._run_uuid:
                self._run_uuid = uuid.uuid4().hex
                self.run_data.uuid = self._run_uuid
            return self.run_data
        if not content:
            # Without an operation spec the run cannot be scheduled by Polyaxon.
            is_managed = False
        elif not isinstance(content, (str, Mapping, V1Operation)):
            raise PolyaxonClientException(
                "Received an invalid content: {}".format(content)
            )
        if content:
            # Normalize the content to its string form for the API payload.
            if isinstance(content, Mapping):
                content = V1Operation.from_dict(content)
            content = (
                content if isinstance(content, str) else content.to_dict(dump=True)
            )
        data = polyaxon_sdk.V1OperationBody(
            name=name,
            description=description,
            tags=tags,
            content=content,
            is_managed=is_managed,
            pending=pending,
            meta_info=meta_info,
        )
        self._create(data=data, async_req=False)
        return self.run_data
@client_handler(check_no_op=True, check_offline=True)
def create_from_polyaxonfile(
self,
polyaxonfile: str,
name: str = None,
description: str = None,
tags: Union[str, Sequence[str]] = None,
params: Dict = None,
matrix: Union[Dict, V1Matrix] = None,
presets: List[str] = None,
queue: str = None,
nocache: bool = None,
cache: Union[int, str, bool] = None,
approved: Union[int, str, bool] = None,
) -> polyaxon_sdk.V1Run:
"""Creates a new run based on a polyaxonfile.
N.B. Create methods are only useful if you want to create a run programmatically,
if you run a component/operation from the CLI/UI an instance will be created automatically.
[Run API](/docs/api/#operation/CreateRun)
Args:
polyaxonfile: str, path to the polyaxonfile containing a YAML/Json specification.
The polyaxonfile should contain a
[V1Component](/docs/core/specification/component/) or an
[V1Operation](/docs/core/specification/operation/).
name: str, optional,
it will override the name in the operation if provided.
description: str, optional,
it will override the description in the operation if provided.
tags: str or List[str], optional, list of tags,
it will override the tags in the operation if provided.
params: dict, optional, a dictionary of parameters that will be
used to resolve the component's inputs/outputs.
matrix: dict or V1Matrix, a matrix definition.
presets: List[str], optional, the name of the
[presets](/docs/core/scheduling-presets/).
queue: str, optional, the name of the
[queue](/docs/core/scheduling-strategies/queues/) to assign the run to.
nocache: bool, optional, DEPRECATED Please use `cache='f'`
simple flag to disable
[cache check](/docs/automation/helpers/cache/).
If passed and the Polyaxonfile has cache section,
it will be patched with `disabled: true`.
cache: Union[int, str, bool], optional, simple flag to enable/disable
[cache check](/docs/automation/helpers/cache/).
If passed and the Polyaxonfile will be patched with `disabled: true/false`.
e.g. `cache=1`, `cache='yes'`, `cache=False`, `cache='t'`, ...
approved: Union[int, str, bool], optional, simple flag to enable/disable
human in the loop validation without changing the polyaxonfile,
similar to `isApproved: true/false`,
[manual approval](/docs/core/scheduling-strategies/manual-approval/).
Can be used with yes/no, y/n, false/true, f/t, 1/0. "
"e.g. `approved=1`, `approved='yes'`, `approved=False`, `approved='t'`, ..."
Returns:
V1Run, run instance from the response.
"""
op_spec = check_polyaxonfile(
polyaxonfile=polyaxonfile,
params=params,
matrix=matrix,
presets=presets,
queue=queue,
nocache=nocache,
cache=cache,
approved=approved,
verbose=False,
is_cli=False,
)
return self.create(
name=name, description=description, tags=tags, content=op_spec
)
@client_handler(check_no_op=True, check_offline=True)
def create_from_url(
self,
url: str,
name: str = None,
description: str = None,
tags: Union[str, Sequence[str]] = None,
params: Dict = None,
matrix: Union[Dict, V1Matrix] = None,
presets: List[str] = None,
queue: str = None,
nocache: bool = None,
cache: Union[int, str, bool] = None,
approved: Union[int, str, bool] = None,
) -> polyaxon_sdk.V1Run:
"""Creates a new run from a url containing a Polyaxonfile specification.
N.B. Create methods are only useful if you want to create a run programmatically,
if you run a component/operation from the CLI/UI an instance will be created automatically.
[Run API](/docs/api/#operation/CreateRun)
Args:
url: str, url containing a YAML/Json specification.
The url's polyaxonfile should contain a
[V1Component](/docs/core/specification/component/) or an
[V1Operation](/docs/core/specification/operation/).
name: str, optional, it will override the name in the operation if provided.
description: str, optional,
it will override the description in the operation if provided.
tags: str or List[str], optional, list of tags,
it will override the tags in the operation if provided.
params: dict, optional, a dictionary of parameters that will be
used to resolve the component's inputs/outputs.
matrix: dict or V1Matrix, a matrix definition.
presets: List[str], optional, the name of the
[presets](/docs/core/scheduling-presets/).
queue: str, optional, the name of the
[queue](/docs/core/scheduling-strategies/queues/) to assign the run to.
nocache: bool, optional, DEPRECATED Please use `cache='f'`
simple flag to disable
[cache check](/docs/automation/helpers/cache/).
If passed and the Polyaxonfile has cache section,
it will be patched with `disabled: true`.
cache: Union[int, str, bool], optional, simple flag to enable/disable
[cache check](/docs/automation/helpers/cache/).
If passed and the Polyaxonfile will be patched with `disabled: true/false`.
e.g. `cache=1`, `cache='yes'`, `cache=False`, `cache='t'`, ...
approved: Union[int, str, bool], optional, simple flag to enable/disable
human in the loop validation without changing the polyaxonfile,
similar to `isApproved: true/false`,
[manual approval](/docs/core/scheduling-strategies/manual-approval/).
Can be used with yes/no, y/n, false/true, f/t, 1/0. "
"e.g. `approved=1`, `approved='yes'`, `approved=False`, `approved='t'`, ..."
Returns:
V1Run, run instance from the response.
"""
op_spec = check_polyaxonfile(
url=url,
params=params,
matrix=matrix,
presets=presets,
queue=queue,
nocache=nocache,
cache=cache,
approved=approved,
verbose=False,
is_cli=False,
)
return self.create(
name=name, description=description, tags=tags, content=op_spec
)
    @client_handler(check_no_op=True, check_offline=True)
    def create_from_hub(
        self,
        component: str,
        name: str = None,
        description: str = None,
        tags: Union[str, Sequence[str]] = None,
        params: Dict = None,
        matrix: Union[Dict, V1Matrix] = None,
        presets: List[str] = None,
        queue: str = None,
        nocache: bool = None,
        cache: Union[int, str, bool] = None,
        approved: Union[int, str, bool] = None,
    ) -> polyaxon_sdk.V1Run:
        """Creates a new run from the hub based on the component name.

        N.B. Create methods are only useful if you want to create a run programmatically,
        if you run a component/operation from the CLI/UI an instance will be created automatically.

        If the component has required inputs, you should pass the params.

        [Run API](/docs/api/#operation/CreateRun)

        Args:
            component: str, name of the hub component.
            name: str, optional, it will override the name in the component if provided.
            description: str, optional,
                it will override the description in the component if provided.
            tags: str or List[str], optional, list of tags,
                it will override the tags in the component if provided.
            params: dict, optional, a dictionary of parameters that will be
                used to resolve the component's inputs/outputs.
            matrix: dict or V1Matrix, a matrix definition.
            presets: List[str], optional, the name of the
                [presets](/docs/core/scheduling-presets/).
            queue: str, optional, the name of the
                [queue](/docs/core/scheduling-strategies/queues/) to assign the run to.
            nocache: bool, optional, DEPRECATED Please use `cache='f'`
                simple flag to disable
                [cache check](/docs/automation/helpers/cache/).
                If passed and the Polyaxonfile has cache section,
                it will be patched with `disabled: true`.
            cache: Union[int, str, bool], optional, simple flag to enable/disable
                [cache check](/docs/automation/helpers/cache/).
                If passed and the Polyaxonfile will be patched with `disabled: true/false`.
                e.g. `cache=1`, `cache='yes'`, `cache=False`, `cache='t'`, ...
            approved: Union[int, str, bool], optional, simple flag to enable/disable
                human in the loop validation without changing the polyaxonfile,
                similar to `isApproved: true/false`,
                [manual approval](/docs/core/scheduling-strategies/manual-approval/).
                Can be used with yes/no, y/n, false/true, f/t, 1/0. "
                "e.g. `approved=1`, `approved='yes'`, `approved=False`, `approved='t'`, ..."

        Returns:
            V1Run, run instance from the response.
        """
        # Annotation fixed from `presets: str` to `List[str]` for consistency
        # with the sibling create_from_* methods.
        op_spec = check_polyaxonfile(
            hub=component,
            params=params,
            matrix=matrix,
            presets=presets,
            queue=queue,
            nocache=nocache,
            cache=cache,
            approved=approved,
            verbose=False,
            is_cli=False,
        )
        return self.create(
            name=name, description=description, tags=tags, content=op_spec
        )
@client_handler(check_no_op=True)
def log_status(
self,
status: str,
reason: str = None,
message: str = None,
last_transition_time: datetime = None,
last_update_time: datetime = None,
):
"""Logs a new run status.
<blockquote class="info">
<strong>Note</strong>: If you are executing a managed run, you don't need to call this method manually.
This method is only useful for manual runs outside of Polyaxon.
</blockquote>
N.B you will probably use one of the simpler methods:
* log_succeeded
* log_stopped
* log_failed
* start
* end
[Run API](/docs/api/#operation/CreateRunStatus)
Args:
status: str, a valid [Statuses](/docs/core/specification/lifecycle/) value.
reason: str, optional, reason or service issuing the status change.
message: str, optional, message to log with this status.
last_transition_time: datetime, default `now`.
last_update_time: datetime, default `now`.
"""
reason = reason or "PolyaxonClient"
self._run_data.status = status
current_date = now()
status_condition = V1StatusCondition(
type=status,
status=True,
reason=reason,
message=message,
last_transition_time=last_transition_time or current_date,
last_update_time=last_update_time or current_date,
)
if self._is_offline:
self._run_data.status_conditions = self._run_data.status_conditions or []
self._run_data.status_conditions.append(status_condition)
if status == V1Statuses.CREATED:
self._run_data.created_at = current_date
LifeCycle.set_started_at(self._run_data)
LifeCycle.set_finished_at(self._run_data)
return
self.client.runs_v1.create_run_status(
owner=self.owner,
project=self.project,
uuid=self.run_uuid,
body={"condition": status_condition},
async_req=True,
)
@client_handler(check_no_op=True, check_offline=True)
def get_statuses(
self, last_status: str = None
) -> Tuple[str, List[V1StatusCondition]]:
"""Gets the run's statuses.
[Run API](/docs/api/#operation/GetRunStatus)
Args:
last_status: str, a valid [Statuses](/docs/core/specification/lifecycle/) value.
Returns:
Tuple[str, List[Conditions]], last status and ordered status conditions.
"""
try:
response = self.client.runs_v1.get_run_statuses(
self.owner, self.project, self.run_uuid
)
if not last_status:
return response.status, response.status_conditions
if last_status == response.status:
return last_status, []
_conditions = []
for c in reversed(response.status_conditions):
if c.type == last_status:
break
_conditions.append(c)
return response.status, reversed(_conditions)
except (ApiException, HTTPError) as e:
raise PolyaxonClientException("Api error: %s" % e) from e
    def _wait_for_condition(self, statuses: List[str] = None):
        """Poll statuses, yielding (last_status, new_conditions) until the
        condition is met: last status in `statuses` if given, otherwise a
        final (done) status."""
        statuses = to_list(statuses, check_none=True)
        # Closure reads `last_status` from the enclosing scope on every check.
        def condition():
            if statuses:
                return last_status in statuses
            return LifeCycle.is_done(last_status)
        last_status = None
        while not condition():
            # No sleep before the very first poll.
            if last_status:
                time.sleep(settings.CLIENT_CONFIG.watch_interval)
            try:
                last_status, _conditions = self.get_statuses(last_status)
                yield last_status, _conditions
            except ApiException as e:
                # Tolerate transient server/gateway errors and keep polling.
                if e.status in {500, 502, 503, 504}:
                    yield last_status, []
                else:
                    raise e
@client_handler(check_no_op=True, check_offline=True)
def wait_for_condition(
self,
statuses: List[str] = None,
print_status: bool = False,
live_update: any = None,
):
"""Waits for the run's last status to meet a condition.
If a list of statuses is passed, it will wait for the condition:
* last status is one of the statuses passed.
Otherwise, it will wait until the user interrupts the function or
when the run reaches a final status.
N.B. if you want to watch the statuses and receive the status/conditions,
please use `watch_statuses` instead which yields the results.
"""
for status, conditions in self._wait_for_condition(statuses):
self._run_data.status = status
if print_status:
print("Last received status: {}\n".format(status))
if live_update:
latest_status = Printer.add_status_color(
{"status": status}, status_key="status"
)
live_update.update(status="{}\n".format(latest_status["status"]))
@client_handler(check_no_op=True, check_offline=True)
def watch_statuses(self, statuses: List[str] = None):
"""Watches run statuses.
If statuses is passed the watch will wait for a condition:
* last status is one of the statuses passed.
Otherwise, it will watch until the user interrupts it or
when the run reaches a final status.
N.B. if you just want to wait for a status condition without expecting a yield,
please use `wait_for_condition` instead
Yields:
Tuple[status, List[conditions]]:
This function will yield the last status and condition for every check.
"""
for status, conditions in self._wait_for_condition(statuses):
self._run_data.status = status
yield status, conditions
@client_handler(check_no_op=True, check_offline=True)
def get_logs(self, last_file=None, last_time=None) -> "V1Logs":
"""Gets the run's logs.
This method return up-to 2000 line logs per request.
Returns:
V1Logs
"""
params = get_logs_params(last_file=last_file, last_time=last_time)
return self.client.runs_v1.get_run_logs(
self.namespace, self.owner, self.project, self.run_uuid, **params
)
    @client_handler(check_no_op=True, check_offline=True)
    def watch_logs(self, hide_time: bool = False, all_info: bool = False):
        """Watches run logs (streams them as the run produces output).

        Args:
            hide_time: bool, optional, default: False, remove time information from log lines.
            all_info: bool, optional, default: False, show all information about log lines.
        """
        # NOTE(review): `get_run_logs` is not among this module's imports;
        # presumably it is defined later in this file — verify.
        return get_run_logs(
            client=self, hide_time=hide_time, all_info=all_info, follow=True
        )
    @client_handler(check_no_op=True, check_offline=True)
    def inspect(self):
        """Return the live inspection payload for this run: a mapping of
        pod name -> pod manifest (see `shell`, which reads `spec.containers`
        from these entries)."""
        return self.client.runs_v1.inspect_run(
            self.namespace, self.owner, self.project, self.run_uuid
        )
    @client_handler(check_no_op=True, check_offline=True)
    def shell(
        self,
        command: str = None,
        pod: str = None,
        container: str = None,
        stderr: bool = True,
        stdin: bool = True,
        stdout: bool = True,
        tty: bool = True,
    ):
        """Executes a command in a container.

        Streams allows to switch to raw terminal mode by sending stdin to 'bash'
        and receives stdout/stderr from 'bash' back to the client.

        Args:
            command: str, optional, a command to execute, defaults to "/bin/bash".
            pod: str, optional, the pod to use for executing the command,
                defaults to the first pod reported by `inspect`.
            container: str, optional, the container to use for executing the command,
                defaults to the first known main container, then the pod's first container.
            stderr: bool, optional
            stdin: bool, optional
            stdout: bool, optional
            tty: bool, optional
        """
        from polyaxon.client.transport import ws_client
        # Resolve pod/container from the live inspection when not provided.
        if not pod or not container:
            inspection = self.inspect()
            if not inspection:
                raise PolyaxonClientException(
                    "The shell command is only usable for operations managed by Polyaxon "
                    "and actively running."
                )
            if not pod:
                # Default to the first pod in the inspection payload.
                pod = next(iter(inspection.keys()))
            pod_content = inspection.get(pod, {})
            if not pod_content:
                raise PolyaxonClientException(
                    "The shell command is only usable for operations managed by Polyaxon "
                    "and actively running. Error: the pod `{}` was not found.".format(
                        pod
                    )
                )
            pod_content = pod_content.get("spec", {})
            pod_containers = [c.get("name") for c in pod_content.get("containers", [])]
            if not pod_containers:
                raise PolyaxonClientException(
                    "The shell command is only usable for operations managed by Polyaxon "
                    "and actively running. Error: the operation does not have containers."
                )
            if container:
                # Validate the requested container exists in the chosen pod.
                if container not in pod_containers:
                    raise PolyaxonClientException(
                        "The shell command is only usable for operations managed by Polyaxon "
                        "and actively running. "
                        "Error: the container `{}` was not found under the pod `{}`.".format(
                            container, pod
                        )
                    )
            else:
                # Prefer a known main container; otherwise use the first one.
                for c in MAIN_CONTAINER_NAMES:
                    if c in pod_containers:
                        container = c
                        break
                if not container:
                    container = pod_containers[0]
        # Build the k8s exec websocket url through the streams proxy.
        url = get_proxy_run_url(
            service=K8S_V1_LOCATION,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            subpath="k8s_exec/{pod}/{container}".format(
                pod=pod,
                container=container,
            ),
        )
        url = absolute_uri(url=url, host=self.client.config.host)
        command = command or "/bin/bash"
        return ws_client.websocket_call(
            self.client.config.sdk_config,
            url,
            query_params=[
                ("command", command.split()),
                ("stderr", stderr),
                ("stdin", stdin),
                ("stdout", stdout),
                ("tty", tty),
            ],
            headers=self.client.config.get_full_headers(auth_key="authorization"),
        )
@client_handler(check_no_op=True, check_offline=True)
def get_events(
self,
kind: V1ArtifactKind,
names: List[str],
orient: str = None,
force: bool = False,
):
"""Gets the run's events
Args:
kind: str, a valid `V1ArtifactKind`.
names: List[str], list of events to return.
orient: str, csv or dict.
force: bool, force reload the events.
"""
return self.client.runs_v1.get_run_events(
self.namespace,
self.owner,
self.project,
self.run_uuid,
kind=kind,
names=names,
orient=orient,
force=force,
)
@client_handler(check_no_op=True, check_offline=True)
def get_multi_run_events(
self,
kind: V1ArtifactKind,
runs: List[str],
names: List[str],
orient: str = None,
force: bool = False,
):
"""Gets events for multiple runs.
Args:
kind: str, a valid `V1ArtifactKind`.
runs: List[str], list of run uuids to return events for.
names: List[str], list of events to return.
orient: str, csv or dict.
force: bool, force reload the events.
Returns:
V1EventsResponse
"""
return self.client.runs_v1.get_multi_run_events(
self.namespace,
self.owner,
self.project,
kind=kind,
names=names,
runs=runs,
orient=orient,
force=force,
)
@client_handler(check_no_op=True, check_offline=True)
def get_artifacts_lineage(
self, query: str = None, sort: str = None, limit: int = None, offset: int = None
):
"""Gets the run's artifacts lineage.
[Run API](/docs/api/#operation/GetRunArtifactsLineage)
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/artifacts-lineage/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/artifacts-lineage/#sort)
limit: int, optional, limit of runs to return.
offset: int, optional, offset pages to paginate runs.
Returns:
V1ListRunArtifactsResponse.
"""
params = get_query_params(
limit=limit or 20, offset=offset, query=query, sort=sort
)
return self.client.runs_v1.get_run_artifacts_lineage(
self.owner, self.project, self.run_uuid, **params
)
@client_handler(check_no_op=True, check_offline=True)
def get_runs_artifacts_lineage(
self, query: str = None, sort: str = None, limit: int = None, offset: int = None
):
"""Gets the artifacts lineage for multiple runs under project based on query.
[Run API](/docs/api/#operation/GetRunsArtifactsLineage)
**Available from v1.18**
Args:
query: str, optional, query filters, please refer to
[Run PQL](/docs/core/query-language/artifacts-lineage/#query)
sort: str, optional, fields to order by, please refer to
[Run PQL](/docs/core/query-language/artifacts-lineage/#sort)
limit: int, optional, limit of runs to return.
offset: int, optional, offset pages to paginate runs.
Returns:
V1ListRunArtifactsResponse.
"""
params = get_query_params(
limit=limit or 20, offset=offset, query=query, sort=sort
)
return self.client.runs_v1.get_runs_artifacts_lineage(
self.owner, self.project, **params
)
@client_handler(check_no_op=True, check_offline=True)
def get_artifact(self, path: str, stream: bool = True, force: bool = False):
"""Gets the run's artifact.
Args:
path: str, the relative path of the artifact to return.
stream: bool, optional, default: True, whether to stream the artifact content.
force: bool, force reload the artifact.
Returns:
str.
"""
return self.client.runs_v1.get_run_artifact(
namespace=self.namespace,
owner=self.owner,
project=self.project,
uuid=self.run_uuid,
path=path,
stream=stream,
force=force,
_preload_content=True,
)
    @client_handler(check_no_op=True, check_offline=True)
    def download_artifact_for_lineage(
        self,
        lineage: polyaxon_sdk.V1RunArtifact,
        force: bool = False,
        path_to: str = None,
    ):
        """Downloads a run artifact given a lineage reference.

        Args:
            lineage: V1RunArtifact, the artifact lineage.
            path_to: str, optional, path to download to.
            force: bool, force reload the artifact.

        Returns:
            str
        """
        if not self.run_uuid:
            return
        lineage_path = lineage.path
        summary = lineage.summary or {}
        is_event = summary.get("is_event")
        has_step = summary.get("step")
        # Lineage paths may be absolute within the run's store prefix;
        # strip the run uuid to get a run-relative path.
        if self.run_uuid in lineage_path:
            lineage_path = os.path.relpath(lineage_path, self.run_uuid)
        # Single-file event kinds map directly to one artifact file.
        if V1ArtifactKind.is_single_file_event(lineage.kind):
            return self.download_artifact(
                path=lineage_path, force=force, path_to=path_to
            )
        if V1ArtifactKind.is_single_or_multi_file_event(lineage.kind):
            if is_event or has_step:
                # Event assets are packaged by the streams service; download
                # the tarball and extract it in place.
                url = get_proxy_run_url(
                    service=STREAMS_V1_LOCATION,
                    namespace=self.namespace,
                    owner=self.owner,
                    project=self.project,
                    run_uuid=self.run_uuid,
                    subpath="events/{}".format(lineage.kind),
                )
                url = absolute_uri(url=url, host=self.client.config.host)
                params = {"names": lineage.name, "pkg_assets": True}
                if force:
                    params["force"] = True
                return self.store.download_file(
                    url=url,
                    path=self.run_uuid,
                    use_filepath=False,
                    extract_path=path_to,
                    path_to=path_to,
                    params=params,
                    untar=True,
                )
            # Not an event/step asset: fall back on the kind's file/dir nature.
            elif V1ArtifactKind.is_file_or_dir(lineage.kind):
                return self.download_artifacts(
                    path=lineage_path, path_to=path_to, check_path=True
                )
            else:
                return self.download_artifact(
                    path=lineage_path, force=force, path_to=path_to
                )
        # Remaining kinds: dispatch on whether the kind is a file, a dir,
        # or could be either (check_path resolves the ambiguity server-side).
        if V1ArtifactKind.is_file(lineage.kind):
            return self.download_artifact(
                path=lineage_path, force=force, path_to=path_to
            )
        if V1ArtifactKind.is_dir(lineage.kind):
            return self.download_artifacts(path=lineage_path, path_to=path_to)
        if V1ArtifactKind.is_file_or_dir(lineage.kind):
            return self.download_artifacts(
                path=lineage_path, path_to=path_to, check_path=True
            )
@client_handler(check_no_op=True, check_offline=True)
def download_artifact(self, path: str, force: bool = False, path_to: str = None):
    """Downloads a single run artifact.

    Args:
        path: str, relative path of the artifact to download.
        path_to: str, optional, local destination path.
        force: bool, force reload the artifact.

    Returns:
        str
    """
    stream_url = absolute_uri(
        url=get_proxy_run_url(
            service=STREAMS_V1_LOCATION,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            subpath="artifact",
        ),
        host=self.client.config.host,
    )
    request_params = {"force": True} if force else {}
    return self.store.download_file(
        url=stream_url, path=path, path_to=path_to, params=request_params
    )
@client_handler(check_no_op=True, check_offline=True)
def download_artifacts(
    self,
    path: str = "",
    path_to: str = None,
    untar: bool = True,
    delete_tar: bool = True,
    extract_path: str = None,
    check_path: bool = False,
):
    """Downloads a subpath containing multiple run artifacts.

    Args:
        path: str, relative subpath to download.
        path_to: str, optional, local destination path.
        untar: bool, optional, default: True, extract the downloaded archive.
        delete_tar: bool, optional, default: True, remove the archive after extraction.
        extract_path: str, optional, where to extract the archive.
        check_path: bool, optional, default: False.
            Force the API to check if the path is a file or a dir.

    Returns:
        str.
    """
    stream_url = absolute_uri(
        url=get_proxy_run_url(
            service=STREAMS_V1_LOCATION,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            subpath="artifacts",
        ),
        host=self.client.config.host,
    )
    request_params = {"check_path": True} if check_path else {}
    # Only delete the archive when it was actually extracted.
    return self.store.download_file(
        url=stream_url,
        path=path,
        untar=untar,
        path_to=path_to,
        delete_tar=delete_tar and untar,
        extract_path=extract_path,
        params=request_params,
    )
@client_handler(check_no_op=True, check_offline=True)
def upload_artifact(
    self,
    filepath: str,
    path: str = None,
    untar: bool = False,
    overwrite: bool = True,
    show_progress: bool = True,
):
    """Uploads a single file to the run's artifacts store path.

    Args:
        filepath: str, local file to upload.
        path: str, optional, destination path; defaults to the run's root path.
        untar: bool, optional, whether the uploaded tar.gz should be
            decompressed on the artifacts store.
        overwrite: bool, optional, whether to overwrite any previous content.
        show_progress: bool, to show a progress bar.

    Returns:
        str
    """
    upload_url = absolute_uri(
        url=get_proxy_run_url(
            service=STREAMS_V1_LOCATION,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            subpath="artifact",
        ),
        host=self.client.config.host,
    )
    return self.store.upload_file(
        url=upload_url,
        filepath=filepath,
        path=path or "",
        untar=untar,
        overwrite=overwrite,
        show_progress=show_progress,
    )
@client_handler(check_no_op=True, check_offline=True)
def upload_artifacts_dir(
    self,
    dirpath: str,
    path: str = "",
    overwrite: bool = True,
    relative_to: str = None,
):
    """Uploads a full directory to the run's artifacts store path.

    > This function crawls all files to upload and uses `upload_artifacts`,
    > it also respects `.polyaxonignore` file if it exists or the default ignore pattern.

    Args:
        dirpath: str, the dirpath to upload.
        path: str, the relative destination path under the run's artifacts.
        overwrite: bool, optional, if the file uploaded should overwrite any previous content.
        relative_to: str, optional, if the path uploaded is not the current dir,
            and you want to cancel the relative path.

    Returns:
        str.
    """
    # Collect candidate files, honoring ignore patterns plus push patterns.
    # NOTE: `addtional_patterns` spelling matches the helper's keyword
    # argument — do not "correct" it here without changing the helper.
    files = IgnoreConfigManager.get_unignored_filepaths(
        path=dirpath, addtional_patterns=IgnoreConfigManager.get_push_patterns()
    )
    if not files:
        # Best-effort: warn and bail out instead of raising on an empty dir.
        logger.warning(
            "No files detected under the path %s.\n"
            "This could happen if the path is empty or "
            "ignored by one of the patterns in the ignore manager.",
            dirpath,
        )
        return
    return self.upload_artifacts(
        files=files,
        path=path or "",
        overwrite=overwrite,
        relative_to=relative_to,
    )
@client_handler(check_no_op=True, check_offline=True)
def upload_artifacts(
    self,
    files: List[str],
    path: str = "",
    overwrite: bool = True,
    relative_to: str = None,
):
    """Uploads a list of files to the run's artifacts store path.

    Args:
        files: List[str], files to upload.
        path: str, the relative destination path under the run's artifacts.
        overwrite: bool, optional, whether to overwrite any previous content.
        relative_to: str, optional, base dir used to relativize uploaded paths.

    Returns:
        str.
    """
    if not files:
        logger.warning("No files to upload to %s.", path)
        return
    upload_url = absolute_uri(
        url=get_proxy_run_url(
            service=STREAMS_V1_LOCATION,
            namespace=self.namespace,
            owner=self.owner,
            project=self.project,
            run_uuid=self.run_uuid,
            subpath="artifacts",
        ),
        host=self.client.config.host,
    )
    return self.store.upload_dir(
        url=upload_url,
        path=path,
        files=files,
        overwrite=overwrite,
        relative_to=relative_to,
    )
@client_handler(check_no_op=True, check_offline=True)
def delete_artifact(self, path: str):
    """Deletes a single run artifact.

    Args:
        path: str, relative path of the artifact to delete.
    """
    run_identity = dict(
        namespace=self.namespace,
        owner=self.owner,
        project=self.project,
        uuid=self.run_uuid,
    )
    self.client.runs_v1.delete_run_artifact(path=path, **run_identity)
@client_handler(check_no_op=True, check_offline=True)
def delete_artifacts(self, path: str):
    """Deletes a subpath containing multiple run artifacts.

    Args:
        path: str, relative subpath to delete.
    """
    run_identity = dict(
        namespace=self.namespace,
        owner=self.owner,
        project=self.project,
        uuid=self.run_uuid,
    )
    return self.client.runs_v1.delete_run_artifacts(path=path, **run_identity)
@client_handler(check_no_op=True, check_offline=True)
def get_artifacts_tree(self, path: str = ""):
    """Returns the artifacts tree rooted at the given path.

    Args:
        path: str, relative path of the artifact tree to return.

    Returns:
        V1ArtifactTree.
    """
    run_identity = dict(
        namespace=self.namespace,
        owner=self.owner,
        project=self.project,
        uuid=self.run_uuid,
    )
    return self.client.runs_v1.get_run_artifacts_tree(path=path, **run_identity)
@client_handler(check_no_op=True, check_offline=True)
def stop(self):
    """Requests the API to stop the current run."""
    self.client.runs_v1.stop_run(self.owner, self.project, self.run_uuid)
@client_handler(check_no_op=True, check_offline=True)
def approve(self):
    """Approves the current run when it's pending upload or human approval."""
    self.client.runs_v1.approve_run(self.owner, self.project, self.run_uuid)
@client_handler(check_no_op=True, check_offline=True)
def invalidate(self):
    """Invalidates the current run."""
    self.client.runs_v1.invalidate_run(self.owner, self.project, self.run_uuid)
@client_handler(check_no_op=True, check_offline=True)
def restart(
    self,
    override_config=None,
    copy: bool = False,
    copy_dirs: List[str] = None,
    copy_files: List[str] = None,
    name: str = None,
    description: str = None,
    tags: Union[str, Sequence[str]] = None,
    **kwargs,
):
    """Restarts the current run.

    Either performs a plain restart, or a "copy" restart where selected
    dirs/files from this run's artifacts are carried over to the new run.

    Args:
        override_config: Dict or str, optional,
            config to use for overriding the original run's config.
        copy: bool, optional, default: False, to restart with copy mechanism.
        copy_dirs: List[str], optional, default: None or all in copy mode, list of dirs to copy.
        copy_files: List[str], optional, default: None or all in copy mode, list of files to copy. # noqa
        name: str, optional, default: None, name to use for the restarted run.
        description: str, optional, default: None, description to use for the restarted run.
        tags: list[str], optional, default: None, tags to use for the restarted run.

    Returns:
        V1Run instance.
    """
    body = polyaxon_sdk.V1Run(content=override_config)
    if name:
        body.name = name
    if description:
        body.description = description
    if tags:
        tags = validate_tags(tags, validate_yaml=True)
        body.tags = tags
    if copy or copy_dirs or copy_files:
        if copy_dirs or copy_files:
            copy_dirs = to_list(copy_dirs, check_none=True)
            copy_files = to_list(copy_files, check_none=True)
            # Copied paths are addressed from the store root, so each is
            # prefixed with the current run's uuid.
            copy_artifacts = V1ArtifactsType()
            if copy_dirs:
                copy_artifacts.dirs = [
                    "{}/{}".format(self.run_uuid, cp) for cp in copy_dirs
                ]
            if copy_files:
                copy_artifacts.files = [
                    "{}/{}".format(self.run_uuid, cp) for cp in copy_files
                ]
            body.meta_info = {META_COPY_ARTIFACTS: copy_artifacts.to_dict()}
        return self.client.runs_v1.copy_run(
            self.owner, self.project, self.run_uuid, body=body, **kwargs
        )
    else:
        return self.client.runs_v1.restart_run(
            self.owner, self.project, self.run_uuid, body=body, **kwargs
        )
@client_handler(check_no_op=True, check_offline=True)
def resume(self, override_config=None, **kwargs):
    """Resumes the current run.

    Args:
        override_config: Dict or str, optional,
            config overriding the original run's config.

    Returns:
        V1Run instance.
    """
    payload = polyaxon_sdk.V1Run(content=override_config)
    return self.client.runs_v1.resume_run(
        self.owner, self.project, self.run_uuid, body=payload, **kwargs
    )
@client_handler(check_no_op=True)
def set_description(self, description: str, async_req: bool = True):
    """Sets a new description for the current run.

    Args:
        description: str, the description to set.
        async_req: bool, optional, default: True, execute request asynchronously.
    """
    # Keep the cached run data in sync with the patch sent to the API.
    self._run_data.description = description
    self._update({"description": description}, async_req=async_req)
@client_handler(check_no_op=True)
def set_name(self, name: str, async_req: bool = True):
    """Sets a new name for the current run.

    Args:
        name: str, the name to set.
        async_req: bool, optional, default: True, execute request asynchronously.
    """
    # Keep the cached run data in sync with the patch sent to the API.
    self._run_data.name = name
    self._update({"name": name}, async_req=async_req)
@client_handler(check_no_op=True)
def log_inputs(self, reset: bool = False, async_req: bool = True, **inputs):
    """Logs or resets inputs/params for the current run.

    > **Note**: If you are starting a run from the CLI/UI
    > polyaxon will track all inputs from the Polyaxonfile,
    > so you generally don't need to set them manually.
    > But you can always add or reset these params/inputs once your code starts running.

    Args:
        reset: bool, optional, if True, it will reset the whole inputs state.
            Note that Polyaxon will automatically populate the inputs based
            on the Polyaxonfile inputs definition and params passed.
        async_req: bool, optional, default: True, execute request asynchronously.
        inputs: **kwargs, e.g. param1=value1, param2=value2, ...
    """
    sanitized = {to_fqn_name(key): value for key, value in inputs.items()}
    patch = {"inputs": sanitized}
    if reset is not False:
        self._run_data.inputs = sanitized
    else:
        # Merge with any already-tracked inputs instead of replacing them.
        patch["merge"] = True
        current = self._run_data.inputs or {}
        current.update(sanitized)
        self._run_data.inputs = current
    self._update(patch, async_req=async_req)
@client_handler(check_no_op=True)
def log_outputs(self, reset: bool = False, async_req: bool = True, **outputs):
    """Logs outputs/results for the current run.

    Args:
        reset: bool, optional, if True, it will reset the whole outputs state.
            Note that Polyaxon will automatically populate some outputs based
            on the Polyaxonfile outputs definition and params passed.
        async_req: bool, optional, default: True, execute request asynchronously.
        outputs: **kwargs, e.g. output1=value1, metric2=value2, ...
    """
    sanitized = {to_fqn_name(key): value for key, value in outputs.items()}
    patch = {"outputs": sanitized}
    if reset is not False:
        self._run_data.outputs = sanitized
    else:
        # Merge with any already-tracked outputs instead of replacing them.
        patch["merge"] = True
        current = self._run_data.outputs or {}
        current.update(sanitized)
        self._run_data.outputs = current
    self._update(patch, async_req=async_req)
@client_handler(check_no_op=True)
def log_meta(self, reset: bool = False, async_req: bool = True, **meta):
    """Logs meta_info for the current run.

    > **Note**: Use carefully! The meta information is used by
    > Polyaxon internally to perform several information.

    Polyaxon Client already uses this method to log information
    about several events and artifacts, Polyaxon API/Scheduler uses
    this information to set meta information about the run.

    An example use case for this method is to update the concurrency
    of a pipeline to increase/decrease the initial value:

    ```python
    >>> from polyaxon.client import RunClient
    >>> client = RunClient()
    >>> client.log_meta(concurrency=5)
    ```

    Args:
        reset: bool, optional, if True, it will reset the whole meta info state.
        async_req: bool, optional, default: True, execute request asynchronously.
        meta: **kwargs, e.g. concurrency=10, has_flag=True, ...
    """
    sanitized = {to_fqn_name(key): value for key, value in meta.items()}
    patch = {"meta_info": sanitized}
    if reset is not False:
        self._run_data.meta_info = sanitized
    else:
        # Merge with any already-tracked meta info instead of replacing it.
        patch["merge"] = True
        current = self._run_data.meta_info or {}
        current.update(sanitized)
        self._run_data.meta_info = current
    self._update(patch, async_req=async_req)
@client_handler(check_no_op=True)
def log_tags(
    self,
    tags: Union[str, Sequence[str]],
    reset: bool = False,
    async_req: bool = True,
):
    """Logs new tags for the current run.

    Args:
        tags: str or List[str], tag or tags to log.
        reset: bool, optional, if True, it will reset the whole tags state.
            Note that Polyaxon will automatically populate the tags based
            on the Polyaxonfile.
        async_req: bool, optional, default: True, execute request asynchronously.
    """
    tags = validate_tags(tags, validate_yaml=True)
    patch = {"tags": tags}
    if reset is not False:
        self._run_data.tags = tags
    else:
        # Append only tags that are not already tracked locally.
        patch["merge"] = True
        current = self._run_data.tags or []
        current += [t for t in tags if t not in current]
        self._run_data.tags = current
    self._update(patch, async_req=async_req)
@client_handler(check_no_op=True)
def start(self):
    """Sets the current run to `running` status.

    <blockquote class="info">
    <strong>Note</strong>: If you are executing a managed run, you don't need to call this method manually.
    This method is only useful for manual runs outside of Polyaxon.
    </blockquote>
    """
    self.log_status(status=V1Statuses.RUNNING, message="Operation is running")
def _log_end_status(self, status: str, reason: str = None, message: str = None):
    """Transitions the current run to a terminal `status`.

    No-op when the run is already in a done state.

    Args:
        status: str, a valid [Statuses](/docs/core/specification/lifecycle/) value.
        reason: str, optional, reason or service issuing the status change.
        message: str, optional, message to log with this status.
    """
    if self.status in LifeCycle.DONE_VALUES:
        return
    self.log_status(status=status, reason=reason, message=message)
    # Brief pause so the background worker gets a chance to pick up
    # the status message before the process exits.
    time.sleep(0.1)
@client_handler(check_no_op=True)
def log_succeeded(self, message="Operation has succeeded"):
    """Sets the current run to `succeeded` status.

    <blockquote class="info">
    <strong>Note</strong>: If you are executing a managed run,
    you don't need to call this method manually.
    This method is only useful for manual runs outside of Polyaxon.
    </blockquote>
    """
    self._log_end_status(V1Statuses.SUCCEEDED, message=message)
@client_handler(check_no_op=True)
def log_stopped(self, message="Operation is stopped"):
    """Sets the current run to `stopped` status.

    <blockquote class="info">
    <strong>Note</strong>: If you are executing a managed run,
    you don't need to call this method manually.
    This method is only useful for manual runs outside of Polyaxon.
    </blockquote>
    """
    self._log_end_status(V1Statuses.STOPPED, message=message)
@client_handler(check_no_op=True)
def log_failed(self, reason: str = None, message: str = None):
    """Sets the current run to `failed` status.

    <blockquote class="info">
    <strong>Note</strong>: If you are executing a managed run, you don't need to call this method manually.
    This method is only useful for manual runs outside of Polyaxon.
    </blockquote>

    Args:
        reason: str, optional, reason or service issuing the status change.
        message: str, optional, message to log with this status.
    """
    self._log_end_status(V1Statuses.FAILED, reason=reason, message=message)
def _sanitize_filename(self, filename: str, for_patterns: List[str] = None) -> str:
    """Ensures that the filename never includes common context paths"""
    # Fast path: nothing to strip when there's no run uuid or the name is
    # outside the known context root.
    if not self.run_uuid or ctx_paths.CONTEXT_ROOT not in filename:
        return to_fqn_name(filename)
    for_patterns = for_patterns or []
    # Lazily build and cache the default run-scoped prefixes to strip.
    # Each prefix ends with os.sep so only whole path components match.
    if not self._default_filename_sanitize_paths:
        self._default_filename_sanitize_paths = [
            ctx_paths.CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format(self.run_uuid)
            + os.sep,
            ctx_paths.CONTEXT_MOUNT_RUN_EVENTS_FORMAT.format(self.run_uuid)
            + os.sep,
            ctx_paths.CONTEXT_MOUNT_RUN_ASSETS_FORMAT.format(self.run_uuid)
            + os.sep,
            ctx_paths.CONTEXT_MOUNT_RUN_SYSTEM_RESOURCES_EVENTS_FORMAT.format(
                self.run_uuid
            )
            + os.sep,
            ctx_paths.CONTEXT_MOUNT_ARTIFACTS_FORMAT.format(self.run_uuid) + os.sep,
            ctx_paths.get_offline_path(
                entity_value=self.run_uuid, entity_kind=V1ProjectFeature.RUNTIME
            )
            + os.sep,
        ]
    # Strip at most one matching prefix (first match wins).
    for p in self._default_filename_sanitize_paths + for_patterns:
        if filename.startswith(p):
            filename = filename[len(p) :]
            break
    return to_fqn_name(filename)
def _sanitize_filepath(self, filepath: str, rel_path: str = None) -> str:
    """Ensures that the filepath never includes common context paths"""
    # An explicit rel_path always wins; an empty filepath also falls back
    # to rel_path (which may be None).
    if not filepath or rel_path:
        return rel_path
    if not self.run_uuid:
        return rel_path or filepath
    if self.run_uuid in filepath:
        # NOTE(review): assumes the uuid occurrence is followed by "/";
        # a path that merely ends with the uuid would make this split
        # produce a single element and raise IndexError — confirm.
        return filepath.split(self.run_uuid + "/")[1]

    def is_abs():
        # A URL with a scheme is treated as absolute as well.
        if os.path.isabs(filepath):
            return True
        try:
            if urlparse(filepath).scheme:
                return True
            return False
        except Exception:  # noqa
            return False

    abspath = filepath if is_abs() else os.path.abspath(filepath)
    for_patterns = []
    # assumes `_artifacts_path`/`_store_path` attributes exist on self
    # (getattr is used without a default) — TODO confirm set in __init__.
    if getattr(self, "_artifacts_path"):
        for_patterns.append(getattr(self, "_artifacts_path"))
    if getattr(self, "_store_path"):
        for_patterns.append(getattr(self, "_store_path"))
    context_root = (
        ctx_paths.CONTEXT_OFFLINE_ROOT
        if self._is_offline
        else ctx_paths.CONTEXT_MOUNT_ARTIFACTS
    )
    for_patterns += [os.path.join(context_root, self.run_uuid), context_root]
    # Relativize against the first known context/store prefix that matches.
    for _path in for_patterns:
        if _path in abspath:
            try:
                return os.path.relpath(abspath, _path)
            except Exception as e:
                logger.debug("could not calculate relative path %s", e)
    return rel_path or abspath
def _log_has_events(self):
    # Record the `has_events` meta flag exactly once.
    if self._has_meta_key("has_events"):
        return
    self.log_meta(has_events=True)
def _log_has_metrics(self):
    # Set both flags in a single meta update when they are missing.
    pending = {}
    for flag in ("has_metrics", "has_events"):
        if not self._has_meta_key(flag):
            pending[flag] = True
    if pending:
        self.log_meta(**pending)
def _log_has_model(self):
    # Record the `has_model` meta flag exactly once.
    if self._has_meta_key("has_model"):
        return
    self.log_meta(has_model=True)
@client_handler(check_no_op=True)
def log_progress(self, value: float):
    """Logs the progress of the run.

    Small increments (< 2.5%) may be dropped while the client-side update
    throttle is active, unless the progress reaches 100%.

    Args:
        value: float, a value between 0 and 1 representing the percentage of run's progress.
    """
    if not isinstance(value, (int, float)):
        raise TypeError(
            "`log_progress` received the value `{}` of type `{}` "
            "which is not supported. "
            "Please pass a valid percentage between [0, 1].".format(
                value, type(value).__name__
            )
        )
    if value < 0 or value > 1:
        raise ValueError(
            "`log_progress` received an invalid value `{}`. "
            "Please pass a valid percentage between [0, 1].".format(value)
        )
    # No-op when the value did not change.
    current_value = self._get_meta_key("progress", 0) or 0
    if current_value == value:
        return
    # Throttle tiny increments unless the run is completing (value == 1).
    if (value - current_value < 0.025 and value < 1) and self._throttle_updates():
        return
    self.log_meta(progress=value)
@client_handler(check_no_op=True)
def log_code_ref(self, code_ref: Dict = None, is_input: bool = True):
    """Logs a code reference as lineage, storing the code_ref dict in the summary.

    Args:
        code_ref: dict, optional, if not provided,
            Polyaxon will detect the code reference from the git repo in the current path.
        is_input: bool, if the code reference is an input or outputs.
    """
    reference = code_ref if code_ref else get_code_reference()
    # Only log when a commit could be resolved.
    if not reference or "commit" not in reference:
        return
    self.log_artifact_lineage(
        body=V1RunArtifact(
            name=reference.get("commit"),
            kind=V1ArtifactKind.CODEREF,
            summary=reference,
            is_input=is_input,
        )
    )
def _calculate_summary_for_path_or_content(
    self,
    hash: str = None,
    path: str = None,
    content=None,
    summary: Dict = None,
    skip_hash_calculation: bool = False,
):
    # Builds/augments a lineage summary dict with `hash` and `path` keys.
    # Hash priority: explicit hash > hash of in-memory content >
    # hash of the file/dir at `path` (unless skipped).
    summary = summary or {}
    if hash:
        summary["hash"] = hash
    elif content is not None and not skip_hash_calculation:
        summary["hash"] = hash_value(content)
    if path is not None:
        try:
            if os.path.exists(path):
                # Store the path relative to the artifacts context root
                # (offline or mounted).
                context_root = (
                    ctx_paths.CONTEXT_OFFLINE_ROOT
                    if self._is_offline
                    else ctx_paths.CONTEXT_MOUNT_ARTIFACTS
                )
                summary["path"] = os.path.relpath(path, context_root)
            else:
                summary["path"] = path
        except Exception as e:  # noqa
            logger.debug(
                "Could not resolve path `%s` "
                "in _calculate_summary_for_path_or_content. "
                "Error: %s",
                path,
                e,
            )
            summary["path"] = path
        if not summary.get("hash") and not skip_hash_calculation:
            try:
                if os.path.exists(path):
                    # NOTE(review): this overwrites the relative path set
                    # above with an absolute one — confirm intended.
                    summary["path"] = os.path.abspath(path)
                    summary["hash"] = (
                        hash_file(path) if os.path.isfile(path) else hash_dir(path)
                    )
                else:
                    summary["path"] = path
                    logger.info(
                        "The path `%s` is not accessible to the tracking module.",
                        path,
                    )
            except Exception as e:
                logger.warning(
                    "Could not calculate hash for path `%s` "
                    "in _calculate_summary_for_path_or_content. "
                    "Error: %s",
                    path,
                    e,
                )
    return summary
@client_handler(check_no_op=True)
def log_data_ref(
    self,
    name: str,
    hash: str = None,
    path: str = None,
    content=None,
    summary: Dict = None,
    is_input: bool = True,
    skip_hash_calculation: bool = False,
):
    """Logs a data reference as lineage with the `data` artifact kind.

    Args:
        name: str, name of the data.
        hash: str, optional, hash version of the data;
            if not provided it is derived from `content`.
        path: str, optional, path of where the data is coming from.
        summary: Dict, optional, extra summary information for the lineage table.
        is_input: bool, optional, if the data reference is an input or outputs.
        content: optional, data content used to calculate the hash.
        skip_hash_calculation: optional, flag to skip hash calculation.
    """
    # Thin wrapper over `log_artifact_ref` with the DATA kind.
    return self.log_artifact_ref(
        name=name,
        kind=V1ArtifactKind.DATA,
        hash=hash,
        path=path,
        content=content,
        summary=summary,
        is_input=is_input,
        skip_hash_calculation=skip_hash_calculation,
    )
@client_handler(check_no_op=True)
def log_artifact_ref(
    self,
    path: str,
    kind: V1ArtifactKind,
    name: str = None,
    hash: str = None,
    content=None,
    summary: Dict = None,
    is_input: bool = False,
    rel_path: str = None,
    skip_hash_calculation: bool = False,
):
    """Logs an artifact reference with custom kind.

    Logging a generic file reference to the lineage table:

    ```python
    >>> # Get outputs artifact path
    >>> asset_path = tracking.get_outputs_path("test.txt")
    >>> with open(asset_path, "w") as f:
    >>>     f.write("Artifact content.")
    >>> # Log reference to the lineage table
    >>> # Name of the artifact will default to test
    >>> tracking.log_artifact_ref(path=asset_path, kind=V1ArtifactKind.FILE)
    ```

    **Note**: This is a generic method that is used by `log_file_ref` and `log_model_ref`.

    Args:
        path: str, filepath, the name is extracted from the filepath
        kind: V1ArtifactKind, the artifact kind
        name: str, if the name is passed it will be used instead of the filename from the path.
        hash: str, optional, default = None, the hash version of the file,
            if not provided it will be calculated based on the file content
        content: the file content
        summary: Dict, optional, additional summary information to log about data
            in the lineage table
        is_input: bool, if the file reference is an input or outputs
        rel_path: str, optional relative path to the run artifacts path
        skip_hash_calculation: optional, flag to instruct the client to skip hash calculation
    """
    # Enrich the summary with a hash (explicit, content-based, or
    # file/dir-based) and a context-relative path.
    summary = self._calculate_summary_for_path_or_content(
        hash=hash,
        path=path,
        content=content,
        summary=summary,
        skip_hash_calculation=skip_hash_calculation,
    )
    if path:
        # Default the lineage name to the file's base name and normalize
        # the path relative to the run's artifacts root.
        name = name or get_base_filename(path)
        rel_path = self._sanitize_filepath(filepath=path, rel_path=rel_path)
    # Without a resolvable name, nothing is logged.
    if name:
        artifact_run = V1RunArtifact(
            name=self._sanitize_filename(name),
            kind=kind,
            path=rel_path,
            summary=summary,
            is_input=is_input,
        )
        self.log_artifact_lineage(body=artifact_run)
@client_handler(check_no_op=True)
def log_model_ref(
    self,
    path: str,
    name: str = None,
    framework: str = None,
    summary: Dict = None,
    is_input: bool = False,
    rel_path: str = None,
    skip_hash_calculation: bool = False,
):
    """Logs model reference.

    > **Note**: The difference between this method and the `log_model`
    > is that this one does not copy or move the asset, it only registers a lineage reference.
    > If you need the model asset to be on the `artifacts_path` or the `outputs_path`
    > you have to copy it manually using a relative path to
    > `self.get_artifacts_path` or `self.get_outputs_path`.

    ```python
    >>> # Get outputs artifact path
    >>> asset_path = tracking.get_outputs_path("model/model_data.h5")
    >>> with open(asset_path, "w") as f:
    >>>     f.write("Artifact content.")
    >>> # Log reference to the lineage table
    >>> # Name of the artifact will default to model_data
    >>> tracking.log_model_ref(path=asset_path)
    ```

    Args:
        path: str, filepath, the name is extracted from the filepath.
        name: str, if the name is passed it will be used instead of the filename from the path.
        framework: str, optional, name of the framework.
        summary: Dict, optional, additional summary information to log about data
            in the lineage table.
        is_input: bool, if the file reference is an input or outputs.
        rel_path: str, optional relative path to the run artifacts path.
        skip_hash_calculation: optional, flag to instruct the client to skip hash calculation.
    """
    # The framework is carried in the lineage summary (may be None when
    # not provided).
    summary = summary or {}
    summary["framework"] = framework
    # Flag the run as having a model before registering the lineage.
    self._log_has_model()
    return self.log_artifact_ref(
        path=path,
        kind=V1ArtifactKind.MODEL,
        name=name,
        summary=summary,
        is_input=is_input,
        rel_path=rel_path,
        skip_hash_calculation=skip_hash_calculation,
    )
@client_handler(check_no_op=True)
def log_file_ref(
    self,
    path: str,
    name: str = None,
    hash: str = None,
    content=None,
    summary: Dict = None,
    is_input: bool = False,
    rel_path: str = None,
    skip_hash_calculation: bool = False,
):
    """Logs a file reference as lineage with the `file` artifact kind.

    Args:
        path: str, filepath; the name is extracted from the filepath.
        name: str, overrides the filename derived from the path.
        hash: str, optional, hash version of the file;
            if not provided it is derived from the file content.
        content: the file content.
        summary: Dict, optional, extra summary information for the lineage table.
        is_input: bool, if the file reference is an input or outputs.
        rel_path: str, optional relative path to the run artifacts path.
        skip_hash_calculation: optional, flag to skip hash calculation.
    """
    # Thin wrapper over `log_artifact_ref` with the FILE kind.
    return self.log_artifact_ref(
        path=path,
        kind=V1ArtifactKind.FILE,
        name=name,
        hash=hash,
        content=content,
        summary=summary,
        is_input=is_input,
        rel_path=rel_path,
        skip_hash_calculation=skip_hash_calculation,
    )
@client_handler(check_no_op=True)
def log_dir_ref(
    self,
    path: str,
    name: str = None,
    hash: str = None,
    summary: Dict = None,
    is_input: bool = False,
    rel_path: str = None,
    skip_hash_calculation: bool = False,
):
    """Logs a directory reference as lineage with the `dir` artifact kind.

    Args:
        path: str, dir path; the name is extracted from the path.
        name: str, overrides the dirname derived from the path.
        hash: str, optional, hash version of the dir;
            if not provided it is derived from the dir content.
        summary: Dict, optional, extra summary information for the lineage table.
        is_input: bool, if the dir reference is an input or outputs.
        rel_path: str, optional relative path to the run artifacts path.
        skip_hash_calculation: optional, flag to skip hash calculation.
    """
    # Thin wrapper over `log_artifact_ref` with the DIR kind; the name
    # defaults to the directory's base name.
    return self.log_artifact_ref(
        path=path,
        kind=V1ArtifactKind.DIR,
        name=name or os.path.basename(path),
        hash=hash,
        summary=summary,
        is_input=is_input,
        rel_path=rel_path,
        skip_hash_calculation=skip_hash_calculation,
    )
def _get_meta_key(self, key: str, default: Any = None):
    # Meta info lives on the cached run data; when either is missing,
    # fall back to the default.
    run_data = self.run_data
    meta = run_data.meta_info if run_data else None
    if not meta:
        return default
    return meta.get(key, default)
def _has_meta_key(self, key: str):
    # Returns the stored meta value (truthy when set); missing keys
    # default to False.
    return self._get_meta_key(key, default=False)
@client_handler(check_no_op=True)
def log_tensorboard_ref(
    self,
    path: str,
    name: str = "tensorboard",
    is_input: bool = False,
    rel_path: str = None,
):
    """Logs a tensorboard reference (at most once per run).

    Args:
        path: str, path to the tensorboard logdir.
        name: str, overrides the default `tensorboard` lineage name.
        is_input: bool, if the tensorboard reference is an input or outputs.
        rel_path: str, optional relative path to the run artifacts path.
    """
    # Only register the lineage and meta flag the first time.
    if self._has_meta_key("has_tensorboard"):
        return
    self.log_artifact_ref(
        path=path,
        kind=V1ArtifactKind.TENSORBOARD,
        name=name,
        is_input=is_input,
        rel_path=rel_path,
        skip_hash_calculation=True,
    )
    self.log_meta(has_tensorboard=True)
@client_handler(check_no_op=True)
def log_artifact_lineage(
    self,
    body: Union[Dict, List[Dict], V1RunArtifact, List[V1RunArtifact]],
    async_req: bool = True,
):
    """Logs an artifact lineage.

    > **Note**: This method can be used to log manual lineage objects, it is used internally
    > to log model/file/artifact/code refs

    Args:
        body: dict or List[dict] or V1RunArtifact or List[V1RunArtifact], body of the lineage.
        async_req: bool, optional, default: True, execute request asynchronously.
    """
    if self._is_offline:
        # Offline mode: record the lineage locally, keyed by artifact name.
        for entry in to_list(body, check_none=True):
            artifact = (
                entry
                if isinstance(entry, V1RunArtifact)
                else V1RunArtifact.read(entry)
            )
            self._artifacts_lineage[artifact.name] = artifact
        return
    self.client.runs_v1.create_run_artifacts_lineage(
        self.owner,
        self.project,
        self.run_uuid,
        body=body,
        async_req=async_req,
    )
@client_handler(check_no_op=True, check_offline=True)
def get_namespace(self):
    """Fetches the namespace the run is deployed under."""
    response = self.client.runs_v1.get_run_namespace(
        self.owner, self.project, self.run_uuid
    )
    return response.namespace
@client_handler(check_no_op=True, check_offline=True)
def delete(self):
    """Deletes the current run from the API."""
    return self.client.runs_v1.delete_run(
        self.owner, self.project, self.run_uuid
    )
@client_handler(check_no_op=True, check_offline=True)
def list(
    self, query: str = None, sort: str = None, limit: int = None, offset: int = None
):
    """Lists runs under the current owner - project.

    [Run API](/docs/api/#operation/ListRuns)

    Args:
        query: str, optional, query filters, please refer to
            [Run PQL](/docs/core/query-language/runs/#query)
        sort: str, optional, fields to order by, please refer to
            [Run PQL](/docs/core/query-language/runs/#sort)
        limit: int, optional, limit of runs to return (defaults to 20).
        offset: int, optional, offset pages to paginate runs.

    Returns:
        List[V1Run], list of run instances.
    """
    query_kwargs = get_query_params(
        query=query, sort=sort, limit=limit or 20, offset=offset
    )
    return self.client.runs_v1.list_runs(self.owner, self.project, **query_kwargs)
@client_handler(check_no_op=True, check_offline=True)
def list_children(
    self, query: str = None, sort: str = None, limit: int = None, offset: int = None
):
    """Lists run's children if the current run has a pipeline.

    [Run API](/docs/api/#operation/ListRuns)

    Args:
        query: str, optional, query filters, please refer to
            [Project PQL](/docs/core/query-language/runs/#query)
        sort: str, optional, fields to order by, please refer to
            [Project PQL](/docs/core/query-language/runs/#sort)
        limit: int, optional, limit of runs to return.
        offset: int, optional, offset pages to paginate runs.

    Returns:
        List[V1Run], list of run instances.
    """
    params = get_query_params(limit=limit, offset=offset, query=query, sort=sort)
    # Scope the listing to runs whose pipeline is the current run.
    base_query = params.get("query")
    # NOTE(review): when no user query is given the filter string starts
    # with "?" — mirrored from the original; confirm the API expects that.
    prefix = base_query + "&" if base_query else "?"
    params["query"] = prefix + "pipeline={}".format(self.run_uuid)
    return self.client.runs_v1.list_runs(self.owner, self.project, **params)
@client_handler(check_no_op=True, check_offline=True)
def promote_to_model_version(
    self,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    connection: str = None,
    artifacts: List[str] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Similar to
    [ProjectClient.register_model_version](/docs/core/python-library/project-client/#register_model_version),
    directly from the run client instance,
    allows to create or Update a model version based on the current run.

    **Available from v1.18**

    Args:
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        connection: str, optional, a uuid reference to a connection.
        artifacts: List[str], optional, list of artifacts to highlight(requires passing a run)
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion, model version.
    """
    # Imported lazily to avoid an import cycle between run and project clients.
    from polyaxon.client.project import ProjectClient

    project_client = ProjectClient(self.owner, self.project)
    return project_client.register_model_version(
        version=version,
        description=description,
        tags=tags,
        content=content,
        run=self.run_uuid,
        connection=connection,
        artifacts=artifacts,
        force=force,
    )
@client_handler(check_no_op=True, check_offline=True)
def promote_to_artifact_version(
    self,
    version: str,
    description: str = None,
    tags: Union[str, List[str]] = None,
    content: Union[str, Dict] = None,
    connection: str = None,
    artifacts: List[str] = None,
    force: bool = False,
) -> polyaxon_sdk.V1ProjectVersion:
    """Similar to
    [ProjectClient.register_artifact_version](/docs/core/python-library/project-client/#register_artifact_version),
    directly from the run client instance,
    allows to create or Update an artifact version based on the current run.

    **Available from v1.18**

    Args:
        version: str, optional, the version name/tag.
        description: str, optional, the version description.
        tags: str or List[str], optional.
        content: str or dict, optional, content/metadata (JSON object) of the version.
        connection: str, optional, a uuid reference to a connection.
        artifacts: List[str], optional, list of artifacts to highlight(requires passing a run)
        force: bool, optional, to force push, i.e. update if exists.

    Returns:
        V1ProjectVersion, artifact version.
    """
    # Imported lazily to avoid an import cycle between run and project clients.
    from polyaxon.client.project import ProjectClient

    project_client = ProjectClient(self.owner, self.project)
    return project_client.register_artifact_version(
        version=version,
        description=description,
        tags=tags,
        content=content,
        run=self.run_uuid,
        connection=connection,
        artifacts=artifacts,
        force=force,
    )
def _collect_events_summaries(
    self,
    events_path: str,
    events_kind: str,
    last_check: Optional[datetime],
    is_system_resource: bool = False,
) -> Tuple[List, Dict]:
    # Builds lineage summaries for every event file of one kind under
    # `events_path/events_kind`, skipping files unmodified since `last_check`.
    # Returns (list of V1RunArtifact summaries, {metric name: last value}).
    current_events_path = os.path.join(events_path, events_kind)

    summaries = []
    last_values = {}
    connection_name = get_artifacts_store_name()

    with get_files_in_path_context(current_events_path) as files:
        for f in files:
            if last_check and not file_modified_since(
                filepath=f, last_time=last_check
            ):
                # Unchanged since the previous sync — skip re-summarizing.
                continue

            # Event files are named "<event name>.plx...".
            event_name = os.path.basename(f).split(".plx")[0]
            event = V1Events.read(kind=events_kind, name=event_name, data=f)
            if event.df.empty:
                continue

            # Get only the relpath from run uuid
            event_rel_path = self._sanitize_filepath(filepath=f)
            summary = event.get_summary()
            run_artifact = V1RunArtifact(
                name=event_name,
                # System resources are tracked under a dedicated artifact kind.
                kind=V1ArtifactKind.SYSTEM if is_system_resource else events_kind,
                connection=connection_name,
                summary=summary,
                path=event_rel_path,
                is_input=False,
            )
            summaries.append(run_artifact)
            if events_kind == V1ArtifactKind.METRIC:
                # Track the latest metric value so the caller can log it
                # as a run output.
                last_values[event_name] = summary[V1ArtifactKind.METRIC]["last"]

    return summaries, last_values
def _sync_events_summaries(
    self,
    last_check: Optional[datetime],
    events_path: str,
    is_system_resource: bool = False,
):
    """Collects summaries for every event kind under `events_path` and
    logs the resulting lineage (and, for user events, the last metric values).
    """
    # Nothing to sync when the events directory is missing.
    if not events_path or not os.path.exists(events_path):
        return

    all_summaries = []
    metric_last_values = {}
    track_last_values = not is_system_resource
    # Each subdirectory of events_path holds one event kind.
    for kind in get_dirs_under_path(events_path):
        kind_summaries, kind_last_values = self._collect_events_summaries(
            events_path=events_path,
            events_kind=kind,
            last_check=last_check,
            is_system_resource=is_system_resource,
        )
        all_summaries.extend(kind_summaries)
        if track_last_values:
            metric_last_values.update(kind_last_values)

    if all_summaries:
        self.log_artifact_lineage(all_summaries)
    if track_last_values and metric_last_values:
        self.log_outputs(**metric_last_values)
@client_handler(check_no_op=True)
def sync_events_summaries(self, last_check: Optional[datetime], events_path: str):
    """Syncs all tracked events and auto-generates summaries and lineage data.

    > **Note**: Both `in-cluster` and `offline` modes will manage syncing events summaries
    > automatically, so you should not call this method manually.
    """
    # Delegates to the shared implementation with user-events semantics.
    self._sync_events_summaries(
        last_check=last_check, events_path=events_path, is_system_resource=False
    )
@client_handler(check_no_op=True)
def sync_system_events_summaries(
    self, last_check: Optional[datetime], events_path: str
):
    """Syncs all tracked system events and auto-generates summaries and lineage data.

    > **Note**: Both `in-cluster` and `offline` modes will manage syncing events summaries
    > automatically, so you should not call this method manually.
    """
    # Delegates to the shared implementation with system-resource semantics.
    self._sync_events_summaries(
        last_check=last_check, events_path=events_path, is_system_resource=True
    )
@client_handler(check_no_op=True)
def persist_run(self, path: str):
    """Persists a run to a local path.

    > **Note**: You generally do not need to call this method manually,
    > When the `offline` mode is enabled, this method is triggered automatically at the end.

    Args:
        path: str, The path where to persist the run's metadata.
    """
    if not self.run_data:
        logger.debug(
            "Persist offline run call failed. "
            "Make sure that the offline mode is enabled and that run_data is provided."
        )
        return
    if not path or not os.path.exists(path):
        # Create the destination directory when missing.
        # NOTE(review): when `path` is falsy this still calls
        # check_or_create_path(path, ...) — confirm that is intended.
        check_or_create_path(path, is_dir=True)
    # Serialize the run's metadata to the local run file.
    run_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_RUN)
    with open(run_path, "w") as config_file:
        config_file.write(
            ujson.dumps(self.client.sanitize_for_serialization(self.run_data))
        )

    if not self._artifacts_lineage:
        logger.debug("Persist offline run call did not find any lineage data. ")
        return

    # Serialize the cached lineage objects alongside the run metadata.
    lineages_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_LINEAGES)
    with open(lineages_path, "w") as config_file:
        config_file.write(
            ujson.dumps(
                [
                    self.client.sanitize_for_serialization(l)
                    for l in self._artifacts_lineage.values()
                ]
            )
        )
@classmethod
@client_handler(check_no_op=True)
def load_offline_run(
    cls,
    path: str,
    run_client: Union["RunClient", "Run"] = None,
    reset_project: bool = False,
    raise_if_not_found: bool = False,
) -> Union["RunClient", "Run"]:
    """Loads an offline run from a local path.

    > **Note**: When the `offline` mode is enabled, and the run uuid is provided,
    > this method is triggered automatically to load last checkpoint.

    Args:
        path: str, The path where the run's metadata is persisted.
        run_client: RunClient, optional, instance of the client to update with
            the loaded run's information.
        reset_project: bool, optional, a flag to reset the run's owner and/or project based on
            the data from the passed `run_client` instead of the persisted data
            from the local run.
        raise_if_not_found: bool, optional, a flag to raise an error if the local path does not
            contain a persisted run.
    """
    run_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_RUN)
    if not os.path.isfile(run_path):
        if raise_if_not_found:
            raise PolyaxonClientException(f"Offline data was not found: {run_path}")
        else:
            logger.info(f"Offline data was not found: {run_path}")
            return

    with open(run_path, "r") as config_file:
        config_str = config_file.read()
        run_config = polyaxon_sdk.V1Run(**ujson.loads(config_str))
        owner = run_config.owner
        project = run_config.project
        # Optionally take owner/project from the provided client instead
        # of the persisted data.
        # NOTE(review): when no run_client is passed and the persisted run
        # lacks owner/project, run_client.owner dereferences None — confirm
        # callers always persist both fields.
        if reset_project or not owner:
            owner = run_client.owner
        if reset_project or not project:
            project = run_client.project
        if run_client:
            # Point the existing client at the loaded run.
            run_client._owner = owner
            run_client._project = project
            run_client._run_uuid = run_config.uuid
        else:
            run_client = cls(
                owner=owner,
                project=project,
                run_uuid=run_config.uuid,
            )
        run_client._run_data = run_config
        logger.info(f"Offline data loaded from: {run_path}")

    # Lineage data is optional; return the client even when it's missing.
    lineages_path = "{}/{}".format(path, ctx_paths.CONTEXT_LOCAL_LINEAGES)
    if not os.path.isfile(lineages_path):
        logger.info(f"Offline lineage data was not found: {lineages_path}")
        return run_client
    with open(lineages_path, "r") as config_file:
        config_str = config_file.read()
        lineages = [
            V1RunArtifact.from_dict(l, unknown=EXCLUDE)
            for l in ujson.loads(config_str)
        ]
        run_client._artifacts_lineage = {l.name: l for l in lineages}
        logger.info(f"Offline lineage data loaded from: {lineages_path}")
    return run_client
@client_handler(check_no_op=True)
def pull_remote_run(
    self,
    path: str = None,
    download_artifacts: bool = True,
):
    """Download a run on Polyaxon's API and artifacts store to local path.

    Args:
        path: str, optional, defaults to the offline root path,
            path where the run's metadata & artifacts will be stored.
        download_artifacts: bool, optional, flag to trigger artifacts download.
    """
    local_path = ctx_paths.get_offline_path(
        entity_value=self.run_uuid, entity_kind=V1ProjectFeature.RUNTIME, path=path
    )
    # Start from a clean directory so stale files do not linger.
    delete_path(local_path)
    self.refresh_data(load_artifacts_lineage=True, load_conditions=True)
    if download_artifacts:
        self.download_artifacts(path_to=local_path)
    self.persist_run(local_path)
    return local_path
@client_handler(check_no_op=True)
def push_offline_run(
    self,
    path: str,
    upload_artifacts: bool = True,
    clean: bool = False,
):
    """Syncs an offline run to Polyaxon's API and artifacts store.

    Args:
        path: str, root path where the run's metadata & artifacts are stored.
        upload_artifacts: bool, optional, flag to trigger artifacts upload.
        clean: bool, optional, flag to clean local path after pushing the run.
    """
    if not self.run_data:
        logger.warning(
            "Push offline run failed. Make sure that run_data is provided."
        )
        return

    # We ensure that the is_offline is False while talking to the API.
    # Fix: the original flipped the flag before the run_data check and never
    # restored it on the early-return (no lineage) path, leaving the client
    # permanently online; try/finally restores it on every exit path.
    is_offline = self._is_offline
    self._is_offline = False
    try:
        self.client.runs_v1.sync_run(
            owner=self.owner,
            project=self.project,
            body=self.run_data,
            async_req=False,
        )
        logger.info(f"Offline data for run {self.run_data.uuid} synced")
        if self._artifacts_lineage:
            self.log_artifact_lineage(
                [l for l in self._artifacts_lineage.values()], async_req=False
            )
            logger.info(f"Offline lineage data for run {self.run_data.uuid} synced")
        else:
            # Without lineage data the push is considered incomplete;
            # artifacts upload is skipped (preserved from the original).
            logger.warning("Push offline run failed. No lineage data found.")
            return

        if path and upload_artifacts:
            self.upload_artifacts_dir(
                dirpath=path,
                path="/",
                overwrite=True,
                relative_to=path,
            )
            logger.info(f"Offline artifacts for run {self.run_data.uuid} uploaded")

        if clean:
            delete_path(path)
    finally:
        # Reset is_offline
        self._is_offline = is_offline
def get_run_logs(
    client: RunClient,
    hide_time: bool = False,
    all_containers: bool = False,
    all_info: bool = False,
    follow: bool = False,
):
    # Streams a run's logs to the console: waits for the run to start,
    # then polls log batches until the run is done (or once, when not following).

    def get_logs(last_file=None, last_time=None):
        # Fetch one batch of logs and print it through the configured streamer.
        try:
            response = client.get_logs(last_file=last_file, last_time=last_time)
            get_logs_streamer(
                show_timestamp=not hide_time,
                all_containers=all_containers,
                all_info=all_info,
            )(response)
            return response
        except (ApiException, HTTPError) as e:
            # In follow mode API errors are tolerated so polling can retry.
            if not follow:
                handle_cli_error(
                    e,
                    message="Could not get logs for run `{}`.".format(client.run_uuid),
                )
                sys.exit(1)

    def handle_status(last_status: str = None, live_update=None):
        # Print (or live-update) the colored status line; returns the status
        # so the caller can track transitions.
        if not last_status:
            return {"status": None}

        if live_update:
            live_update.update(
                status="{}".format(
                    Printer.add_status_color(
                        {"status": last_status}, status_key="status"
                    )["status"]
                )
            )
        else:
            Printer.print(
                "{}".format(
                    Printer.add_status_color(
                        {"status": last_status}, status_key="status"
                    )["status"]
                )
            )
        return last_status

    def handle_logs():
        is_done = False
        last_file = None
        _status = None
        files = []
        last_transition_time = now()
        last_status, conditions = client.get_statuses()
        if conditions:
            last_transition_time = conditions[0].last_transition_time

        # Phase 1: block until the run is running (or already done),
        # showing a live status spinner while waiting.
        with Printer.console.status("Waiting for running condition ...") as live_update:
            while not LifeCycle.is_done(last_status) and not LifeCycle.is_running(
                last_status
            ):
                time.sleep(settings.CLIENT_CONFIG.watch_interval)
                last_status, conditions = client.get_statuses()
                if conditions:
                    last_transition_time = conditions[0].last_transition_time
                if _status != last_status:
                    _status = handle_status(last_status, live_update)

        # For finished runs fetch the archived logs (no time cursor);
        # for running ones start from the last transition time.
        if LifeCycle.is_done(last_status):
            last_time = None
        else:
            last_time = last_transition_time

        # Phase 2: poll log batches, advancing the (file, time) cursor.
        checks = 0
        while not is_done:
            response = get_logs(last_time=last_time, last_file=last_file)

            if response:
                last_time = response.last_time
                last_file = response.last_file
                files = response.files
            else:
                last_time = None
                last_file = None

            # Follow logic
            # After several empty polls, re-check the run status: stop when
            # done, otherwise reset the counter and keep following.
            if not any([last_file, last_time]) or checks > 3:
                if follow:
                    last_status, _ = client.get_statuses()
                    if _status != last_status:
                        _status = handle_status(last_status)
                    is_done = LifeCycle.is_done(last_status)
                    if not is_done:
                        checks = 0
                else:
                    is_done = True

            # Non-follow mode stops after the first timed batch.
            if last_time and not follow:
                is_done = True

            if not is_done:
                if last_file:
                    # More archived files remain: keep draining them quickly.
                    if len(files) > 1 and last_file != files[-1]:
                        time.sleep(1)
                    else:
                        is_done = True
                else:
                    time.sleep(settings.CLIENT_CONFIG.watch_interval)
                checks += 1

    handle_logs()
| 37.608929 | 122 | 0.587645 |
1e08850aa99780ecb4fcb68b63e2766d4803d72e | 4,511 | py | Python | elliot/recommender/latent_factor_models/PMF/probabilistic_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 175 | 2021-03-04T15:46:25.000Z | 2022-03-31T05:56:58.000Z | elliot/recommender/latent_factor_models/PMF/probabilistic_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 15 | 2021-03-06T17:53:56.000Z | 2022-03-24T17:02:07.000Z | elliot/recommender/latent_factor_models/PMF/probabilistic_matrix_factorization.py | gategill/elliot | 113763ba6d595976e14ead2e3d460d9705cd882e | [
"Apache-2.0"
] | 39 | 2021-03-04T15:46:26.000Z | 2022-03-09T15:37:12.000Z | """
Module description:
Mnih, Andriy, and Russ R. Salakhutdinov. "Probabilistic matrix factorization." Advances in neural information processing systems 20 (2007)
"""
__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import numpy as np
import pickle
from tqdm import tqdm
from elliot.dataset.samplers import pointwise_pos_neg_sampler as pws
from elliot.recommender.latent_factor_models.PMF.probabilistic_matrix_factorization_model import ProbabilisticMatrixFactorizationModel
from elliot.recommender.recommender_utils_mixin import RecMixin
from elliot.utils.write import store_recommendation
from elliot.recommender.base_recommender_model import BaseRecommenderModel
from elliot.recommender.base_recommender_model import init_charger
class PMF(RecMixin, BaseRecommenderModel):
r"""
Probabilistic Matrix Factorization
For further details, please refer to the `paper <https://papers.nips.cc/paper/2007/file/d7322ed717dedf1eb4e6e52a37ea7bcd-Paper.pdf>`_
Args:
factors: Number of latent factors
lr: Learning rate
reg: Regularization coefficient
gaussian_variance: Variance of the Gaussian distribution
To include the recommendation model, add it to the config file adopting the following pattern:
.. code:: yaml
models:
PMF:
meta:
save_recs: True
epochs: 10
batch_size: 512
factors: 50
lr: 0.001
reg: 0.0025
gaussian_variance: 0.1
"""
@init_charger
def __init__(self, data, config, params, *args, **kwargs):
self._params_list = [
("_learning_rate", "lr", "lr", 0.001, None, None),
("_factors", "factors", "factors", 50, None, None),
("_l_w", "reg", "reg", 0.0025, None, None),
("_gvar", "gaussian_variance", "gvar", 0.1, None, None),
]
self.autoset_params()
if self._batch_size < 1:
self._batch_size = self._data.transactions
self._ratings = self._data.train_dict
self._sp_i_train = self._data.sp_i_train
self._i_items_set = list(range(self._num_items))
self._sampler = pws.Sampler(self._data.i_train_dict)
self._model = ProbabilisticMatrixFactorizationModel(self._num_users,
self._num_items,
self._factors,
self._l_w,
self._gvar,
self._learning_rate,
self._seed)
@property
def name(self):
return "PMF"\
+ f"_{self.get_base_params_shortcut()}" \
+ f"_{self.get_params_shortcut()}"
def predict(self, u: int, i: int):
pass
def train(self):
if self._restore:
return self.restore_weights()
for it in self.iterate(self._epochs):
loss = 0
steps = 0
with tqdm(total=int(self._data.transactions // self._batch_size), disable=not self._verbose) as t:
for batch in self._sampler.step(self._data.transactions, self._batch_size):
steps += 1
loss += self._model.train_step(batch)
t.set_postfix({'loss': f'{loss.numpy() / steps:.5f}'})
t.update()
self.evaluate(it, loss.numpy()/(it + 1))
def get_recommendations(self, k: int = 100):
predictions_top_k_test = {}
predictions_top_k_val = {}
for index, offset in enumerate(range(0, self._num_users, self._batch_size)):
offset_stop = min(offset + self._batch_size, self._num_users)
predictions = self._model.get_recs(
(
np.repeat(np.array(list(range(offset, offset_stop)))[:, None], repeats=self._num_items, axis=1),
np.array([self._i_items_set for _ in range(offset, offset_stop)])
)
)
recs_val, recs_test = self.process_protocol(k, predictions, offset, offset_stop)
predictions_top_k_val.update(recs_val)
predictions_top_k_test.update(recs_test)
return predictions_top_k_val, predictions_top_k_test
| 36.674797 | 138 | 0.594103 |
5a3b61821d8cd0e7430eb7724a7efeb59aed650a | 2,307 | py | Python | asynet_mask_rcnn/data/datasets/abstract.py | alexisbdr/asynet-mask-rcnn | ba001fb21235251aa111ef7ccddb683852109e6f | [
"MIT"
] | null | null | null | asynet_mask_rcnn/data/datasets/abstract.py | alexisbdr/asynet-mask-rcnn | ba001fb21235251aa111ef7ccddb683852109e6f | [
"MIT"
] | null | null | null | asynet_mask_rcnn/data/datasets/abstract.py | alexisbdr/asynet-mask-rcnn | ba001fb21235251aa111ef7ccddb683852109e6f | [
"MIT"
] | null | null | null | import torch
class AbstractDataset(torch.utils.data.Dataset):
    """
    Serves as a common interface to reduce boilerplate and help dataset
    customization

    A generic Dataset for the asynet_mask_rcnn must have the following
    non-trivial fields / methods implemented:
        CLASSES - list/tuple:
            A list of strings representing the classes. It must have
            "__background__" as its 0th element for correct id mapping.

        __getitem__ - function(idx):
            This has to return three things: img, target, idx.
            img is the input image, which has to be load as a PIL Image object
            implementing the target requires the most effort, since it must have
            multiple fields: the size, bounding boxes, labels (contiguous), and
            masks (either COCO-style Polygons, RLE or torch BinaryMask).
            Usually the target is a BoxList instance with extra fields.
            Lastly, idx is simply the input argument of the function.

    also the following is required:
        __len__ - function():
            return the size of the dataset
        get_img_info - function(idx):
            return metadata, at least width and height of the input image
    """

    def __init__(self, *args, **kwargs):
        # Populated lazily by initMaps(); None until then.
        self.name_to_id = None
        self.id_to_name = None

    def __getitem__(self, idx):
        raise NotImplementedError

    def initMaps(self):
        """
        Can be called optionally to initialize the id<->category name mapping

        Initialize default mapping between:
            class <==> index
        class: this is a string that represents the class
        index: positive int, used directly by the ROI heads.

        NOTE:
            make sure that the background is always indexed by 0.
                "__background__" <==> 0
            if initialized by hand, double check that the indexing is correct.
        """
        assert isinstance(self.CLASSES, (list, tuple))
        assert self.CLASSES[0] == "__background__"
        # Indices follow the position of each class name in CLASSES.
        self.name_to_id = {name: idx for idx, name in enumerate(self.CLASSES)}
        self.id_to_name = {idx: name for idx, name in enumerate(self.CLASSES)}

    def get_img_info(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError
| 33.434783 | 80 | 0.641526 |
8182bde90472fdbd6b98159470bde85eeea9d8eb | 6,467 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/constants.py | tellarin/Recognizers-Text | ff019a69e9cb64de862c94b08125baaaf832ed25 | [
"MIT"
] | 2 | 2017-08-22T11:21:19.000Z | 2017-09-17T20:06:00.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/constants.py | tellarin/Recognizers-Text | ff019a69e9cb64de862c94b08125baaaf832ed25 | [
"MIT"
] | null | null | null | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/constants.py | tellarin/Recognizers-Text | ff019a69e9cb64de862c94b08125baaaf832ed25 | [
"MIT"
] | null | null | null | from ..resources import BaseDateTime
class Constants:
WRITTEN_TIME = 'writtentime'
SYS_DATETIME_DATE: str = 'date'
SYS_DATETIME_TIME: str = 'time'
SYS_DATETIME_DATEPERIOD: str = 'daterange'
SYS_DATETIME_DATETIME: str = 'datetime'
SYS_DATETIME_TIMEPERIOD: str = 'timerange'
SYS_DATETIME_DATETIMEPERIOD: str = 'datetimerange'
SYS_DATETIME_DURATION: str = 'duration'
SYS_DATETIME_SET: str = 'set'
SYS_DATETIME_TIMEZONE: str = 'timezone'
SYS_DATETIME_MERGED: str = 'datetimeV2'
# SourceEntity Types
SYS_DATETIME_DATETIMEPOINT: str = 'datetimepoint'
# Multiple Duration Types
MULTIPLE_DURATION_PREFIX = 'multipleDuration'
MULTIPLE_DURATION_DATE = MULTIPLE_DURATION_PREFIX + 'Date'
MULTIPLE_DURATION_TIME = MULTIPLE_DURATION_PREFIX + 'Time'
MULTIPLE_DURATION_DATE_TIME = MULTIPLE_DURATION_PREFIX + 'DateTime'
# keys
TIMEX_KEY: str = 'timex'
COMMENT_KEY: str = 'Comment'
MOD_KEY: str = 'Mod'
SOURCE_TYPE: str = 'sourceEntity'
TYPE_KEY: str = 'type'
IS_LUNAR_KEY: str = 'isLunar'
RESOLVE_KEY: str = 'resolve'
RESOLVE_TO_PAST_KEY: str = 'resolveToPast'
RESOLVE_TO_FUTURE_KEY: str = 'resolveToFuture'
SEMESTER_MONTH_COUNT: int = 6
TRIMESTER_MONTH_COUNT: int = 3
QUARTER_COUNT: int = 4
FOUR_DIGITS_YEAR_LENGTH: int = 4
MIN_MONTH: int = 1
MAX_MONTH: int = 12
INVALID_YEAR = -2147483648
# Failed connector extraction
INVALID_CONNECTOR_CODE = -1
MIN_YEAR_NUM: int = int(BaseDateTime.MinYearNum)
MAX_YEAR_NUM: int = int(BaseDateTime.MaxYearNum)
DEFAULT_LANGUAGE_FALLBACK_MDY: str = 'MDY'
DEFAULT_LANGUAGE_FALLBACK_DMY: str = 'DMY'
MAX_TWO_DIGIT_YEAR_FUTURE_NUM: int = int(BaseDateTime.MaxTwoDigitYearFutureNum)
MIN_TWO_DIGIT_YEAR_PAST_NUM: int = int(BaseDateTime.MinTwoDigitYearPastNum)
# Timex
TIMEX_YEAR: str = "Y"
TIMEX_MONTH: str = "M"
TIMEX_MONTH_FULL: str = "MON"
TIMEX_WEEK: str = "W"
TIMEX_DAY: str = "D"
TIMEX_BUSINESS_DAY: str = "BD"
TIMEX_WEEKEND: str = "WE"
TIMEX_HOUR: str = "H"
TIMEX_MINUTE: str = "M"
TIMEX_SECOND: str = "S"
TIMEX_FUZZY: str = 'X'
TIMEX_FUZZY_YEAR: str = "XXXX"
TIMEX_FUZZY_MONTH: str = "XX"
TIMEX_FUZZY_WEEK: str = "WXX"
TIMEX_FUZZY_DAY: str = "XX"
DATE_TIMEX_CONNECTOR: str = "-"
TIME_TIMEX_CONNECTOR: str = ":"
GENERAL_PERIOD_PREFIX: str = "P"
TIME_TIMEX_PREFIX: str = "T"
EARLY_MORNING: str = "TDA"
MORNING: str = "TMO"
MID_DAY: str = "TMI"
AFTERNOON: str = "TAF"
EVENING: str = "TEV"
DAYTIME: str = "TDT"
NIGHT: str = "TNI"
BUSINESS_HOUR = "TBH"
# Groups' names for named groups in regexes
NEXT_GROUP_NAME = "next"
AM_GROUP_NAME = 'am'
PM_GROUP_NAME = 'pm'
AM_PM_GROUP_NAME = 'ampm'
IMPLICIT_AM_GROUP_NAME = 'iam'
IMPLICIT_PM_GROUP_NAME = 'ipm'
PREFIX_GROUP_NAME = 'prefix'
SUFFIX_GROUP_NAME = 'suffix'
SUFFIX_NUM_GROUP_NAME = 'suffix_num'
DESC_GROUP_NAME = 'desc'
LEFT_DESC_GROUP_NAME = 'leftDesc'
RIGHT_DESC_GROUP_NAME = 'rightDesc'
SECOND_GROUP_NAME = 'sec'
MINUTE_GROUP_NAME = 'min'
HOUR_GROUP_NAME = 'hour'
DAY_GROUP_NAME = 'day'
WEEK_GROUP_NAME = 'week'
WEEKDAY_GROUP_NAME = 'weekday'
MONTH_GROUP_NAME = 'month'
YEAR_GROUP_NAME = 'year'
HOUR_NUM_GROUP_NAME = 'hournum'
TENS_GROUP_NAME = 'tens'
TIME_OF_DAY_GROUP_NAME = 'timeOfDay'
BUSINESS_DAY_GROUP_NAME = 'business'
LEFT_AM_PM_GROUP_NAME = 'leftDesc'
RIGHT_AM_PM_GROUP_NAME = 'rightDesc'
HOLIDAY_GROUP_NAME = 'holiday'
REL_MONTH = 'relmonth'
FIRST_TWO_YEAR_NUM = 'firsttwoyearnum'
LAST_TWO_YEAR_NUM = 'lasttwoyearnum'
YEAR_CHINESE = 'yearchs'
OTHER = 'other'
YEAR_RELATIVE = 'yearrel'
DAY_OF_MONTH = 'DayOfMonth'
NEW_TIME = 'newTime'
ENGLISH_TIME = 'engtime'
WEEK_OF = 'WeekOf'
MONTH_OF = 'MonthOf'
ORDER = 'order'
ORDER_QUARTER = 'orderQuarter'
NUM = 'num'
NUMBER = 'number'
MIN_NUM = 'minnum'
MID = 'mid'
MIDDAY = 'midday'
MID_AFTERNOON = 'midafternoon'
MID_MORNING = 'midmorning'
MID_NIGHT = 'midnight'
CARDINAL = 'cardinal'
SEAS = 'seas'
SEASON = 'season'
UNIT = 'unit'
UNIT_Y = 'Y'
UNIT_D = 'D'
UNIT_M = 'M'
UNIT_W = 'W'
UNIT_MON = 'MON'
UNIT_S = 'S'
UNIT_H = 'H'
UNIT_T = 'T'
UNIT_P = 'P'
# Prefix
EARLY_PREFIX = 'EarlyPrefix'
LATE_PREFIX = 'LatePrefix'
MID_PREFIX = 'MidPrefix'
REL_EARLY = 'RelEarly'
REL_LATE = 'RelLate'
EARLY = 'early'
LATE = 'late'
HALF = 'half'
HAS_MOD = 'mod'
# Holidays
# These should not be constants, they should go on the resources files for English
FATHERS = 'fathers'
MOTHERS = 'mothers'
THANKS_GIVING_DAY = 'thanksgivingday'
THANKS_GIVING = 'thanksgiving'
BLACK_FRIDAY = 'blackfriday'
MARTIN_LUTHER_KING = 'martinlutherking'
WASHINGTON_BIRTHDAY = 'washingtonsbirthday'
LABOUR = 'labour'
CANBERRA = 'canberra'
COLUMBUS = 'columbus'
MEMORIAL = 'memorial'
AGO_LABEL = "ago"
LATER_LABEL = "later"
class TimeTypeConstants:
    # Keys used in date-time resolution dictionaries.
    DATE: str = 'date'
    START_DATE: str = 'startDate'
    END_DATE: str = 'endDate'
    DATETIME: str = 'dateTime'
    START_DATETIME: str = 'startDateTime'
    END_DATETIME: str = 'endDateTime'
    DURATION: str = 'duration'
    SET: str = 'set'
    TIME: str = 'time'
    VALUE: str = 'value'
    START_TIME: str = 'startTime'
    END_TIME: str = 'endTime'
    START: str = 'start'
    END: str = 'end'

    # Mod Value
    # "before" -> To mean "preceding in time". I.e.
    # Does not include the extracted datetime entity in the resolution's ending point. Equivalent to "<"
    BEFORE_MOD: str = 'before'

    # "after" -> To mean "following in time". I.e.
    # Does not include the extracted datetime entity in the resolution's starting point. Equivalent to ">"
    AFTER_MOD: str = 'after'

    # "since" -> Same as "after", but including the extracted datetime entity. Equivalent to ">="
    SINCE_MOD: str = 'since'

    # "until" -> Same as "before", but including the extracted datetime entity. Equivalent to "<="
    UNTIL_MOD: str = 'until'

    # Range-position modifiers (beginning / middle / end of a period).
    EARLY_MOD: str = 'start'
    MID_MOD: str = 'mid'
    LATE_MOD: str = 'end'
    # Comparison and approximation modifiers.
    MORE_THAN_MOD: str = 'more'
    LESS_THAN_MOD: str = 'less'
    REF_UNDEF_MOD: str = 'ref_undef'
    APPROX_MOD: str = 'approx'
656399dcd09dade51c7e13e1d0190b836669261e | 3,290 | py | Python | tests/apps/tethysapp-test_app/tethysapp/test_app/controllers.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | tests/apps/tethysapp-test_app/tethysapp/test_app/controllers.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | 1 | 2018-09-20T21:27:14.000Z | 2018-09-20T21:27:14.000Z | tests/apps/tethysapp-test_app/tethysapp/test_app/controllers.py | msouff/tethys | 45795d1e6561d5db8fddd838f4d1ae1d91dbb837 | [
"BSD-2-Clause"
] | null | null | null | from django.shortcuts import render
from tethys_sdk.permissions import login_required
from tethys_sdk.gizmos import Button
from channels.generic.websocket import WebsocketConsumer
import json
from typing import Any
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.embed import server_document
@login_required()
def home(request):
    """
    Controller for the app home page.
    """
    # Toolbar buttons rendered via the Tethys Button gizmo; the data-*
    # attributes configure Bootstrap tooltips.
    save_button = Button(
        display_text='',
        name='save-button',
        icon='glyphicon glyphicon-floppy-disk',
        style='success',
        attributes={
            'data-toggle': 'tooltip',
            'data-placement': 'top',
            'title': 'Save'
        }
    )

    edit_button = Button(
        display_text='',
        name='edit-button',
        icon='glyphicon glyphicon-edit',
        style='warning',
        attributes={
            'data-toggle': 'tooltip',
            'data-placement': 'top',
            'title': 'Edit'
        }
    )

    remove_button = Button(
        display_text='',
        name='remove-button',
        icon='glyphicon glyphicon-remove',
        style='danger',
        attributes={
            'data-toggle': 'tooltip',
            'data-placement': 'top',
            'title': 'Remove'
        }
    )

    previous_button = Button(
        display_text='Previous',
        name='previous-button',
        attributes={
            'data-toggle': 'tooltip',
            'data-placement': 'top',
            'title': 'Previous'
        }
    )

    next_button = Button(
        display_text='Next',
        name='next-button',
        attributes={
            'data-toggle': 'tooltip',
            'data-placement': 'top',
            'title': 'Next'
        }
    )

    # Embed the Bokeh application served at this same URL (see home_handler).
    script = server_document(request.build_absolute_uri())

    context = {
        'save_button': save_button,
        'edit_button': edit_button,
        'remove_button': remove_button,
        'previous_button': previous_button,
        'next_button': next_button,
        'script': script
    }

    return render(request, 'test_app/home.html', context)
def home_handler(doc):
    # Bokeh document handler: renders a line plot whose y-values are scaled
    # by a slider (served through the Bokeh/Channels integration).
    data = {'x': [0, 1, 2, 3, 4, 5], 'y': [0, 10, 20, 30, 40, 50]}
    source = ColumnDataSource(data=data)

    plot = figure(x_axis_type="linear", y_range=(0, 50), title="Test App Bokeh + Channels Plot", height=250)
    plot.line(x="x", y="y", source=source)

    def callback(attr: str, old: Any, new: Any) -> None:
        # Multiply the base series by the slider value and rescale the y-axis.
        if new == 1:
            data['y'] = [0, 10, 20, 30, 40, 50]
        else:
            data['y'] = [i * new for i in [0, 10, 20, 30, 40, 50]]
        source.data = ColumnDataSource(data=data).data
        plot.y_range.end = max(data['y'])

    slider = Slider(start=1, end=5, value=1, step=1, title="Test App Bokeh + Channels Controller")
    slider.on_change("value", callback)

    doc.add_root(column(slider, plot))
class TestWS(WebsocketConsumer):
    """Minimal echo websocket consumer used by the test app."""

    def connect(self):
        # Accept every incoming connection unconditionally.
        self.accept()

    def receive(self, text_data):
        # Echo the client's message back under the `server_message` key.
        text_data_json = json.loads(text_data)
        message = text_data_json['client_message']
        self.send(text_data=json.dumps({
            'server_message': message
        }))

    def disconnect(self, close_code):
        pass
| 26.111111 | 108 | 0.575076 |
ea916b5eb5f041b1eea8beaa130dfb5755582766 | 9,706 | py | Python | vit_keras/vit.py | aiinside/vit-keras | 28815edc5c24492612af726d1b2ca78295128d84 | [
"Apache-2.0"
] | 193 | 2020-11-09T13:13:49.000Z | 2022-03-30T05:40:11.000Z | vit_keras/vit.py | aiinside/vit-keras | 28815edc5c24492612af726d1b2ca78295128d84 | [
"Apache-2.0"
] | 30 | 2020-12-16T14:03:19.000Z | 2022-03-29T18:36:46.000Z | vit_keras/vit.py | aiinside/vit-keras | 28815edc5c24492612af726d1b2ca78295128d84 | [
"Apache-2.0"
] | 40 | 2020-11-10T02:27:12.000Z | 2022-03-29T18:31:32.000Z | import typing
import warnings
import tensorflow as tf
import typing_extensions as tx
from . import layers, utils
ConfigDict = tx.TypedDict(
"ConfigDict",
{
"dropout": float,
"mlp_dim": int,
"num_heads": int,
"num_layers": int,
"hidden_size": int,
},
)
CONFIG_B: ConfigDict = {
"dropout": 0.1,
"mlp_dim": 3072,
"num_heads": 12,
"num_layers": 12,
"hidden_size": 768,
}
CONFIG_L: ConfigDict = {
"dropout": 0.1,
"mlp_dim": 4096,
"num_heads": 16,
"num_layers": 24,
"hidden_size": 1024,
}
BASE_URL = "https://github.com/faustomorales/vit-keras/releases/download/dl"
WEIGHTS = {"imagenet21k": 21_843, "imagenet21k+imagenet2012": 1_000}
SIZES = {"B_16", "B_32", "L_16", "L_32"}
ImageSizeArg = typing.Union[typing.Tuple[int, int], int]
def preprocess_inputs(X):
    """Preprocess images with the Keras "tf" mode (scales pixels to [-1, 1])."""
    return tf.keras.applications.imagenet_utils.preprocess_input(
        X, data_format=None, mode="tf"
    )
def interpret_image_size(image_size_arg: "ImageSizeArg") -> typing.Tuple[int, int]:
    """Normalize an image-size argument to a (height, width) tuple.

    Args:
        image_size_arg: either a single int (square image) or a 2-tuple of ints.

    Returns:
        A tuple of two ints.

    Raises:
        ValueError: if the argument is neither an int nor a 2-tuple of ints.
    """
    if isinstance(image_size_arg, int):
        return (image_size_arg, image_size_arg)
    if (
        isinstance(image_size_arg, tuple)
        and len(image_size_arg) == 2
        and all(isinstance(v, int) for v in image_size_arg)
    ):
        return image_size_arg
    raise ValueError(
        f"The image_size argument must be a tuple of 2 integers or a single integer. Received: {image_size_arg}"
    )
def build_model(
    image_size: "ImageSizeArg",
    patch_size: int,
    num_layers: int,
    hidden_size: int,
    num_heads: int,
    name: str,
    mlp_dim: int,
    classes: int,
    dropout=0.1,
    activation="linear",
    include_top=True,
    representation_size=None,
):
    """Build a ViT model.

    Args:
        image_size: The size of input images (int or (height, width)).
        patch_size: The size of each patch (must fit evenly in image_size).
        num_layers: The number of transformer layers to use.
        hidden_size: The number of filters to use.
        num_heads: The number of transformer heads.
        name: Name given to the resulting Keras model.
        mlp_dim: The number of dimensions for the MLP output in the transformers.
        classes: Number of classes to classify images into, only used
            when `include_top` is True.
        dropout: Fraction of the units to drop for dense layers.
        activation: The activation to use for the final layer.
        include_top: Whether to include the final classification layer. If not,
            the output will have dimensions (batch_size, hidden_size).
        representation_size: The size of the representation prior to the
            classification layer. If None, no Dense layer is inserted.
    """
    image_size_tuple = interpret_image_size(image_size)
    assert (image_size_tuple[0] % patch_size == 0) and (
        image_size_tuple[1] % patch_size == 0
    ), "image_size must be a multiple of patch_size"
    x = tf.keras.layers.Input(shape=(image_size_tuple[0], image_size_tuple[1], 3))
    # Patch embedding: non-overlapping patch_size x patch_size convolution.
    y = tf.keras.layers.Conv2D(
        filters=hidden_size,
        kernel_size=patch_size,
        strides=patch_size,
        padding="valid",
        name="embedding",
    )(x)
    # Flatten the patch grid into a sequence of patch embeddings.
    y = tf.keras.layers.Reshape((y.shape[1] * y.shape[2], hidden_size))(y)
    y = layers.ClassToken(name="class_token")(y)
    y = layers.AddPositionEmbs(name="Transformer/posembed_input")(y)
    for n in range(num_layers):
        y, _ = layers.TransformerBlock(
            num_heads=num_heads,
            mlp_dim=mlp_dim,
            dropout=dropout,
            name=f"Transformer/encoderblock_{n}",
        )(y)
    y = tf.keras.layers.LayerNormalization(
        epsilon=1e-6, name="Transformer/encoder_norm"
    )(y)
    # The classifier reads only the class token (position 0).
    y = tf.keras.layers.Lambda(lambda v: v[:, 0], name="ExtractToken")(y)
    if representation_size is not None:
        y = tf.keras.layers.Dense(
            representation_size, name="pre_logits", activation="tanh"
        )(y)
    if include_top:
        y = tf.keras.layers.Dense(classes, name="head", activation=activation)(y)
    return tf.keras.models.Model(inputs=x, outputs=y, name=name)
def validate_pretrained_top(
    include_top: bool, pretrained: bool, classes: int, weights: str
):
    """Validate that the pretrained weight configuration makes sense.

    Returns the class count the pretrained head actually has, warning when
    the caller requested a different number of classes.
    """
    assert weights in WEIGHTS, f"Unexpected weights: {weights}."
    expected_classes = WEIGHTS[weights]
    if classes != expected_classes:
        warnings.warn(
            f"Can only use pretrained_top with {weights} if classes = {expected_classes}. Setting manually.",
            UserWarning,
        )
    assert include_top, "Can only use pretrained_top with include_top."
    assert pretrained, "Can only use pretrained_top with pretrained."
    return expected_classes
def load_pretrained(
    size: str,
    weights: str,
    pretrained_top: bool,
    model: tf.keras.models.Model,
    image_size: "ImageSizeArg",
    patch_size: int,
):
    """Download (with caching) and load weights for a known configuration."""
    image_size_tuple = interpret_image_size(image_size)
    fname = f"ViT-{size}_{weights}.npz"
    origin = f"{BASE_URL}/{fname}"
    local_filepath = tf.keras.utils.get_file(fname, origin, cache_subdir="weights")
    utils.load_weights_numpy(
        model=model,
        params_path=local_filepath,
        pretrained_top=pretrained_top,
        # Patch counts per axis: width-wise (x) then height-wise (y).
        num_x_patches=image_size_tuple[1] // patch_size,
        num_y_patches=image_size_tuple[0] // patch_size,
    )
def _make_vit(
    size,
    config,
    patch_size,
    representation_size,
    image_size,
    classes,
    activation,
    include_top,
    pretrained,
    pretrained_top,
    weights,
):
    """Shared implementation behind the four public vit_* factories.

    Args:
        size: pretrained-weight size tag, e.g. "B_16" (a member of SIZES).
        config: CONFIG_B or CONFIG_L hyper-parameter dict.
        patch_size: side length of the square patches.
        representation_size: pre-logits width used for "imagenet21k" weights.
        The remaining arguments mirror the public vit_* signatures.
    """
    if pretrained_top:
        classes = validate_pretrained_top(
            include_top=include_top,
            pretrained=pretrained,
            classes=classes,
            weights=weights,
        )
    model = build_model(
        **config,
        # "B_16" -> "vit-b16", "L_32" -> "vit-l32", matching the original names.
        name="vit-" + size.replace("_", "").lower(),
        patch_size=patch_size,
        image_size=image_size,
        classes=classes,
        activation=activation,
        include_top=include_top,
        representation_size=representation_size if weights == "imagenet21k" else None,
    )
    if pretrained:
        load_pretrained(
            size=size,
            weights=weights,
            model=model,
            pretrained_top=pretrained_top,
            image_size=image_size,
            patch_size=patch_size,
        )
    return model


def vit_b16(
    image_size: "ImageSizeArg" = (224, 224),
    classes=1000,
    activation="linear",
    include_top=True,
    pretrained=True,
    pretrained_top=True,
    weights="imagenet21k+imagenet2012",
):
    """Build ViT-B16. All arguments passed to build_model."""
    return _make_vit("B_16", CONFIG_B, 16, 768, image_size, classes, activation,
                     include_top, pretrained, pretrained_top, weights)


def vit_b32(
    image_size: "ImageSizeArg" = (224, 224),
    classes=1000,
    activation="linear",
    include_top=True,
    pretrained=True,
    pretrained_top=True,
    weights="imagenet21k+imagenet2012",
):
    """Build ViT-B32. All arguments passed to build_model."""
    return _make_vit("B_32", CONFIG_B, 32, 768, image_size, classes, activation,
                     include_top, pretrained, pretrained_top, weights)


def vit_l16(
    image_size: "ImageSizeArg" = (384, 384),
    classes=1000,
    activation="linear",
    include_top=True,
    pretrained=True,
    pretrained_top=True,
    weights="imagenet21k+imagenet2012",
):
    """Build ViT-L16. All arguments passed to build_model."""
    return _make_vit("L_16", CONFIG_L, 16, 1024, image_size, classes, activation,
                     include_top, pretrained, pretrained_top, weights)


def vit_l32(
    image_size: "ImageSizeArg" = (384, 384),
    classes=1000,
    activation="linear",
    include_top=True,
    pretrained=True,
    pretrained_top=True,
    weights="imagenet21k+imagenet2012",
):
    """Build ViT-L32. All arguments passed to build_model."""
    return _make_vit("L_32", CONFIG_L, 32, 1024, image_size, classes, activation,
                     include_top, pretrained, pretrained_top, weights)
| 29.864615 | 112 | 0.630847 |
734ac315ee766e2173f7d6dbb952216752b05199 | 644 | py | Python | src/genie/libs/parser/bigip/get_sys_pfman.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/bigip/get_sys_pfman.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/bigip/get_sys_pfman.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | # Global Imports
import json
from collections import defaultdict
# Metaparser
from genie.metaparser import MetaParser
# =============================================
# Collection for '/mgmt/tm/sys/pfman' resources
# =============================================
class SysPfmanSchema(MetaParser):
    """Empty metaparser schema for the /mgmt/tm/sys/pfman resource."""
    schema = {}
class SysPfman(SysPfmanSchema):
    """To F5 resource for /mgmt/tm/sys/pfman."""

    # REST endpoint queried by rest().
    cli_command = "/mgmt/tm/sys/pfman"

    def rest(self):
        """GET the endpoint on the connected device and return the parsed
        JSON body, or {} when the response body is empty."""
        response = self.device.get(self.cli_command)
        response_json = response.json()
        if not response_json:
            return {}
        return response_json
| 18.941176 | 52 | 0.569876 |
407494161bda5ae73dbcde722097c680946ef83d | 412 | py | Python | H/1665.py | jnoddell/LC | be17ab80e745169a6617788ba82ad2c5b250c0d5 | [
"Apache-2.0"
] | 2 | 2021-01-20T14:31:36.000Z | 2021-01-20T14:32:31.000Z | H/1665.py | jnoddell/LC | be17ab80e745169a6617788ba82ad2c5b250c0d5 | [
"Apache-2.0"
] | null | null | null | H/1665.py | jnoddell/LC | be17ab80e745169a6617788ba82ad2c5b250c0d5 | [
"Apache-2.0"
class Solution:
    def minimumEffort(self, tasks: List[List[int]]) -> int:
        """Return the minimum initial energy needed to finish all tasks.

        Each task is [actual, minimum]: it consumes `actual` energy and can
        only be started with at least `minimum` energy. Greedy: schedule the
        task with the largest (minimum - actual) surplus first, topping up
        the initial energy only when the current level is insufficient.
        """
        tasks.sort(key=lambda t: t[1] - t[0], reverse=True)
        energy, initial = 0, 0
        for actual, minimum in tasks:
            # Extra initial energy required to be allowed to start this task.
            topup = max(0, minimum - energy)
            initial += topup
            energy += topup - actual
        return initial
| 22.888889 | 63 | 0.419903 |
18818a5bb3f7b3d986d39c20d158667b65b3adc0 | 6,163 | py | Python | interfaces/cython/cantera/mixmaster/SpeciesFrame.py | MehulDokania/mixmaster | 4233b77de1cbe3fc658d0c01e2d74da166378603 | [
"BSD-3-Clause"
] | 1 | 2021-01-22T19:56:11.000Z | 2021-01-22T19:56:11.000Z | interfaces/cython/cantera/mixmaster/SpeciesFrame.py | MehulDokania/mixmaster | 4233b77de1cbe3fc658d0c01e2d74da166378603 | [
"BSD-3-Clause"
] | 8 | 2017-01-30T15:15:32.000Z | 2020-08-20T15:43:20.000Z | interfaces/cython/cantera/mixmaster/SpeciesFrame.py | MehulDokania/mixmaster | 4233b77de1cbe3fc658d0c01e2d74da166378603 | [
"BSD-3-Clause"
] | 16 | 2017-01-27T14:17:06.000Z | 2021-01-22T19:56:13.000Z | #
# function getElements displays a periodic table, and returns a list of
# the selected elements
#
# This file is part of Cantera. See License.txt in the top-level directory or
# at http://www.cantera.org/license.txt for license and copyright information.
import sys
# Import Tkinter under its Python 3 name when available, else the Python 2 name.
if sys.version_info[0] == 3:
    from tkinter import *
else:
    from Tkinter import *
from types import *
from cantera import *
class SpeciesFrame(Frame):
    """A grid of toggle buttons, one per species, from which the user picks
    a subset of species.

    The chosen species objects are stored in ``self.selected`` when the
    user presses OK.
    """

    def __init__(self, master, speciesList=None, selected=None):
        # Avoid mutable default arguments (the originals were shared lists).
        speciesList = [] if speciesList is None else speciesList
        selected = [] if selected is None else selected
        Frame.__init__(self, master)
        self.master = master

        # Map species name -> species object. NOTE: the original overwrote
        # these entries with Tk Frames below, which made show()/get()/clear()
        # fail (Frames have no .name); the Frames now live in self.frames.
        self.species = {}
        for sp in speciesList:
            self.species[sp.name] = sp

        # Right-hand control panel.
        self.control = Frame(self)
        self.control.config(relief=GROOVE, bd=4)
        Button(self.control, text='Display', command=self.show).pack(fill=X, pady=3, padx=10)
        Button(self.control, text='Clear', command=self.clear).pack(fill=X, pady=3, padx=10)
        Button(self.control, text=' OK ', command=self.get).pack(side=BOTTOM,
                                                                 fill=X, pady=3, padx=10)
        Button(self.control, text='Cancel', command=self.master.quit).pack(side=BOTTOM,
                                                                           fill=X, pady=3, padx=10)

        self.entries = Frame(self)
        self.entries.pack(side=LEFT)
        self.control.pack(side=RIGHT, fill=Y)

        self.c = {}        # species name -> toggle Button
        self.frames = {}   # species name -> Frame holding the button
        self.selected = selected

        ncol = 8
        rw = 1
        col = 0
        # Sort by name: dict_values has no .sort() in Python 3.
        for sp in sorted(self.species.values(), key=lambda s: s.name):
            el = sp.name
            cell = Frame(self.entries)
            cell.config(relief=GROOVE, bd=4, bg=self.color(el))
            self.frames[el] = cell
            self.c[el] = Button(cell, text=el, bg=self.color(el), width=6, relief=FLAT)
            self.c[el].pack()
            self.c[el].bind("<Button-1>", self.setColors)
            cell.grid(row=rw, column=col, sticky=W + N + E + S)
            col += 1
            if col > ncol:
                rw += 1
                col = 0
        Label(self.entries,
              text='select the species to be included, and then press OK.\n'
                   'To view the properties of the selected species, press Display '
              ).grid(row=0, column=2, columnspan=10, sticky=W)

    def select(self, el):
        """Mark the button for species name *el* as selected."""
        self.c[el]['relief'] = RAISED
        self.c[el]['bg'] = self.color(el, sel=1)

    def deselect(self, el):
        """Mark the button for species name *el* as deselected."""
        self.c[el]['relief'] = FLAT
        self.c[el]['bg'] = self.color(el, sel=0)

    def selectSpecies(self, splist):
        """Select every species in *splist* (objects with a .name attribute)."""
        for sp in splist:
            self.select(sp.name)

    def setColors(self, event):
        """Click handler: toggle a species button between selected/deselected."""
        el = event.widget['text']
        if event.widget['relief'] == RAISED:
            event.widget['relief'] = FLAT
            back = self.color(el, sel=0)
            fore = '#ffffff'
        elif event.widget['relief'] == FLAT:
            event.widget['relief'] = RAISED
            fore = '#000000'
            back = self.color(el, sel=1)
        else:
            return  # unknown relief state; leave the widget untouched
        event.widget['bg'] = back
        event.widget['fg'] = fore

    def color(self, el, sel=0):
        """Return the button background color for species *el*.

        Currently a single color per state; *el* is kept for future
        per-category coloring (see the commented-out column logic in the
        original source).
        """
        _normal = ['#88dddd', '#005500', '#dd8888']
        _selected = ['#aaffff', '#88dd88', '#ffaaaa']
        palette = _selected if sel else _normal
        return palette[1]

    def show(self):
        """Return the currently selected species (viewer hook, display TBD)."""
        selected = [sp for name, sp in self.species.items()
                    if self.c[name]['relief'] == RAISED]
        # showElementProperties(selected)  # viewer currently disabled
        return selected

    def get(self):
        """Record the selected species in self.selected and close the window."""
        self.selected = [sp for name, sp in self.species.items()
                         if self.c[name]['relief'] == RAISED]
        self.master.destroy()

    def clear(self):
        """Deselect every species button."""
        for name in self.species:
            self.c[name]['bg'] = self.color(name, sel=0)
            self.c[name]['relief'] = FLAT
## class ElementPropertyFrame(Frame):
## def __init__(self,master,ellist):
## Frame.__init__(self,master)
## n = 1
## ellist.sort()
## Label(self,text='Name').grid(column=0,row=0,sticky=W+S,padx=10,pady=10)
## Label(self,text='Atomic \nNumber').grid(column=1,row=0,sticky=W+S,padx=10,pady=10)
## Label(self,
## text='Atomic \nWeight').grid(column=2,
## row=0,
## sticky=W+S,
## padx=10,
## pady=10)
## for el in ellist:
## Label(self,
## text=el.name).grid(column=0,
## row=n,
## sticky=W,
## padx=10)
## Label(self,
## text=`el.atomicNumber`).grid(column=1,
## row=n,
## sticky=W,
## padx=10)
## Label(self,
## text=`el.atomicWeight`).grid(column=2,
## row=n,
## sticky=W,
## padx=10)
## n += 1
# utility functions
def getSpecies(splist=None, selected=None):
    """Pop up a modal species-selection window and return the chosen species.

    splist: species objects used to populate the frame (all pre-selected).
    selected: initial selection, returned unchanged if the user cancels.
    """
    # None-sentinels instead of mutable default arguments.
    splist = [] if splist is None else splist
    selected = [] if selected is None else selected
    master = Toplevel()
    master.title('Species')
    t = SpeciesFrame(master, splist, selected)
    if splist:
        t.selectSpecies(splist)
    t.pack()
    t.focus_set()
    t.grab_set()
    t.wait_window()  # block until the window is closed (OK/Cancel)
    try:
        master.destroy()
    except TclError:
        pass  # window already destroyed by the OK handler
    return t.selected
# display table of selected element properties in a window
def showElementProperties(ellist):
    """Display a table of properties for the given elements in a new window.

    NOTE(review): ElementPropertyFrame is commented out above, so calling
    this currently raises NameError -- restore that class before use.
    """
    m = Tk()
    m.title('Element Properties')
    ElementPropertyFrame(m, ellist).pack()
if __name__ == "__main__":
    print(getSpecies())
| 34.049724 | 199 | 0.495213 |
81074e2ae22536e10ced091f082467dacad56004 | 88 | py | Python | requests_toolbar/__init__.py | yigor/django-debug-toolbar-requests | 46146609e9b63eb80ce0bd5952b380103dc6252d | [
"BSD-3-Clause"
] | null | null | null | requests_toolbar/__init__.py | yigor/django-debug-toolbar-requests | 46146609e9b63eb80ce0bd5952b380103dc6252d | [
"BSD-3-Clause"
] | null | null | null | requests_toolbar/__init__.py | yigor/django-debug-toolbar-requests | 46146609e9b63eb80ce0bd5952b380103dc6252d | [
"BSD-3-Clause"
] | null | null | null | VERSION = (1, 1, 0)
# Tuple form and dotted-string form of the package version.
__version__ = VERSION
__versionstr__ = '.'.join(str(part) for part in VERSION)
| 17.6 | 44 | 0.670455 |
faa432d7a515843d4471f9b174253c2e78c36c76 | 269 | py | Python | flask_discord/__init__.py | HyperGH/Flask-Discord | aa10eb0eaf1a7dfc9af6175f79e040309739b32c | [
"MIT"
] | 82 | 2019-05-08T17:01:36.000Z | 2021-01-27T02:17:48.000Z | flask_discord/__init__.py | HyperGH/Flask-Discord | aa10eb0eaf1a7dfc9af6175f79e040309739b32c | [
"MIT"
] | 28 | 2019-12-08T22:21:16.000Z | 2021-01-29T17:17:18.000Z | flask_discord/__init__.py | HyperGH/Flask-Discord | aa10eb0eaf1a7dfc9af6175f79e040309739b32c | [
"MIT"
] | 28 | 2019-07-08T13:36:37.000Z | 2020-12-08T11:21:48.000Z | from .exceptions import *
from .utils import *
from .client import DiscordOAuth2Session
# Names re-exported by "from flask_discord import *".
__all__ = [
"DiscordOAuth2Session",
"requires_authorization",
"HttpException",
"RateLimited",
"Unauthorized",
"AccessDenied",
]
# Package release version.
__version__ = "0.1.64"
| 14.157895 | 40 | 0.680297 |
760ff274ed499d7b7d4f774af0fcbbefa46b5280 | 27,123 | py | Python | test/test_cells.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | null | null | null | test/test_cells.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | 1 | 2015-05-13T08:28:52.000Z | 2015-05-13T08:28:52.000Z | test/test_cells.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
if len(sys.argv) > 1:
sys.path.insert(0, sys.argv.pop(1))
import unittest
import viennagrid.wrapper
##################
# LINEAR DOMAINS #
##################
class TestLinearCartesian1D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian1D(1),
viennagrid.wrapper.PointCartesian1D(2),
viennagrid.wrapper.PointCartesian1D(3),
viennagrid.wrapper.PointCartesian1D(4),
viennagrid.wrapper.PointCartesian1D(5),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian1D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearCartesian1D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian1D))
class TestLinearCartesian2D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearCartesian2D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian2D))
class TestLinearCartesian3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearCartesian3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian3D))
class TestLinearCylindrical3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearCylindrical3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCylindrical3D))
class TestLinearPolar2D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointPolar2D(1, 2),
viennagrid.wrapper.PointPolar2D(2, 3),
viennagrid.wrapper.PointPolar2D(3, 4),
viennagrid.wrapper.PointPolar2D(4, 5),
viennagrid.wrapper.PointPolar2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearPolar2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearPolar2D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointPolar2D))
class TestLinearSpherical3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.LinearSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.LinearSpherical3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointSpherical3D))
######################
# TRIANGULAR DOMAINS #
######################
class TestTriangularCartesian2D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.TriangularCartesian2D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian2D))
class TestTriangularCartesian3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian3D(1, 2, 7),
viennagrid.wrapper.PointCartesian3D(2, 3, 7),
viennagrid.wrapper.PointCartesian3D(3, 4, 7),
viennagrid.wrapper.PointCartesian3D(4, 5, 7),
viennagrid.wrapper.PointCartesian3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCartesian3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.TriangularCartesian3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian3D))
class TestTriangularCylindrical3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularCylindrical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.TriangularCylindrical3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCylindrical3D))
class TestTriangularPolar2D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointPolar2D(1, 2),
viennagrid.wrapper.PointPolar2D(2, 3),
viennagrid.wrapper.PointPolar2D(3, 4),
viennagrid.wrapper.PointPolar2D(4, 5),
viennagrid.wrapper.PointPolar2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularPolar2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.TriangularPolar2D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointPolar2D))
class TestTriangularSpherical3D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointSpherical3D(1, 2, 7),
viennagrid.wrapper.PointSpherical3D(2, 3, 7),
viennagrid.wrapper.PointSpherical3D(3, 4, 7),
viennagrid.wrapper.PointSpherical3D(4, 5, 7),
viennagrid.wrapper.PointSpherical3D(5, 6, 7),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.TriangularSpherical3D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.TriangularSpherical3D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointSpherical3D))
#########################
# QUADRILATERAL DOMAINS #
#########################
class TestQuadrilateralCartesian2D_Cell(unittest.TestCase):
def setUp(self):
# Create domain and add vertices
self.vertices = [
viennagrid.wrapper.PointCartesian2D(1, 2),
viennagrid.wrapper.PointCartesian2D(2, 3),
viennagrid.wrapper.PointCartesian2D(3, 4),
viennagrid.wrapper.PointCartesian2D(4, 5),
viennagrid.wrapper.PointCartesian2D(5, 6),
]
self.num_vertices = len(self.vertices)
self.domain = viennagrid.wrapper.QuadrilateralCartesian2D_Domain()
for point in self.vertices:
self.domain.make_vertex(point)
# Create a segmentation on the domain
self.segmentation = viennagrid.wrapper.QuadrilateralCartesian2D_Segmentation(self.domain)
# Create a new segment within the segmentation
self.segment = self.segmentation.make_segment()
# Create a cell in the segment
cell_vertices = [
self.domain.get_vertex(0),
self.domain.get_vertex(1),
self.domain.get_vertex(2),
self.domain.get_vertex(3),
]
self.num_vertices_per_cell = len(cell_vertices)
self.cell = self.segment.make_cell(*cell_vertices)
def test_vertices(self):
"""Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
self.assertEqual(len(self.cell.vertices), self.num_vertices_per_cell)
self.assertEqual(self.cell.num_vertices, self.num_vertices_per_cell)
self.assertTrue(isinstance(self.cell.vertices[0].to_point(), viennagrid.wrapper.PointCartesian2D))
class TestQuadrilateralCartesian3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a cartesian 3D quadrilateral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointCartesian3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.QuadrilateralCartesian3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointCartesian3D)
class TestQuadrilateralCylindrical3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a cylindrical 3D quadrilateral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointCylindrical3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.QuadrilateralCylindrical3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointCylindrical3D)
class TestQuadrilateralPolar2D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a polar 2D quadrilateral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointPolar2D
        # Five domain vertices: (1, 2), (2, 3), ..., (5, 6).
        self.vertices = [make_point(i, i + 1) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralPolar2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.QuadrilateralPolar2D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointPolar2D)
class TestQuadrilateralSpherical3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a spherical 3D quadrilateral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointSpherical3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.QuadrilateralSpherical3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointSpherical3D)
#######################
# TETRAHEDRAL DOMAINS #
#######################
class TestTetrahedralCartesian3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a cartesian 3D tetrahedral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointCartesian3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.TetrahedralCartesian3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointCartesian3D)
class TestTetrahedralCylindrical3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a cylindrical 3D tetrahedral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointCylindrical3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.TetrahedralCylindrical3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointCylindrical3D)
class TestTetrahedralSpherical3D_Cell(unittest.TestCase):
    def setUp(self):
        """Build a spherical 3D tetrahedral domain with one segment and one cell."""
        make_point = viennagrid.wrapper.PointSpherical3D
        # Five domain vertices: (1, 2, 7), (2, 3, 7), ..., (5, 6, 7).
        self.vertices = [make_point(i, i + 1, 7) for i in range(1, 6)]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        # A segmentation with a single segment that will own the cell.
        self.segmentation = viennagrid.wrapper.TetrahedralSpherical3D_Segmentation(self.domain)
        self.segment = self.segmentation.make_segment()
        # The cell is built from the first four domain vertices.
        cell_vertices = [self.domain.get_vertex(i) for i in range(4)]
        self.num_vertices_per_cell = len(cell_vertices)
        self.cell = self.segment.make_cell(*cell_vertices)

    def test_vertices(self):
        """Test attributes 'vertices' and 'num_vertices' of cell, and method 'to_point' of vertices."""
        expected = self.num_vertices_per_cell
        self.assertEqual(self.cell.num_vertices, expected)
        self.assertEqual(len(self.cell.vertices), expected)
        self.assertIsInstance(self.cell.vertices[0].to_point(),
                              viennagrid.wrapper.PointSpherical3D)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 37.881285 | 102 | 0.755964 |
62abae3e39f2bd521b5910d047a5426877fd3ae6 | 39,851 | py | Python | GPS/args.py | MarieNyst/GPS | 92d90bf8bd4e461c2063fb61048ff2d764470c17 | [
"BSD-3-Clause"
] | null | null | null | GPS/args.py | MarieNyst/GPS | 92d90bf8bd4e461c2063fb61048ff2d764470c17 | [
"BSD-3-Clause"
] | null | null | null | GPS/args.py | MarieNyst/GPS | 92d90bf8bd4e461c2063fb61048ff2d764470c17 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import argparse
import os
import helper
class ArgumentParser:
"""ArgumentParser
The ArgumentParser can parse command line arguements, scenario file
arguments and global default arguments to initialize the parameters of GPS.
"""
    def __init__(self):
        """Define GPS's argument groups and default-configuration file paths.

        Each *_arguments/*_parameters attribute maps a tuple of command-line
        aliases to an argparse specification dict with a 'help' string and a
        'type' callable that both converts and validates the raw value.
        Also records group ordering/help text and the locations of the two
        files holding hard-coded and user-specified default values.
        """
        # General options that set up a GPS run (files, directories, logging).
        self.setup_arguments = {
            ('--scenario-file',): {
                'help': 'The scenario file (and location) that defines what settings are used for GPS.',
                'type': str},
            ('--experiment-dir','-e'): {
                'help': 'The root directory from which experiments will be run. By default, this is the '
                        'current working directory. GPS will change to this directory prior to running, '
                        'this means that if relative paths are specified for any other files or directories '
                        'then they must be given relative to your experiment directory.',
                'type': _validate(str, 'The experiment directory must be a valid directory', lambda x: helper.isDir(x))},
            ('--output-dir', '--output-directory', '--out-dir', '--log-location'): {
                'help': 'The directory where output will be stored. The actual directory for a particular '
                        'GPS run with ID gps_id will be stored in {experiment-dir}/{output-dir}/gps-run-{gps_id}',
                'type': str},
            ('--temp-dir', '--temp', '--temporary-directory'): {
                'help': 'The directory for GPS to use to write temporary files to. By default, GPS will '
                        'write all temporary files to the current working directory (i.e., the experiment-dir. '
                        'GPS will also clean up all such temporary files when it is done with them, unless GPS '
                        'crashes unexpectedly. GPS will create a single temporary file for every target '
                        'algorithm run, which means that it will create and delete and large number of these '
                        'files. It is therefore strongly recommended to use a directory with a fast filesystem '
                        'that is not automatically backed up. In some cases, GPS and other algorithm '
                        'configurators with similar behaviour have been known to unneccesarily stress file '
                        'systems with automatic back-ups due to the volume of temporary files created and '
                        'deleted. If this happens, the quality of the configurations found with GPS (when '
                        'using a wall clock budget) may suffer substantially, as well as any other person or '
                        'system that interacts with the filesystem.',
                'type': str},
            ('--verbose', '--verbosity', '--log-level', '-v'): {
                'help': 'Controls the verbosity of GPS\'s output. Set of 0 for warnings only. Set to '
                        '1 for more informative messages. And set to 2 for debug-level messages. The '
                        'default is 1.',
                'type': _validate(int, 'The verbosity must be in [0, 1, 2]', lambda x: 0 <= x <= 2)}
        }
        # Connection settings for the redis server used for master/worker IPC.
        self.redis_arguments = {
            ('--redis-host', '--host'): {
                'help': 'The redis database host name.',
                'type': str},
            ('--redis-port', '--port'): {
                'help': 'The redis database port number.',
                'type': int},
            ('--redis-dbid', '--dbid'): {
                'help': 'The redis database ID number to be used by this instance of GPS. All workers '
                        'of this GPS instance must be given this ID. Each concurrent GPS instance must have a '
                        'unique database ID.',
                'type': int},
        }
        # Scenario-specific definitions: target algorithm, instances and budgets.
        self.scenario_arguments = {
            ('--pcs-file', '--param-file', '--p'): {
                'help': 'The file that contains the algorithm parameter configuration space in PCS format. '
                        'GPS supports a subset of the syntax used for SMAC and ParamILS.',
                'type': str},
            ('--instance-file', '--instances', '-i'): {
                'help': 'The file (and location) containing the names of the instances to '
                        'be used to evaluate the target algorithm\'s configurations.',
                'type': str},
            ('--algo', '--algo-exec', '--algorithm', '--wrapper'): {
                'help': 'The command line string used to execute the target algorithm.',
                'type': str},
            ('--algo-cutoff-time', '--target-run-cputime-limit', '--cutoff-time', '--cutoff'): {
                'help': 'The CPU time limit for an individual target algorithm run, in seconds. If adaptive '
                        'capping is used, GPS may sometimes use smaller cutoff times as well.',
                'type': _validate(float, 'The cutoff time must be a real, positive number', lambda x: float(x) > 0)},
            ('--runcount-limit', '--total-num-runs-limit', '--num-runs-limit', '--number-of-runs-limit'): {
                'help': 'Limits the total number of target algorithm runs performed by GPS. Either this, '
                        'the wallclock or CPU time limit must be less than the maximum integer value. The default is the '
                        'maximum integer value.',
                'type': _validate(int, 'The run count limit must be a positive integer', lambda x: int(x) > 0)},
            ('--wallclock-limit', '--runtime-limit'): {
                'help': 'Limits the total wall-clock time used by GPS, in seconds. Either this, the runcount or the CPU '
                        'time limit must be less than the maximum integer value. The default is the maximum integer '
                        'value.',
                'type': _validate(float, 'The wall-clock time must be a positive, real number', lambda x: float(x) > 0)},
            ('--cputime-limit', '--tunertime-limit', '--tuner-timeout'): {
                'help': 'Limits the total CPU time used by the target algorithm, in seconds. Either this, the runcount '
                        'or the wallclock limit must be less than the maximum integer value. The default is the maximum integer '
                        'value. NOTE: Unlike SMAC, this does not include the CPU time spent by GPS -- this only adds the '
                        'running times reported by your target algorithm wrapper and terminates GPS once they have exceeded '
                        'this limit.',
                'type': _validate(float, 'The CPU time limit must be a positive, real number', lambda x: float(x) > 0)},
            ('--seed',): {
                'help': 'The random seed used by GPS. If -1, a random value will be used. Note that because '
                        'GPS is an asychronous parallel algorithm, it is not deterministic even when the seed '
                        'is set to the same value, as this does not control for random background environmental '
                        'noise that can affect the running times and order in which GPS receives target '
                        'algorithm run updates.',
                'type': _validate(int, 'The seed must be a positive integer or -1', lambda x: int(x) >= -1)}
        }
        # Parameters of GPS itself (intensification, statistics, parallelism).
        self.gps_parameters = {
            ('--minimum-runs', '--min-runs', '--minimum-run-equivalents', '--min-run-equivalents',
             '--minimum-instances', '--min-instances'): {
                'help': 'The minimum number of run equivalents on which a configuration must be run '
                        'before it can be accepted as a new incumbent. This is also the minimum number of '
                        'run equivalents required before two configurations will be compared to each other '
                        'using the permutation test. Configurations whose intersection of run equivalents '
                        'is less than this number will be considered equal. Consequentially, brackets cannot '
                        'be updated until at least this many runs have been performed for each configuration. '
                        'Setting this number too large will delay or completely stop GPS from making any '
                        'progress. However, setting it too small will allow GPS to make mistakes about the '
                        'relative performance of two configurations with high probability. Ultimately the '
                        'distribution of running times for your algorithm will impact what should be considered '
                        'a good setting for you. If you can only afford to perform a single run of GPS, it is '
                        'safest to set this parameter on the higher side: perhaps 10-25 (provided you can afford '
                        'at least thousands of target algorithm runs). Otherwise, 5-10 may be reasonable. '
                        'Should be at least 5. The default is 5.',
                'type': _validate(int, 'The minimum run (equivalents) must be an integer greater than or equal to 5',
                                  lambda x: int(x) >=5)},
            ('--alpha', '--significance-level'): {
                'help': 'The significance level used in the permutation test to determine whether or not one '
                        'configuration is better than another. Multiple test correction is not applied, so '
                        'this is better viewed as a statistically-grounded heuristic than a true significance '
                        'level. Setting this value too small will slow GPS\'s progress. Setting this value too '
                        'high may allow GPS to make mistakes, which could potentially substantially adversely '
                        'affect the final solution quality of the configurations found; however, it will allow '
                        'GPS to move through the search space more quickly. If you can only afford to perform a '
                        'single run of GPS, it is safest to set this parameter on the lower side: perhaps 0.01-0.05. '
                        'Otherwise, you can experiment with larger values (say 0.1-0.25), which will increase the '
                        'variance in the output of GPS. This parameter should be in (0,0.25). The default is 0.05.',
                # NOTE(review): the error message says [0, 0.25) but the check
                # enforces (0, 0.25] -- confirm which range is intended.
                'type': _validate(float, 'The significance level must be a real number in [0, 0.25)', lambda x: 0 < float(x) <= 0.25)},
            ('--decay-rate',): {
                'help': 'The decay rate used in GPS\'s decaying memory heuristic. Larger values mean information '
                        'will be forgotten slowly, small values mean information will be forgotten quickly. '
                        'Set this value to 0 if you believe that all of your algorithm\'s parameters interact '
                        'strongly. Should be in [0, 0.5]. The default is 0.2',
                'type': _validate(float, 'The decay rate must be a real number in [0, 0.5]', lambda x: 0 <= float(x) <= 0.5)},
            ('--bound-multiplier', '--bound-mult'): {
                'help': 'The bound multiple used for adaptive capping. Should be \'adaptive\', False or a positive, '
                        'real number. We strongly recommend always setting it to \'adaptive\'. Using a value of '
                        '2 as is often done in other configurators is known to be overly aggressive, and will '
                        'frequently result in high-quality configurations that are incorrectly rejected. This '
                        'will cause GPS to eliminate large swaths of the configuration space, possibly eliminating '
                        'all high-quality configurations. If you believe that the running time distribution of your '
                        'algorithm has substantially heavier tails than an exponential distribution, then you could '
                        'set this to a large positive integer, e.g., 200. However, with a value so large you might '
                        'as well disable adaptive capping by setting it to False. The default is \'adaptive\'.',
                # Custom validator: accepts 'adaptive', False or a positive number.
                'type': _validate_bound_multiplier},
            ('--instance-increment', '--instance-incr',): {
                'help': 'The instance increment controls the number of instances that are queued at one time. '
                        'By increasing this value GPS will effectively operate on batches of instance_increment '
                        'instances at one time for its intensification and queuing mechanisms. This can help to '
                        'make better use of large amounts of parallel resources if the target algorithm runs can '
                        'be performed very quickly and/or there are few parameters to be optimized. The instance '
                        'increment must be a positive Fibonacci number. GPS will also dynamically update the '
                        'value for the instance increment if it observes that there are too few tasks in the '
                        'queue to keep the workers busy, or if there are too many tasks in the queue for the '
                        'workers to keep up. The default is 1.',
                # NOTE(review): only positivity is checked here; the Fibonacci
                # requirement stated in the help text is presumably enforced
                # elsewhere -- confirm.
                'type': _validate(int, 'The instance increment must be a positive fibonnaci number', lambda x: int(x) > 0)},
            ('--sleep-time',): {
                'help': 'When the master or worker processes are blocked waiting for new results/tasks to be '
                        'pushed to the database, they will sleep for this amount of time, measured in CPU seconds.'
                        'The default is 0.',
                'type': _validate(float, 'The sleep time must be a positive, real number', lambda x: float(x) >= 0)},
            ('--minimum-workers', '--min-workers'): {
                'help': 'GPS must use at least two processes to run: the master process, which loops through '
                        'each parameter checking for updates and queuing runs; and at least one worker process, '
                        'which perform target algorithm runs. By default, GPS\'s master process will setup the '
                        'scenario files and then wait until it has received a notification that at least one '
                        'worker is ready to begin. GPS does not count any time while waiting towards its total '
                        'configuration budget. This parameter controls the minimum number of workers that need '
                        'to be ready in order for GPS\'s master process to start. Note that it does not place '
                        'any restriction on the maximum number of workers. If you set this value to 1, you can '
                        'still point an unlimitted number of workers to the same GPS ID and they will run. '
                        'This parameter is only used when starting GPS. If some or all of the workers crash '
                        'unexpectedly, the master process will continue running until it has exhausted its '
                        'configuration budget (which may be never if the configuration budget is based on the '
                        'maximum number of target algorithm runs). This must be a non-negative integer. The '
                        'default is 1.',
                'type': _validate(int, 'The minimum workers must be a non-negative integer',
                                  lambda x: int(x) >= 0)},
            ('--share-instance-order',): {
                'help': 'GPS randomizes the order in which the configurations are evaluated on instances. Each '
                        'parameter search process can either share an instance ordering or not. In the original '
                        'version of GPS the instance ordering was shared, but we suspect it will slightly '
                        'improve the performance to do otherwise, so the default is False.',
                'type': _validate(_to_bool, "Share instance order must be 'True' or 'False'")},
            ('--post-process-incumbent',): {
                'help': 'GPS can make some mistakes. Most often, these will simply cause GPS to avoid high-'
                        'quality regions of the configuration space. However, in the presence of parameter '
                        'interactions some mistakes can cause GPS to return worse incumbents when given a '
                        'larger budget. This is because GPS can update the incumbent to a configuration which '
                        'has never been evaluated before. Given enough time, GPS should typically be able to '
                        'recover from these situations. However, if the configuration run is terminated shortly '
                        'after such an update, GPS may return a poor quality incumbent configuration. By '
                        'enabling this feature, GPS will automatically post-process all of the recorded '
                        'target algorithm runs and select the configuration which exhibits the best performance '
                        'on the largest number of instances. This post processing is an experimental method for '
                        'post-processing the output from one or more GPS runs to help protect against these '
                        'kinds of mistakes made by GPS. However, preliminary results testing this method '
                        'currently indicates that it typically decreases the performance of the incumbents '
                        'returned by GPS. Should be \'True\' or \'False\'. The default is \'False\'.',
                'type': _validate(_to_bool, "The post-process-incumbent parameter must be 'True' or 'False'")},
        }
        # Parameters of the optional incumbent post-processing procedure.
        self.postprocess_parameters = {
            ('--post-process-min-runs', '--post-process-min-instances'): {
                'help': 'The minimum number of unique instances on which the intersection of the incumbent and '
                        'a challenger must have been evaluated in order for a challenger to be considered in '
                        'GPS\'s optional post-processing, incumbent-selection phase.',
                'type': _validate(int, 'The post-process-min-runs parameter must be a positive integer greater '
                                       'than 4', lambda x: int(x) >= 5)},
            ('--post-process-alpha', '--post-process-significance-level'): {
                'help': 'The significance level used in the permutation tests performed during GPS\'s optional '
                        'incumbent post-processing procedure. Unlike the alpha parameter used by GPS\'s main '
                        'procedure, multiple test correction is enabled by default, so this can be viewed as '
                        'the actual significance level of the statistical tests performed, rather than as a '
                        'heuristic. As a result, it is not unreasonable to set the main alpha parameter to a '
                        'larger value than this one -- especially if multiple independent runs of GPS are '
                        'performed. Should be in (0, 0.25]. The default is 0.05. ',
                'type': _validate(float, 'The post-process-alpha parameter must be a float in (0, 0.25]',
                                  lambda x: 0 < float(x) <= 0.25)},
            ('--post-process-n-permutations', '--post-process-number-of-permutations'): {
                'help': 'The number of permutations performed by the permutation test during GPS\'s '
                        'optional incumbent post-processing procedure. Recommended to be at least 10000 to '
                        'obtain stable permutation test results. Set it higher if you are using a smaller '
                        'significance level or are performing the procedure on many combined, independent '
                        'GPS runs, as the significance level will be smaller in such cases in order to '
                        'perform multiple test correction. Must be a positive integer greater than 1000. '
                        'The default is 10000.',
                'type': _validate(int, 'The post-process number of permutations parameter must be a positive '
                                       'integer greater than 1000.', lambda x: int(x) > 1000)},
            ('--post-process-multiple-test-correction', ): {
                'help': 'Determines whether or not multiple test correction is used during GPS\'s optional '
                        'incumbent post-processing procedure. Must be \'True\' or \'False\'. The default is '
                        '\'True\'.',
                'type': _validate(_to_bool, "The post-process multiple test correction parameter must be "
                                            "'True' or 'False'")},
        }
        # Display order of the argument groups and the group->spec mapping used
        # by the command-line and scenario-file parsers.
        self.groups_in_order = ['Setup Arguments', 'Redis Arguments', 'Scenario Arguments', 'GPS Parameters',
                                'Post-Process Parameters']
        self.argument_groups = {'Setup Arguments': self.setup_arguments,
                                'Redis Arguments': self.redis_arguments,
                                'Scenario Arguments': self.scenario_arguments,
                                'GPS Parameters': self.gps_parameters,
                                'Post-Process Parameters': self.postprocess_parameters}
        # Descriptive text shown for each argument group.
        self.group_help = {'Setup Arguments': 'These are general GPS arguments that are used to set up '
                                              'the GPS run.',
                           'Redis Arguments': 'These arguments are required to configure GPS so that it '
                                              'connect to your redis server installation, which it uses '
                                              'to communicate between master and worker processes.',
                           'Scenario Arguments': 'These arguments define the scenario-specific '
                                                 'information.',
                           'GPS Parameters': 'These are the parameters of GPS itself. You can use these '
                                             'to modify GPS to best suit your scenario, if desired. '
                                             'Unless you know what you are doing, '
                                             'we recommend not to change these parameters from their '
                                             'defaults, as they have been chosen through careful '
                                             'experimentation. However, we did this manually, so if '
                                             'you have a large enough budget, you could always apply '
                                             'GPS to configure itself, which would no doubt improve '
                                             'the performance of GPS ;) . If you do this, please get in '
                                             'touch! We would love to validate your GPS configuration '
                                             'and include it as the new default settings. ',
                           'Post-Process Parameters': 'GPS comes with a currently-undocumented post-'
                                                      'processing procedure that can be used to post-'
                                                      'process the output from one or more runs of GPS '
                                                      'in order to extract the best configuration that '
                                                      'has been evaluated on the largest number of '
                                                      'instances. These are the parameters that control '
                                                      'the behaviour of this procedure. If you '
                                                      'perform multiple independent runs of GPS, but can '
                                                      'not afford the time required to validate all of '
                                                      'final incumbents, you may find this feature '
                                                      'helpful. However, preliminary data suggests that '
                                                      'using this procedure to post-process the output of '
                                                      'a single GPS run harms the quality of the final '
                                                      'configurations. Further study of this method is '
                                                      'still required.'}
        # Location of the GPS source code directory
        gps_directory = os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe())))
        # File with hard-coded default values for all (optional) GPS parameters
        self.defaults = '{}/.gps_defaults.txt'.format(gps_directory)
        # File with user-specified default values for redis database
        self.redis_defaults = '{}/../redis_configuration.txt'.format(gps_directory)
def parse_worker_command_line_arguments(self):
"""parse_worker_command_line_arguments
Parses the command line arguments for a GPS worker.
Returns
-------
arguments: dict
A dictionary containing the parsed arguments.
"""
parser = argparse.ArgumentParser()
for arg in self.redis_arguments:
parser.add_argument(*_get_aliases(arg), dest=_get_name(arg), **self.redis_arguments[arg])
# Parse the command line arguments and convert to a dictionary
args = vars(parser.parse_args())
keys = list(args.keys())
# Remove everything that is None so that we know to replace those values with scenario file arguments
# instead.
for arg in keys:
if args[arg] is None:
del args[arg]
if helper.isFile(self.redis_defaults):
args, _ = self.parse_file_arguments(self.redis_defaults, args)
self._validate_redis_arguments_defined(args)
return args
def parse_command_line_arguments(self):
"""parse_command_line_arguments
Parses the command line arguments for GPS.
Returns
-------
arguments: dict
A dictionary containing the parsed arguments.
"""
parser = argparse.ArgumentParser()
for group_name in self.argument_groups:
group = parser.add_argument_group(group_name)
for arg in self.argument_groups[group_name]:
group.add_argument(*_get_aliases(arg), dest=_get_name(arg), **self.argument_groups[group_name][arg])
# Parse the command line arguments and convert to a dictionary
args = vars(parser.parse_args())
keys = list(args.keys())
# Remove everything that is None so that we know to replace those values with scenario file arguments
# instead.
for arg in keys:
if args[arg] is None:
del args[arg]
return args
def parse_file_arguments(self, scenario_file, override_arguments={}):
"""parse_file_arguments
Reads in the scenario file arguments, over-writes any of them with their
override counterparts (for example, defined on the command line), if
applicable, and then saves them.
"""
parsed_arguments = {}
skipped_lines = []
with open(scenario_file) as f_in:
for line in f_in:
# Remove any comments
line = line.split('#')[0]
# Strip whitespace
line = line.strip()
# Skip empty lines
if len(line) == 0:
continue
key = line.split('=')[0].strip()
value = '='.join(line.split('=')[1:]).strip()
found = False
# Check for a match in any of the argument types
for group in self.argument_groups:
for argument in self.argument_groups[group]:
if '--{}'.format(key) in _get_aliases(argument) or '-{}'.format(key) in argument:
# We found a match, store it under the argument's proper name, convert the
# value to it's proper type and raise an exception if it is invalid.
parsed_arguments[_get_name(argument)] \
= self.argument_groups[group][argument]['type'](value)
found = True
continue
if found:
continue
if not found:
skipped_lines.append(line)
# Overwrite any argument definitions, as needed
for argument in override_arguments:
parsed_arguments[argument] = override_arguments[argument]
return parsed_arguments, skipped_lines
    def parse_arguments(self):
        """parse_arguments
        Parse the command line arguments, then, if provided, parse the
        arguments in the scenario file. Then adds default values for
        paramaters without definitions. Finally, validates all argument
        definitions, checks that needed files and directories exist, and then
        checks to make sure that all required arguements received definitions.

        Precedence (highest first): command line > scenario file >
        redis defaults file > GPS defaults file.

        Returns
        -------
        arguments : dict
            A dictionary mapping all GPS arguments to definitions.
        skipped_lines : list of str
            A list of all non-comment lines in the scenario file that were
            skipped.
        """
        skipped_lines = []
        # First parse the command line arguments
        arguments = self.parse_command_line_arguments()
        # If a scenario file was provided, parse the arguments from it
        if 'scenario_file' in arguments:
            # If an experiment directory is specified, we will change to that directory
            experiment_dir = arguments['experiment_dir'] if 'experiment_dir' in arguments else '.'
            with helper.cd(experiment_dir):
                try:
                    # Command-line arguments are passed as overrides so they win.
                    arguments, skipped_lines = self.parse_file_arguments(arguments['scenario_file'], arguments)
                except IOError:
                    raise IOError("The scenario file '{}' could not be found from within GPS's "
                                  "current working directory '{}' (which is the experiment directory, "
                                  "if one was specified on the command line)."
                                  "".format(arguments['scenario_file'], os.getcwd()))
        # Finally, load the default values of the redis configuration parameters
        if helper.isFile(self.redis_defaults):
            arguments, _ = self.parse_file_arguments(self.redis_defaults, arguments)
        # Finally, load the default values of all GPS parameters (that make sense to be shared)
        arguments, _ = self.parse_file_arguments(self.defaults, arguments)
        # Check that all parameters have defintions (optional parameters not specified by the
        # user will have already been included with default values)
        self._validate_all_arguments_defined(arguments)
        # Make sure all of the files and directories can be found
        _validate_files_and_directories(arguments)
        # Make sure GPS's budget was set
        _validate_budget(arguments)
        # Save the data for later
        self.parsed_arguments = arguments
        return arguments, skipped_lines
def _validate_all_arguments_defined(self, arguments):
missing = []
# iterate over all arguments
for group in self.argument_groups:
for argument in self.argument_groups[group]:
name = _get_name(argument)
if name not in arguments:
missing.append(name)
# The scenario file is the only argument that is *truely* optional
if 'scenario_file' in missing:
missing.remove('scenario_file')
if len(missing) > 0:
raise TypeError('GPS was missing definitions for the following required arguments: {}'
''.format(missing))
def _validate_redis_arguments_defined(self, arguments):
missing = []
# iterate over all redis arguments
for argument in self.redis_arguments:
name = _get_name(argument)
if name not in arguments:
missing.append(name)
if len(missing) > 0:
raise TypeError('The GPS worker was missing definitions for the following required arguments: {}'
''.format(missing))
    def create_scenario_file(self, scenario_file, arguments):
        """create_scenario_file
        Creates a scenario file with the specified name and arguments.

        Parameters
        ----------
        scenario_file : str
            The name of the file to (over)write.
        arguments : dict
            Maps every argument's canonical name to the value to record;
            must contain a definition for each argument in
            self.argument_groups (except 'scenario_file').
        """
        with open(scenario_file, 'w') as f_out:
            for group in self.argument_groups:
                # Write a comment banner naming the argument group.
                f_out.write('# {}\n'.format(group))
                f_out.write('# {}\n'.format('-'*len(group)))
                for argument in self.argument_groups[group]:
                    name = _get_name(argument)
                    # Of course it doesn't really make sense to save
                    # the name of the file in the file...
                    if name == 'scenario_file':
                        continue
                    f_out.write('{} = {}\n'.format(name, arguments[name]))
                f_out.write('\n')
def _get_name(names):
name = names[0] if isinstance(names, tuple) else names
name = name[2:] if len(name) > 2 else name[1]
return name.replace('-','_')
def _validate(types, message=None, valid=lambda x: True):
if not isinstance(types, tuple):
types = (types, )
def _check_valid(input_):
valid_type = False
for type_ in types:
try:
input_ = type_(input_)
valid_type = True
except:
pass
if not (valid_type and valid(input_)):
if message is not None:
raise argparse.ArgumentTypeError('{}. Provided "{}".'.format(message, input_))
else:
raise argparse.ArgumentTypeError('Input must be one of {}. Provided "{}".'.format(types, input_))
return input_
return _check_valid
def _validate_files_and_directories(arguments):
    """Check that every required file and directory exists.

    Paths are resolved relative to the experiment directory (the check is
    performed after changing into arguments['experiment_dir']).

    Raises an IOError naming the first missing file or directory.
    """
    with helper.cd(arguments['experiment_dir']):
        # Required input files.
        files = ['pcs_file', 'instance_file']
        for filename in files:
            if not helper.isFile(arguments[filename]):
                raise IOError("The {} '{}' could not be found within GPS's current working "
                              "directory '{}' (which is the experiment directory, if one was "
                              "specified)."
                              "".format(filename.replace('_', ' '), arguments[filename], os.getcwd()))
        # Required working directories.
        directories = ['temp_dir']
        for directory in directories:
            if not helper.isDir(arguments[directory]):
                raise IOError("The {} '{}' could not be found within GPS's current working "
                              "directory '{}' (which is the experiment directory, if one was "
                              "specified)."
                              "".format(directory.replace('_', ' '), arguments[directory], os.getcwd()))
def _validate_bound_multiplier(bm):
not_valid = False
try:
bm = float(bm)
if bm == 0:
bm = False
elif bm < 0:
not_valid = True
except:
if bm != 'adaptive':
not_valid = True
if not_valid:
raise argparse.ArgumentTypeError("The bound multiplier must either be 'adaptive', False, "
"or a positive real number. Provided {}".format(bm))
return bm
def _validate_budget(arguments):
budgets = ['runcount_limit', 'wallclock_limit', 'cputime_limit']
all_default = True
for budget in budgets:
all_default = all_default and arguments[budget] == 2147483647
if all_default:
raise ValueError('At least one of runcount_limit and wallclock_limit must be less than '
'the maximum integer value (which is their default value).')
def _to_bool(string):
if string == 'True':
return True
elif string == 'False':
return False
else:
raise ValueError("Booleans must be 'True' or 'False'. Provided {}".format(string))
def _get_aliases(names):
aliases = []
for name in names:
aliases.append(name)
if name[:2] == '--':
alias = '--{}'.format(name[2:].replace('-', '_'))
if alias not in aliases:
aliases.append(alias)
alias = '--{}{}'.format(name[2:].split('-')[0],
''.join([token.capitalize() for token in name[2:].split('-')[1:]]))
if alias not in aliases:
aliases.append(alias)
return tuple(aliases)
def _print_argument_documentation():
    """_print_argument_documentation
    Prints out documentation on each of the parameters formated
    to be included in the github readme file, including markdown.
    """
    # Local helpers that render small HTML fragments for the readme.
    def _table_row(header, content):
        return '<tr>{}{}</tr>'.format(_table_column(_bold(header)),
                                      _table_column(content))
    def _table_column(content):
        return '<td>{}</td>'.format(content)
    def _bold(header):
        return '<b>{}</b>'.format(header)
    def _list_of_code(aliases):
        return ', '.join([_code(alias.strip()) for alias in aliases])
    def _code(code):
        return '<code>{}</code>'.format(code)
    def _table(description, required, default, aliases):
        # One table per argument: description, required/default, aliases.
        return ('<table>\n{}\n{}\n{}\n</table>\n'
                ''.format(_table_row('Description', _abreviations_to_italics(description)),
                          _table_row('Required' if required else 'Default',
                                     'Yes' if required else default),
                          _table_row('Aliases', _list_of_code(aliases))))
    def _abreviations_to_italics(content):
        abreviations = ['e.g.', 'i.e.', 'etc.', 'vs.']
        for token in abreviations:
            content = content.replace(token, '<i>{}</i>'.format(token))
        return content
    argument_parser = ArgumentParser()
    # The defaults file lets us distinguish required arguments (no default)
    # from optional ones.
    defaults, _ = argument_parser.parse_file_arguments(argument_parser.defaults, {})
    for group in argument_parser.groups_in_order:
        print('## {}\n'.format(group))
        print('{}\n'.format(_abreviations_to_italics(argument_parser.group_help[group])))
        arguments = sorted(list(argument_parser.argument_groups[group].keys()))
        for arg in arguments:
            name = _get_name(arg)
            print('### {}\n'.format(name))
            description = argument_parser.argument_groups[group][arg]['help']
            required = name not in defaults
            default = None if required else defaults[name]
            # Handle the one exception to the rule.
            if name == 'scenario_file':
                required = False
                default = None
            # Convert directories to code
            if '_dir' in name:
                default = _code(default)
            aliases = _get_aliases(arg)
            print(_table(description, required, default, aliases))
# Script entry point: regenerate the readme's argument documentation.
if __name__ == '__main__':
    _print_argument_documentation()
| 63.863782 | 135 | 0.565783 |
e223c8049dbae1e6737181d2cfd0e89bf71b9763 | 12,773 | py | Python | deps/pmdk/src/tools/pmreorder/memoryoperations.py | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | deps/pmdk/src/tools/pmreorder/memoryoperations.py | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | deps/pmdk/src/tools/pmreorder/memoryoperations.py | kimleeju/xdp_redis | 52eaf9da59e5b9ddb009a7874791cdbe6ce9ba06 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils import Rangeable
from utils import range_cmp
from utils import StackTrace
from sys import byteorder
class BaseOperation:
    """
    Base class for all memory operations.

    Serves purely as a common ancestor for type checks; it defines no
    behavior of its own.
    """
    pass
class Fence(BaseOperation):
    """Represents a memory barrier operation.

    The exact flavor of the barrier is irrelevant to the reordering
    logic -- it is always interpreted as an SFENCE or MFENCE.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new Fence.

            :param values: Ignored.
            :type values: str
            :return: New Fence object.
            :rtype: Fence
            """
            return Fence()
class Store(BaseOperation, Rangeable):
    """
    Describes a store operation.

    :ivar address: The virtual address at which to store the new value.
    :type address: int
    :ivar new_value: The new value to be written.
    :type new_value: bytearray
    :ivar size: The size of the store in bytes.
    :type size: int
    :ivar old_value: The old value read from the file.
    :type old_value: bytearray
    :ivar flushed: Indicates whether the store has been flushed.
    :type flushed: bool
    """
    def __init__(self, values):
        """
        Initializes the object based on the describing string.

        :param values: Pre-formatted string describing the store
            (semicolon-separated; fields 1-3 are address, value and size
            in hex, any further fields carry a stack trace).
        :type values: str
        :return: None
        """
        params = values.split(";")
        # calculate the offset given the registered file mapping
        self.address = int(params[1], 16)
        self.size = int(params[3], 16)
        # Keep the value as native-endian raw bytes so it can be written
        # back into the mapped file verbatim.
        self.new_value = \
            int(params[2], 16).to_bytes(self.size, byteorder=byteorder)
        # Fields past the fourth, when present, describe the call stack.
        if len(params) > 4:
            self.trace = StackTrace(params[4:])
        else:
            self.trace = StackTrace(["No trace available", ])
        self.old_value = None
        self.flushed = False

    def __str__(self):
        return "addr: " + hex(self.address) + " size " + \
            str(self.size) + " value " + str(self.new_value)

    def get_base_address(self):
        """
        Override from :class:`utils.Rangeable`.

        :return: Virtual address of the store.
        :rtype: int
        """
        return self.address

    def get_max_address(self):
        """
        Override from :class:`utils.Rangeable`.

        :return: Virtual address of the first byte after the store.
        :rtype: int
        """
        return self.address + self.size

    class Factory():
        """
        Internal factory class to be used in dynamic object creation.
        """
        def create(self, values):
            """
            Factory object creation method.

            :param values: Pre-formatted string describing the store.
            :type values: str
            :return: New Store object.
            :rtype: Store
            """
            return Store(values)
class FlushBase(BaseOperation, Rangeable):
    """
    Base class for flush operations.
    """
    def is_in_flush(self, store_op):
        """
        Check if a given store is within the flush.

        Abstract -- concrete flush types define their own containment test.

        :param store_op: Store operation to check.
        :return: True if store is in flush, false otherwise.
        :rtype: bool
        """
        raise NotImplementedError
class Flush(FlushBase):
    """
    Describes a flush operation.

    Examples of flush instructions are CLFLUSH, CLFLUSHOPT or CLWB.

    :ivar _address: Virtual address of the flush.
    :type _address: int
    :ivar _size: The size of the flush in bytes (should be cache line aligned).
    :type _size: int
    """
    def __init__(self, values):
        """
        Initializes the object based on the describing string.

        :param values: Pre-formatted string describing the flush
            (semicolon-separated; fields 1-2 are address and size in hex).
        :type values: str
        :return: None
        """
        params = values.split(";")
        self._address = int(params[1], 16)
        self._size = int(params[2], 16)

    def is_in_flush(self, store_op):
        """
        Override from :class:`FlushBase`.

        :param store_op: Store operation to check.
        :return: True if store is in flush, false otherwise.
        :rtype: bool
        """
        # range_cmp returns 0 when the two ranges overlap.
        if range_cmp(store_op, self) == 0:
            return True
        else:
            return False

    def get_base_address(self):
        """
        Override from :class:`utils.Rangeable`.

        :return: Virtual address of the flush.
        :rtype: int
        """
        return self._address

    def get_max_address(self):
        """
        Override from :class:`utils.Rangeable`.

        :return: Virtual address of the first byte after the flush.
        :rtype: int
        """
        return self._address + self._size

    class Factory:
        """
        Internal factory class to be used in dynamic object creation.
        """
        def create(self, values):
            """
            Factory object creation method.

            :param values: Pre-formatted string describing the flush.
            :type values: str
            :return: New Flush object.
            :rtype: Flush
            """
            return Flush(values)
class ReorderBase(BaseOperation):
    """
    Base class for all reorder type classes.

    Subclasses are pure markers: their type alone selects the reordering
    engine to be used.
    """
    pass
class NoReorderDoCheck(ReorderBase):
    """Marker selecting the pass-through engine with consistency checking.

    When encountered, the whole sequence of stores between barriers is
    written out in program order (no reordering is performed).
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new NoReorderDoCheck marker.

            :param values: Ignored.
            :type values: str
            :return: New NoReorderDoCheck object.
            :rtype: NoReorderDoCheck
            """
            return NoReorderDoCheck()
class ReorderFull(ReorderBase):
    """Marker selecting the full reordering engine.

    When encountered, every possible sequence of the stores between
    barriers is written out and checked.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new ReorderFull marker.

            :param values: Ignored.
            :type values: str
            :return: New ReorderFull object.
            :rtype: ReorderFull
            """
            return ReorderFull()
class ReorderAccumulative(ReorderBase):
    """Marker selecting the accumulative reordering engine.

    When encountered, all possible accumulative sequences of the stores
    between barriers are written out.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new ReorderAccumulative marker.

            :param values: Ignored.
            :type values: str
            :return: New ReorderAccumulative object.
            :rtype: ReorderAccumulative
            """
            return ReorderAccumulative()
class ReorderReverseAccumulative(ReorderBase):
    """Marker selecting the reversed accumulative reordering engine.

    When encountered, all possible reverted accumulative sequences of the
    stores between barriers are written out.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new ReorderReverseAccumulative marker.

            :param values: Ignored.
            :type values: str
            :return: New ReorderReverseAccumulative object.
            :rtype: ReorderReverseAccumulative
            """
            return ReorderReverseAccumulative()
class NoReorderNoCheck(ReorderBase):
    """Marker selecting the pass-through engine without consistency checks.

    When encountered, the whole sequence of stores between barriers is
    written out in program order and no consistency checking is performed.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new NoReorderNoCheck marker.

            :param values: Ignored.
            :type values: str
            :return: New NoReorderNoCheck object.
            :rtype: NoReorderNoCheck
            """
            return NoReorderNoCheck()
class ReorderDefault(ReorderBase):
    """Marker selecting the default reordering engine."""

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new ReorderDefault marker.

            :param values: Ignored.
            :type values: str
            :return: ReorderDefault object.
            :rtype: ReorderDefault
            """
            return ReorderDefault()
class ReorderPartial(ReorderBase):
    """Marker selecting the partial reordering engine.

    When encountered, only a subset of all possible sequences of the
    stores between barriers is written out. The exact kind of partial
    reordering is chosen at runtime. Not yet implemented.
    """

    class Factory:
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new ReorderPartial marker.

            :param values: Ignored.
            :type values: str
            :return: New ReorderPartial object.
            :rtype: ReorderPartial
            """
            return ReorderPartial()
class Register_file(BaseOperation):
    """Describes a file being mapped into the process' address space.

    :ivar name: The full name of the file.
    :type name: str
    :ivar address: The base address where the file was mapped.
    :type address: int
    :ivar size: The size of the mapping.
    :type size: int
    :ivar offset: The start offset of the mapping within the file.
    :type offset: int
    """

    def __init__(self, values):
        """Initialize the mapping description from the log entry.

        :param values: Pre-formatted, semicolon-separated string; fields
            1-4 are name, base address, size and offset (hex).
        :type values: str
        :return: None
        """
        fields = values.split(";")
        self.name = fields[1]
        self.address = int(fields[2], 16)
        self.size = int(fields[3], 16)
        self.offset = int(fields[4], 16)

    class Factory():
        """Factory used by the dynamic operation-object creation code."""

        def create(self, values):
            """Build a new Register_file from its log entry.

            :param values: Pre-formatted string
                describing the file registration.
            :type values: str
            :return: New Register_file object.
            :rtype: Register_file
            """
            return Register_file(values)
| 28.963719 | 79 | 0.604243 |
88b33c7c94cbf8e1d49d08961fd9f3ce3cfc6947 | 12,824 | py | Python | Lib/distutils/filelist.py | pharrera/CS008 | d516cf4e2da4f8b6cb8a4d7dc1c823ec6f9d0aa0 | [
"PSF-2.0"
] | null | null | null | Lib/distutils/filelist.py | pharrera/CS008 | d516cf4e2da4f8b6cb8a4d7dc1c823ec6f9d0aa0 | [
"PSF-2.0"
] | null | null | null | Lib/distutils/filelist.py | pharrera/CS008 | d516cf4e2da4f8b6cb8a4d7dc1c823ec6f9d0aa0 | [
"PSF-2.0"
] | 1 | 2021-02-23T21:57:26.000Z | 2021-02-23T21:57:26.000Z | """distutils.filelist
Provides the FileList class, used for poking about the filesystem
and building lists of files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os, string, re
import fnmatch
from types import *
from distutils.util import convert_path
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
from distutils import log
class FileList:
    """A list of files built by on exploring the filesystem and filtered by
    applying various patterns to what we find there.

    NOTE: this is legacy Python 2 code (print statements, old-style raise,
    the 'string' module); it must not be run under Python 3.

    Instance attributes:
      dir
        directory from which files will be taken -- only used if
        'allfiles' not supplied to constructor
      files
        list of filenames currently being built/filtered/manipulated
      allfiles
        complete list of files under consideration (ie. without any
        filtering applied)
    """

    def __init__(self,
                 warn=None,
                 debug_print=None):
        # ignore argument to FileList, but keep them for backwards
        # compatibility
        self.allfiles = None
        self.files = []

    def set_allfiles (self, allfiles):
        self.allfiles = allfiles

    def findall (self, dir=os.curdir):
        # Delegates to the module-level findall() to (re)scan the tree.
        self.allfiles = findall(dir)

    def debug_print (self, msg):
        """Print 'msg' to stdout if the global DEBUG (taken from the
        DISTUTILS_DEBUG environment variable) flag is true.
        """
        from distutils.debug import DEBUG
        if DEBUG:
            print msg

    # -- List-like methods ---------------------------------------------

    def append (self, item):
        self.files.append(item)

    def extend (self, items):
        self.files.extend(items)

    def sort (self):
        # Not a strict lexical sort!
        # Sort by (head, tail) path components so the order is stable
        # across platforms with different separators.
        sortable_files = map(os.path.split, self.files)
        sortable_files.sort()
        self.files = []
        for sort_tuple in sortable_files:
            self.files.append(apply(os.path.join, sort_tuple))

    # -- Other miscellaneous utility methods ---------------------------

    def remove_duplicates (self):
        # Assumes list has been sorted!
        # Walk backwards so deletions do not disturb unvisited indices.
        for i in range(len(self.files) - 1, 0, -1):
            if self.files[i] == self.files[i - 1]:
                del self.files[i]

    # -- "File template" methods ---------------------------------------

    def _parse_template_line (self, line):
        """Split one MANIFEST.in template line into
        (action, patterns, dir, dir_pattern); only the fields relevant to
        'action' are non-None. Raises DistutilsTemplateError on a
        malformed line.
        """
        words = string.split(line)
        action = words[0]

        patterns = dir = dir_pattern = None

        if action in ('include', 'exclude',
                      'global-include', 'global-exclude'):
            if len(words) < 2:
                raise DistutilsTemplateError, \
                      "'%s' expects <pattern1> <pattern2> ..." % action

            patterns = map(convert_path, words[1:])

        elif action in ('recursive-include', 'recursive-exclude'):
            if len(words) < 3:
                raise DistutilsTemplateError, \
                      "'%s' expects <dir> <pattern1> <pattern2> ..." % action

            dir = convert_path(words[1])
            patterns = map(convert_path, words[2:])

        elif action in ('graft', 'prune'):
            if len(words) != 2:
                raise DistutilsTemplateError, \
                      "'%s' expects a single <dir_pattern>" % action

            dir_pattern = convert_path(words[1])

        else:
            raise DistutilsTemplateError, "unknown action '%s'" % action

        return (action, patterns, dir, dir_pattern)

    # _parse_template_line ()

    def process_template_line (self, line):
        """Execute one MANIFEST.in template line against 'self.files',
        warning (via the log module) whenever a pattern matches nothing.
        """
        # Parse the line: split it up, make sure the right number of words
        # is there, and return the relevant words.  'action' is always
        # defined: it's the first word of the line.  Which of the other
        # three are defined depends on the action; it'll be either
        # patterns, (dir and patterns), or (dir_pattern).
        (action, patterns, dir, dir_pattern) = self._parse_template_line(line)

        # OK, now we know that the action is valid and we have the
        # right number of words on the line for that action -- so we
        # can proceed with minimal error-checking.
        if action == 'include':
            self.debug_print("include " + string.join(patterns))
            for pattern in patterns:
                if not self.include_pattern(pattern, anchor=1):
                    log.warn("warning: no files found matching '%s'",
                             pattern)

        elif action == 'exclude':
            self.debug_print("exclude " + string.join(patterns))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, anchor=1):
                    log.warn(("warning: no previously-included files "
                              "found matching '%s'"), pattern)

        elif action == 'global-include':
            self.debug_print("global-include " + string.join(patterns))
            for pattern in patterns:
                if not self.include_pattern(pattern, anchor=0):
                    log.warn(("warning: no files found matching '%s' " +
                              "anywhere in distribution"), pattern)

        elif action == 'global-exclude':
            self.debug_print("global-exclude " + string.join(patterns))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, anchor=0):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found anywhere in distribution"),
                             pattern)

        elif action == 'recursive-include':
            self.debug_print("recursive-include %s %s" %
                             (dir, string.join(patterns)))
            for pattern in patterns:
                if not self.include_pattern(pattern, prefix=dir):
                    log.warn(("warning: no files found matching '%s' " +
                              "under directory '%s'"),
                             pattern, dir)

        elif action == 'recursive-exclude':
            self.debug_print("recursive-exclude %s %s" %
                             (dir, string.join(patterns)))
            for pattern in patterns:
                if not self.exclude_pattern(pattern, prefix=dir):
                    log.warn(("warning: no previously-included files matching "
                              "'%s' found under directory '%s'"),
                             pattern, dir)

        elif action == 'graft':
            self.debug_print("graft " + dir_pattern)
            if not self.include_pattern(None, prefix=dir_pattern):
                log.warn("warning: no directories found matching '%s'",
                         dir_pattern)

        elif action == 'prune':
            self.debug_print("prune " + dir_pattern)
            if not self.exclude_pattern(None, prefix=dir_pattern):
                log.warn(("no previously-included directories found " +
                          "matching '%s'"), dir_pattern)
        else:
            # _parse_template_line() already validated the action, so this
            # branch is unreachable unless the two fall out of sync.
            raise DistutilsInternalError, \
                  "this cannot happen: invalid action '%s'" % action

    # process_template_line ()

    # -- Filtering/selection methods -----------------------------------

    def include_pattern (self, pattern,
                         anchor=1, prefix=None, is_regex=0):
        """Select strings (presumably filenames) from 'self.files' that
        match 'pattern', a Unix-style wildcard (glob) pattern.  Patterns
        are not quite the same as implemented by the 'fnmatch' module: '*'
        and '?'  match non-special characters, where "special" is platform-
        dependent: slash on Unix; colon, slash, and backslash on
        DOS/Windows; and colon on Mac OS.

        If 'anchor' is true (the default), then the pattern match is more
        stringent: "*.py" will match "foo.py" but not "foo/bar.py".  If
        'anchor' is false, both of these will match.

        If 'prefix' is supplied, then only filenames starting with 'prefix'
        (itself a pattern) and ending with 'pattern', with anything in between
        them, will match.  'anchor' is ignored in this case.

        If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
        'pattern' is assumed to be either a string containing a regex or a
        regex object -- no translation is done, the regex is just compiled
        and used as-is.

        Selected strings will be added to self.files.

        Return 1 if files are found.
        """
        files_found = 0
        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
        self.debug_print("include_pattern: applying regex r'%s'" %
                         pattern_re.pattern)

        # delayed loading of allfiles list
        if self.allfiles is None:
            self.findall()

        for name in self.allfiles:
            if pattern_re.search(name):
                self.debug_print(" adding " + name)
                self.files.append(name)
                files_found = 1

        return files_found

    # include_pattern ()

    def exclude_pattern (self, pattern,
                         anchor=1, prefix=None, is_regex=0):
        """Remove strings (presumably filenames) from 'files' that match
        'pattern'.  Other parameters are the same as for
        'include_pattern()', above.
        The list 'self.files' is modified in place.
        Return 1 if files are found.
        """
        files_found = 0
        pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
        self.debug_print("exclude_pattern: applying regex r'%s'" %
                         pattern_re.pattern)
        # Walk backwards so deletions do not shift unvisited indices.
        for i in range(len(self.files)-1, -1, -1):
            if pattern_re.search(self.files[i]):
                self.debug_print(" removing " + self.files[i])
                del self.files[i]
                files_found = 1

        return files_found

    # exclude_pattern ()

# class FileList
# ----------------------------------------------------------------------
# Utility functions
def findall(dir=os.curdir):
    """Return a list of all regular files found under 'dir'.

    Filenames are relative to 'dir' (they carry 'dir' as a prefix unless
    'dir' is the current directory). The tree is walked iteratively with
    an explicit stack.
    """
    from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK

    found = []
    pending = [dir]
    while pending:
        current = pending.pop()
        for name in os.listdir(current):
            # Avoid the dreaded "./" prefix on every result.
            fullname = name if current == os.curdir else os.path.join(current, name)

            # A single stat call tells us everything we need.
            mode = os.stat(fullname)[ST_MODE]
            if S_ISREG(mode):
                found.append(fullname)
            elif S_ISDIR(mode) and not S_ISLNK(mode):
                pending.append(fullname)
    return found
def glob_to_re(pattern):
    """Translate a shell-like glob pattern into a regular expression string.

    Differs from 'fnmatch.translate()' in that the '*' and '?' wildcards
    produced here do not match "special characters" (which are
    platform-specific).
    """
    translated = fnmatch.translate(pattern)
    # fnmatch turns '?' and '*' into '.' and '.*', which would happily
    # match a slash -- IMHO wrong under any OS. Rewrite every non-escaped
    # '.' so it matches anything *except* the special characters. The
    # lookbehind plus the paired backslashes ensure only dots that are not
    # themselves escaped get touched.
    # XXX currently the "special characters" are just slash -- i.e. this is
    # Unix-only.
    return re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', translated)
def translate_pattern (pattern, anchor=1, prefix=None, is_regex=0):
    """Translate a shell-like wildcard pattern to a compiled regular
    expression.  Return the compiled regex.  If 'is_regex' true,
    then 'pattern' is directly compiled to a regex (if it's a string)
    or just returned as-is (assumes it's a regex object).
    """
    # Regex input: compile strings, pass compiled patterns straight through.
    if is_regex:
        if type(pattern) is StringType:
            return re.compile(pattern)
        else:
            return pattern

    if pattern:
        pattern_re = glob_to_re(pattern)
    else:
        pattern_re = ''

    if prefix is not None:
        # ditch end of pattern character
        # (strip whatever fnmatch appends for an empty glob so the prefix
        # regex can be joined with the rest of the pattern)
        empty_pattern = glob_to_re('')
        prefix_re = (glob_to_re(prefix))[:-len(empty_pattern)]
        pattern_re = "^" + os.path.join(prefix_re, ".*" + pattern_re)
    else:                               # no prefix -- respect anchor flag
        if anchor:
            pattern_re = "^" + pattern_re

    return re.compile(pattern_re)

# translate_pattern ()
| 35.821229 | 79 | 0.570805 |
101235270a76e42d28b03b680688ff68d53af937 | 4,179 | py | Python | tests/addr/test_fil_addr.py | 3rdIteration/bip_utils | 84abeff9158618a0ecf9a059c19fd1a3d882e724 | [
"MIT"
] | null | null | null | tests/addr/test_fil_addr.py | 3rdIteration/bip_utils | 84abeff9158618a0ecf9a059c19fd1a3d882e724 | [
"MIT"
] | null | null | null | tests/addr/test_fil_addr.py | 3rdIteration/bip_utils | 84abeff9158618a0ecf9a059c19fd1a3d882e724 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
import unittest
from bip_utils import FilSecp256k1AddrDecoder, FilSecp256k1AddrEncoder
from tests.addr.test_addr_base import AddrBaseTestHelper
from tests.addr.test_addr_const import TEST_SECP256K1_ADDR_INVALID_KEY_TYPES
from tests.ecc.test_ecc import TEST_VECT_SECP256K1_PUB_KEY_INVALID, Secp256k1PublicKey
# Some random public keys
# Each entry maps a compressed secp256k1 public key (hex) to its expected
# Filecoin f1 (secp256k1) address, both as the decoded payload bytes (hex)
# and as the final encoded address string.  address_params is empty because
# the Filecoin encoder takes no extra parameters.
TEST_VECT = [
    {
        "pub_key": b"0258742e7596b2cb998b42dddffd7b5c7ba30702876f899d6f7188d23285fc3208",
        "address_dec": b"3c07040c8746bfe1485021ca57623f866edd15d4",
        "address_params": {},
        "address": "f1hqdqidehi276cscqehffoyr7qzxn2foumjpq5zq",
    },
    {
        "pub_key": b"03ad9c631c2fac4adca03c1abf9e473dc9bd6dca7868e6b961ebb81547819c6e8c",
        "address_dec": b"b3f7b860d78adcde02e33a19d141782bebfed9ca",
        "address_params": {},
        "address": "f1wp33qygxrlon4axdhim5cqlyfpv75wokvcfdgyy",
    },
    {
        "pub_key": b"036d34f7fde5eedcea7c35e59112abee4786190cec263469b8a92fa15222999cff",
        "address_dec": b"9ddfdb18c55926e6817d4545c1886a30a487d8dc",
        "address_params": {},
        "address": "f1txp5wggfletonal5ivc4dcdkgcsipwg4fte42wi",
    },
    {
        "pub_key": b"03560c22685ce5837b897bd553a4e23af0bf464ef72ddbb32d252d1fbcec4f8c81",
        "address_dec": b"56cc818223ca3165efed0b4b600fc4011cc87f97",
        "address_params": {},
        "address": "f1k3gidardziywl37nbnfwad6eaeomq74xwcs2wxq",
    },
    {
        "pub_key": b"021e8c1330274bd99ba01e019f2cbe07e8822f8b8919f4c1cb38d389903d67f158",
        "address_dec": b"66b8a34e79114e30c6d8f96eb486878afc398c50",
        "address_params": {},
        "address": "f1m24kgttzcfhdbrwy7fxljbuhrl6dtdcqhy4xnla",
    },
]

# Tests for decoding with invalid strings
# Each comment names the specific defect the decoder must reject.
TEST_VECT_DEC_INVALID = [
    # Invalid prefix
    "g1k3gidardziywl37nbnfwad6eaeomq74xwcs2wxq",
    # Invalid address type
    "f2m24kgttzcfhdbrwy7fxljbuhrl6dtdcqhy4xnla",
    # Invalid encoding
    "f1hqdqidehi276cscqehffoyr7qzxn2f0umjpq5zq",
    # Invalid checksum
    "f1y7pzdgdbeuuhazhrreys26a22fcv4ycjpi6hrxa",
    # Invalid lengths
    "f1y7pzdgdbeuuhazhrreys26a22fcv4ycjrxwpq",
    "f1y7pzdgdbeuuhazhrreys26a22fcv4ycjlelq3qtb",
]
#
# Tests
#
class FilAddrTests(unittest.TestCase):
    """Unit tests for the Filecoin secp256k1 address encoder/decoder."""

    # Test encode key
    def test_encode_key(self):
        """Each public key in TEST_VECT must encode to its known address."""
        AddrBaseTestHelper.test_encode_key(self, FilSecp256k1AddrEncoder, Secp256k1PublicKey, TEST_VECT)

    # Test decode address
    def test_decode_addr(self):
        """Each address in TEST_VECT must decode back to its payload bytes."""
        AddrBaseTestHelper.test_decode_addr(self, FilSecp256k1AddrDecoder, TEST_VECT)

    # Test invalid decoding
    def test_invalid_dec(self):
        """Every malformed string in TEST_VECT_DEC_INVALID must be rejected."""
        AddrBaseTestHelper.test_invalid_dec(self, FilSecp256k1AddrDecoder, {}, TEST_VECT_DEC_INVALID)

    # Test invalid keys
    def test_invalid_keys(self):
        """Encoding must reject wrong key types and invalid public keys."""
        AddrBaseTestHelper.test_invalid_keys(self,
                                             FilSecp256k1AddrEncoder,
                                             {},
                                             TEST_SECP256K1_ADDR_INVALID_KEY_TYPES,
                                             TEST_VECT_SECP256K1_PUB_KEY_INVALID)
| 40.970588 | 104 | 0.731275 |
0d42cc5f1fdf565e5c6b0e5874ba4d30be825dc1 | 868 | py | Python | cli/polyaxon/pkg.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | cli/polyaxon/pkg.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | 1 | 2022-01-24T11:26:47.000Z | 2022-03-18T23:17:58.000Z | cli/polyaxon/pkg.py | polyaxon/cli | 3543c0220a8a7c06fc9573cd2a740f8ae4930641 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Packaging metadata consumed by the CLI's setup/distribution machinery.
NAME = "polyaxon"
# Client/CLI release version.
VERSION = "1.18.2"
# Version of the Polyaxonfile schema this release understands.
SCHEMA_VERSION = 1.1
DESC = "Command Line Interface (CLI) and client to interact with Polyaxon API."
URL = "https://github.com/polyaxon/polyaxon"
AUTHOR = "Polyaxon, Inc."
EMAIL = "contact@polyaxon.com"
LICENSE = "Apache 2.0"
| 34.72 | 79 | 0.741935 |
ca18cb98ea27fe163ecde4e244097dee3970cd2b | 6,788 | py | Python | pychron/pipeline/nodes/grouping.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | 1 | 2019-02-27T21:57:44.000Z | 2019-02-27T21:57:44.000Z | pychron/pipeline/nodes/grouping.py | aelamspychron/pychron | ad87c22b0817c739c7823a24585053041ee339d5 | [
"Apache-2.0"
] | 20 | 2020-09-09T20:58:39.000Z | 2021-10-05T17:48:37.000Z | pychron/pipeline/nodes/grouping.py | AGESLDEO/pychron | 1a81e05d9fba43b797f335ceff6837c016633bcf | [
"Apache-2.0"
] | null | null | null | # ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from operator import attrgetter
from numpy import array, array_split
# ============= enthought library imports =======================
from traits.api import Str, Enum
from traitsui.api import UItem, EnumEditor, VGroup
from pychron.core.helpers.datetime_tools import bin_timestamps
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.pipeline.grouping import group_analyses_by_key
from pychron.pipeline.nodes.base import BaseNode
from pychron.pipeline.subgrouping import apply_subgrouping, compress_groups
from pychron.processing.analyses.preferred import get_preferred_grp, Preferred
from pychron.pychron_constants import SUBGROUPING_ATTRS, WEIGHTED_MEAN, \
MSEM, SD, DEFAULT_INTEGRATED
class GroupingNode(BaseNode):
    """Pipeline node that partitions analyses into groups by a chosen key.

    The selected ``attribute`` decides which id is written onto each
    analysis ('Group' -> group_id, 'Graph' -> graph_id, 'Tab' -> tab_id).
    """

    # Attribute name used to partition the analyses; one of ``keys``.
    by_key = Str
    keys = ('Aliquot', 'Comment', 'Identifier', 'Sample', 'Step', 'SubGroup', 'No Grouping')
    # Which collection on the pipeline state this node reads and writes.
    analysis_kind = 'unknowns'
    name = 'Grouping'
    title = 'Edit Grouping'
    attribute = Enum('Group', 'Graph', 'Tab')
    # _attr = 'group_id'
    _id_func = None
    _sorting_enabled = True
    _cached_items = None
    _state = None
    _parent_group = None

    def load(self, nodedict):
        """Restore the grouping key from a saved pipeline template."""
        self.by_key = nodedict.get('key', 'Identifier')

    def _to_template(self, d):
        # Persist the grouping key when saving the pipeline template.
        d['key'] = self.by_key

    def _generate_key(self):
        # Key function used for grouping; None when no grouping is selected.
        if self.by_key != 'No Grouping':
            return attrgetter(self.by_key.lower())

    def run(self, state):
        self._run(state)

    def post_run(self, engine, state):
        # Release the cached engine state once this node has executed.
        self._state = None

    def _run(self, state):
        """Clear existing ids, then regroup ``state.<analysis_kind>``."""
        unks = getattr(state, self.analysis_kind)
        self._state = state
        # print('clearsd', self._attr)
        for unk in unks:
            self._clear_grouping(unk)
        if self.by_key != 'No Grouping':
            key = self._generate_key()
            items = group_analyses_by_key(unks, key=key, attr=self._attr, id_func=self._id_func,
                                          sorting_enabled=self._sorting_enabled,
                                          parent_group=self._parent_group)
            # Store the regrouped analyses on both the state and the node.
            setattr(state, self.analysis_kind, items)
            setattr(self, self.analysis_kind, items)

    def _clear_grouping(self, unk):
        # Reset this node's id attribute (e.g. group_id) back to 0.
        setattr(unk, self._attr, 0)

    @property
    def _attr(self):
        # Derived id attribute name, e.g. attribute 'Group' -> 'group_id'.
        return '{}_id'.format(self.attribute.lower())

    def traits_view(self):
        """Build the TraitsUI dialog used to edit the grouping options."""
        kgrp = VGroup(UItem('by_key',
                            style='custom',
                            editor=EnumEditor(name='keys')),
                      show_border=True,
                      label='Key')
        agrp = VGroup(UItem('attribute',
                            tooltip='Group=Display all groups on a single graph\n'
                                    'Graph=Display groups on separate graphs\n'
                                    'Tab=Display groups on separate tabs'), label='To Group', show_border=True)
        v = okcancel_view(VGroup(agrp, kgrp),
                          width=300,
                          title=self.title,
                          )
        return v
class GraphGroupingNode(GroupingNode):
    """Grouping node variant that writes ``graph_id`` instead of group_id."""

    title = 'Edit Graph Grouping'
    name = 'Graphing Group'
    # Overrides the GroupingNode property with a fixed attribute name.
    _attr = 'graph_id'
class SubGroupingNode(GroupingNode, Preferred):
    """Grouping node that assigns subgroups within existing groups.

    Grouping mechanics come from GroupingNode; the preferred-value
    machinery (``preferred_values``/``_get_pv``) comes from Preferred.
    """

    title = 'Edit SubGrouping'
    keys = ('Aliquot', 'Identifier', 'Step', 'Comment', 'No Grouping')
    name = 'SubGroup'
    by_key = 'Aliquot'
    attribute = 'subgroup'
    # include_j_error_in_individual_analyses = Bool(False)
    # include_j_error_in_mean = Bool(True)
    _sorting_enabled = False
    # Subgrouping is applied within each existing group_id.
    _parent_group = 'group_id'

    def load(self, nodedict):
        """Restore the subgrouping key from a template (default Aliquot)."""
        self.by_key = nodedict.get('key', 'Aliquot')

    def _clear_grouping(self, unk):
        # Subgroup membership is a plain attribute; cleared with None.
        unk.subgroup = None

    def _id_func(self, gid, analyses):
        """Pick kind/error statistics per attribute and apply the subgrouping.

        With more than one aliquot a weighted mean (MSEM error) is used;
        otherwise the default integrated value (SD error).
        """
        analyses = list(analyses)
        naliquots = len({a.aliquot for a in analyses})
        for attr in SUBGROUPING_ATTRS:
            # NOTE(review): 'age' is skipped here, which makes the
            # `if attr == 'age'` branch below unreachable -- confirm intent.
            if attr == 'age':
                continue
            pv = self._get_pv(attr)
            if attr == 'age':
                kind, error = WEIGHTED_MEAN, MSEM
            else:
                kind = WEIGHTED_MEAN if naliquots > 1 else DEFAULT_INTEGRATED
                error = MSEM if naliquots > 1 else SD
            pv.kind = kind
            pv.error_kind = error
        # Flatten the preferred values into the {attr_kind/attr_error_kind}
        # mapping expected by apply_subgrouping.
        grouping = {'{}_kind'.format(pv.attr): pv.kind for pv in self.preferred_values}
        grouping.update({'{}_error_kind'.format(pv.attr): pv.error_kind for pv in self.preferred_values})
        apply_subgrouping(grouping, analyses, gid=gid)

    def _pre_run_hook(self, state):
        # unks = getattr(state, self.analysis_kind)
        self._run(state)

    def _by_key_changed(self):
        # Trait-change handler: regroup immediately when the key is edited.
        if self._state:
            self._run(self._state)

    def run(self, state):
        self._run(state)
        ans = getattr(state, self.analysis_kind)
        # Compress/renumber the subgroup assignments after regrouping.
        compress_groups(ans)

    def traits_view(self):
        """Build the TraitsUI dialog for editing the subgrouping options."""
        v = okcancel_view(VGroup(VGroup(UItem('by_key',
                                              style='custom',
                                              editor=EnumEditor(name='keys')),
                                        show_border=True, label='Grouping'),
                                 get_preferred_grp(label='Types', show_border=True)),
                          width=500,
                          resizable=True,
                          title=self.title)
        return v
class BinNode(BaseNode):
    """Assign group_ids by binning analyses in time, splitting at large gaps."""

    analysis_kind = 'unknowns'

    def run(self, state):
        unks = getattr(state, self.analysis_kind)
        key = attrgetter('timestamp')
        unks = sorted(unks, key=key)
        # Gap tolerance (hours) handed to bin_timestamps.
        tol_hrs = 1
        ts = array([ai.timestamp for ai in unks])
        idxs = bin_timestamps(ts, tol_hrs)
        if idxs:
            unks = array(unks)
            # Split at the gap indices; each chunk shares one group_id.
            for i, ais in enumerate(array_split(unks, idxs + 1)):
                for ai in ais:
                    ai.group_id = i
        else:
            # No gaps found: everything belongs to a single group.
            for ai in unks:
                ai.group_id = 0
# ============= EOF =============================================
| 32.792271 | 111 | 0.571744 |
439f3f68fa31fd4788ce6851fc55060991e591f6 | 3,030 | py | Python | Data-CSV/ci-square.py | GabrielCiolac/SYSC-4005-Simulation | bd68760aa09b7297a4dbca89f65682e635b79f70 | [
"MIT"
] | null | null | null | Data-CSV/ci-square.py | GabrielCiolac/SYSC-4005-Simulation | bd68760aa09b7297a4dbca89f65682e635b79f70 | [
"MIT"
] | 1 | 2021-03-12T22:53:27.000Z | 2021-07-14T15:32:18.000Z | Data-CSV/ci-square.py | GabrielCiolac/SYSC-4005-Simulation | bd68760aa09b7297a4dbca89f65682e635b79f70 | [
"MIT"
] | null | null | null | from scipy.stats import lognorm
import math
def log_normal(x: float, mu=0, sigma=1):
    """Cumulative distribution of a lognormal at ``x``.

    Returns 0 at x == 0.  Note that ``sigma`` is treated as a variance:
    a square root is applied before it is used.
    """
    if x == 0:
        return 0
    std_dev = math.sqrt(sigma)
    z = (math.log(x) - mu) / math.sqrt(2 * std_dev ** 2)
    return 0.5 + 0.5 * math.erf(z)
def read_csv(fName):
    """Read ``fName`` and return its lines with the header (first line) dropped.

    :param fName: path of the data file to read.
    :return: list of data lines (str), without trailing newlines.
    """
    # The with-block closes the file; the original explicit f.close()
    # inside the context manager was redundant and has been removed.
    with open(fName, 'r') as f:
        ln = f.read()
    return ln.splitlines()[1:]
def log_normal_in_interval(mu, sigma, min, max):
    """Probability mass of the lognormal(mu, sigma) between ``min`` and ``max``."""
    upper = log_normal(max, mu, sigma)
    lower = log_normal(min, mu, sigma)
    return upper - lower
def expected_val(mu, sigma, min, max):
    """Expected count (out of 1000 samples) falling between ``min`` and ``max``."""
    probability = log_normal_in_interval(mu, sigma, min, max)
    return 1000 * probability
def get_largest(ln):
    """Return the largest value in the data set, as a float.

    Raises IndexError on an empty data set (same as the original).
    """
    largest = float(ln[0])
    for entry in ln[1:]:
        value = float(entry)
        if value > largest:
            largest = value
    return largest
def density_in_interval(ln, min, max):
    """Count the values of ``ln`` in the half-open interval (min, max]."""
    return sum(1 for entry in ln if min < float(entry) <= max)
def create_csv(fName):
    """Create (overwriting if present) the chi-square table CSV for ``fName``.

    The 4-character extension of ``fName`` is stripped and '_table.csv'
    appended; the header row is written and the new file name returned.
    """
    csvName = fName[:-4] + '_table.csv'
    # 'w' truncates any existing file; the with-block closes it on exit
    # (the original redundant f.close() inside the block was removed).
    with open(csvName, 'w') as f:
        f.write('Interval,P(X),Actual,Expected,Expected Normalized Square of Difference')
    return csvName
def add_to_table(csvName, interval, percentage, expected, actual, dif):
    """Append one interval row to the chi-square table CSV.

    Columns match the header written by create_csv: interval upper edge,
    P(X) as a percentage, observed count, expected count and the
    expected-normalized squared difference.
    """
    # The with-block closes the file; the original redundant f.close()
    # inside the block was removed.
    with open(csvName, 'a') as f:
        f.write('\n' + str(interval) + ',' + str(percentage * 100) + '%,'
                + str(actual) + ',' + str(expected) + ',' + str(dif))
def write_sum_to_end(csvName, sum):
    """Append the accumulated chi-square sum as a trailing summary row."""
    # The with-block closes the file; the original redundant f.close()
    # inside the block was removed.
    with open(csvName, 'a') as f:
        f.write('\n\n\nSum,' + str(sum))
# Chi-square goodness-of-fit driver: bins the sample data into 25
# equal-width intervals and compares observed counts against the counts
# expected from a lognormal distribution (scaled to 1000 samples in
# expected_val).
fName = input('file name: ')
li = read_csv(fName)
avg = float(input('Mu of distribution: '))
sigma = float(input('Sigma of distribution: '))
csvName = create_csv(fName)
largest = get_largest(li)  # gets largest value in dataset
interval = largest / 25  # calculates bin size (25 equal-width bins)
current = 0  # lower edge of the current bin
sum_of_differences = 0  # running chi-square statistic
next_val = current + interval  # upper edge of the current bin
while True:
    expected = expected_val(avg,sigma,current,next_val)  # expected count in the interval
    actual = density_in_interval(li,current,next_val)  # observed count in the interval
    # Per-bin chi-square term: (observed - expected)^2 / expected
    sum_of_differences = sum_of_differences + (pow(actual - expected,2)/expected)
    add_to_table(csvName,next_val,log_normal_in_interval(avg,sigma,current,next_val),expected,actual,(pow(actual - expected,2)/expected))  # adds above values to table
    current = next_val  # advance the lower edge of the bin
    next_val = next_val + interval  # advance the upper edge of the bin
    if current > largest:  # stop once the bins cover all of the data
        break
write_sum_to_end(csvName,sum_of_differences)  # writes sum to the table
# NOTE(review): the 22 degrees of freedom and the 33.9 threshold are
# hard-coded; presumably 33.9 is a chi-square critical value -- verify.
print('Degrees of Freedom: '+str(22))
print('Sum of chi: '+str(sum_of_differences))
print('Reject Hyptothisis? ' + str(sum_of_differences > 33.9))
| 30.3 | 165 | 0.672937 |
cb4bfdc100423a46f09d3e9d807a781eda72d531 | 150 | py | Python | src/dic/ja/raw/create_dic_inappropriate.py | izziiyt/normalizeNumexp | c3e1e92d1dbd115e30667ef9c1ea1df2a6d1792c | [
"BSD-3-Clause"
] | 2 | 2021-02-15T15:58:39.000Z | 2021-04-11T00:29:07.000Z | src/dic/ja/raw/create_dic_inappropriate.py | izziiyt/normalizeNumexp | c3e1e92d1dbd115e30667ef9c1ea1df2a6d1792c | [
"BSD-3-Clause"
] | 5 | 2021-02-17T00:41:20.000Z | 2021-03-12T11:32:07.000Z | resources/dic/ja/raw/create_dic_inappropriate.py | cotogoto/normalizeNumexp | cb9c5cb1f994e898a847439cbfb28ec8687bda5f | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Emit one JSON-like record per line of inappropriate_strings.txt,
# e.g. {"str":"<word>"}, on standard output.
# NOTE(review): Python 2 print-statement syntax; fin is never closed.
fin = open("inappropriate_strings.txt", "r")
for line in fin.readlines() :
    # Strip the trailing newline/whitespace before wrapping the value.
    l = line.rstrip()
    print "{\"str\":\""+l+"\"}"
| 21.428571 | 44 | 0.553333 |
831bfbec486004ec8edba5eb01c99464fe2bfacf | 100 | py | Python | Modules/regularExpression.py | AsherSeiling/fhide | 647c8447e507f096fe971b8b2fec1201024412e1 | [
"MIT"
] | null | null | null | Modules/regularExpression.py | AsherSeiling/fhide | 647c8447e507f096fe971b8b2fec1201024412e1 | [
"MIT"
] | null | null | null | Modules/regularExpression.py | AsherSeiling/fhide | 647c8447e507f096fe971b8b2fec1201024412e1 | [
"MIT"
def regex(ref, char):
    """Return True when ``char`` does not occur in ``ref``, else False.

    Despite the module/function name this is a plain membership test
    (element-by-element equality), not a regular-expression match.
    """
    # Equivalent to the original flag-and-loop, but short-circuits on
    # the first match instead of scanning the whole sequence.
    return not any(char == item for item in ref)
69d83d5db84562552c172666d5b9f68fab9033a4 | 1,217 | py | Python | lintcode/Queue/492. Implement Queue by Linked List.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | 23 | 2019-08-02T12:02:47.000Z | 2022-03-09T15:24:16.000Z | lintcode/Queue/492. Implement Queue by Linked List.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | null | null | null | lintcode/Queue/492. Implement Queue by Linked List.py | yanshengjia/algorithm | 0608d286be9c93d51768d47f21e569c6b0be9cda | [
"MIT"
] | 21 | 2019-12-22T04:47:32.000Z | 2021-09-12T14:29:35.000Z | """
Implement a Queue by linked list. Support the following basic methods:
1.enqueue(item). Put a new item in the queue.
2.dequeue(). Move the first item out of the queue, return it.
Example 1:
Input:
enqueue(1)
enqueue(2)
enqueue(3)
dequeue() // return 1
enqueue(4)
dequeue() // return 2
Solution:
Do the necessary initialization: maintain a linked list inside MyQueue's __init__(). Note that when the list has length 1, the tail must be reset after dequeue (at that point tail == None).
"""
class Node:
    """Singly linked list node holding a value and a next pointer."""

    def __init__(self, _val):
        self.val = _val
        self.next = None


class MyQueue:
    """FIFO queue backed by a singly linked list with a dummy head node."""

    def __init__(self):
        # The dummy head removes the empty-queue special case on enqueue.
        self.head = Node(0)
        self.tail = self.head
        self.size = 0

    def enqueue(self, item):
        """Put a new item at the back of the queue."""
        fresh = Node(item)
        self.tail.next = fresh
        self.tail = fresh
        self.size += 1

    def dequeue(self):
        """Remove and return the front item, or None when the queue is empty."""
        if self.size < 1:
            return None
        front = self.head.next
        self.head.next = front.next
        self.size -= 1
        if not self.size:
            # Queue drained: the tail must point back at the dummy head.
            self.tail = self.head
        return front.val
| 20.627119 | 91 | 0.581758 |
7087165c51f0f893dabe8fded09149561fade255 | 1,876 | py | Python | Python/Create_Print/main.py | gb21oc/Projetos_Basicos | b88106b070039372865bf0ec3d0a00d95a41200b | [
"MIT"
] | 3 | 2022-01-03T11:02:47.000Z | 2022-01-10T23:50:12.000Z | Python/Create_Print/main.py | desecsecurity/Projetos_Basicos | b12ecbc637f6fba991ca2995ae479fa41b84131d | [
"MIT"
] | 1 | 2022-03-20T08:03:34.000Z | 2022-03-20T08:03:34.000Z | Python/Create_Print/main.py | desecsecurity/Projetos_Basicos | b12ecbc637f6fba991ca2995ae479fa41b84131d | [
"MIT"
] | 2 | 2022-01-03T11:02:36.000Z | 2022-01-08T22:25:00.000Z | import os
# Module-level flags shared between verify_keyword and print_f.
newLine = False  # set True once an ``end`` value containing '\n' was seen
params_keyword = False  # True when a recognized keyword argument was handled

def verify_keyword(text, **kwargs):
    """Apply print-style keyword arguments (sep/end/file) to ``text``.

    Returns the transformed text, a confirmation string when ``file`` was
    given (the text is redirected to that file via the shell), or None when
    no keyword arguments were passed.  Raises TypeError for unrecognized
    keywords.
    """
    global newLine, params_keyword
    params_keyword = False
    if kwargs.get("sep") is not None:
        params_keyword = True
        # Replace spaces between the joined arguments with the separator.
        text = text.replace(" ", kwargs["sep"])
    if kwargs.get("end") is not None:
        if "\n" in kwargs.get("end"):
            text = text.strip() + kwargs.get("end")
            # Remember that a newline terminator was requested.
            newLine = True
        else:
            text = text.strip() + kwargs.get("end")
        params_keyword = True
    if kwargs.get("file") is not None:
        # NOTE(review): shell redirection through os.system; the unquoted
        # text and file name are vulnerable to shell injection.
        os.system(f'echo {text} > {kwargs.get("file")}')
        return f"File is created: {kwargs.get('file')}"
    if len(kwargs.keys()) == 0:
        return
    elif not params_keyword:
        raise TypeError("Is an invalid keyword argument for print_f()")
    else:
        return text
def print_f(*args, **kwargs):
    """Re-implementation of print() built on the Windows ``echo`` command.

    Positional arguments are joined with spaces; keyword handling
    (sep/end/file) is delegated to verify_keyword.  Passing "-h" prints a
    help message instead of the arguments.
    """
    return_itens = ""
    # Scan for the help flag before doing any formatting.
    for help in args:
        if help == "-h":
            print_f("Unfortunately I wasn't able to perform all the functionality of print, but in fact it added to my "
                    "knowledge of how some functions work. If you use \n it will not return what you want sorry! :( "
                    "I accept tips, help, ideas, jobs, etc: "
                    "Gmail: gabrielsuporte2021@gmail.com Linkedin: https://www.linkedin.com/in/gabriel-jos%C3%A9/")
            return
    for value in args:
        # Integers are converted; everything else is assumed to be a string.
        if type(value) == int:
            return_itens += " " + str(value)
        else:
            return_itens += " " + value
    text_sep = verify_keyword(return_itens.strip(), **kwargs)
    if text_sep != "" and text_sep is not None:
        if newLine:
            # Plain echo appends the newline requested via ``end``.
            os.system(f'echo {text_sep}')
        else:
            # Windows-only trick: 'echo|set /p=' prints without a newline.
            os.system(f'echo|set /p="{text_sep}"')
    else:
        os.system(f'echo {return_itens.strip()}')

# Module-level demo call: writes "Teste file" into teste.txt on import.
print_f("Teste file", file="teste.txt")
| 33.5 | 120 | 0.572495 |
e860a3098af4358d49bcc77ceff5d6d312e12f24 | 2,585 | py | Python | glance/registry/db/migrate_repo/versions/007_add_owner.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | 2 | 2015-09-30T09:43:37.000Z | 2017-06-26T14:36:21.000Z | glance/registry/db/migrate_repo/versions/007_add_owner.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | null | null | null | glance/registry/db/migrate_repo/versions/007_add_owner.py | rcbops/glance-buildpackage | 13e52178fb25d6062db6c7fad9df122d279320ab | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, BigInteger, Integer, String,
Text, from_migration_import)
def get_images_table(meta):
    """
    Returns the Table object for the images table that corresponds to
    the images table definition of this version.
    """
    # Schema as of migration 007: identical to 006 plus the nullable
    # 'owner' column added at the end.
    images = Table('images', meta,
                   Column('id', Integer(), primary_key=True, nullable=False),
                   Column('name', String(255)),
                   Column('disk_format', String(20)),
                   Column('container_format', String(20)),
                   Column('size', BigInteger()),
                   Column('status', String(30), nullable=False),
                   Column('is_public', Boolean(), nullable=False, default=False,
                          index=True),
                   Column('location', Text()),
                   Column('created_at', DateTime(), nullable=False),
                   Column('updated_at', DateTime()),
                   Column('deleted_at', DateTime()),
                   Column('deleted', Boolean(), nullable=False, default=False,
                          index=True),
                   Column('checksum', String(32)),
                   Column('owner', String(255)),
                   mysql_engine='InnoDB',
                   useexisting=True)
    return images
def get_image_properties_table(meta):
    """
    No changes to the image properties table from 006...
    """
    # Re-use the table definition from the previous migration version.
    (get_image_properties_table,) = from_migration_import(
        '006_key_to_name', ['get_image_properties_table'])

    image_properties = get_image_properties_table(meta)
    return image_properties
def upgrade(migrate_engine):
    """Add the nullable 'owner' column to the existing images table."""
    meta = MetaData()
    meta.bind = migrate_engine

    images = get_images_table(meta)
    owner = Column('owner', String(255))
    owner.create(images)
def downgrade(migrate_engine):
    """Drop the 'owner' column, reverting to the migration-006 schema."""
    meta = MetaData()
    meta.bind = migrate_engine

    images = get_images_table(meta)
    images.columns['owner'].drop()
| 31.144578 | 78 | 0.675435 |
90776829c807fedcfc373547051e28921a6adb87 | 4,228 | py | Python | tools/third_party/h2/test/test_interacting_stacks.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 2,479 | 2018-05-28T14:51:29.000Z | 2022-03-30T14:41:18.000Z | tools/third_party/h2/test/test_interacting_stacks.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 7,642 | 2018-05-28T09:38:03.000Z | 2022-03-31T20:55:48.000Z | tools/third_party/h2/test/test_interacting_stacks.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | [
"BSD-3-Clause"
] | 1,303 | 2018-05-29T14:50:02.000Z | 2022-03-30T17:30:42.000Z | # -*- coding: utf-8 -*-
"""
test_interacting_stacks
~~~~~~~~~~~~~~~~~~~~~~~
These tests run two entities, a client and a server, in parallel threads. These
two entities talk to each other, running what amounts to a number of carefully
controlled simulations of real flows.
This is to ensure that the stack as a whole behaves intelligently in both
client and server cases.
These tests are long, complex, and somewhat brittle, so they aren't in general
recommended for writing the majority of test cases. Their purposes is primarily
to validate that the top-level API of the library behaves as described.
We should also consider writing helper functions to reduce the complexity of
these tests, so that they can be written more easily, as they are remarkably
useful.
"""
import coroutine_tests
import h2.config
import h2.connection
import h2.events
import h2.settings
class TestCommunication(coroutine_tests.CoroutineTestCase):
    """
    Test that two communicating state machines can work together.
    """
    # Server-side HTTP/2 configuration shared by the tests.
    server_config = h2.config.H2Configuration(client_side=False)

    def test_basic_request_response(self):
        """
        A request issued by hyper-h2 can be responded to by hyper-h2.
        """
        request_headers = [
            (b':method', b'GET'),
            (b':path', b'/'),
            (b':authority', b'example.com'),
            (b':scheme', b'https'),
            (b'user-agent', b'test-client/0.1.0'),
        ]
        response_headers = [
            (b':status', b'204'),
            (b'server', b'test-server/0.1.0'),
            (b'content-length', b'0'),
        ]

        # Client coroutine: handshakes, sends one request on stream 1 and
        # validates the 204 response.  Each ``yield`` hands outgoing bytes
        # to the peer coroutine.
        def client():
            c = h2.connection.H2Connection()

            # Do the handshake. First send the preamble.
            c.initiate_connection()
            data = yield c.data_to_send()

            # Next, handle the remote preamble.
            events = c.receive_data(data)
            assert len(events) == 2
            assert isinstance(events[0], h2.events.SettingsAcknowledged)
            assert isinstance(events[1], h2.events.RemoteSettingsChanged)
            changed = events[1].changed_settings
            assert (
                changed[
                    h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
                ].new_value == 100
            )

            # Send a request.
            events = c.send_headers(1, request_headers, end_stream=True)
            assert not events
            data = yield c.data_to_send()

            # Validate the response.
            events = c.receive_data(data)
            assert len(events) == 2
            assert isinstance(events[0], h2.events.ResponseReceived)
            assert events[0].stream_id == 1
            assert events[0].headers == response_headers
            assert isinstance(events[1], h2.events.StreamEnded)
            assert events[1].stream_id == 1

        # Server coroutine: mirrors the handshake and answers the request
        # with an empty 204.
        @self.server
        def server():
            c = h2.connection.H2Connection(config=self.server_config)

            # First, read for the preamble.
            data = yield
            events = c.receive_data(data)
            assert len(events) == 1
            assert isinstance(events[0], h2.events.RemoteSettingsChanged)
            changed = events[0].changed_settings
            assert (
                changed[
                    h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
                ].new_value == 100
            )

            # Send our preamble back.
            c.initiate_connection()
            data = yield c.data_to_send()

            # Listen for the request.
            events = c.receive_data(data)
            assert len(events) == 3
            assert isinstance(events[0], h2.events.SettingsAcknowledged)
            assert isinstance(events[1], h2.events.RequestReceived)
            assert events[1].stream_id == 1
            assert events[1].headers == request_headers
            assert isinstance(events[2], h2.events.StreamEnded)
            assert events[2].stream_id == 1

            # Send our response.
            events = c.send_headers(1, response_headers, end_stream=True)
            assert not events
            yield c.data_to_send()

        # Drive both coroutines against each other until completion.
        self.run_until_complete(client(), server())
| 34.942149 | 79 | 0.599101 |
51798c1fa2680ae00d08a877a2dd2510a5fca7c2 | 2,868 | py | Python | prune/rnn.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | prune/rnn.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | prune/rnn.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from torch.autograd import Variable
def to_var(x):
    """Move ``x`` to the GPU when one is available and wrap it in a Variable."""
    if torch.cuda.is_available():
        x = x.cuda()
    return Variable(x)


def compute_accuracy(rnn, sequence_length, input_size, data_loader):
    """Evaluate ``rnn`` over ``data_loader`` and return accuracy in percent.

    Batches are reshaped to (batch, sequence_length, input_size) before
    being fed to the model; predictions are the argmax over the logits.
    """
    correct = 0
    total = 0
    for batch, targets in data_loader:
        inputs = to_var(batch.view(-1, sequence_length, input_size))
        logits = rnn(inputs)
        _, predicted = torch.max(logits.data, 1)
        total += targets.size(0)
        correct += (predicted.cpu() == targets).sum()
    return 100. * float(correct) / total
class RNN(nn.Module):
    """Vanilla (Elman) RNN with a linear classifier on the last time step."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> inputs are (batch, seq, feature)
        self.rnn = nn.RNN(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Set initial states (num_layers, batch, hidden_size)
        h0 = to_var(torch.zeros(self.num_layers, x.size(0), self.hidden_size))

        # Forward propagate RNN (input, h_0 -> output, h_n)
        out, _ = self.rnn(x, h0)

        # Decode hidden state of last time step
        out = self.fc(out[:, -1, :])
        return out
class LSTM(nn.Module):
    """LSTM with a linear classifier applied to the last time step."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> inputs are (batch, seq, feature)
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Set initial states (num_layers, batch, hidden_size)
        h0 = to_var(torch.zeros(self.num_layers, x.size(0), self.hidden_size))
        c0 = to_var(torch.zeros(self.num_layers, x.size(0), self.hidden_size))

        # Forward propagate RNN (input, (h_0, c_0) -> output, (h_n, c_n))
        out, _ = self.lstm(x, (h0, c0))

        # Decode hidden state of last time step
        out = self.fc(out[:, -1, :])
        return out
class GRU(nn.Module):
    """GRU with a linear classifier applied to the last time step."""

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(GRU, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> inputs are (batch, seq, feature)
        self.gru = nn.GRU(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Set initial states (num_layers, batch, hidden_size)
        h0 = to_var(torch.zeros(self.num_layers, x.size(0), self.hidden_size))

        # Forward propagate RNN (input, h_0 -> output, h_n)
        out, _ = self.gru(x, h0)

        # Decode hidden state of last time step
        out = self.fc(out[:, -1, :])
        return out
| 32.965517 | 82 | 0.63424 |
223f40850da26ad39a281de52b811e6bc2aa4039 | 4,804 | py | Python | plantcv/plantcv/homology/space.py | jgerardhodge/plantcv | 0e20ac55d9ef81e54536f466020eba6e0c70e7fb | [
"MIT"
] | null | null | null | plantcv/plantcv/homology/space.py | jgerardhodge/plantcv | 0e20ac55d9ef81e54536f466020eba6e0c70e7fb | [
"MIT"
] | null | null | null | plantcv/plantcv/homology/space.py | jgerardhodge/plantcv | 0e20ac55d9ef81e54536f466020eba6e0c70e7fb | [
"MIT"
] | null | null | null | # Generate a plm multivariate space for downstream use in homology group assignments
import numpy as np
from plantcv.plantcv import params
def space(cur_plms, include_bound_dist=False, include_centroid_dist=False, include_orient_angles=False):
    """
    Generate a plm multivariate space for downstream use in homology group assignments

    Inputs:
    cur_plms = A pandas array of acute plms representing capturing two adjacent frames in a time series
               or otherwise analogous dataset in order to enable homology assignments
    include_bound_dist = Add bounding box distances to space for Starscape clustering
    include_centroid_dist = Add centroid distances to space for Starscape clustering
    include_orient_angles = Add plm and centroid orientation angles to space for Starscape clustering

    :param cur_plms: pandas.core.frame.DataFrame
    :param include_bound_dist: bool
    :param include_centroid_dist: bool
    :param include_orient_angles: bool
    :return new_plms: pandas.core.frame.DataFrame
    """
    # Work on a deep copy so the caller's frame is never mutated.
    new_plms = cur_plms.copy(deep=True)

    # Corners of the plm bounding box; 'bottom' uses max plm_y, i.e.
    # image-style coordinates with y increasing downward.
    bot_left = [int(min(new_plms.loc[:, ['plm_x']].values)), int(max(new_plms.loc[:, ['plm_y']].values))]
    bot_right = [int(max(new_plms.loc[:, ['plm_x']].values)), int(max(new_plms.loc[:, ['plm_y']].values))]
    top_left = [int(min(new_plms.loc[:, ['plm_x']].values)), int(min(new_plms.loc[:, ['plm_y']].values))]
    top_right = [int(max(new_plms.loc[:, ['plm_x']].values)), int(min(new_plms.loc[:, ['plm_y']].values))]
    centroid = [int(np.mean(new_plms.loc[:, ['plm_x']].values)), int(np.mean(new_plms.loc[:, ['plm_y']].values))]

    # Euclidean distance from each plm to every bounding-box corner.
    bot_left_dist = np.sqrt(np.square(new_plms.loc[:, ['plm_x']].values - bot_left[0]) + np.square(
        new_plms.loc[:, ['plm_y']].values - bot_left[1]))
    bot_right_dist = np.sqrt(np.square(new_plms.loc[:, ['plm_x']].values - bot_right[0]) + np.square(
        new_plms.loc[:, ['plm_y']].values - bot_right[1]))
    top_left_dist = np.sqrt(np.square(new_plms.loc[:, ['plm_x']].values - top_left[0]) + np.square(
        new_plms.loc[:, ['plm_y']].values - top_left[1]))
    top_right_dist = np.sqrt(np.square(new_plms.loc[:, ['plm_x']].values - top_right[0]) + np.square(
        new_plms.loc[:, ['plm_y']].values - top_right[1]))

    # Optionally append the four corner distances as new columns.
    if include_bound_dist is True:
        new_plms.insert(len(new_plms.columns), 'bot_left_dist', bot_left_dist, True)
        new_plms.insert(len(new_plms.columns), 'bot_right_dist', bot_right_dist, True)
        new_plms.insert(len(new_plms.columns), 'top_left_dist', top_left_dist, True)
        new_plms.insert(len(new_plms.columns), 'top_right_dist', top_right_dist, True)

    # Euclidean distance from each plm to the centroid.
    centroid_dist = np.sqrt(np.square(new_plms.loc[:, ['plm_x']].values - centroid[0]) + np.square(
        new_plms.loc[:, ['plm_y']].values - centroid[1]))
    if include_centroid_dist is True:
        new_plms.insert(len(new_plms.columns), 'centroid_dist', centroid_dist, True)

    # Vector from each plm to the midpoint of its SS/TS neighbors.
    run = (
        (new_plms.loc[:, ['SS_x']].values + new_plms.loc[:, ['TS_x']].values) / 2
    ) - new_plms.loc[:, ['plm_x']].values
    rise = (
        (new_plms.loc[:, ['SS_y']].values + new_plms.loc[:, ['TS_y']].values) / 2
    ) - new_plms.loc[:, ['plm_y']].values
    # print('delta_y=',rise,' delta_x=',run)
    # slope=rise/run
    # Vector from the centroid to each plm.
    centroid_run = (new_plms.loc[:, ['plm_x']].values - centroid[0])
    centroid_rise = (new_plms.loc[:, ['plm_y']].values - centroid[1])
    # print('cent_delta_y=',centroid_rise,' cent_delta_x=',centroid_run)
    # centroid_slope=centroid_rise/centroid_run
    orientation = []
    centroid_orientation = []
    # Convert each (rise, run) pair to a signed angle in degrees; run == 0
    # leaves the default angle of 0 (no division is attempted in that case).
    for m in range(0, len(run)):
        # Use the sign of the run to determine if the weight should shift
        # in the 0-180 or 180-360 range for 360 arc conversion
        a = 0
        if run[m] > 0:
            a = 90 - (np.arctan(rise[m] / run[m]) * (180 / np.pi))
        elif run[m] < 0:
            a = -(90 - (np.arctan(rise[m] / run[m]) * (180 / np.pi)))
        orientation.append(float(a))
        # Use the sign of the run to determine if the weight should
        # shift in the 0-180 or 180-360 range for 360 arc conversion
        centroid_a = 0
        if centroid_run[m] > 0:
            centroid_a = 90 - (np.arctan(centroid_rise[m] / centroid_run[m]) * (180 / np.pi))
        elif centroid_run[m] < 0:
            centroid_a = -(90 - (np.arctan(centroid_rise[m] / centroid_run[m]) * (180 / np.pi)))
        centroid_orientation.append(float(centroid_a))

    # Optionally append both angle columns.
    if include_orient_angles is True:
        new_plms.insert(len(new_plms.columns), 'orientation', orientation, True)
        new_plms.insert(len(new_plms.columns), 'centroid_orientation', centroid_orientation, True)

    # In debug mode show a preview of the augmented frame.
    if params.debug is not None:
        print(new_plms.head())

    return new_plms
| 50.041667 | 113 | 0.649875 |
63dac9ab8e503447f2547068d58c5215967d3d2c | 39,974 | py | Python | Lib/site-packages/spyder/plugins/plots/widgets/figurebrowser.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | 1 | 2021-06-20T14:52:40.000Z | 2021-06-20T14:52:40.000Z | spyder/plugins/plots/widgets/figurebrowser.py | Pancakerr/spyder | 34a9878bba97f427fbdd7b4a6d77ac0651327565 | [
"MIT"
] | 1 | 2021-04-30T21:16:55.000Z | 2021-04-30T21:16:55.000Z | spyder/plugins/plots/widgets/figurebrowser.py | Pancakerr/spyder | 34a9878bba97f427fbdd7b4a6d77ac0651327565 | [
"MIT"
] | 1 | 2020-06-14T07:03:50.000Z | 2020-06-14T07:03:50.000Z | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Figure browser widget
This is the main widget used in the Plots plugin
"""
# Standard library imports
import datetime
import os.path as osp
import sys
# Third library imports
from qtconsole.svg import svg_to_clipboard, svg_to_image
from qtpy.compat import getexistingdirectory, getsavefilename
from qtpy.QtCore import QEvent, QPoint, QRect, QSize, Qt, QTimer, Signal, Slot
from qtpy.QtGui import QPainter, QPixmap
from qtpy.QtWidgets import (QApplication, QFrame, QGridLayout, QHBoxLayout,
QScrollArea, QScrollBar, QSplitter, QStyle,
QVBoxLayout, QWidget)
# Local library imports
from spyder.api.translations import get_translation
from spyder.api.widgets.mixins import SpyderWidgetMixin
from spyder.utils.misc import getcwd_or_home
from spyder.utils.palette import QStylePalette
# TODO:
# - [ ] Generalize style updates, handle dark_interface with widget option
# Localization
_ = get_translation('spyder')
def save_figure_tofile(fig, fmt, fname):
    """
    Save fig to fname in the format specified by fmt.

    Parameters
    ----------
    fig: bytes or str
        The raw figure data (SVG figures may arrive as str).
    fmt: str
        Mimetype of the figure: "image/png", "image/jpeg" or "image/svg+xml".
    fname: str
        Destination file path; a ".png" extension with an SVG figure
        triggers rasterization before saving.
    """
    extension = osp.splitext(fname)[1]
    # Saving an SVG figure to a .png file requires rendering it first.
    if fmt == 'image/svg+xml' and extension == '.png':
        svg_to_image(fig).save(fname)
        return
    # SVG payloads may be plain text; normalize to bytes before writing.
    payload = fig
    if fmt == 'image/svg+xml' and isinstance(payload, str):
        payload = payload.encode('utf-8')
    with open(fname, 'wb') as fh:
        fh.write(payload)
def get_unique_figname(dirname, root, ext, start_at_zero=False):
    """
    Append a number to "root" to form a filename that does not already exist
    in "dirname".

    Parameters
    ----------
    dirname: str
        Directory in which the file must not collide.
    root: str
        Base name of the file (without extension).
    ext: str
        File extension, including the leading dot.
    start_at_zero: bool
        If True, the first candidate is "root (0)ext"; otherwise it is
        "rootext" and numbering starts at "(1)" on the first collision.

    Returns
    -------
    str
        The full path (dirname joined with the first non-existing name).
    """
    if start_at_zero:
        i = 0
        figname = '{} ({}){}'.format(root, i, ext)
    else:
        # "(1)" is the first numbered candidate after the plain name.
        i = 0
        figname = '{}{}'.format(root, ext)
    # Previous implementation re-formatted (and re-checked) the identical
    # "(0)" candidate once before advancing; this loop generates each
    # candidate exactly once.
    while osp.exists(osp.join(dirname, figname)):
        i += 1
        figname = '{} ({}){}'.format(root, i, ext)
    return osp.join(dirname, figname)
class FigureBrowser(QWidget, SpyderWidgetMixin):
    """
    Widget to browse the figures that were sent by the kernel to the IPython
    console to be plotted inline.

    Composes a FigureViewer (main canvas) and a ThumbnailScrollBar
    (thumbnail list) side by side in a QSplitter, and re-exposes their
    signals to the plugin layer.
    """
    sig_figure_loaded = Signal()
    """This signal is emitted when a new figure is loaded."""
    sig_figure_menu_requested = Signal(QPoint)
    """
    This signal is emitted to request a context menu on the main figure
    canvas.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """
    sig_redirect_stdio_requested = Signal(bool)
    """
    This signal is emitted to request the main application to redirect
    standard output/error when using Open/Save/Browse dialogs within widgets.
    Parameters
    ----------
    redirect: bool
        Start redirect (True) or stop redirect (False).
    """
    sig_save_dir_changed = Signal(str)
    """
    This signal is emitted to inform that the current folder where images are
    saved has changed.
    Parameters
    ----------
    save_dir: str
        The new path where images are saved.
    """
    sig_thumbnail_menu_requested = Signal(QPoint, object)
    """
    This signal is emitted to request a context menu on the figure thumbnails.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The clicked figure thumbnail.
    """
    sig_zoom_changed = Signal(int)
    """
    This signal is emitted when zoom has changed.
    Parameters
    ----------
    zoom_value: int
        The new value for the zoom property.
    """
    def __init__(self, parent=None, background_color=None):
        super().__init__(parent=parent, class_parent=parent)
        # Bound later through set_shellwidget(); None until a console is
        # associated with this browser.
        self.shellwidget = None
        self.is_visible = True
        self.figviewer = None
        self.setup_in_progress = False
        self.background_color = background_color
        # Populated from the plugin options in setup().
        self.mute_inline_plotting = None
        # Mirrors the viewer's zoom percentage (see _update_zoom_value).
        self.zoom_disp_value = None
        # Setup the figure viewer.
        self.figviewer = FigureViewer(parent=self,
                                      background_color=self.background_color)
        self.figviewer.sig_context_menu_requested.connect(
            self.sig_figure_menu_requested)
        self.figviewer.sig_figure_loaded.connect(self.sig_figure_loaded)
        self.figviewer.sig_zoom_changed.connect(self.sig_zoom_changed)
        self.figviewer.sig_zoom_changed.connect(self._update_zoom_value)
        # Setup the thumbnail scrollbar.
        self.thumbnails_sb = ThumbnailScrollBar(
            self.figviewer,
            parent=self,
            background_color=self.background_color,
        )
        self.thumbnails_sb.sig_context_menu_requested.connect(
            self.sig_thumbnail_menu_requested)
        self.thumbnails_sb.sig_save_dir_changed.connect(
            self.sig_save_dir_changed)
        self.thumbnails_sb.sig_redirect_stdio_requested.connect(
            self.sig_redirect_stdio_requested)
        # Create the layout.
        self.splitter = splitter = QSplitter(parent=self)
        splitter.addWidget(self.figviewer)
        splitter.addWidget(self.thumbnails_sb)
        splitter.setFrameStyle(QScrollArea().frameStyle())
        splitter.setContentsMargins(0, 0, 0, 0)
        layout = QHBoxLayout(self)
        layout.addWidget(splitter)
        self.setLayout(layout)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        self.setContentsMargins(0, 0, 0, 0)
    def _update_zoom_value(self, value):
        """
        Used in testing.
        """
        self.zoom_disp_value = value
    def setup(self, options):
        """Setup the figure browser with provided options.

        Unrecognized option keys are silently ignored.
        """
        self.splitter.setContentsMargins(0, 0, 0, 0)
        for option, value in options.items():
            if option == 'auto_fit_plotting':
                self.change_auto_fit_plotting(value)
            elif option == 'mute_inline_plotting':
                self.mute_inline_plotting = value
            elif option == 'show_plot_outline':
                self.show_fig_outline_in_viewer(value)
            elif option == 'save_dir':
                self.thumbnails_sb.save_dir = value
    def update_splitter_widths(self, base_width):
        """
        Update the widths to provide the scrollbar with a fixed minimumwidth.
        Parameters
        ----------
        base_width: int
            The available splitter width.
        """
        min_sb_width = self.thumbnails_sb._min_scrollbar_width
        if base_width - min_sb_width > 0:
            self.splitter.setSizes([base_width - min_sb_width, min_sb_width])
    def show_fig_outline_in_viewer(self, state):
        """Draw a frame around the figure viewer if state is True."""
        if state is True:
            self.figviewer.figcanvas.setStyleSheet(
                "FigureCanvas{border: 2px solid %s;}" %
                QStylePalette.COLOR_BACKGROUND_4
            )
        else:
            self.figviewer.figcanvas.setStyleSheet(
                "FigureCanvas{border: 0px;}")
    def change_auto_fit_plotting(self, state):
        """Change the auto_fit_plotting option and scale images."""
        self.figviewer.auto_fit_plotting = state
    def set_shellwidget(self, shellwidget):
        """Bind the shellwidget instance to the figure browser"""
        self.shellwidget = shellwidget
        shellwidget.set_figurebrowser(self)
        shellwidget.sig_new_inline_figure.connect(self._handle_new_figure)
    def _handle_new_figure(self, fig, fmt):
        """
        Handle when a new figure is sent to the IPython console by the
        kernel.
        """
        self.thumbnails_sb.add_thumbnail(fig, fmt)
    # ---- Toolbar Handlers
    def zoom_in(self):
        """Zoom the figure in by a single step in the figure viewer."""
        self.figviewer.zoom_in()
    def zoom_out(self):
        """Zoom the figure out by a single step in the figure viewer."""
        self.figviewer.zoom_out()
    def go_previous_thumbnail(self):
        """
        Select the thumbnail previous to the currently selected one in the
        thumbnail scrollbar.
        """
        self.thumbnails_sb.go_previous_thumbnail()
    def go_next_thumbnail(self):
        """
        Select the thumbnail next to the currently selected one in the
        thumbnail scrollbar.
        """
        self.thumbnails_sb.go_next_thumbnail()
    def save_figure(self):
        """Save the currently selected figure in the thumbnail scrollbar."""
        self.thumbnails_sb.save_current_figure_as()
    def save_all_figures(self):
        """Save all the figures in a selected directory."""
        return self.thumbnails_sb.save_all_figures_as()
    def close_figure(self):
        """Close the currently selected figure in the thumbnail scrollbar."""
        self.thumbnails_sb.remove_current_thumbnail()
    def close_all_figures(self):
        """Close all the figures in the thumbnail scrollbar."""
        self.thumbnails_sb.remove_all_thumbnails()
    def copy_figure(self):
        """Copy figure from figviewer to clipboard."""
        # No-op when no figure has been loaded yet.
        if self.figviewer and self.figviewer.figcanvas.fig:
            self.figviewer.figcanvas.copy_figure()
class FigureViewer(QScrollArea, SpyderWidgetMixin):
    """
    A scrollarea that displays a single FigureCanvas with zooming and panning
    capability with CTRL + Mouse_wheel and Left-press mouse button event.
    """
    sig_zoom_changed = Signal(int)
    """
    This signal is emitted when zoom has changed.
    Parameters
    ----------
    zoom_value: int
        The new value for the zoom property.
    """
    sig_context_menu_requested = Signal(QPoint)
    """
    This signal is emitted to request a context menu.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """
    sig_figure_loaded = Signal()
    """This signal is emitted when a new figure is loaded."""
    def __init__(self, parent=None, background_color=None):
        super().__init__(parent, class_parent=parent)
        self.setAlignment(Qt.AlignCenter)
        self.viewport().setObjectName("figviewport")
        self.viewport().setStyleSheet(
            "#figviewport {background-color:" + str(background_color) + "}")
        self.setFrameStyle(0)
        self.background_color = background_color
        # Zoom state: displayed size = original size * _scalestep ** _scalefactor.
        # _scalefactor is kept within roughly [_sfmin, _sfmax] by
        # zoom_in()/zoom_out().
        self._scalefactor = 0
        self._scalestep = 1.2
        self._sfmax = 10
        self._sfmin = -10
        self.setup_figcanvas()
        self.auto_fit_plotting = False
        # An internal flag that tracks when the figure is being panned.
        self._ispanning = False
    @property
    def auto_fit_plotting(self):
        """
        Return whether to automatically fit the plot to the scroll area size.
        """
        return self._auto_fit_plotting
    @auto_fit_plotting.setter
    def auto_fit_plotting(self, value):
        """
        Set whether to automatically fit the plot to the scroll area size.
        """
        self._auto_fit_plotting = value
        # Scrollbars are pointless while auto-fitting (the image always
        # fits), so hide them in that mode.
        if value:
            self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        else:
            self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
            self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.scale_image()
    def setup_figcanvas(self):
        """Setup the FigureCanvas."""
        self.figcanvas = FigureCanvas(parent=self,
                                      background_color=self.background_color)
        self.figcanvas.installEventFilter(self)
        self.figcanvas.customContextMenuRequested.connect(
            self.show_context_menu)
        self.setWidget(self.figcanvas)
    def show_context_menu(self, qpoint):
        """Only emit context menu signal if there is a figure."""
        if self.figcanvas and self.figcanvas.fig is not None:
            # Convert to global
            point = self.figcanvas.mapToGlobal(qpoint)
            self.sig_context_menu_requested.emit(point)
    def load_figure(self, fig, fmt):
        """Set a new figure in the figure canvas."""
        self.figcanvas.load_figure(fig, fmt)
        self.sig_figure_loaded.emit()
        self.scale_image()
        self.figcanvas.repaint()
    def eventFilter(self, widget, event):
        """A filter to control the zooming and panning of the figure canvas."""
        # ---- Zooming
        if event.type() == QEvent.Wheel and not self.auto_fit_plotting:
            modifiers = QApplication.keyboardModifiers()
            if modifiers == Qt.ControlModifier:
                if event.angleDelta().y() > 0:
                    self.zoom_in()
                else:
                    self.zoom_out()
                # Returning True consumes the wheel event so the scrollarea
                # does not also scroll while Ctrl-zooming.
                return True
            else:
                return False
        # ---- Scaling
        elif event.type() == QEvent.Paint and self.auto_fit_plotting:
            self.scale_image()
        # ---- Panning
        # Set ClosedHandCursor:
        elif event.type() == QEvent.MouseButtonPress:
            if event.button() == Qt.LeftButton:
                QApplication.setOverrideCursor(Qt.ClosedHandCursor)
                self._ispanning = True
                # Remember the press position; mouse-move deltas are
                # computed against these coordinates.
                self.xclick = event.globalX()
                self.yclick = event.globalY()
        # Reset Cursor:
        elif event.type() == QEvent.MouseButtonRelease:
            QApplication.restoreOverrideCursor()
            self._ispanning = False
        # Move ScrollBar:
        elif event.type() == QEvent.MouseMove:
            if self._ispanning:
                dx = self.xclick - event.globalX()
                self.xclick = event.globalX()
                dy = self.yclick - event.globalY()
                self.yclick = event.globalY()
                scrollBarH = self.horizontalScrollBar()
                scrollBarH.setValue(scrollBarH.value() + dx)
                scrollBarV = self.verticalScrollBar()
                scrollBarV.setValue(scrollBarV.value() + dy)
        return QWidget.eventFilter(self, widget, event)
    # ---- Figure Scaling Handlers
    def zoom_in(self):
        """Scale the image up by one scale step."""
        # NOTE(review): the comparison is `<=`, so _scalefactor can reach
        # _sfmax + 1 — looks like an off-by-one; confirm intended bound.
        if self._scalefactor <= self._sfmax:
            self._scalefactor += 1
            self.scale_image()
            self._adjust_scrollbar(self._scalestep)
    def zoom_out(self):
        """Scale the image down by one scale step."""
        # NOTE(review): same off-by-one as zoom_in (can reach _sfmin - 1).
        if self._scalefactor >= self._sfmin:
            self._scalefactor -= 1
            self.scale_image()
            self._adjust_scrollbar(1/self._scalestep)
    def scale_image(self):
        """Scale the image size."""
        fwidth = self.figcanvas.fwidth
        fheight = self.figcanvas.fheight
        # Don't auto fit plotting
        if not self.auto_fit_plotting:
            new_width = int(fwidth * self._scalestep ** self._scalefactor)
            new_height = int(fheight * self._scalestep ** self._scalefactor)
        # Auto fit plotting
        # Scale the image to fit the figviewer size while respecting the ratio.
        else:
            size = self.size()
            style = self.style()
            width = (size.width() -
                     style.pixelMetric(QStyle.PM_LayoutLeftMargin) -
                     style.pixelMetric(QStyle.PM_LayoutRightMargin))
            height = (size.height() -
                      style.pixelMetric(QStyle.PM_LayoutTopMargin) -
                      style.pixelMetric(QStyle.PM_LayoutBottomMargin))
            self.figcanvas.setToolTip('')
            try:
                if (fwidth / fheight) > (width / height):
                    new_width = int(width)
                    new_height = int(width / fwidth * fheight)
                else:
                    new_height = int(height)
                    new_width = int(height / fheight * fwidth)
            except ZeroDivisionError:
                # A zero-sized figure means the image data is broken:
                # show a placeholder icon and a tooltip instead.
                icon = self.create_icon('broken_image')
                self.figcanvas._qpix_orig = icon.pixmap(fwidth, fheight)
                self.figcanvas.setToolTip(
                    _('The image is broken, please try to generate it again'))
                new_width = fwidth
                new_height = fheight
        if self.figcanvas.size() != QSize(new_width, new_height):
            self.figcanvas.setFixedSize(new_width, new_height)
            self.sig_zoom_changed.emit(self.get_scaling())
    def get_scaling(self):
        """Get the current scaling of the figure in percent."""
        return round(self.figcanvas.width() / self.figcanvas.fwidth * 100)
    def reset_original_image(self):
        """Reset the image to its original size."""
        self._scalefactor = 0
        self.scale_image()
    def _adjust_scrollbar(self, f):
        """
        Adjust the scrollbar position to take into account the zooming of
        the figure.
        """
        # Adjust horizontal scrollbar :
        hb = self.horizontalScrollBar()
        hb.setValue(int(f * hb.value() + ((f - 1) * hb.pageStep()/2)))
        # Adjust the vertical scrollbar :
        vb = self.verticalScrollBar()
        vb.setValue(int(f * vb.value() + ((f - 1) * vb.pageStep()/2)))
class ThumbnailScrollBar(QFrame):
    """
    A widget that manages the display of the FigureThumbnails that are
    created when a figure is sent to the IPython console by the kernel and
    that controls what is displayed in the FigureViewer.
    """
    _min_scrollbar_width = 100
    # Signals
    sig_redirect_stdio_requested = Signal(bool)
    """
    This signal is emitted to request the main application to redirect
    standard output/error when using Open/Save/Browse dialogs within widgets.
    Parameters
    ----------
    redirect: bool
        Start redirect (True) or stop redirect (False).
    """
    sig_save_dir_changed = Signal(str)
    """
    This signal is emitted to inform that the current folder where images are
    saved has changed.
    Parameters
    ----------
    save_dir: str
        The new path where images are saved.
    """
    sig_context_menu_requested = Signal(QPoint, object)
    """
    This signal is emitted to request a context menu.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """
    def __init__(self, figure_viewer, parent=None, background_color=None):
        super().__init__(parent)
        # Ordered list of FigureThumbnail widgets (oldest first).
        self._thumbnails = []
        self.background_color = background_color
        self.save_dir = getcwd_or_home()
        self.current_thumbnail = None
        self.set_figureviewer(figure_viewer)
        self.setup_gui()
        # Because the range of Qt scrollareas is not updated immediately
        # after a new item is added to it, setting the scrollbar's value
        # to its maximum value after adding a new item will scroll down to
        # the penultimate item instead of the last.
        # So to scroll programmatically to the latest item after it
        # is added to the scrollarea, we need to do it instead in a slot
        # connected to the scrollbar's rangeChanged signal.
        # See spyder-ide/spyder#10914 for more details.
        self._new_thumbnail_added = False
        self.scrollarea.verticalScrollBar().rangeChanged.connect(
            self._scroll_to_newest_item)
    def setup_gui(self):
        """Setup the main layout of the widget."""
        layout = QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(0)
        layout.addWidget(self.setup_scrollarea())
    def setup_scrollarea(self):
        """Setup the scrollarea that will contain the FigureThumbnails."""
        self.view = QWidget()
        self.scene = QGridLayout(self.view)
        self.scene.setContentsMargins(0, 0, 0, 0)
        # The vertical spacing between the thumbnails.
        # Note that we need to set this value explicitly or else the tests
        # are failing on macOS. See spyder-ide/spyder#11576.
        self.scene.setSpacing(5)
        self.scrollarea = QScrollArea()
        self.scrollarea.setWidget(self.view)
        self.scrollarea.setWidgetResizable(True)
        self.scrollarea.setFrameStyle(0)
        self.scrollarea.setViewportMargins(2, 2, 2, 2)
        self.scrollarea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scrollarea.setMinimumWidth(self._min_scrollbar_width)
        # Set the vertical scrollbar explicitly.
        # This is required to avoid a "RuntimeError: no access to protected
        # functions or signals for objects not created from Python" in Linux.
        self.scrollarea.setVerticalScrollBar(QScrollBar())
        # Install an event filter on the scrollbar.
        self.scrollarea.installEventFilter(self)
        self.layout().setContentsMargins(0, 0, 0, 0)
        self.layout().setSpacing(0)
        return self.scrollarea
    def set_figureviewer(self, figure_viewer):
        """Set the namespace for the FigureViewer."""
        self.figure_viewer = figure_viewer
    def eventFilter(self, widget, event):
        """
        An event filter to trigger an update of the thumbnails size so that
        their width fit that of the scrollarea and to remap some key press
        events to mimick navigational behaviour of a Qt widget list.
        """
        if event.type() == QEvent.KeyPress:
            key = event.key()
            if key == Qt.Key_Up:
                self.go_previous_thumbnail()
                return True
            elif key == Qt.Key_Down:
                self.go_next_thumbnail()
                return True
        if event.type() == QEvent.Resize:
            self._update_thumbnail_size()
        return super().eventFilter(widget, event)
    # ---- Save Figure
    def save_all_figures_as(self):
        """Save all the figures to a file."""
        # Stdio redirection is paused while the native dialog is open.
        self.sig_redirect_stdio_requested.emit(False)
        dirname = getexistingdirectory(self, 'Save all figures',
                                       self.save_dir)
        self.sig_redirect_stdio_requested.emit(True)
        if dirname:
            self.sig_save_dir_changed.emit(dirname)
            return self.save_all_figures_todir(dirname)
    def save_all_figures_todir(self, dirname):
        """Save all figure in dirname.

        Returns the list of file names that were written.
        """
        fignames = []
        figname_root = ('Figure ' +
                        datetime.datetime.now().strftime('%Y-%m-%d %H%M%S'))
        for thumbnail in self._thumbnails:
            fig = thumbnail.canvas.fig
            fmt = thumbnail.canvas.fmt
            fext = {'image/png': '.png',
                    'image/jpeg': '.jpg',
                    'image/svg+xml': '.svg'}[fmt]
            figname = get_unique_figname(dirname, figname_root, fext,
                                         start_at_zero=True)
            save_figure_tofile(fig, fmt, figname)
            fignames.append(figname)
        return fignames
    def save_current_figure_as(self):
        """Save the currently selected figure."""
        if self.current_thumbnail is not None:
            self.save_figure_as(self.current_thumbnail.canvas.fig,
                                self.current_thumbnail.canvas.fmt)
    def save_thumbnail_figure_as(self, thumbnail):
        """Save the currently selected figure."""
        self.save_figure_as(thumbnail.canvas.fig, thumbnail.canvas.fmt)
    def save_figure_as(self, fig, fmt):
        """Save the figure to a file."""
        fext, ffilt = {
            'image/png': ('.png', 'PNG (*.png)'),
            'image/jpeg': ('.jpg', 'JPEG (*.jpg;*.jpeg;*.jpe;*.jfif)'),
            'image/svg+xml': ('.svg', 'SVG (*.svg);;PNG (*.png)')}[fmt]
        figname = get_unique_figname(
            self.save_dir,
            'Figure ' + datetime.datetime.now().strftime('%Y-%m-%d %H%M%S'),
            fext)
        self.sig_redirect_stdio_requested.emit(False)
        fname, fext = getsavefilename(
            parent=self.parent(), caption='Save Figure',
            basedir=figname, filters=ffilt,
            selectedfilter='', options=None)
        self.sig_redirect_stdio_requested.emit(True)
        if fname:
            self.sig_save_dir_changed.emit(osp.dirname(fname))
            save_figure_tofile(fig, fmt, fname)
    # ---- Thumbails Handlers
    def _calculate_figure_canvas_width(self):
        """
        Calculate the width the thumbnails need to have to fit the scrollarea.
        """
        extra_padding = 10 if sys.platform == 'darwin' else 0
        figure_canvas_width = (
            self.scrollarea.width() -
            2 * self.lineWidth() -
            self.scrollarea.viewportMargins().left() -
            self.scrollarea.viewportMargins().right() -
            extra_padding -
            self.scrollarea.verticalScrollBar().sizeHint().width()
        )
        # NOTE(review): empirical fudge factor — presumably accounts for
        # the thumbnail frame width; confirm before changing.
        figure_canvas_width = figure_canvas_width - 6
        return figure_canvas_width
    def _setup_thumbnail_size(self, thumbnail):
        """
        Scale the thumbnail's canvas size so that it fits the thumbnail
        scrollbar's width.
        """
        max_canvas_size = self._calculate_figure_canvas_width()
        thumbnail.scale_canvas_size(max_canvas_size)
    def _update_thumbnail_size(self):
        """
        Update the thumbnails size so that their width fit that of
        the scrollarea.
        """
        # NOTE: We hide temporarily the thumbnails to prevent a repaint of
        # each thumbnail as soon as their size is updated in the loop, which
        # causes some flickering of the thumbnail scrollbar resizing animation.
        # Once the size of all the thumbnails has been updated, we show them
        # back so that they are repainted all at once instead of one after the
        # other. This is just a trick to make the resizing animation of the
        # thumbnail scrollbar look smoother.
        self.view.hide()
        for thumbnail in self._thumbnails:
            self._setup_thumbnail_size(thumbnail)
        self.view.show()
    def show_context_menu(self, point, thumbnail):
        """
        Emit global positioned point and thumbnail for context menu request.
        """
        point = thumbnail.canvas.mapToGlobal(point)
        self.sig_context_menu_requested.emit(point, thumbnail)
    def add_thumbnail(self, fig, fmt):
        """
        Add a new thumbnail to that thumbnail scrollbar.
        """
        thumbnail = FigureThumbnail(
            parent=self, background_color=self.background_color)
        thumbnail.canvas.load_figure(fig, fmt)
        thumbnail.sig_canvas_clicked.connect(self.set_current_thumbnail)
        thumbnail.sig_remove_figure_requested.connect(self.remove_thumbnail)
        thumbnail.sig_save_figure_requested.connect(self.save_figure_as)
        thumbnail.sig_context_menu_requested.connect(
            lambda point: self.show_context_menu(point, thumbnail))
        self._thumbnails.append(thumbnail)
        # Flag consumed by _scroll_to_newest_item once the scrollbar's
        # range has been updated by Qt.
        self._new_thumbnail_added = True
        # Keep the stretch row at the bottom so thumbnails stay packed
        # at the top of the grid.
        self.scene.setRowStretch(self.scene.rowCount() - 1, 0)
        self.scene.addWidget(thumbnail, self.scene.rowCount() - 1, 0)
        self.scene.setRowStretch(self.scene.rowCount(), 100)
        self.set_current_thumbnail(thumbnail)
        thumbnail.show()
        self._setup_thumbnail_size(thumbnail)
    def remove_current_thumbnail(self):
        """Remove the currently selected thumbnail."""
        if self.current_thumbnail is not None:
            self.remove_thumbnail(self.current_thumbnail)
    def remove_all_thumbnails(self):
        """Remove all thumbnails."""
        # NOTE(review): unlike remove_thumbnail below, these disconnects
        # are not wrapped in try/except TypeError — confirm the signals
        # are always connected when this runs.
        for thumbnail in self._thumbnails:
            thumbnail.sig_canvas_clicked.disconnect()
            thumbnail.sig_remove_figure_requested.disconnect()
            thumbnail.sig_save_figure_requested.disconnect()
            self.layout().removeWidget(thumbnail)
            thumbnail.setParent(None)
            thumbnail.hide()
            thumbnail.close()
        self._thumbnails = []
        self.current_thumbnail = None
        self.figure_viewer.figcanvas.clear_canvas()
    def remove_thumbnail(self, thumbnail):
        """Remove thumbnail."""
        if thumbnail in self._thumbnails:
            index = self._thumbnails.index(thumbnail)
        # Disconnect signals
        try:
            thumbnail.sig_canvas_clicked.disconnect()
            thumbnail.sig_remove_figure_requested.disconnect()
            thumbnail.sig_save_figure_requested.disconnect()
        except TypeError:
            pass
        # NOTE(review): membership is checked twice (here and above);
        # the second check guards the remove() after the disconnects.
        if thumbnail in self._thumbnails:
            self._thumbnails.remove(thumbnail)
        # Select a new thumbnail if any :
        if thumbnail == self.current_thumbnail:
            if len(self._thumbnails) > 0:
                self.set_current_index(
                    min(index, len(self._thumbnails) - 1)
                )
            else:
                self.figure_viewer.figcanvas.clear_canvas()
                self.current_thumbnail = None
        # Hide and close thumbnails
        self.layout().removeWidget(thumbnail)
        thumbnail.hide()
        thumbnail.close()
        # See: spyder-ide/spyder#12459
        QTimer.singleShot(
            150, lambda: self._remove_thumbnail_parent(thumbnail))
    def _remove_thumbnail_parent(self, thumbnail):
        # Deferred reparenting; the widget may already have been deleted
        # by the time the timer fires.
        try:
            thumbnail.setParent(None)
        except RuntimeError:
            # Omit exception in case the thumbnail has been garbage-collected
            pass
    def set_current_index(self, index):
        """Set the currently selected thumbnail by its index."""
        self.set_current_thumbnail(self._thumbnails[index])
    def get_current_index(self):
        """Return the index of the currently selected thumbnail.

        Returns -1 when no thumbnail is selected.
        """
        try:
            return self._thumbnails.index(self.current_thumbnail)
        except ValueError:
            return -1
    def set_current_thumbnail(self, thumbnail):
        """Set the currently selected thumbnail."""
        self.current_thumbnail = thumbnail
        self.figure_viewer.load_figure(
            thumbnail.canvas.fig, thumbnail.canvas.fmt)
        for thumbnail in self._thumbnails:
            thumbnail.highlight_canvas(thumbnail == self.current_thumbnail)
    def go_previous_thumbnail(self):
        """Select the thumbnail previous to the currently selected one."""
        # Wraps around to the last thumbnail from the first one.
        if self.current_thumbnail is not None:
            index = self._thumbnails.index(self.current_thumbnail) - 1
            index = index if index >= 0 else len(self._thumbnails) - 1
            self.set_current_index(index)
            self.scroll_to_item(index)
    def go_next_thumbnail(self):
        """Select thumbnail next to the currently selected one."""
        # Wraps around to the first thumbnail from the last one.
        if self.current_thumbnail is not None:
            index = self._thumbnails.index(self.current_thumbnail) + 1
            index = 0 if index >= len(self._thumbnails) else index
            self.set_current_index(index)
            self.scroll_to_item(index)
    def scroll_to_item(self, index):
        """Scroll to the selected item of ThumbnailScrollBar."""
        spacing_between_items = self.scene.verticalSpacing()
        height_view = self.scrollarea.viewport().height()
        height_item = self.scene.itemAt(index).sizeHint().height()
        height_view_excluding_item = max(0, height_view - height_item)
        # Sum the heights of every item above the target to find its
        # vertical offset, then center it in the viewport.
        height_of_top_items = spacing_between_items
        for i in range(index):
            item = self.scene.itemAt(i)
            height_of_top_items += item.sizeHint().height()
            height_of_top_items += spacing_between_items
        pos_scroll = height_of_top_items - height_view_excluding_item // 2
        vsb = self.scrollarea.verticalScrollBar()
        vsb.setValue(pos_scroll)
    def _scroll_to_newest_item(self, vsb_min, vsb_max):
        """
        Scroll to the newest item added to the thumbnail scrollbar.
        Note that this method is called each time the rangeChanged signal
        is emitted by the scrollbar.
        """
        if self._new_thumbnail_added:
            self._new_thumbnail_added = False
            self.scrollarea.verticalScrollBar().setValue(vsb_max)
    # ---- ScrollBar Handlers
    def go_up(self):
        """Scroll the scrollbar of the scrollarea up by a single step."""
        vsb = self.scrollarea.verticalScrollBar()
        vsb.setValue(int(vsb.value() - vsb.singleStep()))
    def go_down(self):
        """Scroll the scrollbar of the scrollarea down by a single step."""
        vsb = self.scrollarea.verticalScrollBar()
        vsb.setValue(int(vsb.value() + vsb.singleStep()))
class FigureThumbnail(QWidget):
    """
    A widget that consists of a FigureCanvas, a side toolbar, and a context
    menu that is used to show preview of figures in the ThumbnailScrollBar.
    """
    sig_canvas_clicked = Signal(object)
    """
    This signal is emitted when the figure canvas is clicked.
    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The clicked figure thumbnail.
    """
    sig_remove_figure_requested = Signal(object)
    """
    This signal is emitted to request the removal of a figure thumbnail.
    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The figure thumbnail to remove.
    """
    sig_save_figure_requested = Signal(object, str)
    """
    This signal is emitted to request the saving of a figure thumbnail.
    Parameters
    ----------
    figure_thumbnail: spyder.plugins.plots.widget.figurebrowser.FigureThumbnail
        The figure thumbnail to save.
    format: str
        The image format to use when saving the image. One of "image/png",
        "image/jpeg" and "image/svg+xml".
    """
    sig_context_menu_requested = Signal(QPoint)
    """
    This signal is emitted to request a context menu.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """
    def __init__(self, parent=None, background_color=None):
        super().__init__(parent)
        self.canvas = FigureCanvas(parent=self,
                                   background_color=background_color)
        self.canvas.sig_context_menu_requested.connect(
            self.sig_context_menu_requested)
        # The event filter turns left-clicks on the canvas into
        # sig_canvas_clicked emissions (see eventFilter).
        self.canvas.installEventFilter(self)
        self.setup_gui()
    def setup_gui(self):
        """Setup the main layout of the widget."""
        layout = QGridLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(self.canvas, 0, 0, Qt.AlignCenter)
        layout.setSizeConstraint(layout.SetFixedSize)
    def highlight_canvas(self, highlight):
        """
        Set a colored frame around the FigureCanvas if highlight is True.
        """
        if highlight:
            # Highlighted figure is not clear in dark mode with blue color.
            # See spyder-ide/spyder#10255.
            self.canvas.setStyleSheet(
                "FigureCanvas{border: 2px solid %s;}" %
                QStylePalette.COLOR_ACCENT_3
            )
        else:
            self.canvas.setStyleSheet("FigureCanvas{}")
    def scale_canvas_size(self, max_canvas_size):
        """
        Scale this thumbnail canvas size, while respecting its associated
        figure dimension ratio.
        """
        fwidth = self.canvas.fwidth
        fheight = self.canvas.fheight
        # The longer figure dimension is pinned to max_canvas_size and the
        # other one follows the figure's aspect ratio.
        if fwidth / fheight > 1:
            canvas_width = max_canvas_size
            canvas_height = canvas_width / fwidth * fheight
        else:
            canvas_height = max_canvas_size
            canvas_width = canvas_height / fheight * fwidth
        self.canvas.setFixedSize(int(canvas_width), int(canvas_height))
        self.layout().setColumnMinimumWidth(0, max_canvas_size)
    def eventFilter(self, widget, event):
        """
        A filter that is used to send a signal when the figure canvas is
        clicked.
        """
        if event.type() == QEvent.MouseButtonPress:
            if event.button() == Qt.LeftButton:
                self.sig_canvas_clicked.emit(self)
        return super().eventFilter(widget, event)
class FigureCanvas(QFrame):
    """
    A basic widget on which can be painted a custom png, jpg, or svg image.
    """
    sig_context_menu_requested = Signal(QPoint)
    """
    This signal is emitted to request a context menu.
    Parameters
    ----------
    point: QPoint
        The QPoint in global coordinates where the menu was requested.
    """
    def __init__(self, parent=None, background_color=None):
        super().__init__(parent)
        self.setLineWidth(2)
        self.setMidLineWidth(1)
        self.setObjectName("figcanvas")
        self.setStyleSheet(
            "#figcanvas {background-color:" + str(background_color) + "}")
        # Raw figure data and its mimetype; None until load_figure().
        self.fig = None
        self.fmt = None
        # Original figure dimensions in pixels (default placeholder size).
        self.fwidth, self.fheight = 200, 200
        # When True, paintEvent skips drawing so the figure "blinks"
        # (used as visual feedback after copying to the clipboard).
        self._blink_flag = False
        self.setContextMenuPolicy(Qt.CustomContextMenu)
        self.customContextMenuRequested.connect(
            self.sig_context_menu_requested)
    @Slot()
    def copy_figure(self):
        """Copy figure to clipboard."""
        if self.fmt in ['image/png', 'image/jpeg']:
            qpixmap = QPixmap()
            # NOTE(review): fmt.upper() yields e.g. 'IMAGE/PNG', which is
            # not a Qt image-format name — presumably Qt falls back to
            # auto-detecting the format from the data; confirm.
            qpixmap.loadFromData(self.fig, self.fmt.upper())
            QApplication.clipboard().setImage(qpixmap.toImage())
        elif self.fmt == 'image/svg+xml':
            svg_to_clipboard(self.fig)
        else:
            return
        self.blink_figure()
    def blink_figure(self):
        """Blink figure once."""
        if self.fig:
            # Toggle the flag and repaint; when hidden, schedule a second
            # toggle 40 ms later to show the figure again.
            self._blink_flag = not self._blink_flag
            self.repaint()
            if self._blink_flag:
                # NOTE(review): singleShot is a static method, so the local
                # `timer` instance appears unnecessary — confirm.
                timer = QTimer()
                timer.singleShot(40, self.blink_figure)
    def clear_canvas(self):
        """Clear the figure that was painted on the widget."""
        self.fig = None
        self.fmt = None
        self._qpix_scaled = None
        self.repaint()
    def load_figure(self, fig, fmt):
        """
        Load the figure from a png, jpg, or svg image, convert it in
        a QPixmap, and force a repaint of the widget.
        """
        self.fig = fig
        self.fmt = fmt
        if fmt in ['image/png', 'image/jpeg']:
            self._qpix_orig = QPixmap()
            self._qpix_orig.loadFromData(fig, fmt.upper())
        elif fmt == 'image/svg+xml':
            self._qpix_orig = QPixmap(svg_to_image(fig))
        self._qpix_scaled = self._qpix_orig
        self.fwidth = self._qpix_orig.width()
        self.fheight = self._qpix_orig.height()
    def paintEvent(self, event):
        """Qt method override to paint a custom image on the Widget."""
        super().paintEvent(event)
        # Prepare the rect on which the image is going to be painted.
        fw = self.frameWidth()
        rect = QRect(0 + fw, 0 + fw,
                     self.size().width() - 2 * fw,
                     self.size().height() - 2 * fw)
        if self.fig is None or self._blink_flag:
            return
        # Prepare the scaled qpixmap to paint on the widget.
        # The cached scaled pixmap is regenerated only when the widget
        # width changed since the last paint.
        if (self._qpix_scaled is None or
                self._qpix_scaled.size().width() != rect.width()):
            if self.fmt in ['image/png', 'image/jpeg']:
                self._qpix_scaled = self._qpix_orig.scaledToWidth(
                    rect.width(), mode=Qt.SmoothTransformation)
            elif self.fmt == 'image/svg+xml':
                # SVG is re-rendered at the target size for crisp scaling.
                self._qpix_scaled = QPixmap(svg_to_image(
                    self.fig, rect.size()))
        if self._qpix_scaled is not None:
            # Paint the image on the widget.
            qp = QPainter()
            qp.begin(self)
            qp.drawPixmap(rect, self._qpix_scaled)
            qp.end()
| 35.532444 | 79 | 0.624756 |
f2148a4e37c9f81a47fc0a3816f323138e2da51c | 1,625 | py | Python | src/socketaddress/test_socketaddress.py | zrthxn/lldb_folly_formatter | f8ed80c0b1bcd7573a7699e48c2f75d869ea7820 | [
"MIT"
] | 1 | 2021-11-13T22:11:29.000Z | 2021-11-13T22:11:29.000Z | src/socketaddress/test_socketaddress.py | zrthxn/lldb_folly_formatter | f8ed80c0b1bcd7573a7699e48c2f75d869ea7820 | [
"MIT"
] | 7 | 2021-10-30T07:53:38.000Z | 2021-11-29T18:22:36.000Z | src/socketaddress/test_socketaddress.py | zrthxn/lldb_folly_formatter | f8ed80c0b1bcd7573a7699e48c2f75d869ea7820 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from common import LLDBTestCase
from os import path
class FollySocketAddress(LLDBTestCase):
    """End-to-end test for the folly::SocketAddress LLDB pretty-printer.

    Runs a scripted LLDB session against the ``socketaddress`` test binary
    and compares the formatter's output for two IPv4 and two IPv6
    addresses against the expected transcript.

    (Fix: the original final line had dataset-extraction junk fused onto
    it, which made the file a syntax error.)
    """

    def get_formatter_path(self):
        """Return the absolute path of the formatter script under test."""
        curr_dir = path.abspath(path.dirname(path.realpath(__file__)))
        return path.join(curr_dir, "socketaddress_formatter.py")

    def test_socketaddress(self):
        """Check formatted output at four stops of the same breakpoint."""
        # At every stop the local ``sock`` is printed twice: once via
        # ``frame var`` and once via the expression evaluator (``p``).
        script = """
        b socketaddress.cpp:8
        r
        script lldb.debugger.HandleCommand("frame var sock")
        script lldb.debugger.HandleCommand("p sock")
        c
        script lldb.debugger.HandleCommand("frame var sock")
        script lldb.debugger.HandleCommand("p sock")
        c
        script lldb.debugger.HandleCommand("frame var sock")
        script lldb.debugger.HandleCommand("p sock")
        c
        script lldb.debugger.HandleCommand("frame var sock")
        script lldb.debugger.HandleCommand("p sock")
        c
        """
        expected = """
(folly::SocketAddress) sock = (folly::SocketAddress) 10.0.0.10:3000
(folly::SocketAddress) $0 = (folly::SocketAddress) 10.0.0.10:3000
10.0.0.10:3000
(folly::SocketAddress) sock = (folly::SocketAddress) 192.168.0.1:4000
(folly::SocketAddress) $1 = (folly::SocketAddress) 192.168.0.1:4000
192.168.0.1:4000
(folly::SocketAddress) sock = (folly::SocketAddress) [0000:0000:0000:0000:0000:0000:0000:0001]:5000
(folly::SocketAddress) $2 = (folly::SocketAddress) [0000:0000:0000:0000:0000:0000:0000:0001]:5000
[::1]:5000
(folly::SocketAddress) sock = (folly::SocketAddress) [2620:0000:1cfe:face:b00c:0000:0000:0003]:6000
(folly::SocketAddress) $3 = (folly::SocketAddress) [2620:0000:1cfe:face:b00c:0000:0000:0003]:6000
[2620:0:1cfe:face:b00c::3]:6000
""".strip()
        self.assertEqual(expected, self.run_lldb("socketaddress", script))
ec877f50b19e974f75c4b5f5775358d4c6f0d0ba | 128 | py | Python | autokeras/__init__.py | woj-i/autokeras | aa14b230e8eb04480aebc2be0820bd9b934e2253 | [
"MIT"
] | null | null | null | autokeras/__init__.py | woj-i/autokeras | aa14b230e8eb04480aebc2be0820bd9b934e2253 | [
"MIT"
] | null | null | null | autokeras/__init__.py | woj-i/autokeras | aa14b230e8eb04480aebc2be0820bd9b934e2253 | [
"MIT"
] | null | null | null | from autokeras.image_supervised import ImageClassifier, ImageRegressor
from autokeras.text.text_supervised import TextClassifier | 64 | 70 | 0.90625 |
542ffb125d218186d75ae41c9b8138967ef7a245 | 1,660 | py | Python | src/parsing/world_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | 2 | 2016-10-06T10:09:10.000Z | 2016-10-07T14:16:19.000Z | src/parsing/world_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | 17 | 2016-12-01T10:10:23.000Z | 2017-01-12T16:41:51.000Z | src/parsing/world_parser.py | mkRPGDev/mkRPG | 154e5d264dc1cc5fba78980da430e9d7ca0ccc22 | [
"Beerware"
] | null | null | null | """
This file parses the current world state. It should contain the necessary items
to load a game, that is to say the current map, the entities...
"""
# -*- coding : utf-8 -*-
from collections import OrderedDict
import parsing.parsing_utils as parsing_utils
def parse_world(world_file):
    """Parse the XML file describing the world state.

    :param world_file: path to the world XML file
    :return: OrderedDict with the world name, the global parameters, the
        entities/objects/maps present (each a list of attribute dicts) and
        the world identifier.
    :raises AttributeError: if the mandatory ``Ident`` element is missing
        (``ident.text`` is accessed unconditionally, as before).
    """
    root = parsing_utils.try_open_and_parse(world_file)

    # Getting the params and the entities on the map.
    params = root.findall('Params')
    entities = root.find('Entities')
    objects = root.find('Objects')
    maps = root.find('Maps')
    ident = root.find("Ident")

    params_dict = OrderedDict()
    entities_list = []
    objects_list = []
    maps_list = []

    # Fix: the original guard was ``if params is not []``, an identity test
    # against a fresh list that is always true.  Iterating the (possibly
    # empty) findall() result directly is equivalent and correct.
    for _params in params:
        # Fix: Element.getchildren() was deprecated and removed in
        # Python 3.9; iterating the element is the documented replacement.
        for param in _params:
            if param.attrib.get('id'):
                params_dict.update({param.tag: {"id": param.attrib["id"]}})
            else:
                params_dict.update({param.tag: param.text})

    if entities is not None:
        entities_list = [entity.attrib for entity in entities]
    if objects is not None:
        objects_list = [object_.attrib for object_ in objects]
    if maps is not None:
        maps_list = [map_.attrib for map_ in maps]

    return OrderedDict({
        'name': 'world',
        'Entities': entities_list,
        'params': params_dict,
        'Objects': objects_list,
        'Maps': maps_list,
        'ident': parsing_utils.format_type(ident.text)
    })
| 32.54902 | 79 | 0.614458 |
d0cfd1a34496633ff381b05026481a33c2e3c34c | 3,988 | py | Python | mpfmc/effects/color_dmd.py | arthurlutz/mpf-mc | 6f4aac5d48eb07dd5aa6612985a7567af8577ce2 | [
"MIT"
] | null | null | null | mpfmc/effects/color_dmd.py | arthurlutz/mpf-mc | 6f4aac5d48eb07dd5aa6612985a7567af8577ce2 | [
"MIT"
] | null | null | null | mpfmc/effects/color_dmd.py | arthurlutz/mpf-mc | 6f4aac5d48eb07dd5aa6612985a7567af8577ce2 | [
"MIT"
] | null | null | null | from typing import List
from kivy.properties import NumericProperty, ListProperty, BooleanProperty
from mpfmc.uix.effects import EffectsChain
from mpfmc.effects.dot_filter import DotFilterEffect
from mpfmc.effects.gain import GainEffect
from mpfmc.effects.reduce import ReduceEffect
MYPY = False
if MYPY: # pragma: no cover
from kivy.uix.effectwidget import EffectBase
class ColorDmdEffect(EffectsChain):

    """GLSL effect to render an on-screen DMD to look like individual round pixels.

    The effect is a chain of up to three shader passes built in
    :meth:`get_effects`: an optional dot filter, an optional color-shade
    reduction, and a gain stage.  All tunables are exposed as kivy
    properties below.
    """

    dot_filter = BooleanProperty(True)
    '''
    Sets whether or not to apply the dot filter effect.

    dot_filter is a :class:`~kivy.properties.BooleanProperty` and
    defaults to True.
    '''

    width = NumericProperty(128)
    '''
    Sets the width in pixels of the display widget on the screen. Typically
    this is larger than the dots_x parameter.

    width is a :class:`~kivy.properties.NumericProperty` and
    defaults to 128.
    '''

    height = NumericProperty(32)
    '''
    Sets the height in pixels of the display widget on the screen. Typically
    this is larger than the dots_y parameter.

    height is a :class:`~kivy.properties.NumericProperty` and
    defaults to 32.
    '''

    dots_x = NumericProperty(128)
    '''
    Sets the number of dots in the horizontal (x) direction.

    dots_x is a :class:`~kivy.properties.NumericProperty` and
    defaults to 128.
    '''

    dots_y = NumericProperty(32)
    '''
    Sets the number of dots in the vertical (y) direction.

    dots_y is a :class:`~kivy.properties.NumericProperty` and
    defaults to 32.
    '''

    blur = NumericProperty(0.1)
    '''
    Sets the size of the blur around each pixel where it's blended with
    the background. The value is relative to the pixel. (e.g. a value of
    0.1 will add a 10% blur around the edge of each pixel.)

    blur is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.1.
    '''

    dot_size = NumericProperty(0.5)
    '''
    Sets the size of the circle for the dot/pixel relative to the size of the
    square bounding box of the dot. A size of 1.0 means that the diameter
    of the dot will be the same as its bounding box, in other words a
    size of 1.0 means that the dot will touch each other.

    dot_size is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.5.
    '''

    background_color = ListProperty([0.1, 0.1, 0.1, 1.0])
    '''
    A four-item tuple or list that represents the color of the space between the
    dots, in RGBA format with individual values as floats between 0.0 - 1.0. If
    you want the background to be transparent, set it to (0.0, 0.0, 0.0, 0.0).

    background_color is a :class:`~kivy.properties.ListProperty` and
    defaults to (0.1, 0.1, 0.1, 1.0) (which is 10% gray with 100% alpha/fully
    opaque).
    '''

    gain = NumericProperty(1.0)
    '''
    Sets the gain factor which is multiplied by each color channel.

    gain is a :class:`~kivy.properties.NumericProperty` and
    defaults to 1.0 (which has no effect).
    '''

    shades = NumericProperty(16)
    '''
    Sets the number of shades per channel to reduce it to.

    shades is a :class:`~kivy.properties.NumericProperty` and
    defaults to 16.
    '''

    def __init__(self, *args, **kwargs) -> None:
        # No extra state: the effect is fully described by the kivy
        # properties above; this override simply forwards to EffectsChain.
        super().__init__(*args, **kwargs)

    def get_effects(self) -> List["EffectBase"]:
        """Build and return the ordered list of shader effects in the chain.

        Order: dot filter (if ``dot_filter``), shade reduction (if
        ``shades > 0``), then gain (always applied).
        """
        effects = []

        if bool(self.dot_filter):
            effects.append(DotFilterEffect(
                width=self.width,
                height=self.height,
                dots_x=self.dots_x,
                dots_y=self.dots_y,
                blur=self.blur,
                dot_size=self.dot_size,
                background_color=self.background_color
            ))

        if self.shades > 0:
            effects.append(ReduceEffect(shades=self.shades))

        effects.append(GainEffect(gain=self.gain))

        return effects
# Module-level names exported for effect registration (NOTE(review):
# presumably looked up by the MPF-MC effects loader -- the consumer is not
# visible in this file; confirm against the loader code).
effect_cls = ColorDmdEffect
name = 'color_dmd'
| 29.540741 | 86 | 0.660983 |
f4bb92243c98f39226ce2c94d5b541daf798f153 | 9,584 | py | Python | underworld/function/branching.py | jmansour/underworld2 | 6da9f52268d366ae08533374afebb6f278c04576 | [
"CC-BY-4.0"
] | 1 | 2022-01-28T20:00:12.000Z | 2022-01-28T20:00:12.000Z | underworld/function/branching.py | jmansour/underworld2 | 6da9f52268d366ae08533374afebb6f278c04576 | [
"CC-BY-4.0"
] | null | null | null | underworld/function/branching.py | jmansour/underworld2 | 6da9f52268d366ae08533374afebb6f278c04576 | [
"CC-BY-4.0"
] | null | null | null | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the Underworld geophysics modelling application. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
The branching module provides functions which provide branching behaviour.
Typically, these functions will select other user provided functions when
certain conditions are met (with the condition also described by a function!).
"""
import libUnderworld.libUnderworldPy.Function as _cfn
from ._function import Function as _Function
class map(_Function):
    """
    This function performs a map to other functions. The user provides a python
    dictionary which maps unsigned integers keys to underworld functions. The
    user must also provide a key function. At evaluation time, the key function
    is evaluated first, with the outcome determining which function should
    finally be evaluated to return a value.

    For a set of value functions :math:`\\{f_{v_0},f_{v_1},\\ldots,f_{v_n}\\}`,
    corresponding keys :math:`\\{k_0,k_1,\\ldots,k_n\\}`, and key function
    :math:`f_{k}`, we have:

    .. math::
        f(\\mathbf{r})=
        \\begin{cases}
        f_{v_0}(\\mathbf{r}), & \\text{if } f_{k}(\\mathbf{r}) = k_0\\\\
        f_{v_1}(\\mathbf{r}), & \\text{if } f_{k}(\\mathbf{r}) = k_1\\\\
        ... \\\\
        f_{v_n}(\\mathbf{r}), & \\text{if } f_{k}(\\mathbf{r}) = k_n\\\\
        f_{d}  (\\mathbf{r}), & \\text{otherwise}
        \\end{cases}

    As stated, the keys must be unsigned integers. The key function need not
    return an unsigned integer, but whatever value it returns **will** be cast
    to an unsigned integer so caution is advised.

    The default function is optional, but if none is provided, and the key
    function evaluates to a value which is not within the user provide set of
    keys, an exception will be thrown.

    Parameters
    ----------
    fn_key: underworld.function.Function (or convertible)
        Function which returns integer key values. This function will be evaluated
        first to determine which function from the mapping is to be used.
    mapping: dict(Function)
        Python dictionary providing a mapping from unsigned integer 'key' values to
        underworld 'value' functions. Note that the provided 'value' functions must
        return values of type 'double'.
    fn_default: underworld.function.Function (or convertible) (optional)
        Default function to be utilised when the key (returned by fn_key function)
        does not correspond to any key value in the mapping dictionary.

    The following example sets different function behaviour inside and outside
    of a unit sphere. The unit sphere is represented by particles which
    record a swarm variable to determine if they are or not inside the sphere.

    Example
    -------
    Setup mesh, swarm, swarmvariable & populate

    >>> import underworld as uw
    >>> import underworld.function as fn
    >>> import numpy as np
    >>> mesh = uw.mesh.FeMesh_Cartesian(elementRes=(8,8),minCoord=(-1.0, -1.0), maxCoord=(1.0, 1.0))
    >>> swarm = uw.swarm.Swarm(mesh)
    >>> svar = swarm.add_variable("int",1)
    >>> swarm.populate_using_layout(uw.swarm.layouts.PerCellSpaceFillerLayout(swarm,20))

    For all particles in unit circle, set svar to 1

    >>> svar.data[:] = 0
    >>> for index, position in enumerate(swarm.particleCoordinates.data):
    ...     if position[0]**2 + position[1]**2 < 1.:
    ...         svar.data[index] = 1

    Create a function which reports the value '1.' inside the sphere, and
    '0.' otherwise. Note that while we have only used constant value functions
    here, you can use any object of the class Function.

    >>> fn_map = fn.branching.map(fn_key=svar, mapping={0: 0., 1:1.})
    >>> np.allclose(np.pi, uw.utils.Integral(fn_map,mesh).evaluate(),rtol=2e-2)
    True

    Alternatively, we could utilise the default function to achieve the same
    result.

    >>> fn_map = fn.branching.map(fn_key=svar, mapping={1: 1.}, fn_default=0.)
    >>> np.allclose(np.pi, uw.utils.Integral(fn_map,mesh).evaluate(),rtol=2e-2)
    True

    """
    def __init__(self, fn_key=None, mapping=None, fn_default=None, *args, **kwargs):

        # Validate user arguments with early, explicit errors.
        if not mapping:
            raise ValueError("You must specify a mapping via the 'mapping' parameter.")
        if not isinstance(mapping, dict):
            raise TypeError("'mapping' object passed in must be of python type 'dict'")

        if not fn_key:
            raise ValueError("You must specify a key function via the 'fn_key' parameter.")
        # Coerce plain values (constants, variables, ...) into Function objects.
        fn_key = _Function.convert(fn_key)

        self.fn_default = _Function.convert(fn_default)
        # NOTE(review): ``== None`` (rather than ``is None``) is deliberately
        # left untouched here -- Function objects may overload comparison
        # operators, so "fixing" this could change behaviour.  Confirm before
        # altering.
        if self.fn_default == None:
            fn_defaultCself = None
        else:
            fn_defaultCself = self.fn_default._fncself
        # create instance (C-level Map object from the underworld bindings)
        self._fncself = _cfn.Map( fn_key._fncself, fn_defaultCself )

        self._fn_key  = fn_key
        self._mapping = mapping

        # build parent
        super(map,self).__init__(argument_fns=[fn_key,self.fn_default],**kwargs)

        self._map = {}

        for key, value in mapping.items():
            # Keys are cast to unsigned ints on the C side, so only
            # non-negative python ints are accepted here.
            if not isinstance(key, int) or key < 0:
                raise ValueError("Key '{}' not valid. Mapping keys must be unsigned integers.".format(key))
            funcVal = _Function.convert(value)
            if funcVal == None:
                raise ValueError("'None' is not valid for mapped functions.")

            # Track the mapped functions' underlying data so dependencies
            # stay registered on this function.
            self._underlyingDataItems.update(funcVal._underlyingDataItems) # update dictionary

            # insert mapping and keep handles in py dict (prevents the python
            # wrappers from being garbage collected while the C object lives)
            self._map[key] = funcVal
            self._fncself.insert( key, funcVal._fncself )
class conditional(_Function):
    """
    This function provides  'if/elif' type conditional behaviour.

    The user provides a list of tuples, with each tuple being of the
    form (fn_condition, fn_resultant). Effectively, each tuple provides a clause
    within the if/elif statement.

    When evaluated, the function traverses the clauses, stopping at the first
    fn_condition which returns 'true'. It then executes the corresponding
    fn_resultant and returns the results.

    If none of the provided clauses return a 'True' result, an exception is
    raised.

    For a set of condition functions { fc_0, fc_1, ... ,fc_n }, and
    corresponding resultant functions { fr_0, fr_1, ... ,fr_n },
    we have for a provided input f_in:

    .. code-block:: python

        if   fc_0(f_in) :
            return fr_0(f_in)
        elif fc_1(f_in) :
            return fr_1(f_in)
        ...
        elif fc_n(f_in) :
            return fr_n(f_in)
        else :
            raise RuntimeError("Reached end of conditional statement. At least one
                                of the clause conditions must evaluate to 'True'." );

    Parameters
    ----------
    clauses: list
        list of tuples, with each tuple being of the form (fn_condition, fn_resultant).

    Example
    -------
    The following example uses functions to represent a unit circle. Here a
    conditional function report back the value '1.' inside the sphere (as per
    the first condition), and '0.' otherwise.

    >>> import underworld as uw
    >>> import underworld.function as fn
    >>> import numpy as np
    >>> mesh = uw.mesh.FeMesh_Cartesian(elementRes=(16,16),minCoord=(-1.0, -1.0), maxCoord=(1.0, 1.0))
    >>> circleFn = fn.coord()[0]**2 + fn.coord()[1]**2
    >>> fn_conditional = fn.branching.conditional( [ (circleFn < 1., 1. ), \
                                                     (       True, 0. ) ] )
    >>> np.allclose(np.pi, uw.utils.Integral(fn_conditional,mesh).evaluate(),rtol=1e-2)
    True
    """
    def __init__(self, clauses, *args, **kwargs):
        # error check mapping
        if not isinstance(clauses, (list,tuple)):
            raise TypeError("'clauses' object passed in must be of python type 'list' or 'tuple'")

        self._clauses = []
        # Collect all condition/resultant functions so the parent class can
        # register them as arguments of this function.
        funcSet = set()
        for clause in clauses:
            if not isinstance(clause, (list,tuple)):
                raise TypeError("Clauses within the clause list must be of python type 'list' or 'tuple'")
            if len(clause) != 2:
                raise ValueError("Clauses tuples must be of length 2.")
            # Coerce plain values into Function objects.
            conditionFn = _Function.convert(clause[0])
            funcSet.add(conditionFn)
            resultantFn = _Function.convert(clause[1])
            funcSet.add(resultantFn)
            self._clauses.append( (conditionFn,resultantFn) )

        # build parent (C-level Conditional object from the underworld bindings)
        self._fncself = _cfn.Conditional()

        super(conditional,self).__init__(argument_fns=funcSet,**kwargs)

        # insert clause into c object now.  Order matters: clauses are
        # evaluated first-to-last, as in an if/elif chain.
        for clause in self._clauses:
            self._fncself.insert( clause[0]._fncself, clause[1]._fncself )
| 41.851528 | 107 | 0.59735 |
89c721e5a2a15034420b3d5b5d957aa8452d1630 | 9,451 | py | Python | models/lib/gpn.py | Maxi-0902/DRAN | c3dbfcbc018446544150dc4e151442d6a9fcd4d9 | [
"MIT"
] | 71 | 2020-07-13T20:18:23.000Z | 2022-03-26T05:02:37.000Z | models/lib/gpn.py | Maxi-0902/DRAN | c3dbfcbc018446544150dc4e151442d6a9fcd4d9 | [
"MIT"
] | 12 | 2020-09-18T07:12:37.000Z | 2022-03-10T18:13:12.000Z | models/lib/gpn.py | Maxi-0902/DRAN | c3dbfcbc018446544150dc4e151442d6a9fcd4d9 | [
"MIT"
] | 9 | 2020-10-08T03:12:11.000Z | 2022-03-11T01:45:07.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
"""
Sub-graph proposal network
"""
class gpn_layer(nn.Module):
    """Sub-graph proposal network (sGPN).

    Scores candidate sub-graphs extracted from a full scene graph and
    returns the node features, the projected read-out features and (during
    training) the sGPN loss for the selected sub-graph(s).
    """

    def __init__(self, GCN_dim=1024, hid_dim=512, test_LSTM=False, use_nms=True, iou_thres=0.75, max_subgraphs=1, use_sGPN_score=True):
        # GCN_dim: dimensionality of the node/edge features.
        # hid_dim: hidden size of the scoring MLP and read-out projection.
        super(gpn_layer, self).__init__()
        self.GCN_dim = GCN_dim
        self.test_LSTM = test_LSTM  # True only for the all-subgraph test path in forward()
        self.use_nms = use_nms
        self.iou_thres = iou_thres  # node-IoU threshold used by subgraph_nms
        self.max_subgraphs = max_subgraphs  # how many subgraphs are kept after NMS
        self.use_sGPN_score = use_sGPN_score  # True, use sGPN network and sGPN loss; False, use gt sub-graphs (Sup. model for SCT)
        if self.use_sGPN_score:
            # MLP mapping the pooled (max || mean) read-out to a scalar score.
            self.gpn_fc = nn.Sequential(nn.Linear(self.GCN_dim * 2, hid_dim),
                                        nn.ReLU(inplace=True),
                                        nn.Dropout(0.5),
                                        nn.Linear(hid_dim, 1),
                                        )
            nn.init.constant_(self.gpn_fc[0].bias, 0)
            nn.init.constant_(self.gpn_fc[3].bias, 0)
            self.gpn_loss = nn.BCELoss()
            self.sigmoid = nn.Sigmoid()

        # Projects the pooled sub-graph read-out (2*GCN_dim) through a
        # bottleneck back to 2*GCN_dim; its output is used as fc_feats.
        self.read_out_proj = nn.Sequential(nn.Linear(self.GCN_dim*2, hid_dim),
                                           nn.Linear(hid_dim, self.GCN_dim*2))
        nn.init.constant_(self.read_out_proj[0].bias, 0)
        nn.init.constant_(self.read_out_proj[1].bias, 0)

    def forward(self, b, N, K, L, gpn_obj_ind, gpn_pred_ind, gpn_nrel_ind, gpn_pool_mtx, att_feats, x_pred, fc_feats, att_masks):
        """
        Input full graph, output sub-graph scores, sub-graph node features, and projected sub-graph read-out features
        extract sub-graph features --> pooling --> MLP --> sGPN score for each sub-graph, and index the sub-graphs with highest scores

        b, N, K, L: batch size, nodes per sub-graph, edges per sub-graph,
        feature dimension (inferred from the view()/indexing below --
        confirm against the caller).  gpn_nrel_ind is accepted but unused
        in this method.
        """
        # index subgraph node and edge features
        pos_obj_ind, neg_obj_ind, gpn_att, gpn_pred = self.extract_subgraph_feats(b, N, K, L, att_feats, gpn_obj_ind, x_pred, gpn_pred_ind)
        # gb counts all candidate sub-graphs: positives first, then negatives.
        gb = gpn_att.size(0)

        if self.use_sGPN_score:
            # max pooling and mean pooling
            read_out = self.graph_pooling(N, gpn_att, gpn_pool_mtx, att_masks)
            # MLP to get subgraph score
            subgraph_score = self.gpn_fc(read_out)  # pos, neg
            subgraph_score = self.sigmoid(subgraph_score)  # pos, neg
            # Targets: first half of the batch are positive sub-graphs (1),
            # second half negative (0).
            gpn_target = torch.cat((subgraph_score.new(int(gb/2), 1).fill_(1), subgraph_score.new(int(gb/2), 1).fill_(0)), dim=0)
            gpn_loss = self.gpn_loss(subgraph_score, gpn_target)
        else:
            # max pooling and mean pooling
            read_out = self.graph_pooling(N, gpn_att, gpn_pool_mtx, att_masks)
            # Without the scoring network every sub-graph gets score 1, no loss.
            subgraph_score = read_out.new(gb, 1).fill_(1)
            gpn_loss = None

        if not self.test_LSTM:  # train or validation, select a positive subgraph for each sentence
            # Keep only the positive half of the scores and pick, per batch
            # item, the positive sub-graph with the highest score.
            gpn_score = subgraph_score.squeeze().view(2, b, gpn_obj_ind.size(-2))
            gpn_score = gpn_score[0]
            gpn_ind = gpn_score.argmax(-1)

            all_subgraph_obj_ind = pos_obj_ind
            all_subgraph_att_masks = att_masks[:, 0]
            read_out = read_out.view(2, b, gpn_obj_ind.size(-2), read_out.size(-1))
            all_read_out = read_out[0]

            # Gather node features / masks / read-out of the chosen sub-graph.
            batch_ind = torch.arange(b).type_as(gpn_obj_ind)
            subgraph_obj_ind = all_subgraph_obj_ind[batch_ind, gpn_ind, :].view(-1)
            att_feats = att_feats[torch.arange(b).view(b, 1).expand(b, N).contiguous().view(-1).type_as(gpn_obj_ind), subgraph_obj_ind, :].view(b, N, L)
            att_masks = all_subgraph_att_masks[batch_ind, gpn_ind, :]

            # detach(): the read-out projection is not trained through the
            # selected sub-graph's pooled features here.
            sub_read_out = all_read_out[batch_ind, gpn_ind, :].detach()
            fc_feats = self.read_out_proj(sub_read_out)

            return gpn_loss, subgraph_score, att_feats, fc_feats, att_masks

        if self.test_LSTM:  # test, use all subgraphs
            # The test path expects the 5-sentence-per-image batch layout.
            assert b == 5
            gpn_score = subgraph_score.squeeze().view(2, b, gpn_obj_ind.size(-2))
            gpn_score = torch.transpose(gpn_score, 0, 1)[0].contiguous().view(-1)
            sen_batch = gpn_score.size(0)

            # All candidate sub-graphs of the first image in the batch.
            all_subgraph_obj_ind = gpn_obj_ind[0].contiguous().view(-1, N)
            att_feats = att_feats[0][all_subgraph_obj_ind.view(-1), :].view(sen_batch, N, L)
            s_att_masks = att_masks[0].contiguous().view(-1, N)

            read_out = read_out.view(2, b, gpn_obj_ind.size(-2), read_out.size(-1))
            all_read_out = torch.transpose(read_out, 0, 1)[0].contiguous().view(-1, read_out.size(-1))
            fc_feats = self.read_out_proj(all_read_out)

            # Default: keep every sub-graph (identity index).
            keep_ind = torch.arange(gpn_score.size(0)).type_as(gpn_score)
            if self.use_nms:
                # nms to keep the subgraphs we need
                keep_ind = self.subgraph_nms(gpn_score, all_subgraph_obj_ind, att_masks)
            gpn_score = gpn_score[keep_ind]
            att_feats = att_feats[keep_ind]
            fc_feats = fc_feats[keep_ind]
            s_att_masks = s_att_masks[keep_ind]

            return gpn_loss, gpn_score, att_feats, fc_feats, s_att_masks, keep_ind

    def subgraph_nms(self, gpn_score, all_subgraph_obj_ind, att_masks):
        '''
        Apply NMS over sub-graphs.
        Input subgraph score and subgraph object index.
        Output the indices of subgraphs which will be kept. Not that the output still use the original score order.
        '''
        sort_ind = np.argsort(gpn_score.cpu().numpy())[::-1]  # Note: use sorted score (descending order) to do nms
        masks = att_masks[0].contiguous().view(-1, all_subgraph_obj_ind.size(-1)).cpu().numpy()[sort_ind, :]
        obj_ind = all_subgraph_obj_ind.cpu().numpy()[sort_ind, :]
        # Sanity check: the valid-node masks must agree with the object
        # indices (index 36 appears to mark padded node slots -- the asserts
        # enforce that non-36 entries and mask nonzeros coincide).
        assert ((obj_ind != 36).nonzero()[0] != masks.nonzero()[0]).nonzero()[0].shape[0] == 0
        assert ((obj_ind != 36).nonzero()[1] != masks.nonzero()[1]).nonzero()[0].shape[0] == 0

        # Greedy NMS in descending-score order: a sub-graph suppresses all
        # lower-scored sub-graphs whose node IoU exceeds the threshold.
        sorted_keep = np.ones(sort_ind.shape[0])
        for i in range(sort_ind.shape[0]):
            if sorted_keep[i] == 0:  # this subgraph has been abandoned
                continue
            else:
                this_obj_ind = np.unique(obj_ind[i][masks[i].nonzero()[0]])
                for j in range(sort_ind.shape[0])[i+1:]:
                    other_obj_ind = np.unique(obj_ind[j][masks[j].nonzero()[0]])
                    this_iou = self.cal_node_iou(this_obj_ind, other_obj_ind)
                    if this_iou > self.iou_thres:
                        sorted_keep[j] = 0

        # map back to original score order, keeping at most max_subgraphs.
        keep_sort_ind = sort_ind[sorted_keep == 1]
        orig_keep = np.zeros(sort_ind.shape[0])
        orig_keep[keep_sort_ind[:self.max_subgraphs]] = 1
        orig_keep_ind = torch.from_numpy(orig_keep.nonzero()[0]).type_as(all_subgraph_obj_ind)
        return orig_keep_ind

    def cal_node_iou(self, this_obj_ind, other_obj_ind):
        """
        Input node indices of 2 subgraphs.
        Output the node iou of these 2 subgraphs.

        NOTE(review): if *both* inputs are empty the union is empty and the
        division below raises ZeroDivisionError -- confirm this cannot occur
        upstream.
        """
        if this_obj_ind.shape[0] == 0 or other_obj_ind.shape[0] == 0:  # no noun matched
            # arange(len(this)) renumbers the indices; with an empty input
            # this is a no-op.
            this_obj_ind = np.arange(this_obj_ind.shape[0])
        this = set(list(this_obj_ind))
        other = set(list(other_obj_ind))
        iou = len(set.intersection(this, other)) / float(len(set.union(this, other)))
        return iou

    def extract_subgraph_feats(self, b, N, K, L, att_feats, gpn_obj_ind, x_pred, gpn_pred_ind):
        """
        Extract the node and edge features from full scene graph by using the sub-graph indices.
        """
        # index subgraph object and predicate features.
        # gpn_*_ind dimension 1 separates positive (0) and negative (1)
        # sub-graph candidates.
        pos_obj_ind = gpn_obj_ind[:, 0, :, :]
        neg_obj_ind = gpn_obj_ind[:, 1, :, :]
        obj_batch_ind = torch.arange(b).view(b, 1).expand(b, N*gpn_obj_ind.size(-2)).contiguous().view(-1).type_as(gpn_obj_ind)
        pos_gpn_att = att_feats[obj_batch_ind, pos_obj_ind.contiguous().view(-1)]
        neg_gpn_att = att_feats[obj_batch_ind, neg_obj_ind.contiguous().view(-1)]

        pos_pred_ind = gpn_pred_ind[:, 0, :, :].contiguous().view(-1)
        neg_pred_ind = gpn_pred_ind[:, 1, :, :].contiguous().view(-1)
        pred_batch_ind = torch.arange(b).view(b, 1).expand(b, K*gpn_pred_ind.size(-2)).contiguous().view(-1).type_as(gpn_pred_ind)
        pos_gpn_pred = x_pred[pred_batch_ind, pos_pred_ind]
        neg_gpn_pred = x_pred[pred_batch_ind, neg_pred_ind]

        # Stack positives first, then negatives, along the batch dimension.
        gpn_att = torch.cat((pos_gpn_att.view(-1, N, L), neg_gpn_att.view(-1, N, L)), dim=0)  # pos, neg
        gpn_pred = torch.cat((pos_gpn_pred.view(-1, K, L), neg_gpn_pred.view(-1, K, L)), dim=0)  # pos, neg
        return pos_obj_ind, neg_obj_ind, gpn_att, gpn_pred

    def graph_pooling(self, N, gpn_att, gpn_pool_mtx, att_masks):
        """
        Pooling features over nodes of input sub-graphs.
        """
        # batch-wise max pooling and mean pooling, by diagonal matrix:
        # gpn_pool_mtx zeroes out features of padded nodes before pooling.
        each_pool_mtx = torch.transpose(gpn_pool_mtx, 0, 1).contiguous().view(-1, N, N)
        clean_feats = torch.bmm(each_pool_mtx, gpn_att)
        max_feat = torch.max(clean_feats, dim=1)[0]
        # Mean over the *valid* nodes only (mask sum, not N).
        mean_feat = torch.sum(clean_feats, dim=1) / torch.transpose(att_masks, 0, 1).sum(-1).view(-1, 1)
        read_out = torch.cat((max_feat, mean_feat), dim=-1)
        return read_out
c693ccabb744830494a86fc5e8794b9a96d86cb3 | 2,000 | py | Python | 2020/day03.py | mbcollins2/aoc | b94380fd5e92b4fe9f4af654e7762174c1c6ac91 | [
"MIT"
] | null | null | null | 2020/day03.py | mbcollins2/aoc | b94380fd5e92b4fe9f4af654e7762174c1c6ac91 | [
"MIT"
] | 3 | 2021-12-15T19:12:38.000Z | 2021-12-15T19:14:42.000Z | 2020/day03.py | mbcollins2/aoc | b94380fd5e92b4fe9f4af654e7762174c1c6ac91 | [
"MIT"
] | null | null | null | import numpy as np
from termcolor import colored # type: ignore // ignore warning since this works
class solve_day(object):
    """Advent of Code 2020, day 3: count trees hit while tobogganing."""

    # Puzzle input is read once, at class-definition time.  Each element is
    # one row of the map: '.' is open ground, '#' is a tree; the pattern
    # repeats infinitely to the right.
    with open('inputs/day03.txt', 'r') as f:
        data = f.readlines()

    def part1(self):
        """Count trees hit on the slope right 3, down 1."""
        trees = 0
        for row_no, raw in enumerate(self.data):
            if row_no == 0:
                continue  # the start position is never checked
            row = raw.strip()
            # The map repeats horizontally, so wrap the column with a modulo
            # instead of physically extending the row.
            if row[(3 * row_no) % len(row)] == '#':
                trees += 1
        return trees

    def part2(self):
        """Multiply the tree counts over the five required slopes."""
        counts = []
        for right, down in [[1, 1], [3, 1], [5, 1], [7, 1], [1, 2]]:
            trees = 0
            # Visit rows down, 2*down, ...; the k-th visited row is checked
            # at column right*k (wrapped, since the map repeats).
            for step, row_no in enumerate(range(down, len(self.data), down), start=1):
                row = self.data[row_no].strip()
                if row[(right * step) % len(row)] == '#':
                    trees += 1
            counts.append(trees)
        return np.prod(counts)
# Script entry point: solve and print both puzzle parts.
# (Fix: the final print line had dataset-extraction junk fused onto it,
# which made the file a syntax error.)
if __name__ == '__main__':
    s = solve_day()
    print(f'Part 1: {s.part1()}')
    print(f'Part 2: {s.part2()}')
dad0233c2d48483135e62cad74d5e60758efe78c | 249 | py | Python | torchsupport/data/chem/qm9.py | bobelly/torchsupport | 5aa0a04f20c193ec99310f5d6a3375d2e95e740d | [
"MIT"
] | 18 | 2019-05-02T16:32:15.000Z | 2021-04-16T09:33:54.000Z | torchsupport/data/chem/qm9.py | bobelly/torchsupport | 5aa0a04f20c193ec99310f5d6a3375d2e95e740d | [
"MIT"
] | 5 | 2019-10-14T13:46:49.000Z | 2021-06-08T11:48:34.000Z | torchsupport/data/chem/qm9.py | bobelly/torchsupport | 5aa0a04f20c193ec99310f5d6a3375d2e95e740d | [
"MIT"
] | 12 | 2019-05-12T21:34:24.000Z | 2021-07-15T14:14:16.000Z | import torch
import torch.nn
import torch.nn.functional as func
from torch.utils.data import Dataset
import torchsupport.modules.nodegraph as ng
import torchsupport.data.graph as gdata
class QM9(Dataset):
    """QM9 molecular dataset (stub -- data loading not yet implemented).

    (Fix: the original ``__init__`` body contained only a comment
    (``# self.data =``), which is a syntax error; a minimal valid body is
    provided so the module at least imports.)
    """

    def __init__(self):
        super().__init__()
        # TODO: load the QM9 data here, e.g. self.data = ...
| 19.153846 | 43 | 0.779116 |
d1409481563b79785cbde1a04ee04275bb15c642 | 12,032 | py | Python | asl_data.py | aryan51k/american_sign_language_reconizer | f4f74e6942f268912239945a67d18a6651c1df59 | [
"MIT"
] | null | null | null | asl_data.py | aryan51k/american_sign_language_reconizer | f4f74e6942f268912239945a67d18a6651c1df59 | [
"MIT"
] | null | null | null | asl_data.py | aryan51k/american_sign_language_reconizer | f4f74e6942f268912239945a67d18a6651c1df59 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
class AslDb(object):
""" American Sign Language database drawn from the RWTH-BOSTON-104 frame positional data
This class has been designed to provide a convenient interface for individual word data for students in the Udacity AI Nanodegree Program.
For example, to instantiate and load train/test files using a feature_method
definition named features, the following snippet may be used:
asl = AslDb()
asl.build_training(tr_file, features)
asl.build_test(tst_file, features)
Reference for the original ASL data:
http://www-i6.informatik.rwth-aachen.de/~dreuw/database-rwth-boston-104.php
The sentences provided in the data have been segmented into isolated words for this database
"""
def __init__(self,
hands_fn=os.path.join('data', 'hands_condensed.csv'),
speakers_fn=os.path.join('data', 'speaker.csv'),
):
""" loads ASL database from csv files with hand position information by frame, and speaker information
:param hands_fn: str
filename of hand position csv data with expected format:
video,frame,left-x,left-y,right-x,right-y,nose-x,nose-y
:param speakers_fn:
filename of video speaker csv mapping with expected format:
video,speaker
Instance variables:
df: pandas dataframe
snippit example:
left-x left-y right-x right-y nose-x nose-y speaker
video frame
98 0 149 181 170 175 161 62 woman-1
1 149 181 170 175 161 62 woman-1
2 149 181 170 175 161 62 woman-1
"""
self.df = pd.read_csv(hands_fn).merge(pd.read_csv(speakers_fn),on='video')
self.df.set_index(['video','frame'], inplace=True)
def build_training(self, feature_list, csvfilename =os.path.join('data', 'train_words.csv')):
""" wrapper creates sequence data objects for training words suitable for hmmlearn library
:param feature_list: list of str label names
:param csvfilename: str
:return: WordsData object
dictionary of lists of feature list sequence lists for each word
{'FRANK': [[[87, 225], [87, 225], ...], [[88, 219], [88, 219], ...]]]}
"""
return WordsData(self, csvfilename, feature_list)
def build_test(self, feature_method, csvfile=os.path.join('data', 'test_words.csv')):
""" wrapper creates sequence data objects for individual test word items suitable for hmmlearn library
:param feature_method: Feature function
:param csvfile: str
:return: SinglesData object
dictionary of lists of feature list sequence lists for each indexed
{3: [[[87, 225], [87, 225], ...]]]}
"""
return SinglesData(self, csvfile, feature_method)
class WordsData(object):
    """ class provides loading and getters for ASL data suitable for use with hmmlearn library
    """

    def __init__(self, asl: AslDb, csvfile: str, feature_list: list):
        """ loads training data sequences suitable for use with hmmlearn library based on feature_method chosen

        :param asl: ASLdata object
        :param csvfile: str
            filename of csv file containing word training start and end frame data with expected format:
                video,speaker,word,startframe,endframe
        :param feature_list: list of str feature labels
        """
        self._data = self._load_data(asl, csvfile, feature_list)
        self._hmm_data = create_hmmlearn_data(self._data)
        self.num_items = len(self._data)
        self.words = list(self._data.keys())

    def _load_data(self, asl, fn, feature_list):
        """ Consolidates sequenced feature data into a dictionary of words

        :param asl: ASLdata object
        :param fn: str
            filename of csv file containing word training data
        :param feature_list: list of str
        :return: dict mapping each word to a list of its sequences, where a
            sequence is a list of per-frame feature samples
        """
        tr_df = pd.read_csv(fn)
        # Fix: the original used the name ``dict`` for this accumulator,
        # shadowing the builtin.
        sequences = {}
        for i in range(len(tr_df)):
            word = tr_df.loc[i, 'word']
            video = tr_df.loc[i, 'video']
            new_sequence = []  # list of per-frame samples for one sequence
            for frame in range(tr_df.loc[i, 'startframe'], tr_df.loc[i, 'endframe'] + 1):
                vid_frame = video, frame
                sample = [asl.df.loc[vid_frame][f] for f in feature_list]
                # sample is only empty when feature_list itself is empty;
                # skip adding empty samples.
                if len(sample) > 0:
                    new_sequence.append(sample)
            sequences.setdefault(word, []).append(new_sequence)  # list of sequences
        return sequences

    def get_all_sequences(self):
        """ getter for entire db of words as series of sequences of feature lists for each frame

        :return: dict
            dictionary of lists of feature list sequence lists for each word
                {'FRANK': [[[87, 225], [87, 225], ...], [[88, 219], [88, 219], ...]]],
                ...}
        """
        return self._data

    def get_all_Xlengths(self):
        """ getter for entire db of words as (X, lengths) tuple for use with hmmlearn library

        :return: dict
            dictionary of (X, lengths) tuple, where X is a numpy array of feature lists and lengths is
                a list of lengths of sequences within X
                {'FRANK': (array([[ 87, 225],[ 87, 225], ...  [ 87, 225,  62, 127], [ 87, 225,  65, 128]]), [14, 18]),
                ...}
        """
        return self._hmm_data

    def get_word_sequences(self, word: str):
        """ getter for single word series of sequences of feature lists for each frame

        :param word: str
        :return: list
            lists of feature list sequence lists for given word
                [[[87, 225], [87, 225], ...], [[88, 219], [88, 219], ...]]]
        """
        return self._data[word]

    def get_word_Xlengths(self, word: str):
        """ getter for single word (X, lengths) tuple for use with hmmlearn library

        :param word:
        :return: (list, list)
            (X, lengths) tuple, where X is a numpy array of feature lists and lengths is
                a list of lengths of sequences within X
                (array([[ 87, 225],[ 87, 225], ...  [ 87, 225,  62, 127], [ 87, 225,  65, 128]]), [14, 18])
        """
        return self._hmm_data[word]
class SinglesData(object):
    """Loads ASL test data as individually-indexed word items for hmmlearn.

    Unlike WordsData, each csv row becomes its own integer-keyed item so that
    recognition results can be mapped back to sentence positions.
    """

    def __init__(self, asl: "AslDb", csvfile: str, feature_list):
        """Load test data sequences for use with the hmmlearn library.

        :param asl: AslDb object
        :param csvfile: str
            filename of csv file containing word test start and end frame data
            with expected format: video,speaker,word,startframe,endframe
        :param feature_list: list of str feature labels
        """
        self.df = pd.read_csv(csvfile)
        self.wordlist = list(self.df['word'])
        self.sentences_index = self._load_sentence_word_indices()
        self._data = self._load_data(asl, feature_list)
        self._hmm_data = create_hmmlearn_data(self._data)
        self.num_items = len(self._data)
        self.num_sentences = len(self.sentences_index)

    def _load_data(self, asl, feature_list):
        """Consolidate sequenced feature data into a dict keyed by row index.

        The keys are the row indices of self.df, which also serve as indices
        into self.wordlist for the answer words.

        :param asl: AslDb object
        :param feature_list: list of str feature labels
        :return: dict mapping row index -> list containing one sequence
        """
        # Renamed from `dict` to stop shadowing the builtin.
        item_sequences = {}
        for i in range(len(self.df)):
            video = self.df.loc[i, 'video']
            new_sequence = []  # list of feature samples for one sequence
            for frame in range(self.df.loc[i, 'startframe'], self.df.loc[i, 'endframe'] + 1):
                vid_frame = video, frame
                sample = [asl.df.loc[vid_frame][f] for f in feature_list]
                # NOTE(review): this guard only skips an empty feature_list;
                # a missing (video, frame) index raises KeyError instead.
                if len(sample) > 0:
                    new_sequence.append(sample)
            # Each row index i is unique, so this creates a one-sequence list.
            item_sequences.setdefault(i, []).append(new_sequence)
        return item_sequences

    def _load_sentence_word_indices(self):
        """Create a dict of video sentence numbers -> ordered word indices.

        :return: dict
            {v0: [i0, i1, i2], v1: [i0, i1, i2], ...} where v# is video number
            and i# is an index into wordlist, ordered by sentence structure
        """
        working_df = self.df.copy()
        working_df['idx'] = working_df.index
        working_df.sort_values(by='startframe', inplace=True)
        # Keyword arguments: positional pivot() arguments were removed in
        # pandas 2.0; keywords are accepted by all supported versions.
        p = working_df.pivot(index='video', columns='startframe', values='idx')
        # -1 marks "no word starting at this frame" and is filtered out below.
        p.fillna(-1, inplace=True)
        p = p.transpose()
        sentence_indices = {}
        for v in p:
            sentence_indices[v] = [int(i) for i in p[v] if i >= 0]
        return sentence_indices

    def get_all_sequences(self):
        """Getter for the entire db of items as sequences of feature lists.

        :return: dict
            dictionary of lists of feature list sequence lists for each item
            {3: [[[87, 225], [87, 225], ...], [[88, 219], [88, 219], ...]]],
            ...}
        """
        return self._data

    def get_all_Xlengths(self):
        """Getter for the entire db as (X, lengths) tuples for hmmlearn.

        :return: dict
            dictionary of (X, lengths) tuple, where X is a numpy array of
            feature lists and lengths is a list of sequence lengths within X;
            lengths should always contain exactly one item here
            {3: (array([[ 87, 225],[ 87, 225], ...]), [14]),
            ...}
        """
        return self._hmm_data

    def get_item_sequences(self, item: int):
        """Getter for a single item's sequences of feature lists.

        :param item: int index into the loaded data
        :return: list
            lists of feature list sequence lists for the given item
            [[[87, 225], [87, 225], ...]]]
        """
        return self._data[item]

    def get_item_Xlengths(self, item: int):
        """Getter for a single item's (X, lengths) tuple for hmmlearn.

        :param item: int index into the loaded data
        :return: (list, list)
            (X, lengths) tuple, where X is a numpy array of feature lists and
            lengths is a list of sequence lengths within X; lengths should
            always contain exactly one item
        """
        return self._hmm_data[item]
def combine_sequences(sequences):
    """Concatenate sequences into one flat list and record their lengths.

    :param sequences: list of sequences, each a list of per-frame samples
    :return: (list, list)
        tuple of (concatenated samples, per-sequence frame counts)
    """
    flattened = []
    lengths = []
    for seq in sequences:
        flattened.extend(seq)
        lengths.append(len(seq))
    return flattened, lengths
def create_hmmlearn_data(dict):
    """Convert {key: list-of-sequences} into {key: (X, lengths)} tuples.

    X is a numpy array of all frames concatenated across the key's sequences
    and lengths lists the frame count of each sequence, which is the format
    expected by the hmmlearn fit/score APIs.
    """
    # NOTE: the parameter keeps its original name `dict` (shadowing the
    # builtin) so keyword callers remain compatible.
    result = {}
    for key, sequences in dict.items():
        concatenated, lengths = combine_sequences(sequences)
        result[key] = np.array(concatenated), lengths
    return result
# Quick smoke test: load the ASL database and print the feature row for
# video 98, frame 1 (requires the ASL data files to be present on disk).
if __name__ == '__main__':
    asl= AslDb()
    print(asl.df.loc[98, 1])
| 40.375839 | 142 | 0.595163 |
cb563114d7e68c6ef5b94347f8ed827d4043b890 | 12,359 | py | Python | bnas/optimize.py | redperiabras/FILIPINEU | 833fd8d44c9d4de94d3433ca810a4a17831343ff | [
"MIT"
] | null | null | null | bnas/optimize.py | redperiabras/FILIPINEU | 833fd8d44c9d4de94d3433ca810a4a17831343ff | [
"MIT"
] | 1 | 2017-10-30T12:02:44.000Z | 2017-10-30T12:02:44.000Z | bnas/optimize.py | redperiabras/FILIPINEU | 833fd8d44c9d4de94d3433ca810a4a17831343ff | [
"MIT"
] | 1 | 2020-11-16T07:56:58.000Z | 2020-11-16T07:56:58.000Z | """Optimization algorithms.
This module provides different algorithms for optimization through (typically)
stochastic mini-batch gradient descent.
"""
import random
from collections import OrderedDict
import pickle
import numpy as np
import theano
from theano import tensor as T
from .fun import function
def iterate_batches(data, batch_size, len_f=None, n_batches=16):
    """Iterate over minibatches of `data` in a shuffled order.

    Arguments
    ---------
    data : list of data items (typically example/label pairs)
        Data set to iterate over
    batch_size : int
        Minibatch size. If len(data) is at or above this, each batch is
        guaranteed to be of exactly size batch_size.
    len_f : function
        If this is defined, it should be a function mapping items from the
        data array to some ordered type. n_batches will be randomly
        sampled at a time, the examples inside sorted and cut up into batches.
        This is useful for variable-length sequences, so that batches aren't
        too sparse.
    n_batches : int
        Number of batches to sample at a time when len_f is given.
    """
    n = len(data)
    order = list(range(n))
    random.shuffle(order)
    if n <= batch_size:
        # Not enough data for even one full batch: yield everything at once.
        yield data
        return
    if len_f is None:
        # Drop the trailing remainder so every batch is exactly batch_size.
        limit = n - n % batch_size
        for start in range(0, limit, batch_size):
            yield [data[idx] for idx in order[start:start + batch_size]]
        return
    # Length-bucketed mode: draw a chunk of several batches, sort it by
    # len_f, then slice it into uniform-size batches.
    chunk = batch_size * n_batches
    for start in range(0, n, chunk):
        if start > n - batch_size:
            return
        bucket = sorted((data[idx] for idx in order[start:start + chunk]),
                        key=len_f)
        usable = len(bucket) - len(bucket) % batch_size
        for j in range(0, usable, batch_size):
            yield bucket[j:j + batch_size]
class Optimizer:
    """Base class for optimizers.

    Subclasses compile a Theano update function in their constructor and
    implement :meth:`step`, which performs one parameter update per call.

    Arguments
    ---------
    params : iterable over (name, parameter) tuples
        Parameters to optimize, in simple cases it's enough to pass
        Model.parameters().
    loss : Theano symbolic expression
        Loss function to minimize.
    inputs : list of Theano variables
        Inputs to the model to optimize
    outputs : list of Theano variables
        Outputs of the model to optimize, the loss should depend on
        `inputs + outputs`.
    grad_max_norm : float
        Clip gradients at this value.
    """

    def __init__(self, params, loss, inputs=None, outputs=None,
                 grad_max_norm=None):
        # Fixed: the original signature used mutable defaults
        # (inputs=[], outputs=[]); None sentinels are backward compatible.
        if inputs is None:
            inputs = []
        if outputs is None:
            outputs = []
        # Flatten each name tuple into a single underscore-joined key;
        # insertion order is preserved and reused for gradients below.
        self.params = OrderedDict(('_'.join(name), p) for name, p in params)
        self.loss = loss
        self.inputs = inputs
        self.outputs = outputs
        self.grad_max_norm = grad_max_norm
        self._grad_fun = None          # compiled lazily by grad_fun()
        self.optimizer_params = []     # shared state registered via shared()
        self.n_updates = 0             # number of step() calls so far
        # Symbolic gradient of the loss w.r.t. every parameter.
        self.raw_grad = OrderedDict((name, T.grad(loss, param))
                                    for name, param in self.params.items())
        if grad_max_norm is None:
            self.grad = self.raw_grad
        else:
            # Global-norm clipping: rescale all gradients by a common
            # factor whenever their joint L2 norm exceeds grad_max_norm.
            norm = T.sqrt(T.stack(
                [T.sqr(g).sum() for g in self.raw_grad.values()],
                axis=0).sum())
            a = T.switch(norm < self.grad_max_norm, 1, self.grad_max_norm/norm)
            self.grad = OrderedDict((name, a*g)
                                    for name, g in self.raw_grad.items())

    def shared(self, *args, **kwargs):
        """Create a Theano shared variable and register it for save()/load()."""
        s = theano.shared(*args, **kwargs)
        self.optimizer_params.append(s)
        return s

    def get_extra_params(self):
        """Return non-shared optimizer state to serialize alongside shareds."""
        return {'n_updates': self.n_updates}

    def set_extra_params(self, x):
        """Restore state previously produced by :meth:`get_extra_params`."""
        assert set(x.keys()) == {'n_updates'}
        for name, v in x.items():
            setattr(self, name, v)

    def save(self, f):
        """Pickle optimizer state (extra params, then shared values) to f."""
        pickle.dump(self.get_extra_params(), f, -1)
        pickle.dump([s.get_value(borrow=True) for s in self.optimizer_params],
                    f, -1)

    def load(self, f):
        """Inverse of :meth:`save`.

        Raises
        ------
        ValueError
            If the number of stored values does not match the number of
            registered optimizer parameters.
        """
        self.set_extra_params(pickle.load(f))
        values = pickle.load(f)
        if len(values) != len(self.optimizer_params):
            raise ValueError(
                'Expected %d optimizer parameters, %d in file' % (
                    len(self.optimizer_params), len(values)))
        for s, v in zip(self.optimizer_params, values):
            s.set_value(v)

    def grad_fun(self):
        """Return (compiling on first use) a function for the raw gradients."""
        if self._grad_fun is None:
            self._grad_fun = function(
                self.inputs + self.outputs,
                list(self.raw_grad.values()),
                name='grad_fun')
        return self._grad_fun

    def step(self, *args):
        """Take one optimization step.

        Different subclasses use different rules, but in general this function
        computes gradients and updates `self.params`.

        Parameters
        ----------
        *args : list of numpy.ndarray
            The arguments passed to the step function correspond to the
            arguments in `self.inputs + self.outputs`, i.e. the concatenated
            arrays give an `inputs` and `outputs` as in the constructor.

        Returns
        -------
        loss : float
            Value of the loss function before the current parameter update.
        """
        raise NotImplementedError

    def create_shadows(self, name):
        """Create a set of shared variables of the same shapes as parameters.

        This is useful for creating variables for e.g. gradient squares that
        are used by some of the optimization algorithms.

        Parameters
        ----------
        name : str
            Name prefix to attach to names in the returned object.

        Returns
        -------
        shadows : OrderedDict
            Map of names to shared variables, all initialized to zero.
        """
        shadows = OrderedDict()
        for param_name, param in self.params.items():
            s = self.shared(np.zeros_like(param.get_value()),
                            name=name+'_'+param_name)
            shadows[name+'_'+param_name] = s
        return shadows
class SGD(Optimizer):
    """Plain Stochastic Gradient Descent (SGD) optimizer.

    To adjust the learning rate, simply modify `self.learning_rate`.

    Parameters
    ----------
    learning_rate : float, optional
        Initial learning rate. Default is 0.01.
    """

    def __init__(self, *args, learning_rate=0.01, **kwargs):
        super().__init__(*args, **kwargs)
        self.learning_rate = learning_rate
        # Symbolic input so the rate can be changed between step() calls.
        lr_var = T.scalar('learning_rate')
        # One update per parameter: p <- p - lr * grad(p).
        sgd_updates = []
        for p, g in zip(self.params.values(), self.grad.values()):
            sgd_updates.append((p, p - (lr_var * g)))
        self.step1 = function(
            self.inputs + self.outputs + [lr_var],
            self.loss,
            default_mode=1,
            name='SGD_step1',
            updates=sgd_updates)

    def step(self, *args):
        """Run one SGD update; return the loss before the update."""
        self.n_updates += 1
        return self.step1(*(args + (self.learning_rate,)))
class Nesterov(Optimizer):
    """Nesterov momentum optimizer.

    To adjust the learning rate or momentum parameter, modify
    `self.learning_rate` or `self.momentum`, respectively.

    Implemented as equations (3) and (4) in Sutskever et al. (2013).
    http://jmlr.org/proceedings/papers/v28/sutskever13.pdf

    Parameters
    ----------
    learning_rate : float, optional
        Initial learning rate. Default is 0.01.
    momentum : float, optional
        Initial momentum parameter. Default is 0.9.
    """

    def __init__(self, *args, learning_rate=0.01, momentum=0.9, **kwargs):
        super().__init__(*args, **kwargs)
        self.learning_rate = learning_rate
        self.momentum = momentum
        # Symbolic inputs so both rates can be changed between step() calls.
        learning_rate = T.scalar('learning_rate')
        momentum = T.scalar('momentum')
        # One velocity accumulator per parameter.
        vs = self.create_shadows('v')
        # Phase 1: jump ahead along the accumulated velocity.
        updates1 = [(p, p + momentum*v)
                    for p,v in zip(self.params.values(), vs.values())]
        # Phase 2: with gradients evaluated at the look-ahead point, decay
        # and update the velocities and apply the gradient correction.
        updates2 = [(v, momentum*v - learning_rate*grad)
                    for v,grad in zip(vs.values(), self.grad.values())] \
                 + [(p, p - learning_rate*grad)
                    for p,grad in zip(self.params.values(),
                                      self.grad.values())]
        self.step1 = theano.function(
            inputs=[momentum],
            outputs=[],
            name='Nesterov_step1',
            updates=updates1)
        self.step2 = function(
            inputs=self.inputs+self.outputs+[
                learning_rate, momentum],
            default_mode=1,
            outputs=self.loss,
            name='Nesterov_step2',
            updates=updates2)

    def step(self, *args):
        # NOTE(review): the returned loss is computed at the look-ahead
        # parameters (after step1), not the pre-update ones — confirm this
        # matches the base-class docstring's contract.
        self.n_updates += 1
        self.step1(self.momentum)
        return self.step2(*(args + (self.learning_rate, self.momentum)))
class RMSProp(Optimizer):
    """RMSProp optimizer.

    Parameters
    ----------
    learning_rate : float, optional
        Initial learning rate.
    decay : float, optional
        Decay rate of the running gradient-square average.
    epsilon : float, optional
        Stabilizing constant added before the square root.
    """

    def __init__(self, *args, learning_rate=0.001, decay=0.9, epsilon=1e-8,
                 **kwargs):
        super().__init__(*args, **kwargs)
        # Fixed: these assignments previously referenced undefined names
        # (learning_rate0, deay0, epsilon0), raising NameError on construction.
        self.learning_rate = learning_rate
        self.decay = decay
        self.epsilon = epsilon
        # Symbolic inputs so the rates can be changed between step() calls.
        learning_rate = T.scalar('learning_rate')
        decay = T.scalar('decay')
        # Running average of squared gradients, one per parameter.
        squares = self.create_shadows('squares')
        new_squares = [decay*square + (1.0-decay)*T.sqr(g)
                       for g, square in zip(
                           self.grad.values(), squares.values())]
        # Parameter deltas: gradient scaled by the RMS of recent gradients.
        ds = [-g*learning_rate/T.sqrt(square + self.epsilon)
              for g, square in zip(self.grad.values(), new_squares)]
        # Fixed: the original had a stray trailing line continuation after
        # the squares updates, which made the following assignment a
        # syntax error.
        updates = [(p, p+d) for p, d in zip(self.params.values(), ds)] \
                + list(zip(squares.values(), new_squares))
        self.step1 = function(
            inputs=self.inputs+self.outputs+[
                learning_rate, decay],
            default_mode=1,
            outputs=self.loss,
            name='RMSProp_step1',
            updates=updates)

    def step(self, *args):
        """Run one RMSProp update; return the loss before the update."""
        self.n_updates += 1
        return self.step1(*(args + (self.learning_rate, self.decay)))
class Adam(Optimizer):
    """Adam optimizer.

    To adjust the learning rate, simply modify `self.learning_rate`, although
    the whole point of this algorithm is that you should not need to do this.

    Parameters
    ----------
    learning_rate : float, optional
        Initial learning rate. Default is 0.001.
    beta_1 : float, optional
        First moment decay rate, default: 0.9
    beta_2 : float, optional
        Second moment decay rate, default: 0.999
    epsilon : float, optional
        Stabilizing constant, default: 1e-8

    Kingma and Ba (2014).
    http://arxiv.org/abs/1412.6980
    """

    def __init__(self, *args, learning_rate=0.001, beta_1=0.9,
                 beta_2=0.999, epsilon=1e-8, **kwargs):
        super().__init__(*args, **kwargs)
        self.learning_rate = learning_rate
        # Symbolic input so the rate can be changed between step() calls.
        learning_rate = T.scalar('learning_rate')
        # First (m) and second (v) moment accumulators, one per parameter.
        vs = self.create_shadows('v')
        ms = self.create_shadows('m')
        # Running powers beta_1^t and beta_2^t used for bias correction;
        # multiplied up by one factor per update (last two update pairs).
        beta_1_t = self.shared(
            np.asarray(beta_1, dtype=theano.config.floatX),
            name='beta_1_t')
        beta_2_t = self.shared(
            np.asarray(beta_2, dtype=theano.config.floatX),
            name='beta_2_t')
        # m <- beta_1*m + (1-beta_1)*g ; v <- beta_2*v + (1-beta_2)*g^2 ;
        # p <- p - lr * (m/(1-beta_1^t)) / (sqrt(v/(1-beta_2^t)) + eps).
        # NOTE(review): Theano evaluates all update expressions with the
        # pre-update values, so p is updated using the previous m/v and
        # beta powers, a slight reordering vs. the paper — confirm intended.
        updates = [(m, beta_1*m + (1.0-beta_1)*g)
                   for m,g in zip(ms.values(), self.grad.values())] \
                + [(v, beta_2*v + (1.0-beta_2)*T.sqr(g))
                   for v,g in zip(vs.values(), self.grad.values())] \
                + [(p, p - (learning_rate*(m/(1.0-beta_1_t))/
                            (T.sqrt(v/(1.0-beta_2_t)) + epsilon)))
                   for p,m,v in zip(self.params.values(),
                                    ms.values(), vs.values())] \
                + [(beta_1_t, beta_1_t * beta_1),
                   (beta_2_t, beta_2_t * beta_2)]
        self.step1 = function(
            inputs=self.inputs+self.outputs+[learning_rate],
            outputs=self.loss,
            default_mode=1,
            name='Adam_step1',
            updates=updates)

    def step(self, *args):
        # Returns the loss computed before this parameter update.
        self.n_updates += 1
        return self.step1(*(args + (self.learning_rate,)))
| 33.134048 | 80 | 0.572134 |
d07085fc75b4298342fcaaf12a0192451c4ad8e3 | 1,191 | py | Python | lib/aquilon/worker/commands/rebind_client_hostname.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | lib/aquilon/worker/commands/rebind_client_hostname.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | lib/aquilon/worker/commands/rebind_client_hostname.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a wrapper for `aq rebind client --hostname`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.bind_client_hostname import CommandBindClientHostname
class CommandRebindClientHostname(CommandBindClientHostname):
    """Implements `aq rebind client --hostname`.

    A rebind is exactly a bind with the force flag enabled, so this
    command simply delegates to the bind implementation.
    """

    required_parameters = ["hostname", "service"]

    def render(self, *args, **arguments):
        # Force the bind even if the host already has this service bound.
        arguments["force"] = True
        return super(CommandRebindClientHostname, self).render(*args,
                                                               **arguments)
| 39.7 | 82 | 0.752309 |
dcb81eed57cdd1817ed8c58361db3f5c4f1b2509 | 19,813 | py | Python | ipython_config.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | null | null | null | ipython_config.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | 13 | 2018-09-25T19:35:08.000Z | 2021-01-15T20:42:26.000Z | ipython_config.py | mrakitin/profile_collection-smi | 1eea45a3b886b2c0daeec715ce94f27da24d0ba3 | [
"BSD-3-Clause"
] | 3 | 2019-09-06T01:40:59.000Z | 2020-07-01T20:27:39.000Z | # Configuration file for ipython.
# Obtain the configuration object for this IPython profile; all settings
# below are attached to it.
c = get_config()
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# Integrate the Qt event loop so GUI windows stay responsive in the shell.
c.InteractiveShellApp.gui = 'qt'
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# Preload matplotlib/numpy at startup, auto-detecting the backend.
c.InteractiveShellApp.pylab = 'auto'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Restore variables saved with %store into the user namespace at startup.
c.StoreMagics.autorestore = True
# A list of dotted module names of IPython extensions to load.
# pyOlog provides Olog (operational log) magics used for beamline logging.
c.InteractiveShellApp.extensions = ['pyOlog.cli.ipy']
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = 'auto'
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# The date format used by logging formatters for %(asctime)s
# (ISO-8601-style date with 24-hour time).
c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = u''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# The IPython profile to use.
# c.TerminalIPythonApp.profile = u'default'
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This options can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = u''
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'none',
# 'osx', 'pyglet', 'qt', 'qt4', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
#
# c.TerminalInteractiveShell.history_length = 10000
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
# Show rewritten input, e.g. for autocall.
# (Explicitly set to the default value for clarity.)
c.TerminalInteractiveShell.show_rewritten_input = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'Linux'
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
#
# c.TerminalInteractiveShell.separate_out = ''
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# Autocall disabled: callables always require explicit parentheses.
c.TerminalInteractiveShell.autocall = 0
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = u'/usr/bin/vim'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 2.7.3 (default, Mar 13 2014, 11:03:55) \nType "copyright", "credits" or "license" for more information.\n\nIPython 2.3.1 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
#
# c.TerminalInteractiveShell.separate_out2 = ''
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
#
# c.TerminalInteractiveShell.debug = False
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
#
# c.TerminalInteractiveShell.ipython_dir = ''
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
# Start logging to the default log file.
# c.TerminalInteractiveShell.logstart = False
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
#
# c.TerminalInteractiveShell.readline_use = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
# Start logging to the given file in append mode.
#c.TerminalInteractiveShell.logappend = '/epics/xf/23id/ophyd/logs/xf23id1_log.py'
#
# c.TerminalInteractiveShell.xmode = 'Context'
#
# c.TerminalInteractiveShell.quiet = False
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'BlueSky [\\#]: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = u''
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = u''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.singleton_printers = {}
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| 37.033645 | 417 | 0.687427 |
9917c220542583b9c4b21b4402b69a5e06b96be2 | 3,020 | py | Python | src/programy/parser/template/nodes/thatstar.py | cdoebler1/AIML2 | ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a | [
"MIT"
] | 345 | 2016-11-23T22:37:04.000Z | 2022-03-30T20:44:44.000Z | src/programy/parser/template/nodes/thatstar.py | MikeyBeez/program-y | 00d7a0c7d50062f18f0ab6f4a041068e119ef7f0 | [
"MIT"
] | 275 | 2016-12-07T10:30:28.000Z | 2022-02-08T21:28:33.000Z | src/programy/parser/template/nodes/thatstar.py | VProgramMist/modified-program-y | f32efcafafd773683b3fe30054d5485fe9002b7d | [
"MIT"
] | 159 | 2016-11-28T18:59:30.000Z | 2022-03-20T18:02:44.000Z | """
Copyright (c) 2016-2020 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.parser.template.nodes.indexed import TemplateIndexedNode
class TemplateThatStarNode(TemplateIndexedNode):
    """Template node resolving <thatstar>: looks up the wildcard captured by
    the "that" pattern of the currently matched context, at this node's index."""

    def __init__(self, index=1):
        TemplateIndexedNode.__init__(self, index)

    def resolve_to_string(self, client_context):
        """Return the thatstar value for the resolved index.

        Returns "" when there is no current question or no matched context,
        and None (after logging an error) when the index is out of range."""
        result = ""
        conversation = client_context.bot.get_conversation(client_context)
        if conversation.has_current_question():
            sentence = conversation.current_question().current_sentence()
            context = sentence.matched_context
            if context is None:
                YLogger.error(client_context,
                              "ThatStar node has no matched context for clientid %s",
                              client_context.userid)
            else:
                position = int(self.index.resolve(client_context))
                result = context.thatstar(client_context, position)
                if result is None:
                    YLogger.error(client_context, "ThatStar index not in range [%d]", position)
        YLogger.debug(client_context, "ThatStar Node [%s] resolved to [%s]", self.to_string(), result)
        return result

    def to_string(self):
        return "[THATSTAR" + self.index.to_string() + "]"

    def to_xml(self, client_context):
        return '<thatstar index="' + self.index.to_xml(client_context) + '"></thatstar>'

    #######################################################################################################
    # THATSTAR_EXPRESSION ::== <thatstar( INDEX_ATTRIBUTE)/> | <thatstar><index>TEMPLATE_EXPRESSION</index></thatstar>

    def parse_expression(self, graph, expression):
        self._parse_node_with_attrib(graph, expression, "index", "1")
| 45.757576 | 120 | 0.685762 |
6c290c448b9fd6c86a8814954645db280247cccb | 4,000 | py | Python | hanlp/layers/embeddings/char_rnn.py | yatwql/HanLP | 584ce7e5ed1b8f2209e14f32c44a55bb5a822e31 | [
"Apache-2.0"
] | 3 | 2022-03-22T05:47:50.000Z | 2022-03-22T05:47:58.000Z | hanlp/layers/embeddings/char_rnn.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | hanlp/layers/embeddings/char_rnn.py | hushaoyun/HanLP | 967b52404c9d0adbc0cff2699690c127ecfca36e | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-06-02 23:49
from typing import Optional, Callable, Union
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from hanlp_common.configurable import AutoConfigurable
from hanlp.common.transform import VocabDict, ToChar
from hanlp.common.vocab import Vocab
from hanlp.layers.embeddings.embedding import Embedding, EmbeddingDim
class CharRNN(nn.Module, EmbeddingDim):
    """Bidirectional character-level LSTM producing one feature vector per
    token position, scattered back to a dense [batch, seq, 2*hidden] tensor."""
    def __init__(self,
                 field,
                 vocab_size,
                 embed: Union[int, nn.Embedding],
                 hidden_size):
        """Character level RNN embedding module.
        Args:
            field: The field in samples this encoder will work on.
            vocab_size: The size of character vocab.
            embed: An ``Embedding`` object or the feature size to create an ``Embedding`` object.
            hidden_size: The hidden size of RNNs.
        """
        super(CharRNN, self).__init__()
        self.field = field
        # the embedding layer
        if isinstance(embed, int):
            # An int means "build an embedding table of this width over the char vocab".
            self.embed = nn.Embedding(num_embeddings=vocab_size,
                                      embedding_dim=embed)
        elif isinstance(embed, nn.Module):
            # A ready-made module is used as-is; its embedding_dim feeds the LSTM.
            self.embed = embed
            embed = embed.embedding_dim
        else:
            raise ValueError(f'Unrecognized type for {embed}')
        # the lstm layer
        self.lstm = nn.LSTM(input_size=embed,
                            hidden_size=hidden_size,
                            batch_first=True,
                            bidirectional=True)
    def forward(self, batch, mask, **kwargs):
        # Character ids for this field; id 0 is treated as padding (see x.ne(0)).
        x = batch[f'{self.field}_char_id']
        # [batch_size, seq_len, fix_len]
        mask = x.ne(0)
        # [batch_size, seq_len]
        lens = mask.sum(-1)
        # Token positions that contain at least one character.
        char_mask = lens.gt(0)
        # [n, fix_len, n_embed]
        # NOTE(review): when self.embed is an EmbeddingDim it is handed the whole
        # batch dict and the result is masked below; in the plain-embedding branch
        # the ids are masked *before* embedding and then indexed with char_mask
        # again by pack_padded_sequence — confirm the shapes of both branches
        # against the callers.
        x = self.embed(batch) if isinstance(self.embed, EmbeddingDim) else self.embed(x[char_mask])
        x = pack_padded_sequence(x[char_mask], lens[char_mask].cpu(), True, False)
        x, (h, _) = self.lstm(x)
        # Final hidden states of the two directions, concatenated along features.
        # [n, fix_len, n_out]
        h = torch.cat(torch.unbind(h), -1)
        # Scatter the per-token vectors back into a dense padded tensor.
        # [batch_size, seq_len, n_out]
        embed = h.new_zeros(*lens.shape, h.size(-1))
        embed = embed.masked_scatter_(char_mask.unsqueeze(-1), h)
        return embed
    @property
    def embedding_dim(self) -> int:
        # Bidirectional: forward + backward hidden states are concatenated.
        return self.lstm.hidden_size * 2
class CharRNNEmbedding(Embedding, AutoConfigurable):
    """Configurable builder for :class:`CharRNN` character-level embeddings."""

    def __init__(self,
                 field,
                 embed,
                 hidden_size,
                 max_word_length=None) -> None:
        """Character level RNN embedding module builder.

        Args:
            field: The field in samples this encoder will work on.
            embed: An ``Embedding`` object or the feature size to create an ``Embedding`` object.
            hidden_size: The hidden size of RNNs.
            max_word_length: Character sequence longer than ``max_word_length`` will be truncated.
        """
        super().__init__()
        self.max_word_length = max_word_length
        self.embed = embed
        self.hidden_size = hidden_size
        self.field = field

    @property
    def vocab_name(self):
        # The character vocabulary is stored under "<field>_char".
        return f'{self.field}_char'

    def transform(self, vocabs: VocabDict, **kwargs) -> Optional[Callable]:
        # Give a nested Embedding builder the chance to install its transform first.
        if isinstance(self.embed, Embedding):
            self.embed.transform(vocabs=vocabs)
        name = self.vocab_name
        if name not in vocabs:
            vocabs[name] = Vocab()
        return ToChar(self.field, name, max_word_length=self.max_word_length)

    def module(self, vocabs: VocabDict, **kwargs) -> Optional[nn.Module]:
        # Materialize a nested builder into a real module when necessary.
        inner = self.embed.module(vocabs=vocabs) if isinstance(self.embed, Embedding) else self.embed
        return CharRNN(self.field, len(vocabs[self.vocab_name]), inner, self.hidden_size)
beb276e4b54b33db360b1078b52c202186de9ca7 | 872 | py | Python | Utility/Torch/Policy.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | Utility/Torch/Policy.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | Utility/Torch/Policy.py | smithblack-0/Utility | 875ab69fffad1412174d9d0a1de70edc1fd64152 | [
"MIT"
] | null | null | null | """
The purpose of this module is to provide infrastructure for the policy
machinery used to dynamically adjust the model during construction for
optimal results.
* each batch contains a number of experiments
* itemized loss used to help calculate penalty
* model size used to help calculate penalty
* Calculated penalty easily fed back to particular cases.
"""
import torch
from torch import nn
class PolicyModule(nn.Module):
    """
    A module for implementing policy logic.

    Placeholder: no behaviour implemented yet.
    """
    pass
class PolicyPiece():
    """
    A class representing a particular piece
    of a policy.
    """
    def __init__(self):
        # Placeholder constructor — no state yet.
        pass
class SlidingInt(PolicyPiece):
    # TODO: presumably an integer hyperparameter that slides within a range,
    # to be implemented "if policy works" — confirm intended design.
    pass
class PolicyKernel(nn.Module):
    """
    Takes policy definitions and dynamically
    runs experiments returning differently
    sized kernels.

    Placeholder: no behaviour implemented yet.
    """
    pass
f1c7b7a12750ff4024272ef1f483675231464173 | 563 | py | Python | blog/migrations/0003_auto_20160826_1653.py | clossmans2/my_django_blog | d7e3742390f58d436b95fc98642e2dfade60f8e0 | [
"MIT"
] | null | null | null | blog/migrations/0003_auto_20160826_1653.py | clossmans2/my_django_blog | d7e3742390f58d436b95fc98642e2dfade60f8e0 | [
"MIT"
] | 187 | 2018-08-03T14:43:34.000Z | 2021-07-15T10:23:34.000Z | blog/migrations/0003_auto_20160826_1653.py | clossmans2/my_django_blog | d7e3742390f58d436b95fc98642e2dfade60f8e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-26 20:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: re-declares Comment.author as a FK to the user model
    with CASCADE deletion."""

    # Must run after the migration that introduced the Comment model.
    dependencies = [
        ('blog', '0002_comment'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comment',
            name='author',
            # CASCADE: deleting a user also deletes their comments.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 24.478261 | 110 | 0.660746 |
6991b58d4077ed54c2eff517056d53a550325621 | 5,846 | py | Python | tests/Configuration/TemplateConfTests.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | tests/Configuration/TemplateConfTests.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | tests/Configuration/TemplateConfTests.py | owlfish/pubtal | fb20a0acf2769b2c06012b65bd462f02da12bd1c | [
"BSD-3-Clause"
] | null | null | null | """ Unit tests cases.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
try:
import logging
except:
from simpletal import DummyLogger as logging
from pubtal import SiteUtils
import updateSite
import unittest, copy, os.path
root = logging.getLogger()
root.setLevel (logging.WARN)
# Default test
TEMPLATE1 = '<html><body><h1 tal:content="page/headers/title"></h1> <div tal:content="structure page/content"></div></body></html>'
TEMPLATE2 = '<html><body><h1 tal:content="page/headers/title"></h1> <h2>Content</h2><div tal:content="structure page/content"></div></body></html>'
TEMPLATE3 = '<html><body><h1 tal:content="page/headers/title"></h1> <h3>Content</h3><div tal:content="structure page/content"></div></body></html>'
TEMPLATE4 = '<html><body><h1 tal:content="page/headers/title"></h1> <h4>Content</h4><div tal:content="structure page/content"></div></body></html>'
CONTENT1 = """title: Test1
This is the <b>first</b> test."""
CONFIG1 = """
<Content onedir>
template template2.html
</Content>
<Content onedir/two.txt>
template template3.html
</Content>
<Content *.xtxt>
template template4.xhtml
content-type HTMLText
</Content>
"""
RESULT1 = {'index.html': """<html><body><h1>Test1</h1> <div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'onedir/one.html': """<html><body><h1>Test1</h1> <h2>Content</h2><div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'onedir/two.html': """<html><body><h1>Test1</h1> <h3>Content</h3><div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'twodir/three.xhtml': """<html><body><h1>Test1</h1> <h4>Content</h4><div><p>This is the <b>first</b> test.</p>
</div></body></html>"""}
CONFIG2 = """
<Content onedir>
template template2.html
</Content>
<Content onedir/two.txt>
template template3.xhtml2
</Content>
<Content *.xtxt>
template template4.xhtml
content-type HTMLText
</Content>
<Template *.xhtml2>
output-type XHTML
</Template>
<Template template4.xhtml>
output-type XHTML
</Template>
"""
RESULT2 = {'index.html': """<html><body><h1>Test1</h1> <div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'onedir/one.html': """<html><body><h1>Test1</h1> <h2>Content</h2><div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'onedir/two.xhtml2': """<?xml version="1.0" encoding="iso-8859-15"?>
<html><body><h1>Test1</h1> <h3>Content</h3><div><p>This is the <b>first</b> test.</p>
</div></body></html>""", 'twodir/three.xhtml': """<?xml version="1.0" encoding="iso-8859-15"?>
<html><body><h1>Test1</h1> <h4>Content</h4><div><p>This is the <b>first</b> test.</p>
</div></body></html>"""}
class TemplateConfCases (unittest.TestCase):
	"""Checks that per-content and per-template configuration sections select
	the expected template file and output type when building a PubTal site."""
	def setUp (self):
		# Fresh scratch site for every test case.
		self.site = SiteUtils.SiteBuilder()
		self.site.buildDirs()

	def tearDown (self):
		self.site.destroySite()

	def _runTest_ (self, expectedResult, configFile=None):
		"""Build the site using configFile (defaults to test.config inside the
		site directory) and compare the generated output tree against
		expectedResult (a mapping of relative path -> expected content)."""
		if (configFile is None):
			conf = os.path.join (self.site.getSiteDir(), "test.config")
		else:
			conf = configFile
		update = updateSite.UpdateSite (conf, None, ui=SiteUtils.SilentUI())
		update.buildSite()
		comp = SiteUtils.DirCompare()
		res = comp.compare (self.site.getDestDir(), expectedResult)
		# assertTrue replaces the deprecated failUnless alias (removed in 3.12).
		self.assertTrue (res is None, res)

	def testContentConfigs (self):
		self.site.createTemplate ('template.html', TEMPLATE1)
		self.site.createTemplate ('template2.html', TEMPLATE2)
		self.site.createTemplate ('template3.html', TEMPLATE3)
		self.site.createTemplate ('template4.xhtml', TEMPLATE4)
		self.site.createContent ('index.txt', CONTENT1)
		self.site.createContent ('onedir/one.txt', CONTENT1)
		self.site.createContent ('onedir/two.txt', CONTENT1)
		self.site.createContent ('twodir/three.xtxt', CONTENT1)
		self.site.createConfigFile ('test.config', CONFIG1)
		self._runTest_ (RESULT1)

	def testTemplateConfigs (self):
		self.site.createTemplate ('template.html', TEMPLATE1)
		self.site.createTemplate ('template2.html', TEMPLATE2)
		self.site.createTemplate ('template3.xhtml2', TEMPLATE3)
		self.site.createTemplate ('template4.xhtml', TEMPLATE4)
		self.site.createContent ('index.txt', CONTENT1)
		self.site.createContent ('onedir/one.txt', CONTENT1)
		self.site.createContent ('onedir/two.txt', CONTENT1)
		self.site.createContent ('twodir/three.xtxt', CONTENT1)
		self.site.createConfigFile ('test.config', CONFIG2)
		self._runTest_ (RESULT2)
if __name__ == '__main__':
unittest.main()
| 37.961039 | 147 | 0.715874 |
1cbf2c276f9c5ecd01c2515a4070e0607548978b | 1,216 | py | Python | pyvisdk/do/virtual_lsi_logic_sas_controller.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/virtual_lsi_logic_sas_controller.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/virtual_lsi_logic_sas_controller.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualLsiLogicSASController(vim, *args, **kwargs):
    '''VirtualLsiLogicSASController is the data object that represents a LSI Logic SAS
    SCSI controller.'''

    obj = vim.client.factory.create('{urn:vim25}VirtualLsiLogicSASController')

    # Validation: the three required fields may arrive positionally or as
    # keywords, so count both.  (The original message claimed "at least 4"
    # and reported len(args) only, contradicting the `< 3` check.)
    total = len(args) + len(kwargs)
    if total < 3:
        raise IndexError('Expected at least 3 arguments got: %d' % total)

    required = [ 'sharedBus', 'busNumber', 'key' ]
    optional = [ 'hotAddRemove', 'scsiCtlrUnitNumber', 'device', 'backing', 'connectable',
        'controllerKey', 'deviceInfo', 'unitNumber', 'dynamicProperty', 'dynamicType' ]

    # Positional args fill required fields first, then optional ones, in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args may only name known fields.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 34.742857 | 124 | 0.626645 |
92e3d9dd0c0a832a501a4198f898a3f9cd317fbf | 11,503 | py | Python | main.py | smalik169/cross-alligned-auto-encoder | c6d6a6dbc7563676d7ba942b95c7f12f3c9a4991 | [
"Apache-2.0"
] | null | null | null | main.py | smalik169/cross-alligned-auto-encoder | c6d6a6dbc7563676d7ba942b95c7f12f3c9a4991 | [
"Apache-2.0"
] | null | null | null | main.py | smalik169/cross-alligned-auto-encoder | c6d6a6dbc7563676d7ba942b95c7f12f3c9a4991 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import time
import torch
import torch.optim as optim
import pprint
import data
import model
#from logger import Logger
parser = argparse.ArgumentParser(
description='Style Transfer from Non-Parallel Text by Cross-Alignment')
parser.add_argument('--data', type=str,
default='/pio/scratch/2/i264266/cross-alligned-auto-encoder/data/sentiment.',
help='location of the data corpus')
parser.add_argument('--encoder-kwargs', type=str, default='',
help='kwargs for the encoder')
parser.add_argument('--generator-kwargs', type=str, default='',
help='k=v list of kwargs for the generator')
parser.add_argument('--discriminator-kwargs', type=str, default='',
help='kwargs for the discriminators')
parser.add_argument('--style-dim', type=int, default=200,
help='style embedding size')
parser.add_argument('--encoder-emb-dim', type=int, default=100,
help='style embedding size')
parser.add_argument('--generator-emb-dim', type=int, default=100,
help='style embedding size')
parser.add_argument('--tie-embeddings', type=bool, default=True,
help='use same word embeddings in encoder and generator')
parser.add_argument('--lmb', type=float, default=1.0,
help='regulates hom much of discriminator error is added to the ae_loss')
parser.add_argument('--lr', type=float, default=0.0001,
help='initial learning rate')
parser.add_argument('--lr-decay', type=float, default=2.0,
help='learning rate decay')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=64, metavar='N',
help='batch size')
parser.add_argument('--optimizer', default='sgd',
choices=['sgd', 'adam', 'adagrad', 'adadelta'],
help='optimization method')
parser.add_argument('--optimizer-kwargs', type=str, default='',
help='kwargs for the optimizer (e.g., momentum=0.9)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--logdir', type=str, default=None,
help='path to save the final model')
parser.add_argument('--log_weights', action='store_true',
help="log weights' histograms")
parser.add_argument('--log_grads', action='store_true',
help="log gradients' histograms")
parser.add_argument('--load-model', action='store_true',
help='loads pretrained model')
parser.add_argument('--global-dropout', type=float, default=None,
help='set given dropout throughout whole model')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably "
"run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data, cuda=args.cuda, rng=args.seed)
eval_batch_size = 20
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
encoder_kwargs = {'nhid': 500}
encoder_kwargs.update(eval("dict(%s)" % (args.encoder_kwargs,)))
generator_kwargs = {'nhid': args.style_dim+encoder_kwargs['nhid']}
generator_kwargs.update(eval("dict(%s)" % (args.generator_kwargs,)))
generator_kwargs['eos_id'] = corpus.train.class0.eos_id
discriminator_kwargs = {'filter_sizes': [3,4,5], 'n_filters': 128}
discriminator_kwargs.update(eval("dict(%s)" % (args.discriminator_kwargs,)))
model_kwargs = {'ntokens': ntokens, 'style_dim': args.style_dim,
'encoder_emb_dim': args.encoder_emb_dim,
'generator_emb_dim': args.generator_emb_dim,
'tie_embeddings': args.tie_embeddings, 'lmb': args.lmb}
if args.global_dropout is not None:
model_kwargs['dropout'] = args.global_dropout
encoder_kwargs['dropout'] = args.global_dropout
generator_kwargs['dropout'] = args.global_dropout
discriminator_kwargs['dropout'] = args.global_dropout
model_kwargs['generator_kwargs'] = generator_kwargs
model_kwargs['encoder_kwargs'] = encoder_kwargs
model_kwargs['discriminator_kwargs'] = discriminator_kwargs
print("Instantiating model with args:\n%s" % (
pprint.pformat(model_kwargs, indent=1)))
model = model.Model(**model_kwargs)
print("Model summary:\n%s" % (model,))
print("Model params:\n%s" % ("\n".join(
["%s: %s" % (p[0], p[1].size()) for p in model.named_parameters()])))
if args.cuda:
model.cuda()
optimizer_proto = {'sgd': optim.SGD, 'adam': optim.Adam,
'adagrad': optim.Adagrad, 'adadelta': optim.Adadelta}
optimizer_kwargs = eval("dict(%s)" % args.optimizer_kwargs)
optimizer_kwargs['lr'] = args.lr
ae_optimizer = optimizer_proto[args.optimizer](
(param for name, param in model.named_parameters()
if name.split('.')[0] != 'discriminator'),
**optimizer_kwargs)
#optimizer_kwargs['lr'] = args.lr
discriminator_optimizer = optimizer_proto[args.optimizer](
model.discriminator.parameters(),
**optimizer_kwargs)
model_path = "./model.pt"
def save_model():
    """Serialize the model's parameters to ``model_path``."""
    with open(model_path, 'wb') as checkpoint:
        torch.save(model.state_dict(), checkpoint)
def load_model():
    """Restore the model's parameters from ``model_path``."""
    with open(model_path, 'rb') as checkpoint:
        model.load_state_dict(torch.load(checkpoint))
###############################################################################
# Training code
###############################################################################
# Loop over epochs.
best_val_loss = None
class OptimizerStep(object):
    """Callable performing one combined generator/discriminator update.

    The autoencoder (generator + encoder) parameters are updated on every
    ``ae_update_freq``-th call; the discriminator is updated on every call.
    When ``debug`` is set, running min/max gradient norms are tracked and
    printed every ``printout_freq`` steps.
    """

    def __init__(self, model, clip, ae_optimizer, d_optimizer,
                 ae_update_freq=2, debug=False, printout_freq=100):
        self.model = model
        self.ae_optimizer = ae_optimizer
        self.d_optimizer = d_optimizer
        self.clip = clip  # max gradient norm used for clipping
        self.printout_freq = printout_freq
        self.ae_update_freq = ae_update_freq
        self.debug = debug
        self.step = 0
        self.epoch = 0
        # Running gradient-norm statistics; the curr_* values are reset every
        # printout_freq steps, the global_* values persist for the whole run.
        self.curr_ae_max = 0.0
        self.curr_ae_min = float('inf')
        self.global_ae_max = 0.0
        self.global_ae_min = float('inf')
        self.curr_d_max = 0.0
        self.curr_d_min = float('inf')
        self.global_d_max = 0.0
        self.global_d_min = float('inf')

    def __call__(self, rec_loss, adv_loss0, adv_loss1, batch_no):
        """Apply one optimization step given the reconstruction and the two
        adversarial losses. ``batch_no`` is accepted for interface
        compatibility but unused."""
        ae_total_norm = None
        if self.step % self.ae_update_freq == 0:
            self.model.zero_grad()
            ae_loss = rec_loss
            # Only feed the adversarial signal to the generator while the
            # discriminator is not yet confidently winning.
            # (`.item()` replaces the legacy `.data[0]`, which raises on
            # 0-dim tensors in modern PyTorch.)
            if max(adv_loss0.item(), adv_loss1.item()) < 0.9:
                # BUGFIX: use self.model; the original read a module-level
                # `model` global, breaking encapsulation.
                ae_loss = rec_loss - self.model.lmb * (adv_loss0 + adv_loss1)
            ae_loss.backward(retain_graph=True)
            # `clip_grad_norm_` helps prevent the exploding gradient problem
            # in RNNs / LSTMs (in-place variant; the plain spelling is
            # deprecated).
            ae_total_norm = torch.nn.utils.clip_grad_norm_(
                self.model.parameters(), self.clip)
            self.ae_optimizer.step()
        self.model.zero_grad()
        (adv_loss0 + adv_loss1).backward()
        d_total_norm = torch.nn.utils.clip_grad_norm_(
            self.model.discriminator.parameters(), self.clip)
        self.d_optimizer.step()
        if self.debug:
            if ae_total_norm is not None:
                self.curr_ae_max = max(self.curr_ae_max, ae_total_norm)
                self.curr_ae_min = min(self.curr_ae_min, ae_total_norm)
            self.curr_d_max = max(self.curr_d_max, d_total_norm)
            self.curr_d_min = min(self.curr_d_min, d_total_norm)
            if self.step % self.printout_freq == 0:
                self.global_ae_max = max(self.global_ae_max, self.curr_ae_max)
                self.global_ae_min = min(self.global_ae_min, self.curr_ae_min)
                self.global_d_max = max(self.global_d_max, self.curr_d_max)
                self.global_d_min = min(self.global_d_min, self.curr_d_min)
                print("generator: grad_norm = %s, curr_min = %f, curr_max = %f, global_min = %f, global_max = %f"
                      % (str(ae_total_norm), self.curr_ae_min, self.curr_ae_max, self.global_ae_min, self.global_ae_max))
                print("discriminator: grad_norm = %f, curr_min = %f, curr_max = %f, global_min = %f, global_max = %f\n"
                      % (d_total_norm, self.curr_d_min, self.curr_d_max, self.global_d_min, self.global_d_max))
                self.curr_ae_max = 0.0
                self.curr_ae_min = float('inf')
                # BUGFIX: reset the discriminator statistics as well; the
                # original reset the AE stats twice and never reset these.
                self.curr_d_max = 0.0
                self.curr_d_min = float('inf')
        self.step += 1
optimizer_step = OptimizerStep(model=model, clip=args.clip, debug=True,
                               ae_optimizer=ae_optimizer, d_optimizer=discriminator_optimizer)
if args.load_model:  # resume training
    load_model()
# Gamma (generator temperature-like coefficient) annealing schedule.
gamma_decay = 0.5
gamma_init = 1.0
gamma_min = model.generator.gamma
model.generator.gamma = gamma_init
# At any point you can hit Ctrl + C to break out of training early.
try:
    for epoch in range(1, args.epochs+1):
        model.train_on(
            corpus.train.iter_epoch(
                args.batch_size, evaluation=False),
            optimizer_step=optimizer_step)
        val_loss = model.eval_on(
            corpus.valid.iter_epoch(
                eval_batch_size, evaluation=True))
        # Save the model if the validation loss is the best we've seen so far.
        if not best_val_loss or val_loss['total_ae_loss'] < best_val_loss:
            save_model()
            #logger.save_model_state_dict(model.state_dict())
            #logger.save_model(model)
            best_val_loss = val_loss['total_ae_loss']
        else:
            # Anneal the learning rate if no improvement has been seen on
            # the validation dataset.
            if args.lr_decay:
                continue
                # NOTE(review): the annealing below is dead code, skipped by
                # the `continue` above. It also references an undefined name
                # `optimizer` (only ae_optimizer / discriminator_optimizer
                # exist) — confirm intent before re-enabling.
                assert len(optimizer.param_groups) == 1
                optimizer.param_groups[0]['lr'] /= args.lr_decay
                # logger.lr = optimizer.param_groups[0]['lr']
        # Decay gamma each epoch, never below its configured minimum.
        model.generator.gamma = max(
            gamma_min,
            gamma_decay*model.generator.gamma)
except KeyboardInterrupt:
    print('-' * 89)
    print('Exiting from training early')
# Load the best saved model.
#model = logger.load_model()
#model.load_state_dict(logger.load_model_state_dict())
load_model()
# Run on all data
train_loss = model.eval_on(
    corpus.train.iter_epoch(eval_batch_size, evaluation=True))
valid_loss = model.eval_on(
    corpus.valid.iter_epoch(eval_batch_size, evaluation=True))
test_loss = model.eval_on(
    corpus.test.iter_epoch(eval_batch_size, evaluation=True))
results = dict(train=train_loss, valid=valid_loss,
               test=test_loss)
#logger.final_log(results)
| 39.12585 | 123 | 0.616622 |
1fe120f013fe49569e93e4e234d80c98bbc77919 | 6,825 | py | Python | clinica/lib/nipype/interfaces/mrtrix/preprocess.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | 1 | 2020-06-08T15:27:55.000Z | 2020-06-08T15:27:55.000Z | clinica/lib/nipype/interfaces/mrtrix/preprocess.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | clinica/lib/nipype/interfaces/mrtrix/preprocess.py | Raelag0112/clinica | d301b1abfdf4d3b62dc4b329622340795ae51ef8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import absolute_import, division, print_function, unicode_literals
import os.path as op
from nipype.interfaces.base import (
CommandLine,
CommandLineInputSpec,
File,
InputMultiPath,
TraitedSpec,
isdefined,
traits,
)
from nipype.utils.filemanip import split_filename
class DWI2TensorInputSpec(CommandLineInputSpec):
    # Trait definitions mirror the command-line flags of MRtrix `dwi2tensor`;
    # `position` controls argument ordering, `argstr` the rendered flag.
    in_file = InputMultiPath(
        File(exists=True),
        argstr="%s",
        mandatory=True,
        position=-2,
        desc="Diffusion-weighted images",
    )
    # Output name is derived from the input via name_template when not given.
    out_filename = File(
        name_template="%s_tensor.mif",
        name_source="in_file",
        output_name="tensor",
        argstr="%s",
        desc="Output tensor filename",
        position=-1,
    )
    encoding_file = File(
        argstr="-grad %s",
        position=2,
        desc=(
            "Encoding file supplied as a 4xN text file with "
            "each line is in the format [ X Y Z b ], where "
            "[ X Y Z ] describe the direction of the applied "
            "gradient, and b gives the b-value in units "
            "(1000 s/mm^2). See FSL2MRTrix()"
        ),
    )
    ignore_slice_by_volume = traits.List(
        traits.Int,
        argstr="-ignoreslices %s",
        sep=" ",
        position=2,
        minlen=2,
        maxlen=2,
        desc=(
            "Requires two values (i.e. [34 "
            "1] for [Slice Volume] Ignores "
            "the image slices specified "
            "when computing the tensor. "
            "Slice here means the z "
            "coordinate of the slice to be "
            "ignored."
        ),
    )
    ignore_volumes = traits.List(
        traits.Int,
        argstr="-ignorevolumes %s",
        sep=" ",
        position=2,
        minlen=1,
        desc=(
            "Requires two values (i.e. [2 5 6] for "
            "[Volumes] Ignores the image volumes "
            "specified when computing the tensor."
        ),
    )
    quiet = traits.Bool(
        argstr="-quiet",
        position=1,
        desc=("Do not display information messages or progress " "status."),
    )
    debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.")
    in_mask = File(
        exists=True,
        argstr="-mask %s",
        desc=(
            "only perform computation within the specified binary" " brain mask image"
        ),
    )
class DWI2TensorOutputSpec(TraitedSpec):
    # Single output: the diffusion tensor image produced by dwi2tensor.
    tensor = File(exists=True, desc="path/name of output diffusion tensor image")
class DWI2Tensor(CommandLine):
    """
    Converts diffusion-weighted images to tensor images.

    Wraps the MRtrix ``dwi2tensor`` command-line tool.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> dwi2tensor = mrt.DWI2Tensor()
    >>> dwi2tensor.inputs.in_file = 'dwi.mif'
    >>> dwi2tensor.inputs.encoding_file = 'encoding.txt'
    >>> dwi2tensor.cmdline
    'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif'
    >>> dwi2tensor.run() # doctest: +SKIP
    """

    _cmd = "dwi2tensor"  # executable invoked by the CommandLine base class
    input_spec = DWI2TensorInputSpec
    output_spec = DWI2TensorOutputSpec
class MRTransformInputSpec(CommandLineInputSpec):
    # Trait definitions mirror the command-line flags of MRtrix `mrtransform`.
    in_files = InputMultiPath(
        File(exists=True),
        argstr="%s",
        mandatory=True,
        position=-2,
        desc="Input images to be transformed",
    )
    # genfile=True lets _gen_filename supply a default output name.
    out_filename = File(genfile=True, argstr="%s", position=-1, desc="Output image")
    invert = traits.Bool(
        argstr="-inverse",
        position=1,
        desc="Invert the specified transform before using it",
    )
    replace_transform = traits.Bool(
        argstr="-replace",
        position=1,
        desc="replace the current transform by that specified, rather than applying it to the current transform",
    )
    transformation_file = File(
        exists=True,
        argstr="-transform %s",
        position=1,
        desc="The transform to apply, in the form of a 4x4 ascii file.",
    )
    linear_transform = File(
        exists=True,
        argstr="-linear %s",
        position=1,
        desc=(
            "specify a linear transform to apply, in the form of a 3x4 or "
            "4x4 ascii file. Note the standard reverse convention is used, "
            "where the transform maps points in the template image to the "
            "moving image. Note that the reverse convention is still assumed "
            "even if no -template image is supplied"
        ),
    )
    template_image = File(
        exists=True,
        argstr="-template %s",
        position=1,
        desc="Reslice the input image to match the specified template image.",
    )
    reference_image = File(
        exists=True,
        argstr="-reference %s",
        position=1,
        desc="in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.",
    )
    flip_x = traits.Bool(
        argstr="-flipx",
        position=1,
        desc="assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option.",
    )
    quiet = traits.Bool(
        argstr="-quiet",
        position=1,
        desc="Do not display information messages or progress status.",
    )
    debug = traits.Bool(argstr="-debug", position=1, desc="Display debugging messages.")
class MRTransformOutputSpec(TraitedSpec):
    # Single output: the transformed/resliced image.
    out_file = File(exists=True, desc="the output image of the transformation")
class MRTransform(CommandLine):
    """
    Apply spatial transformations or reslice images.

    Wraps the MRtrix ``mrtransform`` command-line tool.

    Example
    -------

    >>> MRxform = MRTransform()
    >>> MRxform.inputs.in_files = 'anat_coreg.mif'
    >>> MRxform.run() # doctest: +SKIP
    """

    _cmd = "mrtransform"
    input_spec = MRTransformInputSpec
    output_spec = MRTransformOutputSpec

    def _list_outputs(self):
        # Resolve the output image path, preferring the user-supplied name
        # and falling back to the auto-generated one.
        outputs = self.output_spec().get()
        requested = self.inputs.out_filename
        if isdefined(requested):
            outputs["out_file"] = op.abspath(requested)
        else:
            outputs["out_file"] = op.abspath(self._gen_outfilename())
        return outputs

    def _gen_filename(self, name):
        # Only the output filename is auto-generated.
        return self._gen_outfilename() if name == "out_filename" else None

    def _gen_outfilename(self):
        # Derive the default output name from the first input image's stem.
        stem = split_filename(self.inputs.in_files[0])[1]
        return stem + "_MRTransform.mif"
| 31.597222 | 315 | 0.601905 |
56a80869e339b05dd87aa4dbe504438878ea10a2 | 764 | py | Python | projects/bees/example.py | robfatland/pythonbytes | 33fcc3e0220210059c0ebd7adb6694ddebdbd82c | [
"MIT"
] | 2 | 2019-10-20T22:08:41.000Z | 2021-12-28T01:12:51.000Z | projects/bees/example.py | robfatland/pythonbytes | 33fcc3e0220210059c0ebd7adb6694ddebdbd82c | [
"MIT"
] | null | null | null | projects/bees/example.py | robfatland/pythonbytes | 33fcc3e0220210059c0ebd7adb6694ddebdbd82c | [
"MIT"
] | 2 | 2019-01-19T18:05:34.000Z | 2019-04-16T22:45:43.000Z | # This section of code: Keep as-is
import requests, numpy as np, matplotlib.pyplot as plt
def bees(x, y, z):
    """Query the drone-bee web service at position (x, y, z) and return the raw response text."""
    base = 'https://52t7suregg.execute-api.us-east-1.amazonaws.com/default/dronebees'
    query = '?x=' + str(x) + '&y=' + str(y) + '&z=' + str(z)
    return requests.get(base + query).text
# Test code. Note: This will occasionally give you 'drone lost' as the result
# print(bees(2000., 2000., 50.))
# This is where you modify the code; check with a coach for more details
nStops = 5
dronecounts = np.zeros((nStops,nStops))
# result = bees(x, y, z)
# dronecounts[i, j] = float(result)
# This section of code: Keep as-is
fig = plt.figure(figsize=(6, 3.2)); ax = fig.add_subplot(111)
ax.set_title('My Baobab Orchard'); plt.imshow(dronecounts)
fig.savefig('graph.png')
| 29.384615 | 100 | 0.671466 |
37dbe9161b7be45bd52d3b4204509409a3779475 | 2,569 | py | Python | machine_learning/test/test_PCA.py | mideaconu/face_recognition | a45150845dbe3039c0dd8be72708ff9ca9588697 | [
"MIT"
] | 1 | 2019-12-04T13:46:50.000Z | 2019-12-04T13:46:50.000Z | machine_learning/test/test_PCA.py | mideaconu/face_recognition | a45150845dbe3039c0dd8be72708ff9ca9588697 | [
"MIT"
] | 1 | 2019-12-04T13:50:43.000Z | 2019-12-05T16:51:20.000Z | machine_learning/test/test_PCA.py | mideaconu/face-recognition | a45150845dbe3039c0dd8be72708ff9ca9588697 | [
"MIT"
] | null | null | null | #
# Author: Mihai-Ionut Deaconu
#
import sys
import unittest
import numpy as np
from machine_learning.decomposition import PCA
# Deterministic 2-feature Gaussian sample shared by the test cases below.
rng = np.random.RandomState(42)
data = rng.normal(0, 1, size=(5000, 2))
class PCATest(unittest.TestCase):
    """Argument-validation tests for the PCA estimator."""

    def setUp(self):
        # A valid instance used by the fit/setter test cases.
        self.pca = PCA(n_components=3, method="svd", n_oversamples=10, n_iter=2)

    """ Constructor input parameter test """

    def test_n_components_type(self):
        self.assertRaises(TypeError, PCA, n_components="1")

    def test_n_components_value(self):
        self.assertRaises(ValueError, PCA, n_components=-1)

    def test_method_value(self):
        self.assertRaises(ValueError, PCA, n_components=1, method="qr")

    def test_n_oversamples_type(self):
        self.assertRaises(TypeError, PCA, n_components=1, n_oversamples="1")

    def test_n_oversamples_value(self):
        self.assertRaises(ValueError, PCA, n_components=1, n_oversamples=-1)

    def test_n_iter_type(self):
        self.assertRaises(TypeError, PCA, n_components=1, n_iter="1")

    def test_n_iter_value(self):
        self.assertRaises(ValueError, PCA, n_components=1, n_iter=-1)

    """ Fit input parameter test """

    def test_empty_data(self):
        self.assertRaises(ValueError, self.pca.fit, np.array([]))

    def test_n_components_v_features(self):
        # n_components (3) exceeds the number of features in `data` (2).
        self.assertRaises(ValueError, self.pca.fit, data)

    """ Setter input parameter test """

    def test_n_components_setter_type(self):
        self.assertRaises(TypeError, setattr, self.pca, "n_components", "1")

    def test_n_components_setter_value(self):
        self.assertRaises(ValueError, setattr, self.pca, "n_components", -1)

    def test_method_setter_value(self):
        self.assertRaises(ValueError, setattr, self.pca, "method", "qr")

    def test_n_oversamples_setter_type(self):
        self.assertRaises(TypeError, setattr, self.pca, "n_oversamples", "1")

    def test_n_oversamples_setter_value(self):
        self.assertRaises(ValueError, setattr, self.pca, "n_oversamples", -1)

    def test_n_iter_setter_type(self):
        self.assertRaises(TypeError, setattr, self.pca, "n_iter", "1")

    def test_n_iter_setter_value(self):
        self.assertRaises(ValueError, setattr, self.pca, "n_iter", -1)
if __name__ == "__main__":
unittest.main() | 28.230769 | 80 | 0.653172 |
47f5b9118777918174d2951659cc3bd565145654 | 3,181 | py | Python | chia/types/weight_proof.py | nur-azhar/chia-blockchain | 890da94024b4742bbbb93e47f72113e8344a20b3 | [
"Apache-2.0"
] | 9 | 2022-02-06T13:38:35.000Z | 2022-03-19T15:26:45.000Z | chia/types/weight_proof.py | nur-azhar/chia-blockchain | 890da94024b4742bbbb93e47f72113e8344a20b3 | [
"Apache-2.0"
] | 6 | 2022-02-06T13:48:07.000Z | 2022-03-09T20:04:15.000Z | chia/types/weight_proof.py | neurosis69/chia-blockchain | d346ef89f1e1aa9eec1e06f1ef93862c097f6bd7 | [
"Apache-2.0"
] | 1 | 2022-03-15T08:44:30.000Z | 2022-03-15T08:44:30.000Z | from dataclasses import dataclass
from typing import List, Optional
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.reward_chain_block import RewardChainBlock
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.blockchain_format.vdf import VDFInfo, VDFProof
from chia.types.end_of_slot_bundle import EndOfSubSlotBundle
from chia.types.header_block import HeaderBlock
from chia.util.ints import uint8, uint32, uint64, uint128
from chia.util.streamable import Streamable, streamable
@streamable
@dataclass(frozen=True)
class SubEpochData(Streamable):
    """Per-sub-epoch summary carried in a weight proof."""
    reward_chain_hash: bytes32
    num_blocks_overflow: uint8
    # New parameters take effect from this sub-epoch; None when unchanged.
    new_sub_slot_iters: Optional[uint64]
    new_difficulty: Optional[uint64]
# number of challenge blocks
# Average iters for challenge blocks
# |--A-R----R-------R--------R------R----R----------R-----R--R---| Honest difficulty 1000
# 0.16
# compute total reward chain blocks
# |----------------------------A---------------------------------| Attackers chain 1000
# 0.48
# total number of challenge blocks == total number of reward chain blocks
@streamable
@dataclass(frozen=True)
class SubSlotData(Streamable):
    """VDF proofs/infos describing one sub-slot of a challenge segment."""
    # if infused
    proof_of_space: Optional[ProofOfSpace]
    # VDF to signage point
    cc_signage_point: Optional[VDFProof]
    # VDF from signage to infusion point
    cc_infusion_point: Optional[VDFProof]
    icc_infusion_point: Optional[VDFProof]
    cc_sp_vdf_info: Optional[VDFInfo]
    signage_point_index: Optional[uint8]
    # VDF from beginning to end of slot if not infused
    # from ip to end if infused
    cc_slot_end: Optional[VDFProof]
    icc_slot_end: Optional[VDFProof]
    # info from finished slots
    cc_slot_end_info: Optional[VDFInfo]
    icc_slot_end_info: Optional[VDFInfo]
    cc_ip_vdf_info: Optional[VDFInfo]
    icc_ip_vdf_info: Optional[VDFInfo]
    total_iters: Optional[uint128]

    def is_challenge(self) -> bool:
        """True when this sub-slot contains an infused challenge block (a proof of space is present)."""
        # Idiomatic form of: if x is not None: return True / return False
        return self.proof_of_space is not None

    def is_end_of_slot(self) -> bool:
        """True when this sub-slot records an end-of-slot VDF rather than an infusion."""
        return self.cc_slot_end_info is not None
@streamable
@dataclass(frozen=True)
class SubEpochChallengeSegment(Streamable):
    """One sampled challenge segment belonging to sub-epoch ``sub_epoch_n``."""
    sub_epoch_n: uint32
    sub_slots: List[SubSlotData]
    rc_slot_end_info: Optional[VDFInfo]  # in first segment of each sub_epoch
@streamable
@dataclass(frozen=True)
# this is used only for serialization to database
class SubEpochSegments(Streamable):
    challenge_segments: List[SubEpochChallengeSegment]
@streamable
@dataclass(frozen=True)
# this is used only for serialization to database
class RecentChainData(Streamable):
    recent_chain_data: List[HeaderBlock]
@streamable
@dataclass(frozen=True)
class ProofBlockHeader(Streamable):
    """Minimal block header carried inside a weight proof."""
    finished_sub_slots: List[EndOfSubSlotBundle]
    reward_chain_block: RewardChainBlock
@streamable
@dataclass(frozen=True)
class WeightProof(Streamable):
    """Top-level weight proof: sub-epoch summaries, sampled segments, and recent chain."""
    sub_epochs: List[SubEpochData]
    sub_epoch_segments: List[SubEpochChallengeSegment]  # sampled sub epoch
    recent_chain_data: List[HeaderBlock]
| 30.883495 | 95 | 0.735303 |
3fa258eba111056d8233c3bdd96c8eddf70d3161 | 17,323 | py | Python | odoo/base-addons/point_of_sale/tests/test_pos_products_with_tax.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/point_of_sale/tests/test_pos_products_with_tax.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | odoo/base-addons/point_of_sale/tests/test_pos_products_with_tax.py | LucasBorges-Santos/docker-odoo | 53987bbd61f6119669b5f801ee2ad54695084a21 | [
"MIT"
] | null | null | null | import odoo
from odoo.addons.point_of_sale.tests.common import TestPoSCommon
@odoo.tests.tagged('post_install', '-at_install')
class TestPoSProductsWithTax(TestPoSCommon):
    """ Test normal configuration PoS selling products with tax
    """

    def setUp(self):
        # Three products covering a 7% tax, a 10% tax, and a 7%+10% tax group.
        super(TestPoSProductsWithTax, self).setUp()
        self.config = self.basic_config
        self.product1 = self.create_product(
            'Product 1',
            self.categ_basic,
            10.0,
            5.0,
            tax_ids=self.taxes['tax7'].ids,
        )
        self.product2 = self.create_product(
            'Product 2',
            self.categ_basic,
            20.0,
            10.0,
            tax_ids=self.taxes['tax10'].ids,
        )
        self.product3 = self.create_product(
            'Product 3',
            self.categ_basic,
            30.0,
            15.0,
            tax_ids=self.taxes['tax_group_7_10'].ids,
        )
        self.adjust_inventory([self.product1, self.product2, self.product3], [100, 50, 50])

    def test_orders_no_invoiced(self):
        """ Test for orders without invoice

        Orders
        ======
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order   | payments | invoiced? | product  | qty | untaxed | tax                   | total  |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order 1 | cash     | no        | product1 | 10  | 100     | 7                     | 107    |
        |         |          |           | product2 | 5   | 90.91   | 9.09                  | 100    |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order 2 | cash     | no        | product2 | 7   | 127.27  | 12.73                 | 140    |
        |         |          |           | product3 | 4   | 109.09  | 10.91[10%] + 7.64[7%] | 127.64 |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order 3 | bank     | no        | product1 | 1   | 10      | 0.7                   | 10.7   |
        |         |          |           | product2 | 3   | 54.55   | 5.45                  | 60     |
        |         |          |           | product3 | 5   | 136.36  | 13.64[10%] + 9.55[7%] | 159.55 |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+

        Calculated taxes
        ================
            total tax 7% only + group tax (10+7%)
                (7 + 0.7) + (7.64 + 9.55) = 7.7 + 17.19 = 24.89
            total tax 10% only + group tax (10+7%)
                (9.09 + 12.73 + 5.45) + (10.91 + 13.64) = 27.27 + 24.55 = 51.82

        Thus, manually_calculated_taxes = (-24,89, -51.82)
        """
        self.open_new_session()

        # create orders
        orders = []
        orders.append(self.create_ui_order_data([(self.product1, 10), (self.product2, 5)]))
        orders.append(self.create_ui_order_data([(self.product2, 7), (self.product3, 4)]))
        orders.append(self.create_ui_order_data(
            [(self.product1, 1), (self.product3, 5), (self.product2, 3)],
            payments=[(self.bank_pm, 230.25)]
        ))

        # sync orders
        order = self.env['pos.order'].create_from_ui(orders)

        # check values before closing the session
        self.assertEqual(3, self.pos_session.order_count)
        orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
        self.assertAlmostEqual(orders_total, self.pos_session.total_payments_amount, 'Total order amount should be equal to the total payment amount.')

        # close the session
        self.pos_session.action_pos_session_validate()

        # check values after the session is closed
        session_move = self.pos_session.move_id

        # Sales lines carry the untaxed amounts only (credit, hence negative).
        sales_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.sale_account)
        self.assertAlmostEqual(sum(sales_lines.mapped('balance')), -628.18, msg='Sales line balance should be equal to untaxed orders amount.')

        receivable_line_bank = session_move.line_ids.filtered(lambda line: self.bank_pm.name in line.name)
        self.assertAlmostEqual(receivable_line_bank.balance, 230.25, msg='Bank receivable should be equal to the total bank payments.')

        receivable_line_cash = session_move.line_ids.filtered(lambda line: self.cash_pm.name in line.name)
        self.assertAlmostEqual(receivable_line_cash.balance, 474.64, msg='Cash receivable should be equal to the total cash payments.')

        # Tax lines: one per tax, compared against the hand-computed amounts
        # from the docstring table.
        tax_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.tax_received_account)
        manually_calculated_taxes = (-24.89, -51.82)
        self.assertAlmostEqual(sum(manually_calculated_taxes), sum(tax_lines.mapped('balance')))
        for t1, t2 in zip(sorted(manually_calculated_taxes), sorted(tax_lines.mapped('balance'))):
            self.assertAlmostEqual(t1, t2, msg='Taxes should be correctly combined.')

        base_amounts = (355.45, 518.18)
        self.assertAlmostEqual(sum(base_amounts), sum(tax_lines.mapped('tax_base_amount')))

        self.assertTrue(receivable_line_cash.full_reconcile_id, 'Cash receivable line should be fully-reconciled.')

    def test_orders_global_rounding(self):
        """ Test for orders with global rounding enabled

        Orders
        ======
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order   | payments | invoiced? | product  | qty | untaxed | tax                   | total  |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        | order 1 | cash     | no        | product1 | 2   | 5.21    | 1.09                  | 6.30   |
        |         |          |           | product2 | 1   | 2.98    | 0.62                  | 3.60   |
        |         |          |           | product2 | 1   | 0.04    | 0.01                  | 0.05   |
        +---------+----------+-----------+----------+-----+---------+-----------------------+--------+
        """
        # create a VAT tax of 21%, included in the public price
        Tax = self.env['account.tax']
        account_tax_21_incl = Tax.create({
            'name': 'VAT 21% incl',
            'amount_type': 'percent',
            'amount': 21.0,
            'price_include': True,
        })

        # Global (per-order) rounding instead of per-line rounding.
        account_tax_21_incl.company_id.tax_calculation_rounding_method = 'round_globally'

        product1 = self.create_product(
            'Product 1',
            self.categ_basic,
            3.15,
            tax_ids=account_tax_21_incl.ids,
        )
        product2 = self.create_product(
            'Product 2',
            self.categ_basic,
            3.60,
            tax_ids=account_tax_21_incl.ids,
        )
        product3 = self.create_product(
            'Product 3',
            self.categ_basic,
            0.05,
            tax_ids=account_tax_21_incl.ids,
        )

        self.open_new_session()

        # create orders
        orders = []
        orders.append(self.create_ui_order_data([
            (product1, 2),
            (product2, 1),
            (product3, 1)
        ]))

        # sync orders
        order = self.env['pos.order'].create_from_ui(orders)

        # check values before closing the session
        self.assertEqual(1, self.pos_session.order_count)
        orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
        # NOTE(review): the asserted value is 9.95 but the message says 9.96 —
        # confirm which figure is intended.
        self.assertAlmostEqual(orders_total, 9.95, msg='Total order amount should be 9.96 (Hint: check for rounding issues).')

    def test_orders_with_invoiced(self):
        """ Test for orders: one with invoice

        Orders
        ======
        +---------+----------+---------------+----------+-----+---------+---------------+--------+
        | order   | payments | invoiced?     | product  | qty | untaxed | tax           | total  |
        +---------+----------+---------------+----------+-----+---------+---------------+--------+
        | order 1 | cash     | no            | product1 | 6   | 60      | 4.2           | 64.2   |
        |         |          |               | product2 | 3   | 54.55   | 5.45          | 60     |
        |         |          |               | product3 | 1   | 27.27   | 2.73 + 1.91   | 31.91  |
        +---------+----------+---------------+----------+-----+---------+---------------+--------+
        | order 2 | bank     | no            | product1 | 1   | 10      | 0.7           | 10.7   |
        |         |          |               | product2 | 20  | 363.64  | 36.36         | 400    |
        +---------+----------+---------------+----------+-----+---------+---------------+--------+
        | order 3 | bank     | yes, customer | product1 | 10  | 100     | 7             | 107    |
        |         |          |               | product3 | 10  | 272.73  | 27.27 + 19.09 | 319.09 |
        +---------+----------+---------------+----------+-----+---------+---------------+--------+

        Calculated taxes
        ================
            total tax 7% only
                4.2 + 0.7 => 4.9 + 1.91 = 6.81
            total tax 10% only
                5.45 + 36.36 => 41.81 + 2.73 = 44.54

        Thus, manually_calculated_taxes = (-6.81, -44.54)
        """
        self.open_new_session()

        # create orders
        orders = []
        orders.append(self.create_ui_order_data(
            [(self.product3, 1), (self.product1, 6), (self.product2, 3)],
            payments=[(self.cash_pm, 156.11)],
        ))
        orders.append(self.create_ui_order_data(
            [(self.product2, 20), (self.product1, 1)],
            payments=[(self.bank_pm, 410.7)],
        ))
        orders.append(self.create_ui_order_data(
            [(self.product1, 10), (self.product3, 10)],
            payments=[(self.bank_pm, 426.09)],
            customer=self.customer,
            is_invoiced=True,
            uid='09876-098-0987',
        ))

        # sync orders
        order = self.env['pos.order'].create_from_ui(orders)

        # check values before closing the session
        self.assertEqual(3, self.pos_session.order_count)
        orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
        self.assertAlmostEqual(orders_total, self.pos_session.total_payments_amount, msg='Total order amount should be equal to the total payment amount.')

        # check account move in the invoiced order
        invoiced_order = self.pos_session.order_ids.filtered(lambda order: '09876-098-0987' in order.pos_reference)
        self.assertEqual(1, len(invoiced_order), 'Only one order is invoiced in this test.')
        invoice = invoiced_order.account_move
        self.assertAlmostEqual(invoice.amount_total, 426.09)

        # close the session
        self.pos_session.action_pos_session_validate()

        # check values after the session is closed
        session_move = self.pos_session.move_id

        # check sales line
        # should not include tax amounts
        sales_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.sale_account)
        self.assertAlmostEqual(sum(sales_lines.mapped('balance')), -515.46)

        # check receivable line
        # should be equivalent to receivable in the invoice
        # should also be fully-reconciled
        receivable_line = session_move.line_ids.filtered(lambda line: line.account_id == self.receivable_account)
        self.assertAlmostEqual(receivable_line.balance, -426.09)
        self.assertTrue(receivable_line.full_reconcile_id, msg='Receivable line for invoices should be fully reconciled.')

        pos_receivable_line_bank = session_move.line_ids.filtered(
            lambda line: self.bank_pm.name in line.name and line.account_id == self.bank_pm.receivable_account_id
        )
        self.assertAlmostEqual(pos_receivable_line_bank.balance, 836.79)

        # NOTE(review): this cash filter compares against
        # self.bank_pm.receivable_account_id — presumably intended to be
        # self.cash_pm.receivable_account_id; confirm (it passes when both
        # payment methods share the same receivable account).
        pos_receivable_line_cash = session_move.line_ids.filtered(
            lambda line: self.cash_pm.name in line.name and line.account_id == self.bank_pm.receivable_account_id
        )
        self.assertAlmostEqual(pos_receivable_line_cash.balance, 156.11)
        self.assertTrue(pos_receivable_line_cash.full_reconcile_id)

        receivable_line = session_move.line_ids.filtered(lambda line: line.account_id == self.receivable_account)
        self.assertAlmostEqual(receivable_line.balance, -invoice.amount_total)

        tax_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.tax_received_account)
        manually_calculated_taxes = (-6.81, -44.54)
        self.assertAlmostEqual(sum(manually_calculated_taxes), sum(tax_lines.mapped('balance')))
        for t1, t2 in zip(sorted(manually_calculated_taxes), sorted(tax_lines.mapped('balance'))):
            self.assertAlmostEqual(t1, t2, msg='Taxes should be correctly combined.')

        base_amounts = (97.27, 445.46)  # computation does not include invoiced order.
        self.assertAlmostEqual(sum(base_amounts), sum(tax_lines.mapped('tax_base_amount')))

    def test_return_order(self):
        """ Test return order

        Order (invoiced)
        ======
        +----------+----------+---------------+----------+-----+---------+-------------+-------+
        | order    | payments | invoiced?     | product  | qty | untaxed | tax         | total |
        +----------+----------+---------------+----------+-----+---------+-------------+-------+
        | order 1  | cash     | yes, customer | product1 | 3   | 30      | 2.1         | 32.1  |
        |          |          |               | product2 | 2   | 36.36   | 3.64        | 40    |
        |          |          |               | product3 | 1   | 27.27   | 2.73 + 1.91 | 31.91 |
        +----------+----------+---------------+----------+-----+---------+-------------+-------+

        The order is invoiced so the tax of the invoiced order is in the account_move of the order.
        However, the return order is not invoiced, thus, the journal items are in the session_move,
        which will contain the tax lines of the returned products.

        manually_calculated_taxes = (4.01, 6.37)
        """
        self.open_new_session()

        # create orders
        orders = []
        orders.append(self.create_ui_order_data(
            [(self.product1, 3), (self.product2, 2), (self.product3, 1)],
            payments=[(self.cash_pm, 104.01)],
            customer=self.customer,
            is_invoiced=True,
            uid='12345-123-1234',
        ))

        # sync orders
        order = self.env['pos.order'].create_from_ui(orders)

        # check values before closing the session
        self.assertEqual(1, self.pos_session.order_count)
        orders_total = sum(order.amount_total for order in self.pos_session.order_ids)
        self.assertAlmostEqual(orders_total, self.pos_session.total_payments_amount, msg='Total order amount should be equal to the total payment amount.')

        # return order
        order_to_return = self.pos_session.order_ids.filtered(lambda order: '12345-123-1234' in order.pos_reference)
        order_to_return.refund()

        # The refund created by refund() starts in draft until it is paid.
        refund_order = self.pos_session.order_ids.filtered(lambda order: order.state == 'draft')

        # Register a negative cash payment for the full refunded amount.
        context_make_payment = {"active_ids": [refund_order.id], "active_id": refund_order.id}
        make_payment = self.env['pos.make.payment'].with_context(context_make_payment).create({
            'payment_method_id': self.cash_pm.id,
            'amount': -104.01,
        })
        make_payment.check()
        self.assertEqual(refund_order.state, 'paid', 'Payment is registered, order should be paid.')
        self.assertAlmostEqual(refund_order.amount_paid, -104.01, msg='Amount paid for return order should be negative.')

        # close the session
        self.pos_session.action_pos_session_validate()

        # check values after the session is closed
        session_move = self.pos_session.move_id

        # instead of credit, the sales line should be debit
        sales_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.sale_account)
        self.assertAlmostEqual(sum(sales_lines.mapped('balance')), 93.63)

        receivable_line_bank = session_move.line_ids.filtered(lambda line: self.bank_pm.name in line.name)
        self.assertFalse(receivable_line_bank, msg='There should be no bank receivable line because no bank payment made.')

        receivable_line_cash = session_move.line_ids.filtered(lambda line: self.cash_pm.name in line.name)
        self.assertFalse(receivable_line_cash, msg='There should be no cash receivable line because it is combined with the original cash payment.')

        manually_calculated_taxes = (4.01, 6.37)  # should be positive since it is return order
        tax_lines = session_move.line_ids.filtered(lambda line: line.account_id == self.tax_received_account)
        self.assertAlmostEqual(sum(manually_calculated_taxes), sum(tax_lines.mapped('balance')))
        for t1, t2 in zip(sorted(manually_calculated_taxes), sorted(tax_lines.mapped('balance'))):
            self.assertAlmostEqual(t1, t2, msg='Taxes should be correctly combined and should be debit.')
| 49.778736 | 155 | 0.539052 |
07a9a54ba45e3191911364f8502194cfe1b61dae | 2,841 | py | Python | src/oci/identity/models/fully_qualified_scope.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity/models/fully_qualified_scope.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/identity/models/fully_qualified_scope.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class FullyQualifiedScope(object):
    """
    FullyQualifiedScope model.

    Pairs an allowed permission ``scope`` with the ``audience`` it applies to.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new FullyQualifiedScope object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param audience:
            The value to assign to the audience property of this FullyQualifiedScope.
        :type audience: str

        :param scope:
            The value to assign to the scope property of this FullyQualifiedScope.
        :type scope: str

        """
        self.swagger_types = {'audience': 'str', 'scope': 'str'}
        self.attribute_map = {'audience': 'audience', 'scope': 'scope'}
        self._audience = None
        self._scope = None

    @property
    def audience(self):
        """
        **[Required]** Gets the audience of this FullyQualifiedScope.
        Audience for the given scope context.

        :return: The audience of this FullyQualifiedScope.
        :rtype: str
        """
        return self._audience

    @audience.setter
    def audience(self, audience):
        """
        Sets the audience of this FullyQualifiedScope.
        Audience for the given scope context.

        :param audience: The audience of this FullyQualifiedScope.
        :type: str
        """
        self._audience = audience

    @property
    def scope(self):
        """
        **[Required]** Gets the scope of this FullyQualifiedScope.
        Allowed permission scope for the given context.

        :return: The scope of this FullyQualifiedScope.
        :rtype: str
        """
        return self._scope

    @scope.setter
    def scope(self, scope):
        """
        Sets the scope of this FullyQualifiedScope.
        Allowed permission scope for the given context.

        :param scope: The scope of this FullyQualifiedScope.
        :type: str
        """
        self._scope = scope

    def __repr__(self):
        # Delegate to the shared oci helper for a consistent model repr.
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # None never equals a model instance; otherwise compare all attributes.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
| 27.852941 | 245 | 0.629004 |
8c90d11dfd81ece50aa5fd0c41bc750cf168ae70 | 15 | py | Python | src/data_path.py | pawel-rozwoda/lstm-diarization | c900b08996bdd38b2d92182e8a428caa9de62445 | [
"MIT"
] | null | null | null | src/data_path.py | pawel-rozwoda/lstm-diarization | c900b08996bdd38b2d92182e8a428caa9de62445 | [
"MIT"
] | null | null | null | src/data_path.py | pawel-rozwoda/lstm-diarization | c900b08996bdd38b2d92182e8a428caa9de62445 | [
"MIT"
] | null | null | null | DATA_PATH = ''
| 7.5 | 14 | 0.6 |
c37b1715e90f0c5c107bb375c4412e9d4946e949 | 13,996 | py | Python | ivy/functional/ivy/manipulation.py | patrickf949/ivy | aa2929923ab31d952a3d75f1ad2def35635850ae | [
"Apache-2.0"
] | 1 | 2022-03-10T19:36:02.000Z | 2022-03-10T19:36:02.000Z | ivy/functional/ivy/manipulation.py | patrickf949/ivy | aa2929923ab31d952a3d75f1ad2def35635850ae | [
"Apache-2.0"
] | null | null | null | ivy/functional/ivy/manipulation.py | patrickf949/ivy | aa2929923ab31d952a3d75f1ad2def35635850ae | [
"Apache-2.0"
] | null | null | null | # global
from typing import Union, Optional, Tuple, List, Iterable
from numbers import Number
# local
import ivy
from ivy.framework_handler import current_framework as _cur_framework
# Array API Standard #
# -------------------#
def roll(x: Union[ivy.Array, ivy.NativeArray],
         shift: Union[int, Tuple[int, ...]],
         axis: Optional[Union[int, Tuple[int, ...]]] = None,
         out: Optional[Union[ivy.Array, ivy.NativeArray]] = None) \
        -> ivy.Array:
    """Roll array elements along a specified axis.

    Elements shifted beyond the last position wrap around to the first
    position (and vice versa for negative shifts).

    :param x: input array.
    :param shift: number of places elements are shifted; when a tuple,
        ``axis`` must be a tuple of the same size, one shift per axis.
    :param axis: axis (or axes) along which to shift; ``None`` flattens,
        shifts, then restores the original shape.
    :param out: optional output array, for writing the result to.
    :return: array with the same data type as ``x`` and shifted elements.
    """
    framework = _cur_framework(x)
    return framework.roll(x, shift, axis, out)
def squeeze(x: Union[ivy.Array, ivy.NativeArray],
            axis: Union[int, Tuple[int, ...]],
            out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> ivy.Array:
    """Remove singleton dimensions (axes) from ``x``.

    :param x: input array.
    :param axis: axis (or axes) to squeeze; the backend raises a
        ``ValueError`` if a specified axis has size greater than one.
    :param out: optional output array, for writing the result to.
    :return: array with the same data type and elements as ``x``.
    """
    framework = _cur_framework(x)
    return framework.squeeze(x, axis, out)
def flip(x: Union[ivy.Array, ivy.NativeArray],
         axis: Optional[Union[int, Tuple[int], List[int]]] = None,
         out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> ivy.Array:
    """Reverse the order of elements along the given axis, preserving shape.

    :param x: input array.
    :param axis: axis (or axes) to flip; ``None`` flips all axes, negative
        values count from the last dimension.
    :param out: optional output array, for writing the result to.
    :return: array with the same data type and shape as ``x``, reordered.
    """
    framework = _cur_framework(x)
    return framework.flip(x, axis, out)
def expand_dims(x: Union[ivy.Array, ivy.NativeArray],
                axis: int = 0,
                out: Optional[Union[ivy.Array, ivy.NativeArray]] = None) \
        -> ivy.Array:
    """Insert a new axis of size one at position ``axis``.

    :param x: input array.
    :param axis: position in the expanded shape where the new axis is placed.
    :param out: optional output array, for writing the result to.
    :return: array with the number of dimensions increased by one.
    """
    framework = _cur_framework(x)
    return framework.expand_dims(x, axis, out)
def permute_dims(x: Union[ivy.Array, ivy.NativeArray],
                 axes: Tuple[int, ...],
                 out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> ivy.Array:
    """Permute the axes (dimensions) of ``x``.

    :param x: input array.
    :param axes: permutation of ``(0, 1, ..., N-1)`` where ``N`` is the
        number of axes of ``x``.
    :param out: optional output array, for writing the result to.
    :return: array with permuted axes and the same data type as ``x``.
    """
    framework = _cur_framework(x)
    return framework.permute_dims(x, axes, out)
def stack(arrays: Union[Tuple[ivy.Array], List[ivy.Array], Tuple[ivy.NativeArray], List[ivy.NativeArray]],
          axis: int = 0, out: Optional[Union[ivy.Array, ivy.NativeArray]] = None) \
        -> ivy.Array:
    """Join a sequence of equally-shaped arrays along a new axis.

    :param arrays: input arrays to join; each must have the same shape.
    :param axis: index of the new axis in the result; must lie in
        ``[-N, N)`` for rank-``N`` inputs. Default: ``0``.
    :param out: optional output array, for writing the result to.
    :return: array of rank ``N+1``; when input data types differ the result
        follows type promotion, otherwise it keeps the common data type.
    """
    framework = _cur_framework(arrays)
    return framework.stack(arrays, axis, out)
def reshape(x: Union[ivy.Array, ivy.NativeArray],
            shape: Tuple[int, ...],
            copy: Optional[bool] = None,
            out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> ivy.Array:
    """Give a new shape to an array without changing its data.

    :param x: array to be reshaped.
    :param shape: target shape, compatible with the original shape; one
        dimension may be ``-1`` and is then inferred from the array length
        and the remaining dimensions.
    :param copy: whether the backend may copy the underlying data.
    :param out: optional output array, for writing the result to.
    :return: the reshaped array.

    Examples:
    >>> x = ivy.array([[1,2,3], [4,5,6]])
    >>> y = ivy.reshape(x, (3,2))
    >>> print(y)
    [[1, 2],
     [3, 4],
     [5, 6]]
    """
    framework = _cur_framework(x)
    return framework.reshape(x, shape, copy, out)
def concat(xs: Union[Tuple[Union[ivy.Array, ivy.NativeArray, ivy.Container]],
                     List[Union[ivy.Array, ivy.NativeArray, ivy.Container]]],
           axis: Optional[int] = 0,
           out: Optional[Union[ivy.Array, ivy.NativeArray, ivy.Container]] = None)\
        -> Union[ivy.Array, ivy.Container]:
    """Concatenate arrays along an existing axis.

    :param xs: input arrays; shapes must match except in the dimension
        corresponding to ``axis``.
    :param axis: axis along which the arrays are joined. Default: ``0``.
    :param out: optional output array, for writing the result to.
    :return: the concatenated array.
    """
    framework = _cur_framework(xs[0])
    return framework.concat(xs, axis, out)
# Extra #
# ------#
def split(x: Union[ivy.Array, ivy.NativeArray],
          num_or_size_splits: Optional[Union[int, Iterable[int]]] = None, axis: int = 0,
          with_remainder: bool = False) -> Union[ivy.Array, ivy.NativeArray]:
    """Split an array into multiple sub-arrays.

    :param x: array to be divided into sub-arrays.
    :param num_or_size_splits: number of equal sub-arrays to produce (int),
        or the size of each split element (sequence of ints). ``None``
        divides into as many 1-dimensional arrays as the axis dimension.
        (Annotation fixed to ``Optional`` to match the ``None`` default.)
    :param axis: the axis along which to split. Default: ``0``.
    :param with_remainder: if the tensor does not split evenly, keep the
        final remainder entry. Default: ``False``.
    :return: a list of sub-arrays.
    """
    return _cur_framework(x).split(x, num_or_size_splits, axis, with_remainder)
def repeat(x: Union[ivy.Array, ivy.NativeArray], repeats: Union[int, Iterable[int]],
           axis: Optional[int] = None,
           out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Repeat values along a given dimension.

    :param x: input array.
    :param repeats: number of repetitions for each element; broadcast to fit
        the shape of the given axis.
    :param axis: the axis along which to repeat values; ``None`` uses the
        flattened input array and returns a flat output array.
        (Annotation fixed to ``Optional[int]`` to match the ``None`` default.)
    :param out: optional output array, for writing the result to.
    :return: the repeated output array.
    """
    return _cur_framework(x).repeat(x, repeats, axis, out)
def tile(x: Union[ivy.Array, ivy.NativeArray],
         reps: Iterable[int],
         out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Construct an array by repeating ``x`` the number of times given by ``reps``.

    :param x: input array.
    :param reps: number of repetitions of ``x`` along each axis.
    :param out: optional output array, for writing the result to.
    :return: the tiled output array.
    """
    framework = _cur_framework(x)
    return framework.tile(x, reps, out)
def constant_pad(x: Union[ivy.Array, ivy.NativeArray],
                 pad_width: Iterable[Tuple[int]],
                 value: Number = 0,
                 out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Pad an array with a constant value.

    :param x: input array to pad.
    :param pad_width: per-axis padding widths, specified as
        ``((before_1, after_1), ..., (before_N, after_N))`` for an
        ``N``-axis input.
    :param value: the constant used for padding. Default: ``0``.
    :param out: optional output array, for writing the result to.
    :return: padded array of rank equal to ``x`` with shape increased
        according to ``pad_width``.
    """
    framework = _cur_framework(x)
    return framework.constant_pad(x, pad_width, value, out)
def zero_pad(x: Union[ivy.Array, ivy.NativeArray],
             pad_width: Iterable[Tuple[int]],
             out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Pad an array with zeros.

    :param x: input array to pad.
    :param pad_width: per-axis padding widths, specified as
        ``((before_1, after_1), ..., (before_N, after_N))`` for an
        ``N``-axis input.
    :param out: optional output array, for writing the result to.
    :return: padded array of rank equal to ``x`` with shape increased
        according to ``pad_width``.
    """
    framework = _cur_framework(x)
    return framework.zero_pad(x, pad_width, out)
def swapaxes(x: Union[ivy.Array, ivy.NativeArray],
             axis0: int,
             axis1: int,
             out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Interchange two axes of an array.

    :param x: input array.
    :param axis0: first axis to be swapped.
    :param axis1: second axis to be swapped.
    :param out: optional output array, for writing the result to.
    :return: ``x`` with the two axes exchanged.
    """
    framework = _cur_framework(x)
    return framework.swapaxes(x, axis0, axis1, out)
def clip(x: Union[ivy.Array, ivy.NativeArray],
         x_min: Union[Number, Union[ivy.Array, ivy.NativeArray]],
         x_max: Union[Number, Union[ivy.Array, ivy.NativeArray]],
         out: Optional[Union[ivy.Array, ivy.NativeArray]] = None)\
        -> Union[ivy.Array, ivy.NativeArray]:
    """Limit (clip) array values to the interval ``[x_min, x_max]``.

    Element-wise, values smaller than ``x_min`` become ``x_min`` and values
    larger than ``x_max`` become ``x_max``; e.g. with the interval
    ``[0, 1]``, ``-0.5`` maps to ``0`` and ``1.5`` maps to ``1``.

    :param x: input array containing elements to clip.
    :param x_min: minimum value (scalar or array).
    :param x_max: maximum value (scalar or array).
    :param out: optional output array, for writing the result to.
    :return: the clipped array.
    """
    framework = _cur_framework(x)
    return framework.clip(x, x_min, x_max, out)
| 41.904192 | 738 | 0.641969 |
22d38bf1673dd67749b7096b69a43eb8068bc798 | 260 | py | Python | Practice/Python/Sets/Set_Mutations.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
] | 1 | 2021-11-17T02:47:11.000Z | 2021-11-17T02:47:11.000Z | Practice/Python/Sets/Set_Mutations.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
] | null | null | null | Practice/Python/Sets/Set_Mutations.py | alexanderbauer89/HackerRank | 0fb6face083b0183692c9251ffe4bb635591393f | [
"MIT"
if __name__ == '__main__':
    # HackerRank "Set Mutations": read the (unused) element count and the
    # initial set, then apply N mutation commands read from stdin.
    n, base_set = input(), set(map(int, input().split()))
    num_ops = int(input())
    for _ in range(num_ops):
        cmd = input().split()
        # cmd[0] names a set method (update / intersection_update / ...);
        # dispatch it dynamically with the next line's integers as argument.
        mutate = getattr(base_set, cmd[0])
        mutate(set(map(int, input().split())))
    print(sum(base_set))
| 37.142857 | 71 | 0.573077 |
ebceb0f81056f10ab08e2502bd43d8adbbd87985 | 18,330 | py | Python | main/1d/inverse_1d.py | QiuhongAnnaWei/PINNs | 5276b625f75ff613cdcc0133d9737a5f55b8a5eb | [
"MIT"
] | 1 | 2020-12-28T06:11:29.000Z | 2020-12-28T06:11:29.000Z | main/1d/inverse_1d.py | QiuhongAnnaWei/PINNs | 5276b625f75ff613cdcc0133d9737a5f55b8a5eb | [
"MIT"
] | null | null | null | main/1d/inverse_1d.py | QiuhongAnnaWei/PINNs | 5276b625f75ff613cdcc0133d9737a5f55b8a5eb | [
"MIT"
] | null | null | null | # Inverse: given observed data of u(t, x) -> model/pde parameters λ
import time, sys, os, json
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
# from plotting import newfig, savefig
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from pyDOE import lhs
# from scipy.interpolate import griddata
# import scipy.io
sys.path.insert(0, '../../Utilities/') # for plotting
# from plotting import newfig, savefig
# np.random.seed(1234)
# tf.set_random_seed(1234)
class PhysicsInformedNN:
    """Physics-informed neural network (TF1 graph mode) for the 1-D inverse
    problem u''(x) = lambda_1 * sin(lambda_2 * x).

    Builds a dense tanh network u(x), derives u_x and the PDE residual f via
    automatic differentiation, and trains network weights together with the
    unknown PDE parameters lambda_1 / lambda_2 by minimizing a combined
    initial + boundary + observed-data + residual loss.
    """

    def __init__(self, x0, u0, xb, u_xb, xo, uo, xf, lambda0, layers, lowerbound, upperbound):
        """Store training data, build the TF1 graph, and start a session.

        :param x0, u0: initial-condition point and value (column vectors).
        :param xb, u_xb: boundary point and the derivative value u_x there.
        :param xo, uo: observed data points and values.
        :param xf: collocation points where the residual f is driven to 0.
        :param lambda0: initial guesses [lambda_1, lambda_2].
        :param layers: layer widths, e.g. [1, 20, 20, 20, 1].
        :param lowerbound, upperbound: domain bounds used to normalize inputs.
        """
        self.x0 = x0
        self.u0 = u0
        self.xb = xb
        self.u_xb = u_xb
        self.xo = xo
        self.uo = uo
        self.xf = xf
        self.lowerbound = lowerbound
        self.upperbound = upperbound
        self.layers = layers
        # Initialize NN
        self.weights, self.biases = self.initialize_NN(layers)
        # PDE parameters are trainable variables alongside the network weights.
        self.lambda_1 = tf.Variable([lambda0[0]], dtype=tf.float32)
        self.lambda_2 = tf.Variable([lambda0[1]], dtype=tf.float32)
        # number of cols = 1
        self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]]) # (1, 1)
        self.u0_tf = tf.placeholder(tf.float32, shape=[None, self.u0.shape[1]]) # (1, 1)
        self.xb_tf = tf.placeholder(tf.float32, shape=[None, self.xb.shape[1]]) # (1, 1)
        self.u_xb_tf = tf.placeholder(tf.float32, shape=[None, self.u_xb.shape[1]]) # (1, 1)
        self.xo_tf = tf.placeholder(tf.float32, shape=[None, self.xo.shape[1]]) # N_train x 1
        self.uo_tf = tf.placeholder(tf.float32, shape=[None, self.uo.shape[1]]) # N_train x 1
        self.xf_tf = tf.placeholder(tf.float32, shape=[None, self.xf.shape[1]]) # N_f x 1
        self.lr_tf = tf.placeholder(tf.float32)
        # tf Graphs: u, u_x, f = net_all(x)
        self.u0_pred, _, _ = self.net_all(self.x0_tf)
        _, self.u_xb_pred, _ = self.net_all(self.xb_tf)
        self.uo_pred, _, _ = self.net_all(self.xo_tf)
        _, _, self.f_pred = self.net_all(self.xf_tf)
        # Loss: initial + boundary + observed data + PDE
        self.loss = tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred)) + \
                    tf.reduce_mean(tf.square(self.u_xb_tf - self.u_xb_pred)) + \
                    tf.reduce_mean(tf.square(self.uo_tf - self.uo_pred)) + \
                    tf.reduce_mean(tf.square(self.f_pred)) # NOTE: different from the observed data
        # tf.reduce_mean: computes the mean of elements across dimensions of a tensor
        # Optimizers:
        # return a minimization Op (a graph node that performs computation on tensors) -> updates weights, biases, lambdas
        self.train_op_Adam = tf.train.AdamOptimizer(learning_rate=self.lr_tf).minimize(self.loss)
        # NOTE: default: learning_rate=0.001 (typically 0.001 is the max, can make smaller)
        # tf session: initiates a tf Graph (defines computations) that processes tensors through operations + allocates resources + holds intermediate values
        self.sess = tf.Session()
        init = tf.global_variables_initializer() # variables now hold the values from declarations: tf.Variable(tf.zeros(...)), tf.Variable(tf.random_normal(...)), etc
        self.sess.run(init) # required to initialize the variables

    def initialize_NN(self, layers):
        """Create per-layer weight (Xavier) and bias (zeros) variables."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0, num_layers-1):
            # tf.Variable: for trainable variables/mutable tensor values that persist across multiple sesssion.run()
            # https://towardsdatascience.com/understanding-fundamentals-of-tensorflow-program-and-why-it-is-necessary-94cf5b60e255
            weights.append(self.xavier_init(size=[layers[l], layers[l+1]]))
            biases.append(tf.Variable(
                tf.zeros([1, layers[l+1]], dtype=tf.float32), dtype=tf.float32)) # all zeros
        return weights, biases

    def xavier_init(self, size):
        """Return a trainable weight matrix drawn with Xavier/Glorot stddev."""
        # https://towardsdatascience.com/weight-initialization-in-neural-networks-a-journey-from-the-basics-to-kaiming-954fb9b47c79
        # Want each layer's activation outputs to have stddev around 1 -> repeat matrix mult across as many layers without activations exploding or vanishing
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim))
        # random values from a truncated normal distribution (values whose magnitude>2 staddev from mean are dropped and re-picked)
        # Shape of the output tensor: [layers[l], layers[l+1]]
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)

    def neural_net(self, X, weights, biases):
        """Forward pass: normalize X to [-1, 1], apply tanh hidden layers,
        and finish with a linear output layer."""
        num_layers = len(weights) + 1 # 6 in this case
        H = 2.0*(X - self.lowerbound)/(self.upperbound - self.lowerbound) - 1.0 # Initializing first input: mapping to [-1, 1]
        for l in range(0, num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b)) # passing along networks
            # NOTE: H*W=(50, 20) + B(1, 20) -> tf does broadcasting: B becomes (50, 20)
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b) # passed 5 times in total
        return Y

    def net_all(self, x):
        """Return (u, u_x, f) where f = u_xx - lambda_1*sin(lambda_2*x) is
        the PDE residual built via tf.gradients."""
        lambda_1 = self.lambda_1
        lambda_2 = self.lambda_2
        u = self.neural_net(x, self.weights, self.biases)
        u_x = tf.gradients(u, x)[0]
        u_xx = tf.gradients(u_x, x)[0]
        f = u_xx - lambda_1 * tf.sin(lambda_2 * x)
        return u, u_x, f

    def train(self, lr): # one iteration: uses all training data from tf_dict and updates weights, biases, lambdas
        """Run one Adam step at learning rate lr; return (loss, lambda_1, lambda_2)."""
        tf_dict = {self.x0_tf: self.x0, self.u0_tf: self.u0,
                   self.xb_tf: self.xb, self.u_xb_tf: self.u_xb,
                   self.xo_tf: self.xo, self.uo_tf: self.uo, self.xf_tf: self.xf,
                   self.lr_tf: lr}
        # feeding training examples during training and running the minimization Op of self.loss
        self.sess.run(self.train_op_Adam, tf_dict)
        loss_value, lambda_1, lambda_2 = self.sess.run([self.loss, self.lambda_1, self.lambda_2], tf_dict) # lambda_1/2 do not need tf_dict
        return loss_value, lambda_1, lambda_2

    def predict(self, x):
        """Evaluate the trained network: return (u, f) at the points x."""
        tf_dict = {self.xo_tf: x, self.xf_tf: x} # no need for u
        # want to use the values in Session
        u, f = self.sess.run([self.uo_pred, self.f_pred], tf_dict)
        return u, f
if __name__ == "__main__":
    # Driver script: builds training data from the analytical solution,
    # trains the PINN to recover lambda_1/lambda_2, then saves plots + JSON.
    # u''(x) = lambda_1 * sin(lambda_2 * x), x in [0, pi] -> u''(x) = 9 * sin(3 * x)
    # NOTE: lambda defined in f/pde, not in u(x)
    # NOTE: to uniquely identify lambda, typically need boundary condition
    # u(0) = 0
    # u'(pi) = 3, u'(x) = - 3 cos(3x)
    ## NOTE: given initial and boundary -> used in loss (identical to forward implementation)
    # analytical solution: u(x) = - sin(3x)
    # global settings for all subplots
    plt.rcParams['xtick.labelsize'] = 6
    plt.rcParams['ytick.labelsize'] = 6
    plt.rcParams['axes.labelsize'] = 7
    plt.rcParams['axes.titlesize'] = 8
    ###########################
    ## PART 1: initialization
    # 5-layer deep NN with 100 neurons/layer & hyperbolic tangent act. func.
    layers = [1, 20, 20, 20, 1]
    # Domain bounds
    lowerbound = np.array([0])
    upperbound = np.array([np.pi])
    # true and initial value for lambda
    lambda_gt = np.array([9, 3])
    lambda0 = np.array([5, 2])
    ###########################
    ## PART 2: setting up data
    # boundary condition
    x0 = np.array([[lowerbound[0]]])
    u0 = np.array([[0]])
    xb = np.array([[upperbound[0]]])
    u_xb = np.array([[3]])
    # observed u based on analytical solution
    N_observed = 5
    xo = np.reshape(np.linspace(lowerbound[0]+0.2, upperbound[0]-0.2, N_observed), (-1, 1))
    # xo = np.array([[1.1], [2.1]])
    uo = -1 * np.sin(3 * xo)
    # collocation points for enforcing f=0
    # NOTE: separate from observed data: want f/residual = 0 everywhere
    N_f = 30
    xf = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_f), (-1, 1)) # collocation points
    # testing data
    N_test = 50
    xt = np.reshape(np.linspace(lowerbound[0], upperbound[0], N_test), (-1, 1)) # [[0] [pi/2] [pi]]
    ut = -1 * np.sin(3 * xt)
    ###########################
    ## PART 3: forming the network, training, predicting
    model = PhysicsInformedNN(x0, u0, xb, u_xb, xo, uo, xf, lambda0, layers, lowerbound, upperbound)
    start_time = time.time()
    # settings for plots
    dirpath = f'./main/1d/inverse_1d_figures/{start_time}' # where figures are stored
    os.mkdir(dirpath)
    ticksize = 8.5
    plt.rcParams['xtick.labelsize'] = ticksize
    plt.rcParams['ytick.labelsize'] = ticksize
    plt.rcParams['axes.labelsize'] = 9.5
    plt.rcParams['axes.titlesize'] = 10.5
    plt.rcParams['lines.markersize'] = 4
    plt.rcParams['legend.handlelength'] = 0.4
    annotatesize = 9.5
    # Persist the exact training/testing data used for this run.
    dataDict = {
        'boundary points':{
            'xb': xb.tolist(),
            'u_xb': u_xb.tolist(),
            'x0': x0.tolist(),
            'u0': u0.tolist(),
        },
        'collocation points':{
            'N_f': N_f,
            'xf': xf.tolist(),
        },
        'observed data':{
            'N_observed': N_observed,
            'xo': xo.tolist(),
            'uo': uo.tolist()
        },
        'testing data':{
            "N_test": N_test,
            "xt": xt.tolist(),
            "ut": ut.tolist()
        }
    }
    with open(f'{dirpath}/data.json', 'w') as f:
        json.dump(dataDict, f)
    # Loss: 10^-3/-4 should be about good
    loss_values, u_preds, f_preds = ([] for i in range(3))
    lambda_1s = [float(lambda0[0])] # 1d - initial
    lambda_2s = [float(lambda0[1])] # 1d - initial
    N_iter = 18000
    loss_value_step = 10
    pred_step = 2000
    for i in range(N_iter):
        lr = 10**-3 * 2**(-i/30000) if i <= 60000 else 10**-3 * 2**(-60000/30000) # 0.00002210(55000)/0.00001563 # learning rate decay
        loss_value, lambda_1, lambda_2 = model.train(lr) # from last iteration
        # NOTE(review): the "end with i=3999" comments below look stale for N_iter=18000 — confirm.
        if (i+1) % loss_value_step == 0: # start with i=9 and end with i=3999 (last iter)
            loss_values.append(float(loss_value))
            lambda_1s.append(float(lambda_1))
            lambda_2s.append(float(lambda_2))
            print('Iter: %d, Loss: %.3e, Lambda_1: %.5f, lambda_2: %.5f, Time: %.2f, Learning Rate: %.8f' % (i+1, loss_value, lambda_1, lambda_2, time.time() - start_time, lr))
        if (i+1) % pred_step == 0: # start with i=999 and end with i=3999 (last iter)
            u_pred, f_pred = model.predict(xt)
            u_preds.append(u_pred) # (N_test, 1)
            f_preds.append(f_pred) # (N_test, 1)
    training_time = time.time() - start_time
    u_preds = np.array(u_preds)
    f_preds = np.array(f_preds)
    u_pred, f_pred = model.predict(xt)
    print("Initial lambda_1: %.5f, Final lambda_1: %.5f" % (lambda0[0], lambda_1))
    print("Initial lambda_2: %.5f, Final lambda_2: %.5f" % (lambda0[1], lambda_2))
    # NOTE: what is important is the function u_pred resembles, not so much the parameters (weights & biases)
    # NOTE: if no analytical solution, find numerical method/other method to verify -> directly use network
    ###########################
    ## PART 4: calculating errors
    error_u = np.linalg.norm(u_pred - ut, 2) / np.linalg.norm(ut, 2) # scalar
    error_lambda_1 = np.abs(lambda_1 - lambda_gt[0])/lambda_gt[0] * 100 # Ground truth: lambda_1=9 # 1d np array
    error_lambda_2 = np.abs(lambda_2 - lambda_gt[1])/lambda_gt[1] * 100 # Ground truth: lambda_2=3 # 1d np array
    print('Error u: %e | Error lambda_1: %.5f%% | Error lambda_2: %.5f%%' % (error_u, error_lambda_1, error_lambda_2))
    ###########################
    ## PART 5: Plotting
    # Plot 1. loss vs. iteration,, lambda_2 vs iteration
    fig = plt.figure(figsize=(5, 6))
    plt.ticklabel_format(axis='x', style="sci", scilimits=(2,2))
    x_coords = loss_value_step * (np.array(range(len(loss_values))) + 1)
    plt.semilogy(x_coords, loss_values) # linear X axis, logarithmic y axis(log scaling on the y axis)
    plt.gca().set(xlabel='Iteration', ylabel='Loss', title='Loss during Training')
    init_tuple = (loss_value_step, loss_values[0])
    plt.annotate('(%d, %.3e)' % init_tuple, xy=init_tuple, fontsize=annotatesize, ha='left')
    last_tuple = (N_iter, loss_values[-1])
    plt.annotate('(%d, %.3e)' % last_tuple, xy=last_tuple, fontsize=annotatesize, ha='right', va='top')
    plt.plot([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]], '.', c='#3B75AF')
    fig.subplots_adjust(left=0.13, right=0.98, bottom=0.07, top=0.95)
    # NOTE: Oscillation: actually very small nummerical difference because of small y scale
    # 1. overshoot (fixed -> decaying learning rate)
    # 2. Adam: gradient descent + momentum (sometime parameter change makes the loss go up)
    plt.savefig(f'{dirpath}/inverse_1d_loss.pdf')
    plt.close(fig)
    with open(f'{dirpath}/inverse_1d_loss.json', 'w') as f:
        json.dump({"x_coords": x_coords.tolist(), "loss_values": loss_values}, f)
    # Plot 2. lambda_1 vs iteration
    fig = plt.figure(figsize=(4.3, 6))
    plt.ticklabel_format(axis='x', style="sci", scilimits=(2,2))
    x_coords = loss_value_step * (np.array(range(len(lambda_1s)))) # has an additional entry than loss_values: initial
    plt.plot(x_coords, lambda_1s)
    plt.gca().set(xlabel="Iteration", ylabel="Lambda_1", title="Lambda_1 during Training")
    init_tuple = (0, lambda_1s[0])
    plt.annotate('(%d, %.5f)' % init_tuple, xy=init_tuple, fontsize=annotatesize, ha='left')
    last_tuple = (N_iter, lambda_1s[-1])
    plt.annotate('(%d, %.5f)' % last_tuple, xy=last_tuple, fontsize=annotatesize, ha='right')
    plt.plot([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]], '.', c='#3B75AF')
    fig.subplots_adjust(left=0.13, right=0.98, bottom=0.07, top=0.95)
    plt.savefig(f'{dirpath}/inverse_1d_lambda_1.pdf')
    plt.close(fig)
    with open(f'{dirpath}/inverse_1d_lambda_1.json', 'w') as f:
        json.dump({"x_coords": x_coords.tolist(), "lambda_1s": lambda_1s}, f)
    # Plot 3. lambda_2 vs iteration
    fig = plt.figure(figsize=(4.3, 6))
    plt.ticklabel_format(axis='x', style="sci", scilimits=(2,2))
    x_coords = loss_value_step * (np.array(range(len(lambda_2s)))) # has an additional entry than loss_values: initial
    plt.plot(x_coords, lambda_2s)
    plt.gca().set(xlabel="Iteration", ylabel="Lambda_2", title="Lambda_2 during Training")
    init_tuple = (0, lambda_2s[0])
    plt.annotate('(%d, %.5f)' % init_tuple, xy=init_tuple, fontsize=annotatesize, ha='left')
    last_tuple = (N_iter, lambda_2s[-1])
    plt.annotate('(%d, %.5f)' % last_tuple, xy=last_tuple, fontsize=annotatesize, ha='right')
    plt.plot([init_tuple[0], last_tuple[0]], [init_tuple[1], last_tuple[1]], '.', c='#3B75AF')
    fig.subplots_adjust(left=0.13, right=0.98, bottom=0.07, top=0.95)
    plt.savefig(f'{dirpath}/inverse_1d_lambda_2.pdf')
    plt.close(fig)
    with open(f'{dirpath}/inverse_1d_lambda_2.json', 'w') as f:
        json.dump({"x_coords": x_coords.tolist(), "lambda_2s": lambda_2s}, f)
    # Plot 4. u vs x (exact, prediction)
    plt.rcParams['axes.labelpad'] = -1 # default = 4
    fig = plt.figure(figsize=(11, 6))
    for i in range(u_preds.shape[0]):
        ax = plt.subplot(3, 3, i+1)
        exact_plot, = ax.plot(xt, ut, 'b-', label='Exact') # tuple unpacking
        pred_plot, = ax.plot(xt, u_preds[i], 'r--', label='Prediction')
        plt.gca().set(xlabel="$x$", ylabel="$u$", title=f'Snapshot at Iteration = {(i+1)*pred_step}')
    plt.figlegend(handles=(exact_plot, pred_plot), labels=('Exact', 'Prediction'), loc='upper center', ncol=2, fontsize=ticksize) # from last subplot
    fig.subplots_adjust(wspace=0.2, hspace=0.48, left=0.05, right=0.98, bottom=0.06, top=0.89)
    plt.savefig(f'{dirpath}/inverse_1d_u.pdf')
    plt.close(fig)
    with open(f'{dirpath}/inverse_1d_u_preds.json', 'w') as f:
        json.dump(u_preds.tolist(), f)
    ###########################
    ## PART 6: Saving information
    infoDict = {
        'problem':{
            'pde form': 'u_xx = lambda_1 * sin(lambda_2 * x)',
            'lambda_1 gt': float(lambda_gt[0]),
            # NOTE(review): indexes lambda_gt[0] — looks like it should be lambda_gt[1]; confirm.
            'lambda_2 gt': float(lambda_gt[0]),
            'boundary (x)': [float(lowerbound[0]),float(upperbound[0])],
            'initial condition': 'u(0) = 0',
            'boundary condition': "u'(pi) = 3",
            'analytical solution': 'u(x) = - sin(3x)'
        },
        'model':{
            'layers': str(layers),
            'training iteration': N_iter,
            'loss_value_step':loss_value_step,
            'pred_step': pred_step,
            'training_time': training_time,
            'initial lambda_1': float(lambda0[0]),
            'final lambda_1': float(lambda_1[0]),
            'error_lambda_1 percentage': float(error_lambda_1[0]),
            'initial lambda_2': float(lambda0[1]),
            'final lambda_2': float(lambda_2[0]),
            'error_lambda_2 percentage': float(error_lambda_2[0]),
            'error_u': error_u
        },
        'training data':{
            'initial': (float(x0[0][0]), float(u0[0][0])),
            'boundary': (float(xb[0][0]), float(u_xb[0][0])) ,
            'N_observed': N_observed,
            'xo': str(xo),
            'uo': str(uo),
            'N_f': N_f,
            'xf': str(xf),
        },
        'testing data':{
            'N_test': N_test,
            'xt': str(xt),
            'ut': str(ut)
        }
    }
    with open(f'{dirpath}/info.json', 'w') as f:
        json.dump(infoDict, f, indent=4)
    # TODO: noisy data (to reflect practiacl situation)
    # noise = 0.01 # (to assume some error in observation data)
    # u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1]) # samples of specified size from standard normal
    # v_train = v_train + noise*np.std(v_train)*np.random.randn(v_train.shape[0], v_train.shape[1])
    # retrain model
    # obtain error_lambda_1_noisy and print
| 47.120823 | 176 | 0.612057 |
23f0c28547ba808fa79b036b9801cda07a785440 | 4,814 | py | Python | rxbp/multicast/observer/flatconcatnobackpressureobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 24 | 2018-11-22T21:04:49.000Z | 2021-11-08T11:18:09.000Z | rxbp/multicast/observer/flatconcatnobackpressureobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 1 | 2019-02-06T15:58:46.000Z | 2019-02-12T20:31:50.000Z | rxbp/multicast/observer/flatconcatnobackpressureobserver.py | MichaelSchneeberger/rx_backpressure | 16173827498bf1bbee3344933cb9efbfd19699f5 | [
"Apache-2.0"
] | 1 | 2021-01-26T12:41:37.000Z | 2021-01-26T12:41:37.000Z | import threading
from dataclasses import dataclass
from typing import Callable, Any, List, Optional
from rx.disposable import CompositeDisposable
from rxbp.acknowledgement.continueack import continue_ack
from rxbp.acknowledgement.ack import Ack
from rxbp.acknowledgement.single import Single
from rxbp.acknowledgement.stopack import stop_ack
from rxbp.init.initobserverinfo import init_observer_info
from rxbp.observable import Observable
from rxbp.observer import Observer
from rxbp.observerinfo import ObserverInfo
from rxbp.observers.connectableobserver import ConnectableObserver
from rxbp.scheduler import Scheduler
from rxbp.typing import ElementType
@dataclass
class FlatConcatNoBackpressureObserver(Observer):
    """Flatten-concat observer without upstream backpressure.

    Each element received in ``on_next`` is mapped through ``selector`` to an
    inner Observable.  The inner observables are emitted to ``next_observer``
    strictly one after another (concatenation): every inner observable after
    the currently active one is parked behind a ConnectableObserver and only
    connected once its predecessor completes.

    NOTE(review): ``scheduler`` and ``subscribe_scheduler`` are currently
    unused -- the scheduling code further down is commented out.  Confirm
    whether they can be removed from the interface.
    """
    next_observer: Observer                     # downstream consumer of the flattened stream
    selector: Callable[[Any], Observable]       # maps one upstream element to an inner Observable
    scheduler: Scheduler
    subscribe_scheduler: Scheduler
    # observer_info: ObserverInfo
    composite_disposable: CompositeDisposable   # collects inner subscriptions for bulk disposal
    def __post_init__(self):
        # Guards the shared queue of connectable observers and the
        # `is_completed` flag; RLock because callbacks may re-enter.
        self.lock = threading.RLock()
        # self.conn_observers: List[ConnectableObserver] = []
        self.inner_observer = self.InnerObserver(
            observer=self.next_observer,
            last_ack=None,
            lock=self.lock,
            conn_observers=[],
            is_completed=False,
        )
    @dataclass
    class InnerObserver(Observer):
        """Observer shared by all inner observables.

        Forwards elements to the downstream observer and, whenever one inner
        observable completes, connects the next parked one (waiting for the
        last acknowledgment to resolve first, if there is one).
        """
        observer: Observer                          # downstream observer
        last_ack: Optional[Ack]                     # ack returned by the most recent on_next
        lock: threading.RLock                       # shared with the outer observer
        conn_observers: List[ConnectableObserver]   # head = active inner source, tail = parked ones
        is_completed: bool                          # True once the outer source has completed
        def on_next(self, elem: ElementType) -> Ack:
            # Forward downstream and remember the ack so a later connect can
            # wait on it.
            ack = self.observer.on_next(elem)
            self.last_ack = ack
            return ack
        def on_error(self, exc: Exception):
            self.observer.on_error(exc)  # todo: check this -- parked inner observables are not disposed here
        def on_completed(self):
            # Called when ONE inner observable completes -- not the whole stream.
            with self.lock:
                # Drop the completed head of the queue.
                conn_observers = self.conn_observers[1:]
                self.conn_observers = conn_observers
                is_completed = self.is_completed
            if 0 < len(conn_observers):
                if self.last_ack is None:
                    # Nothing has been emitted yet; connect the next inner
                    # source immediately.
                    self.conn_observers[0].connect()
                else:
                    # Defer the connect until the last ack resolves, so the
                    # downstream is ready for the next inner source.
                    class InnerSingle(Single):
                        def on_next(_, elem):
                            self.conn_observers[0].connect()
                        def on_error(_, exc: Exception):
                            pass
                    self.last_ack.subscribe(InnerSingle())
            elif is_completed:
                # Queue drained and the outer source is done: complete downstream.
                self.observer.on_completed()
    def on_next(self, elem: ElementType):
        """Map each upstream element to an inner Observable and enqueue it."""
        try:
            obs_list: List[Observable] = [self.selector(e) for e in elem]
        except Exception as exc:
            # A failing selector fails the whole stream.
            self.on_error(exc)
            return stop_ack
        if len(obs_list) == 0:
            return continue_ack
        # generate a connectable observer for each observer
        def gen_connectable_observer():
            for _ in obs_list:
                conn_observer = ConnectableObserver(
                    underlying=self.inner_observer,
                    # scheduler=self.scheduler,
                )
                yield conn_observer
        conn_observers = list(gen_connectable_observer())
        with self.lock:
            prev_conn_observers = self.inner_observer.conn_observers
            self.inner_observer.conn_observers = self.inner_observer.conn_observers + conn_observers
        if len(prev_conn_observers) == 0:
            # Queue was empty: the first observable may feed the inner
            # observer directly, no connect step needed.
            # conn_observers[0] is not used in this case
            first_conn_observer = self.inner_observer
        else:
            first_conn_observer = conn_observers[0]
        first_obs = obs_list[0]
        other_obs = obs_list[1:]
        other_conn_observers = conn_observers[1:]
        # def observe_on_subscribe_scheduler(_, __):
        disposable = first_obs.observe(init_observer_info(
            observer=first_conn_observer,
        ))
        self.composite_disposable.add(disposable)
        # Subscribe the remaining observables now; they stay silent until
        # their ConnectableObserver is connected.
        for obs, conn_observer in zip(other_obs, other_conn_observers):
            disposable = obs.observe(init_observer_info(
                observer=conn_observer,
            ))
            self.composite_disposable.add(disposable)
        # if self.subscribe_scheduler.idle:
        #     disposable = self.subscribe_scheduler.schedule(observe_on_subscribe_scheduler)
        #     self.composite_disposable.add(disposable)
        # else:
        #     observe_on_subscribe_scheduler(None, None)
        return continue_ack
    def on_error(self, exc):
        # Propagate upstream errors straight to the downstream observer.
        self.next_observer.on_error(exc)
    def on_completed(self):
        # Outer source is done; if no inner observables are pending, the
        # flattened stream is complete as well.  Otherwise InnerObserver
        # completes downstream once the queue drains.
        with self.lock:
            conn_observers = self.inner_observer.conn_observers
            self.inner_observer.is_completed = True
        if len(conn_observers) == 0:
            self.next_observer.on_completed()
| 33.2 | 100 | 0.634815 |
1d0d5d5ac42782a1435f47277f50769804a086fc | 1,671 | py | Python | functional-programming/set__dict_comprehensions.py | Ch-sriram/python-advanced-concepts | a041eedbcff683b487d292808fba5fd0a3b9aebd | [
"MIT"
] | null | null | null | functional-programming/set__dict_comprehensions.py | Ch-sriram/python-advanced-concepts | a041eedbcff683b487d292808fba5fd0a3b9aebd | [
"MIT"
] | null | null | null | functional-programming/set__dict_comprehensions.py | Ch-sriram/python-advanced-concepts | a041eedbcff683b487d292808fba5fd0a3b9aebd | [
"MIT"
] | null | null | null | # examples on set and dict comprehensions
# EXAMPLES OF SET COMPREHENSION
# making a set comprehension is actually really easy
# instead of a list, we'll just use a set notation as follows:
my_list = [char for char in 'hello']
my_set = {char for char in 'hello'}
print(my_list)
print(my_set)
my_list1 = [num for num in range(10)]
my_set1 = {num for num in range(10)}
print(my_list1)
print(my_set1)
my_list2 = [num ** 2 for num in range(50) if num % 2 == 0]
my_set2 = {num ** 2 for num in range(50) if num % 2 == 0}
print(my_list2)
print(my_set2)
# EXAMPLE OF DICT COMPREHENSIONS
# Example 1:
simple_dict = {'a': 1, 'b': 2}
my_dict = {k: v ** 2 for k, v in simple_dict.items()}
# for each of the key:value pair in the simple_dict, we raise the value by the power of 2 and add it to my_dict
print(my_dict)
# Example 2: if we only want the even values from simple_dict to be in my_dict, then, we have the following dict comprehension
my_dict2 = {k: v ** 2 for k, v in simple_dict.items() if v % 2 == 0}
print(my_dict2)
# Example 3: If we want to make a dict from a list where the list item is the key and item*2 is the value in the dict, using dict comprehension:
my_dict3 = {item: item * 2 for item in [1, 2, 3]}
print(my_dict3)
'''
Output:
------
['h', 'e', 'l', 'l', 'o']
{'o', 'h', 'l', 'e'}
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
[0, 4, 16, 36, 64, 100, 144, 196, 256, 324, 400, 484, 576, 676, 784, 900, 1024, 1156, 1296, 1444, 1600, 1764, 1936, 2116, 2304]
{0, 256, 1024, 2304, 4, 900, 1156, 16, 144, 400, 784, 1296, 1936, 36, 676, 1444, 64, 576, 1600, 196, 324, 2116, 100, 484, 1764}
{'a': 1, 'b': 4}
{'b': 4}
{1: 2, 2: 4, 3: 6}
'''
| 31.528302 | 144 | 0.64213 |
ea2d125665940271b5d52dd0f7df67ab6257c2da | 14,187 | py | Python | avatar/mapworld/maps.py | potsdam-language-vision-2021/eyebot-1.0 | dd2f618a8357ef16c497d06b8b19ab38d628645a | [
"MIT"
] | null | null | null | avatar/mapworld/maps.py | potsdam-language-vision-2021/eyebot-1.0 | dd2f618a8357ef16c497d06b8b19ab38d628645a | [
"MIT"
] | 46 | 2021-04-30T15:28:40.000Z | 2021-08-21T15:26:34.000Z | avatar/mapworld/maps.py | potsdam-language-vision-2021/eyebot-1.0 | dd2f618a8357ef16c497d06b8b19ab38d628645a | [
"MIT"
] | 1 | 2021-08-07T19:41:12.000Z | 2021-08-07T19:41:12.000Z | # -*- coding: utf-8 -*-
"""Map objects for the map world environment.
AbstractMap encapsulates generation of graph through random walk.
ADEMap is an example of a fully specified map, where nodes in the graph
are adorned with additional information (here: room categories and room
instances [images] from the ADE20k dataset.
ADEMap needs to know the mapping between categories and instances (files
from the corpus). This is pre-compiled in ade_cat_instances.json.gz.
If that is missing, run make_and_write_instance_list() (for which you
need to have the corpus available).
"""
# TODO:
# - use visual similarity to sample maximally confusable images from
# target type.
import numpy as np
import networkx as nx
from glob import glob
import json
import gzip
import os
import matplotlib.pyplot as plt
import zipfile
from avatar import load_project_resource
class AbstractMap(object):
    """Random-walk map graph on an n-by-m grid.

    Arguments:
        n: number of grid rows available to the walk
        m: number of grid columns available to the walk
        n_rooms: number of rooms (nodes) to create; must not exceed n*m

    Attributes:
        G: the resulting networkx graph, one node per visited grid cell.

    The walk starts from a uniformly random cell and repeatedly draws a
    random compass direction, adding a node (and connecting edge) for every
    legal step, until ``n_rooms`` distinct cells have been visited.  The
    larger n*m is relative to n_rooms, the less compact the layout.
    """
    # Compass direction -> (row, column) offset, numpy convention.
    dir2delta = {'n': np.array((-1, 0)),
                 's': np.array((1, 0)),
                 'e': np.array((0, 1)),
                 'w': np.array((0, -1))}
    def __init__(self, n, m, n_rooms):
        if n * m < n_rooms:
            raise ValueError('n*m must be larger than n_rooms')
        self.n = n
        self.m = m
        self.n_rooms = n_rooms
        self.G = self.__make_graph(n, m, n_rooms)
    def __make_graph(self, n, m, n_rooms):
        """Run the random walk and return the resulting graph."""
        occupancy = np.zeros((n, m))
        graph = nx.Graph()
        position = np.random.randint(0, n), np.random.randint(0, m)
        occupancy[position] = 1
        graph.add_node(position)
        while occupancy.sum() < n_rooms:
            step = np.random.choice(['n', 's', 'e', 'w'])
            candidate = tuple(np.array(position) + self.dir2delta[step])
            if not (0 <= candidate[0] < n and 0 <= candidate[1] < m):
                # Step would leave the grid; draw another direction.
                continue
            occupancy[candidate] = 1
            graph.add_node(candidate)
            graph.add_edge(position, candidate)
            position = candidate
        return graph
    def plot_graph(self):
        """Draw the graph with each node placed at its grid coordinate."""
        nx.draw_networkx(self.G, pos={node: node for node in self.G.nodes()})
    def __repr__(self):
        return '<AbstractMap({}, {}, {})>'.format(self.n, self.m, self.n_rooms)
class ADEMap(AbstractMap):
"""Create map for the ADEworld.
Map filled with selected types from ADE20k. We selected some categories
that seemed more common and potentially easier to describe. (For some
games, these might serve as target rooms.) Additionally, we identified
some categories that can serve as fillers. Finally, we have "outdoors"
categories.
Arguments:
n: number of rows of grid for random walk [from AbstracMap]
m: number of columns of grid for random walk [from AbstracMap]
n_rooms: how many rooms to create [from AbstracMap]
target_type_distr: list of integers, controls ambiguity (see below).
or None. (Then init via .from_json().)
Some rooms are identified as "outdoors". These are rooms with degree 1,
that is, rooms that are connected to only one other one. Think of these
as the entries into the house.
Let's say target_type_distr is (3,2). This means that we will have (at
least) two types from the list of potential target categories, and that
the first of these will occur three times, and the second twice. This
controls "type ambiguity" of the map.
N.B.: At the moment, there are no checks to ensure that there are enough
rooms after the outdoor rooms have been assigned, so make sure that then
sum of the rooms specified here is relatively small, to account for
the possiblity that the remaining rooms are all outdoor rooms.
This only assigns categories and image instances to the rooms. For other use
cases, one could imagine textual information also being assigned to rooms.
"""
_target_cats = ['b/bathroom', 'b/bedroom', 'k/kitchen',
'b/basement', 'n/nursery', 'a/attic', 'c/childs_room',
'p/playroom', 'd/dining_room', 'h/home_office',
's/staircase', 'u/utility_room', 'l/living_room',
'j/jacuzzi/indoor', 'd/doorway/indoor', 'l/locker_room',
'w/wine_cellar/bottle_storage', 'r/reading_room',
'w/waiting_room', 'b/balcony/interior']
_distractor_cats = ['h/home_theater', 's/storage_room', 'h/hotel_room',
'm/music_studio', 'c/computer_room', 's/street',
'y/yard', 't/tearoom', 'a/art_studio',
'k/kindergarden_classroom', 's/sewing_room',
's/shower', 'v/veranda', 'b/breakroom',
'p/patio', 'g/garage/indoor',
'r/restroom/indoor', 'w/workroom', 'c/corridor',
'g/game_room', 'p/poolroom/home', 'c/cloakroom/room',
'c/closet', 'p/parlor', 'h/hallway', 'r/reception',
'c/carport/indoor', 'h/hunting_lodge/indoor']
# TODO: remove outdoor categories from this set? Why is there street and yard in here?
_outdoor_cats = ['g/garage/outdoor', 'a/apartment_building/outdoor',
'j/jacuzzi/outdoor', 'd/doorway/outdoor',
'r/restroom/outdoor', 's/swimming_pool/outdoor',
'c/casino/outdoor', 'k/kiosk/outdoor',
'a/apse/outdoor', 'c/carport/outdoor',
'f/flea_market/outdoor', 'c/chicken_farm/outdoor',
'w/washhouse/outdoor', 'c/cloister/outdoor',
'd/diner/outdoor', 'k/kennel/outdoor',
'h/hunting_lodge/outdoor', 'c/cathedral/outdoor',
'n/newsstand/outdoor', 'p/parking_garage/outdoor',
'c/convenience_store/outdoor', 'b/bistro/outdoor',
'i/inn/outdoor', 'l/library/outdoor']
_cat_instances = load_project_resource("avatar/resources/ade_cat_instances.json")
def __init__(self, n: int, m: int, n_rooms: int, types_to_repeat: list = None):
"""
Arguments:
n: number of rows of grid for random walk (map height)
m: number of columns of grid for random walk (map width)
n_rooms: number of (connected) rooms in on the grid
types_to_repeat: a list of repetition counts for the room types. Each number affects a randomly
chosen room type, but each room type only once. Note: For large numbers of rooms, the types will
repeat anyway because there is only a restricted set of types.
"""
AbstractMap.__init__(self, n, m, n_rooms)
if types_to_repeat is None:
types_to_repeat = []
self.__assign_types(types_to_repeat)
self.__assign_instances()
def __assign_types(self, target_type_distr):
G = self.G
outdoor = [this_node for this_node in G.nodes() if G.degree[this_node] == 1]
# assign outdoor cats to those
for this_node in outdoor:
G.nodes[this_node]['base_type'] = 'outdoor'
G.nodes[this_node]['type'] = np.random.choice(self._outdoor_cats)
G.nodes[this_node]['target'] = False
unassigned = [this_node for this_node in G.nodes() if G.degree[this_node] > 1]
target_types = np.random.choice(self._target_cats,
len(target_type_distr), replace=False)
for target_type, repetitions in zip(target_types, target_type_distr):
for _ in range(repetitions):
this_node = unassigned[np.random.choice(range(len(unassigned)))]
G.nodes[this_node]['base_type'] = 'indoor'
G.nodes[this_node]['type'] = target_type
G.nodes[this_node]['target'] = True
unassigned.remove(this_node)
remainder_types = list(set(self._target_cats)
.difference(set(target_types))
.union(set(self._distractor_cats)))
for this_node in unassigned:
G.nodes[this_node]['base_type'] = 'indoor'
G.nodes[this_node]['type'] = np.random.choice(remainder_types)
G.nodes[this_node]['target'] = False
self.G = G
def __assign_instances(self):
G = self.G
already_sampled = []
for this_node in G.nodes():
not_yet = True
while not_yet:
this_instance = np.random.choice(ADEMap._cat_instances[G.nodes[this_node]['type']])
if this_instance not in already_sampled:
not_yet = False
G.nodes[this_node]['instance'] = this_instance
self.G = G
def print_mapping(self):
for this_node in self.G.nodes():
print('{}: {} {:>50}'.format(this_node,
self.G.nodes[this_node]['type'],
self.G.nodes[this_node]['instance']))
def __catname(self, category):
parts = category.split('/')
if parts[-1].endswith('door') or parts[-1] == 'interior':
return parts[-2]
else:
return parts[-1]
def plot_graph(self, nodes='types', state=None):
G = self.G
nx.draw_networkx(G, pos={n: n for n in G.nodes()}, with_labels=False,
node_color='blue', node_shape='s')
for this_node in G.nodes():
x, y = np.array(this_node) + np.array((-0.2, 0.2))
if nodes == 'types':
label = self.__catname(G.nodes[this_node]['type'])
elif nodes == 'inst':
label = G.nodes[this_node]['instance']
plt.text(x, y, label)
if state is not None:
nx.draw_networkx_nodes(G,
pos={n: n for n in G.nodes()},
with_labels=False,
node_color='red',
node_shape='s',
nodelist=[state])
plt.axis('off')
def to_json(self):
"""This is NO JSON!"""
return nx.json_graph.node_link_data(self.G)
@classmethod
def from_json(cls, map_json):
"""Construct map object from json serialisation.
Example:
map = ADEMap.from_json(PATH_TO_JSON)
"""
new_instance = cls()
new_instance.G = nx.json_graph.node_link_graph(map_json)
return new_instance
def to_fsa_def(self, pick_initial=True):
this_json = self.to_json()
# transitions
transitions = this_json['links']
dir2delta = {'n': np.array((0, 1)), 's': np.array((0, -1)),
'e': np.array((1, 0)), 'w': np.array((-1, 0))}
# N.B.: the interpretation of the coordinates has suddenly
# changed, compared to AbstracMap.. There, the numpy convention
# was used (row, column). Here, it is now (x, y). Doesn't
# really matter semantically; I've adapted this here as this
# is how it is interpreted visually when plotting the graph.
flip_dir = {'n': 's', 's': 'n', 'e': 'w', 'w': 'e'}
out_transitions = []
for this_transition in transitions:
for d, o in dir2delta.items():
if np.array_equal(np.array(this_transition['target']),
np.array(this_transition['source']) + o):
out_transitions.append({'source':
str(this_transition['source']),
'dest':
str(this_transition['target']),
'trigger': d})
out_transitions.append({'source':
str(this_transition['target']),
'dest':
str(this_transition['source']),
'trigger': flip_dir[d]})
break
# nodes
nodes = this_json['nodes']
# pick initial
if pick_initial:
initial_node = np.random.choice([node for node in nodes if node['base_type'] == 'outdoor'])
# TODO: make selection of initial more flexible?
initial = str(initial_node['id'])
initial_type = initial_node['type']
else:
initial, initial_type = None, None
return {'transitions': out_transitions, 'nodes': nodes,
'initial': initial, 'initial_type': initial_type}
def make_instance_list(ade_path, categories):
    """Collect the .jpg files under each category directory of the corpus.

    Arguments:
        ade_path: root path of the ADE20k corpus (with trailing slash).
        categories: category paths relative to the root, e.g. 'k/kitchen'.

    Returns:
        Dict mapping each category to its image paths, expressed relative
        to the corpus root's parent directory.
    """
    # How many leading path components to strip from each glob hit.
    prefix_depth = len(ade_path.split('/')) - 1
    def relativize(full_path):
        return '/'.join(full_path.split('/')[prefix_depth:])
    return {category: [relativize(hit)
                       for hit in glob(ade_path + category + '/*.jpg')]
            for category in categories}
def make_and_write_instance_list(ade_path, filename):
    """Pre-compile the category -> image-instance mapping for all ADEMap
    categories and write it as gzipped JSON to `filename`."""
    all_categories = (ADEMap._target_cats
                      + ADEMap._distractor_cats
                      + ADEMap._outdoor_cats)
    instances = make_instance_list(ade_path, all_categories)
    payload = json.dumps(instances, indent=4).encode('utf-8')
    with gzip.open(filename, 'w') as f:
        f.write(payload)
| 42.990909 | 108 | 0.569395 |
b22c83409688bb9616b57667d63aefb941328fc0 | 498 | py | Python | src/vocabulator/words/migrations/0013_auto_20180812_1033.py | sysint64/vocabulator-server-side | 1cbe6253367ca0461be9a88fb9a2cab927170393 | [
"MIT"
] | null | null | null | src/vocabulator/words/migrations/0013_auto_20180812_1033.py | sysint64/vocabulator-server-side | 1cbe6253367ca0461be9a88fb9a2cab927170393 | [
"MIT"
] | null | null | null | src/vocabulator/words/migrations/0013_auto_20180812_1033.py | sysint64/vocabulator-server-side | 1cbe6253367ca0461be9a88fb9a2cab927170393 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.7 on 2018-08-12 10:33
from django.db import migrations
def replace_comma_to_semicolon(apps, schema_editor):
    """Data migration: rewrite every Word.translation, turning ',' into ';'."""
    # Use the historical model, as required inside migrations.
    Word = apps.get_model('words', 'Word')
    for word in Word.objects.all():
        word.translation = word.translation.replace(",", ";")
        word.save()
class Migration(migrations.Migration):
    """Data migration replacing ',' with ';' in all Word translations.

    NOTE: no reverse function is given, so this migration is irreversible
    (the original comma positions cannot be recovered).
    """
    dependencies = [
        ('words', '0012_auto_20180812_1031'),
    ]
    operations = [
        migrations.RunPython(replace_comma_to_semicolon),
    ]
| 21.652174 | 61 | 0.662651 |
a9ca5636d6ee5e033e0aa253660e42d674fbdb06 | 854 | py | Python | utils/face_parsing.py | oo92/Deep3dPortrait | 809b3c4ab870e7b65b123d7491ab237c862c3b99 | [
"MIT"
] | 278 | 2020-04-24T16:44:40.000Z | 2022-03-24T07:44:33.000Z | utils/face_parsing.py | oo92/Deep3dPortrait | 809b3c4ab870e7b65b123d7491ab237c862c3b99 | [
"MIT"
] | 29 | 2020-05-23T10:15:56.000Z | 2022-03-25T02:04:32.000Z | utils/face_parsing.py | oo92/Deep3dPortrait | 809b3c4ab870e7b65b123d7491ab237c862c3b99 | [
"MIT"
] | 64 | 2020-04-28T08:52:06.000Z | 2022-03-29T01:46:46.000Z | import numpy as np
from scipy.io import loadmat
#######################################################################################
# Auxiliary functions for face segmentation
# for face parsing, please refer to https://arxiv.org/pdf/1906.01342.pdf
#######################################################################################
def faceparsing():
    """Segment a face image into semantic regions (not implemented here).

    Intended to return a per-pixel label map with classes:
    0: bg  1: face  2: hair  3: left ear  4: right ear  5 (optional): inner mouth

    For an actual face-parsing model see https://arxiv.org/pdf/1906.01342.pdf
    (referenced at the top of this module).

    Raises:
        NotImplementedError: always; plug in an external face-parsing model.
    """
    # BUG FIX: the stub previously did `return NotImplemented`.  That constant
    # is reserved for binary dunder methods (__eq__ etc.); an unimplemented
    # function should raise NotImplementedError so callers fail loudly.
    raise NotImplementedError(
        'faceparsing must be provided by an external face-parsing model')
def split_segmask(mask):
    """Split a segmentation label map into three binary masks.

    Arguments:
        mask: array of integer labels
              (0: bg, 1: face, 2: hair, 3: left ear, 4: right ear,
               5: inner mouth).

    Returns:
        (face_mask, hairear_mask, mouth_mask) -- same shape/dtype as `mask`,
        with 1 where the region is present.  Inner mouth (5) counts as part
        of the face as well.
    """
    def labels_to_mask(labels):
        binary = np.zeros_like(mask)
        for label in labels:
            binary[mask == label] = 1
        return binary
    face_mask = labels_to_mask((1, 5))
    hairear_mask = labels_to_mask((2, 3, 4))
    mouth_mask = labels_to_mask((5,))
    return face_mask, hairear_mask, mouth_mask
| 34.16 | 103 | 0.555035 |
d04744982cdfc820daa0793aa2c480e9691ed68a | 3,504 | py | Python | profiles_api/views.py | Paris157/profiles-rest-api | 83a9374e905d8ffb104b91058d398f2bb1161b04 | [
"MIT"
] | null | null | null | profiles_api/views.py | Paris157/profiles-rest-api | 83a9374e905d8ffb104b91058d398f2bb1161b04 | [
"MIT"
] | null | null | null | profiles_api/views.py | Paris157/profiles-rest-api | 83a9374e905d8ffb104b91058d398f2bb1161b04 | [
"MIT"
] | null | null | null | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
class HelloApiView(APIView):
    """Demonstration APIView: one handler method per HTTP verb."""
    serializer_class = serializers.HelloSerializer
    def get(self, request, format=None):
        """List the characteristic features of an APIView."""
        features = [
            'Uses HTTP methods as function (get,post,patch,put,delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello!', 'an_apiview': features})
    def post(self, request):
        """Validate the posted name and answer with a greeting."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads with the field errors.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})
    def put(self, request, pk=None):
        """Handle a full update of an object."""
        return Response({'method': 'PUT'})
    def patch(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'method': 'PATCH'})
    def delete(self, request, pk=None):
        """Handle deletion of an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet: action methods instead of HTTP-verb handlers."""
    serializer_class = serializers.HelloSerializer
    def list(self, request):
        """List the characteristic features of a ViewSet."""
        features = [
            'Uses actions (list,create,retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'hello', 'a_viewset': features})
    def create(self, request):
        """Validate the posted name and answer with a greeting."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads with the field errors.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})
    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID."""
        return Response({'http_method': 'GET'})
    def update(self, request, pk=None):
        """Handle a full update of an object."""
        return Response({'http_method': 'PUT'})
    def partial_update(self, request, pk=None):
        """Handle updating part of an object."""
        return Response({'http_method': 'PATCH'})
    def destroy(self, request, pk=None):
        """Handle deletion of an object."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles"""
    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Clients authenticate with a token in the Authorization header.
    authentication_classes = (TokenAuthentication,)
    # Anyone may read profiles; only the owner may modify their own
    # (enforced by the custom UpdateOwnProfile permission).
    permission_classes = (permissions.UpdateOwnProfile,)
| 30.736842 | 74 | 0.635274 |
fc3a73a220644e40d18c78c560fc1b9e61198cd9 | 383 | py | Python | blue/wsgi.py | zhy0216/random-read | b54d806a4c2223e3c60dc554c1aacf86e9591028 | [
"MIT"
] | 239 | 2015-04-15T09:14:09.000Z | 2016-06-02T16:45:45.000Z | blue/wsgi.py | zhy0216/OhMyPocket | b54d806a4c2223e3c60dc554c1aacf86e9591028 | [
"MIT"
] | 4 | 2015-05-10T16:03:56.000Z | 2015-07-16T02:34:11.000Z | blue/wsgi.py | zhy0216/random-read | b54d806a4c2223e3c60dc554c1aacf86e9591028 | [
"MIT"
] | 25 | 2015-05-10T13:59:06.000Z | 2016-05-18T02:03:19.000Z | """
WSGI config for blue project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blue.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 25.533333 | 78 | 0.785901 |
dc4e357bf72e101b6c146694f1c14e8d596d817b | 4,041 | py | Python | tests/unit/test_table.py | kangdh/hdmf | 680b68a1bbc9377590862574daea83579a3d52bc | [
"BSD-3-Clause-LBNL"
] | 25 | 2019-03-07T15:33:16.000Z | 2022-02-16T20:03:57.000Z | tests/unit/test_table.py | kangdh/hdmf | 680b68a1bbc9377590862574daea83579a3d52bc | [
"BSD-3-Clause-LBNL"
] | 641 | 2019-02-02T00:31:12.000Z | 2022-03-31T18:16:54.000Z | tests/unit/test_table.py | kangdh/hdmf | 680b68a1bbc9377590862574daea83579a3d52bc | [
"BSD-3-Clause-LBNL"
] | 16 | 2019-02-05T18:21:35.000Z | 2022-02-14T23:37:21.000Z | import pandas as pd
from hdmf.container import Table, Row, RowGetter
from hdmf.testing import TestCase
class TestTable(TestCase):
@classmethod
def get_table_class(cls):
class MyTable(Table):
__defaultname__ = 'my_table'
__columns__ = [
{'name': 'col1', 'type': str, 'help': 'a string column'},
{'name': 'col2', 'type': int, 'help': 'an integer column'},
]
return MyTable
def test_init(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
self.assertTrue(hasattr(table, '__colidx__'))
self.assertEqual(table.__colidx__, {'col1': 0, 'col2': 1})
def test_add_row_getitem(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
table.add_row(col1='foo', col2=100)
table.add_row(col1='bar', col2=200)
row1 = table[0]
row2 = table[1]
self.assertEqual(row1, ('foo', 100))
self.assertEqual(row2, ('bar', 200))
def test_to_dataframe(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
table.add_row(col1='foo', col2=100)
table.add_row(col1='bar', col2=200)
df = table.to_dataframe()
exp = pd.DataFrame(data=[{'col1': 'foo', 'col2': 100}, {'col1': 'bar', 'col2': 200}])
pd.testing.assert_frame_equal(df, exp)
def test_from_dataframe(self):
MyTable = TestTable.get_table_class()
exp = pd.DataFrame(data=[{'col1': 'foo', 'col2': 100}, {'col1': 'bar', 'col2': 200}])
table = MyTable.from_dataframe(exp)
row1 = table[0]
row2 = table[1]
self.assertEqual(row1, ('foo', 100))
self.assertEqual(row2, ('bar', 200))
class TestRow(TestCase):
def setUp(self):
self.MyTable = TestTable.get_table_class()
class MyRow(Row):
__table__ = self.MyTable
self.MyRow = MyRow
self.table = self.MyTable('test_table')
def test_row_no_table(self):
with self.assertRaisesRegex(ValueError, '__table__ must be set if sub-classing Row'):
class MyRow(Row):
pass
def test_table_init(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
self.assertFalse(hasattr(table, 'row'))
table_w_row = self.MyTable('test_table')
self.assertTrue(hasattr(table_w_row, 'row'))
self.assertIsInstance(table_w_row.row, RowGetter)
self.assertIs(table_w_row.row.table, table_w_row)
def test_init(self):
row1 = self.MyRow(col1='foo', col2=100, table=self.table)
# make sure Row object set up properly
self.assertEqual(row1.idx, 0)
self.assertEqual(row1.col1, 'foo')
self.assertEqual(row1.col2, 100)
# make sure Row object is stored in Table peroperly
tmp_row1 = self.table.row[0]
self.assertEqual(tmp_row1, row1)
def test_add_row_getitem(self):
self.table.add_row(col1='foo', col2=100)
self.table.add_row(col1='bar', col2=200)
row1 = self.table.row[0]
self.assertIsInstance(row1, self.MyRow)
self.assertEqual(row1.idx, 0)
self.assertEqual(row1.col1, 'foo')
self.assertEqual(row1.col2, 100)
row2 = self.table.row[1]
self.assertIsInstance(row2, self.MyRow)
self.assertEqual(row2.idx, 1)
self.assertEqual(row2.col1, 'bar')
self.assertEqual(row2.col2, 200)
# test memoization
row3 = self.table.row[0]
self.assertIs(row3, row1)
def test_todict(self):
row1 = self.MyRow(col1='foo', col2=100, table=self.table)
self.assertEqual(row1.todict(), {'col1': 'foo', 'col2': 100})
def test___str__(self):
row1 = self.MyRow(col1='foo', col2=100, table=self.table)
row1_str = str(row1)
expected_str = "Row(0, test_table) = {'col1': 'foo', 'col2': 100}"
self.assertEqual(row1_str, expected_str)
| 32.328 | 93 | 0.604553 |
8086868b43a512f7a2e7c704862c444c4ca2b3e3 | 1,093 | py | Python | whatsapp-bot-venv/Lib/site-packages/twilio/rest/proxy/__init__.py | RedaMastouri/ConversationalPythonicChatBot | f204276d4b80348d42091b17d1a7d9eea33fb4e0 | [
"MIT"
] | 1,362 | 2015-01-04T10:25:18.000Z | 2022-03-24T10:07:08.000Z | whatsapp-bot-venv/Lib/site-packages/twilio/rest/proxy/__init__.py | RedaMastouri/ConversationalPythonicChatBot | f204276d4b80348d42091b17d1a7d9eea33fb4e0 | [
"MIT"
] | 299 | 2015-01-30T09:52:39.000Z | 2022-03-31T23:03:02.000Z | bot/lib/python3.7/site-packages/twilio/rest/proxy/__init__.py | carlosrh18/DavinciBot | d73a6b7f68d7bab25d134d3f85c6b63a86c206c5 | [
"MIT"
] | 622 | 2015-01-03T04:43:09.000Z | 2022-03-29T14:11:00.000Z | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base.domain import Domain
from twilio.rest.proxy.v1 import V1
class Proxy(Domain):
def __init__(self, twilio):
"""
Initialize the Proxy Domain
:returns: Domain for Proxy
:rtype: twilio.rest.proxy.Proxy
"""
super(Proxy, self).__init__(twilio)
self.base_url = 'https://proxy.twilio.com'
# Versions
self._v1 = None
@property
def v1(self):
"""
:returns: Version v1 of proxy
:rtype: twilio.rest.proxy.v1.V1
"""
if self._v1 is None:
self._v1 = V1(self)
return self._v1
@property
def services(self):
"""
:rtype: twilio.rest.proxy.v1.service.ServiceList
"""
return self.v1.services
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Proxy>'
| 20.240741 | 56 | 0.536139 |
8ed0a7554d0f046f6e8a1224b2532c149a73aec3 | 5,431 | py | Python | catalog/views.py | byronlin92/django_local_library | 86013bdfbb88bc6a7632cb803ea74c09a0e381e7 | [
"Apache-2.0"
] | null | null | null | catalog/views.py | byronlin92/django_local_library | 86013bdfbb88bc6a7632cb803ea74c09a0e381e7 | [
"Apache-2.0"
] | null | null | null | catalog/views.py | byronlin92/django_local_library | 86013bdfbb88bc6a7632cb803ea74c09a0e381e7 | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import render
from .models import Book, Author, BookInstance, Genre
def index(request):
"""
View function for home page of site.
"""
# Generate counts of some of the main objects
num_books=Book.objects.all().count()
num_instances=BookInstance.objects.all().count()
# Available books (status = 'a')
num_instances_available=BookInstance.objects.filter(status__exact='a').count()
num_authors=Author.objects.count() # The 'all()' is implied by default.
particular_word = 'byron'
count_genre = Genre.objects.filter(name=particular_word).count()
count_book = Book.objects.filter(title=particular_word).count()
# Number of visits to this view, as counted in the session variable.
num_visits=request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits+1
# Render the HTML template index.html with the data in the context variable
return render(
request,
'index.html',
context={'num_books':num_books,'num_instances':num_instances,'num_instances_available':num_instances_available,'num_authors':num_authors,
'count_genre':count_genre,'count_book':count_book, 'particular_word':particular_word, 'num_visits':num_visits},
)
from django.views import generic
class BookListView(generic.ListView):
model = Book
# template_name='book_list.html'
paginate_by=10
class BookDetailView(generic.DetailView):
model = Book
# template_name='book_detail.html'
paginate_by=10
class AuthorListView(generic.ListView):
model = Author
# template_name='author_list.html'
paginate_by=10
class AuthorDetailView(generic.DetailView):
model = Author
# template_name='author_detail.html'
paginate_by=10
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
    """
    Generic class-based view listing books on loan to current user.
    """
    model = BookInstance
    template_name ='catalog/bookinstance_list_borrowed_user.html'
    paginate_by = 10
    def get_queryset(self):
        # Only this user's copies with status 'o' (on loan), soonest due first.
        return BookInstance.objects.filter(borrower=self.request.user).filter(status__exact='o').order_by('due_back')
# Added as part of challenge!
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksAllListView(PermissionRequiredMixin,generic.ListView):
    """
    Generic class-based view listing all books on loan. Only visible to users with can_mark_returned permission.
    """
    model = BookInstance
    permission_required = 'catalog.can_mark_returned'
    template_name ='catalog/bookinstance_list_borrowed_all.html'
    paginate_by = 10
    def get_queryset(self):
        # Every copy currently on loan ('o'), ordered by due date.
        return BookInstance.objects.filter(status__exact='o').order_by('due_back')
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
import datetime
from .forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
    """
    View function for renewing a specific BookInstance by a librarian.

    GET renders a renewal form pre-filled with a date three weeks from today;
    POST validates the form, stores the new due_back on the instance and
    redirects to the 'all-borrowed' URL.
    """
    book_inst=get_object_or_404(BookInstance, pk = pk)
    # If this is a POST request then process the Form data
    if request.method == 'POST':
        # Create a form instance and populate it with data from the request (binding):
        form = RenewBookForm(request.POST)
        # Check if the form is valid:
        if form.is_valid():
            # process the data in form.cleaned_data as required (here we just write it to the model due_back field)
            book_inst.due_back = form.cleaned_data['renewal_date']
            book_inst.save()
            # redirect to a new URL:
            return HttpResponseRedirect(reverse('all-borrowed') )
    # If this is a GET (or any other method) create the default form.
    else:
        proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks=3)
        form = RenewBookForm(initial={'renewal_date': proposed_renewal_date,})
    return render(request, 'catalog/book_renew_librarian.html', {'form': form, 'bookinst':book_inst})
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Author
class AuthorCreate(PermissionRequiredMixin, CreateView):
    # Form view to create an Author; requires catalog.can_edit_author.
    model = Author
    permission_required = 'catalog.can_edit_author'
    fields = '__all__'
    # Pre-populates date_of_death; accepted format depends on DATE_INPUT_FORMATS.
    initial={'date_of_death':'12/10/2016',}
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
    # Form view to edit an Author; requires catalog.can_edit_author.
    model = Author
    permission_required = 'catalog.can_edit_author'
    fields = ['first_name','last_name','date_of_birth','date_of_death']
class AuthorDelete(PermissionRequiredMixin, DeleteView):
    # Confirmation view to delete an Author; redirects to the author list.
    model = Author
    permission_required = 'catalog.can_edit_author'
    success_url = reverse_lazy('authors')
class BookCreate(PermissionRequiredMixin, CreateView):
    """Form view for creating a Book; requires the catalog.can_edit_book permission."""
    model = Book
    permission_required = 'catalog.can_edit_book'
    fields = '__all__'
    # Bug fix: the previous initial={'date_of_death': '12/10/2016'} was a
    # copy-paste from AuthorCreate. Book has no date_of_death field (see
    # BookUpdate's field list), so the value was silently ignored by the
    # ModelForm; it has been removed.
class BookUpdate(PermissionRequiredMixin, UpdateView):
    # Form view to edit a Book; requires catalog.can_edit_book.
    model = Book
    permission_required = 'catalog.can_edit_book'
    fields = ['title','author','summary','isbn','genre','language']
class BookDelete(PermissionRequiredMixin, DeleteView):
    # Confirmation view to delete a Book; redirects to the book list.
    model = Book
    permission_required = 'catalog.can_edit_book'
    success_url = reverse_lazy('books')
| 32.716867 | 145 | 0.735776 |
f701ce4be73d3f3af3e6256b3bb71ac213ac3103 | 4,318 | py | Python | eliot/dask.py | chenl/eliot | 8469e98aee19b3bd210515487ca48d9ec97aac6d | [
"Apache-2.0"
] | null | null | null | eliot/dask.py | chenl/eliot | 8469e98aee19b3bd210515487ca48d9ec97aac6d | [
"Apache-2.0"
] | null | null | null | eliot/dask.py | chenl/eliot | 8469e98aee19b3bd210515487ca48d9ec97aac6d | [
"Apache-2.0"
] | null | null | null | """Support for Eliot tracing with Dask computations."""
from pyrsistent import PClass, field
from dask import compute, optimize
from dask.core import toposort, get_dependencies
from . import start_action, current_action, Action, Message
class _RunWithEliotContext(PClass):
    """
    Run a callable within an Eliot context.
    @ivar task_id: The serialized Eliot task ID.
    @ivar func: The function that Dask wants to run.
    @ivar key: The key in the Dask graph.
    @ivar dependencies: The keys in the Dask graph this depends on.
    """
    task_id = field(type=str)
    func = field() # callable
    key = field(type=str)
    dependencies = field()
    # Pretend to be underlying callable for purposes of equality; necessary for
    # optimizer to be happy:
    def __eq__(self, other):
        return self.func == other
    def __ne__(self, other):
        return self.func != other
    def __hash__(self):
        return hash(self.func)
    def __call__(self, *args, **kwargs):
        # Resume the Eliot task allocated for this graph key, log which key
        # ran and what it depended on, then invoke the real callable.
        with Action.continue_task(task_id=self.task_id):
            Message.log(
                message_type="dask:task",
                key=self.key,
                dependencies=self.dependencies
            )
            return self.func(*args, **kwargs)
def compute_with_trace(*args):
    """Do Dask compute(), but with added Eliot tracing.
    Dask is a graph of tasks, but Eliot logs trees.  So we need to emulate a
    graph using a tree.  We do this by making Eliot action for each task, but
    having it list the tasks it depends on.
    We use the following algorithm:
        1. Create a top-level action.
        2. For each entry in the dask graph, create a child with
           serialize_task_id.  Do this in likely order of execution, so that
           if B depends on A the task level of B is higher than the task Ievel
           of A.
        3. Replace each function with a wrapper that uses the corresponding
           task ID (with Action.continue_task), and while it's at it also
           records which other things this function depends on.
    Known issues:
        1. Retries will confuse Eliot.  Probably need different
           distributed-tree mechanism within Eliot to solve that.
    """
    # 1. Create top-level Eliot Action:
    with start_action(action_type="dask:compute"):
        # In order to reduce logging verbosity, add logging to the already
        # optimized graph:
        optimized = optimize(*args, optimizations=[_add_logging])
        # optimize() above already injected the logging wrappers, so disable
        # further graph optimization to keep them in place.
        return compute(*optimized, optimize_graph=False)
def _add_logging(dsk, ignore=None):
    """
    Add logging to a Dask graph.
    @param dsk: The Dask graph.
    @return: New Dask graph.
    """
    ctx = current_action()
    # Topological order guarantees that an Eliot task ID is allocated for a
    # key before the IDs of any keys that depend on it.
    ordered_keys = toposort(dsk)

    def _flatten(key):
        # Human-readable name for a key; tuple keys become dash-joined parts.
        if isinstance(key, str):
            return key
        return "-".join(str(part) for part in key)

    # Map every key to a display name, resolving alias keys to the name of
    # the key they point at (aliases always appear after their target in
    # topological order, so the lookup is safe).
    display_names = {}
    for key in ordered_keys:
        target = dsk[key]
        if not callable(target) and target in ordered_keys:
            display_names[key] = display_names[target]
        else:
            display_names[key] = _flatten(key)

    # One serialized Eliot child-task ID per key, created in execution order.
    task_ids = {
        key: str(ctx.serialize_task_id(), "utf-8")
        for key in ordered_keys
    }

    # Wrap each task's callable so it resumes the matching Eliot task and
    # logs its dependencies; alias entries are passed through untouched.
    logged = {}
    for key in ordered_keys:
        entry = dsk[key]
        if not callable(entry[0]):
            logged[key] = entry
            continue
        wrapper = _RunWithEliotContext(
            task_id=task_ids[key],
            func=entry[0],
            key=display_names[key],
            dependencies=[display_names[dep] for dep in get_dependencies(dsk, key)],
        )
        logged[key] = (wrapper, ) + tuple(entry[1:])
    assert logged.keys() == dsk.keys()
    return logged
__all__ = ["compute_with_trace"]
| 31.064748 | 79 | 0.629458 |
fed4b5bbf7210e8e778459bea56b43e1cdd88563 | 23,152 | py | Python | airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py | jfmolano/airflow | 58fd1aa23d9c65d4e0e4d60424c56fc7e0b3feb3 | [
"Apache-2.0"
] | null | null | null | airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py | jfmolano/airflow | 58fd1aa23d9c65d4e0e4d60424c56fc7e0b3feb3 | [
"Apache-2.0"
] | null | null | null | airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py | jfmolano/airflow | 58fd1aa23d9c65d4e0e4d60424c56fc7e0b3feb3 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executes task in a Kubernetes POD"""
import re
from typing import Any, Dict, Iterable, List, Optional, Tuple
import yaml
from kubernetes.client import CoreV1Api, models as k8s
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client, pod_generator, pod_launcher
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.secret import Secret
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.helpers import validate_key
from airflow.utils.state import State
from airflow.version import version as airflow_version
class KubernetesPodOperator(BaseOperator): # pylint: disable=too-many-instance-attributes
    """
    Execute a task in a Kubernetes Pod
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:KubernetesPodOperator`
    .. note::
        If you use `Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine/>`__
        and Airflow is not running in the same cluster, consider using
        :class:`~airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator`, which
        simplifies the authorization process.
    :param namespace: the namespace to run within kubernetes.
    :type namespace: str
    :param image: Docker image you wish to launch. Defaults to hub.docker.com,
        but fully qualified URLS will point to custom repositories. (templated)
    :type image: str
    :param name: name of the pod in which the task will run, will be used (plus a random
        suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
    :type name: str
    :param cmds: entrypoint of the container. (templated)
        The docker images's entrypoint is used if this is not provided.
    :type cmds: list[str]
    :param arguments: arguments of the entrypoint. (templated)
        The docker image's CMD is used if this is not provided.
    :type arguments: list[str]
    :param ports: ports for launched pod.
    :type ports: list[k8s.V1ContainerPort]
    :param volume_mounts: volumeMounts for launched pod.
    :type volume_mounts: list[k8s.V1VolumeMount]
    :param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes.
    :type volumes: list[k8s.V1Volume]
    :param env_vars: Environment variables initialized in the container. (templated)
    :type env_vars: dict
    :param secrets: Kubernetes secrets to inject in the container.
        They can be exposed as environment vars or files in a volume.
    :type secrets: list[airflow.kubernetes.secret.Secret]
    :param in_cluster: run kubernetes client with in_cluster configuration.
    :type in_cluster: bool
    :param cluster_context: context that points to kubernetes cluster.
        Ignored when in_cluster is True. If None, current-context is used.
    :type cluster_context: str
    :param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor
    :type reattach_on_restart: bool
    :param labels: labels to apply to the Pod. (templated)
    :type labels: dict
    :param startup_timeout_seconds: timeout in seconds to startup the pod.
    :type startup_timeout_seconds: int
    :param get_logs: get the stdout of the container as logs of the tasks.
    :type get_logs: bool
    :param image_pull_policy: Specify a policy to cache or always pull an image.
    :type image_pull_policy: str
    :param annotations: non-identifying metadata you can attach to the Pod.
        Can be a large range of data, and can include characters
        that are not permitted by labels.
    :type annotations: dict
    :param resources: A dict containing resources requests and limits.
        Possible keys are request_memory, request_cpu, limit_memory, limit_cpu,
        and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources.
        See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
    :type resources: k8s.V1ResourceRequirements
    :param affinity: A dict containing a group of affinity scheduling rules.
    :type affinity: dict
    :param config_file: The path to the Kubernetes config file. (templated)
        If not specified, default value is ``~/.kube/config``
    :type config_file: str
    :param node_selectors: A dict containing a group of scheduling rules.
    :type node_selectors: dict
    :param image_pull_secrets: Any image pull secrets to be given to the pod.
        If more than one secret is required, provide a
        comma separated list: secret_a,secret_b
    :type image_pull_secrets: List[k8s.V1LocalObjectReference]
    :param service_account_name: Name of the service account
    :type service_account_name: str
    :param is_delete_operator_pod: What to do when the pod reaches its final
        state, or the execution is interrupted.
        If False (default): do nothing, If True: delete the pod
    :type is_delete_operator_pod: bool
    :param hostnetwork: If True enable host networking on the pod.
    :type hostnetwork: bool
    :param tolerations: A list of kubernetes tolerations.
    :type tolerations: list tolerations
    :param security_context: security options the pod should run with (PodSecurityContext).
    :type security_context: dict
    :param dnspolicy: dnspolicy for the pod.
    :type dnspolicy: str
    :param schedulername: Specify a schedulername for the pod
    :type schedulername: str
    :param full_pod_spec: The complete podSpec
    :type full_pod_spec: kubernetes.client.models.V1Pod
    :param init_containers: init container for the launched Pod
    :type init_containers: list[kubernetes.client.models.V1Container]
    :param log_events_on_failure: Log the pod's events if a failure occurs
    :type log_events_on_failure: bool
    :param do_xcom_push: If True, the content of the file
        /airflow/xcom/return.json in the container will also be pushed to an
        XCom when the container completes.
    :type do_xcom_push: bool
    :param pod_template_file: path to pod template file (templated)
    :type pod_template_file: str
    :param priority_class_name: priority class name for the launched Pod
    :type priority_class_name: str
    :param termination_grace_period: Termination grace period if task killed in UI,
        defaults to kubernetes default
    :type termination_grace_period: int
    """
    template_fields: Iterable[str] = (
        'image', 'cmds', 'arguments', 'env_vars', 'labels', 'config_file', 'pod_template_file')
    @apply_defaults
    def __init__(self, # pylint: disable=too-many-arguments,too-many-locals
                 *,
                 namespace: Optional[str] = None,
                 image: Optional[str] = None,
                 name: Optional[str] = None,
                 cmds: Optional[List[str]] = None,
                 arguments: Optional[List[str]] = None,
                 ports: Optional[List[k8s.V1ContainerPort]] = None,
                 volume_mounts: Optional[List[k8s.V1VolumeMount]] = None,
                 volumes: Optional[List[k8s.V1Volume]] = None,
                 env_vars: Optional[List[k8s.V1EnvVar]] = None,
                 env_from: Optional[List[k8s.V1EnvFromSource]] = None,
                 secrets: Optional[List[Secret]] = None,
                 in_cluster: Optional[bool] = None,
                 cluster_context: Optional[str] = None,
                 labels: Optional[Dict] = None,
                 reattach_on_restart: bool = True,
                 startup_timeout_seconds: int = 120,
                 get_logs: bool = True,
                 image_pull_policy: str = 'IfNotPresent',
                 annotations: Optional[Dict] = None,
                 resources: Optional[k8s.V1ResourceRequirements] = None,
                 affinity: Optional[Dict] = None,
                 config_file: Optional[str] = None,
                 node_selectors: Optional[Dict] = None,
                 image_pull_secrets: Optional[List[k8s.V1LocalObjectReference]] = None,
                 service_account_name: str = 'default',
                 is_delete_operator_pod: bool = False,
                 hostnetwork: bool = False,
                 tolerations: Optional[List] = None,
                 security_context: Optional[Dict] = None,
                 dnspolicy: Optional[str] = None,
                 schedulername: Optional[str] = None,
                 full_pod_spec: Optional[k8s.V1Pod] = None,
                 init_containers: Optional[List[k8s.V1Container]] = None,
                 log_events_on_failure: bool = False,
                 do_xcom_push: bool = False,
                 pod_template_file: Optional[str] = None,
                 priority_class_name: Optional[str] = None,
                 termination_grace_period: Optional[int] = None,
                 **kwargs) -> None:
        # 'xcom_push' was the pre-2.0 name of the flag; reject it explicitly
        # rather than silently ignoring it.
        if kwargs.get('xcom_push') is not None:
            raise AirflowException("'xcom_push' was deprecated, use 'do_xcom_push' instead")
        # resources=None: k8s-style resources are kept in self.k8s_resources,
        # not in BaseOperator's (operator-level) resources attribute.
        super().__init__(resources=None, **kwargs)
        self.do_xcom_push = do_xcom_push
        self.image = image
        self.namespace = namespace
        self.cmds = cmds or []
        self.arguments = arguments or []
        self.labels = labels or {}
        self.startup_timeout_seconds = startup_timeout_seconds
        self.env_vars = env_vars or []
        self.env_from = env_from or []
        self.ports = ports or []
        self.volume_mounts = volume_mounts or []
        self.volumes = volumes or []
        self.secrets = secrets or []
        self.in_cluster = in_cluster
        self.cluster_context = cluster_context
        self.reattach_on_restart = reattach_on_restart
        self.get_logs = get_logs
        self.image_pull_policy = image_pull_policy
        self.node_selectors = node_selectors or {}
        self.annotations = annotations or {}
        self.affinity = affinity or {}
        self.k8s_resources = resources or {}
        self.config_file = config_file
        self.image_pull_secrets = image_pull_secrets or []
        self.service_account_name = service_account_name
        self.is_delete_operator_pod = is_delete_operator_pod
        self.hostnetwork = hostnetwork
        self.tolerations = tolerations or []
        self.security_context = security_context or {}
        self.dnspolicy = dnspolicy
        self.schedulername = schedulername
        self.full_pod_spec = full_pod_spec
        self.init_containers = init_containers or []
        self.log_events_on_failure = log_events_on_failure
        self.priority_class_name = priority_class_name
        self.pod_template_file = pod_template_file
        self.name = self._set_name(name)
        self.termination_grace_period = termination_grace_period
        # Populated lazily in execute(); kept as attributes so on_kill() can
        # reach the running pod.
        self.client: CoreV1Api = None
        self.pod: k8s.V1Pod = None
    @staticmethod
    def create_labels_for_pod(context) -> dict:
        """
        Generate labels for the pod to track the pod in case of Operator crash
        :param context: task context provided by airflow DAG
        :return: dict
        """
        labels = {
            'dag_id': context['dag'].dag_id,
            'task_id': context['task'].task_id,
            'execution_date': context['ts'],
            'try_number': context['ti'].try_number,
        }
        # In the case of sub dags this is just useful
        if context['dag'].is_subdag:
            labels['parent_dag_id'] = context['dag'].parent_dag.dag_id
        # Ensure that label is valid for Kube,
        # and if not truncate/remove invalid chars and replace with short hash.
        for label_id, label in labels.items():
            safe_label = pod_generator.make_safe_label_value(str(label))
            labels[label_id] = safe_label
        return labels
    def execute(self, context) -> Optional[str]:
        """
        Build the pod spec, launch (or reattach to) the pod, wait for it to
        finish and return the xcom result (if do_xcom_push is enabled).
        Raises AirflowException if the pod ends in a non-success state.
        """
        try:
            if self.in_cluster is not None:
                client = kube_client.get_kube_client(in_cluster=self.in_cluster,
                                                     cluster_context=self.cluster_context,
                                                     config_file=self.config_file)
            else:
                client = kube_client.get_kube_client(cluster_context=self.cluster_context,
                                                     config_file=self.config_file)
            self.pod = self.create_pod_request_obj()
            self.namespace = self.pod.metadata.namespace
            self.client = client
            # Add combination of labels to uniquely identify a running pod
            labels = self.create_labels_for_pod(context)
            label_selector = self._get_pod_identifying_label_string(labels)
            self.namespace = self.pod.metadata.namespace
            # Look for pods from a previous (interrupted) run of this task.
            pod_list = client.list_namespaced_pod(self.namespace, label_selector=label_selector)
            if len(pod_list.items) > 1 and self.reattach_on_restart:
                raise AirflowException(
                    'More than one pod running with labels: '
                    '{label_selector}'.format(label_selector=label_selector))
            launcher = pod_launcher.PodLauncher(kube_client=client, extract_xcom=self.do_xcom_push)
            if len(pod_list.items) == 1:
                try_numbers_match = self._try_numbers_match(context, pod_list.items[0])
                final_state, result = self.handle_pod_overlap(labels, try_numbers_match, launcher,
                                                              pod_list.items[0])
            else:
                self.log.info("creating pod with labels %s and launcher %s", labels, launcher)
                final_state, _, result = self.create_new_pod_for_operator(labels, launcher)
            if final_state != State.SUCCESS:
                status = self.client.read_namespaced_pod(self.name, self.namespace)
                raise AirflowException(
                    f'Pod returned a failure: {status}')
            return result
        except AirflowException as ex:
            raise AirflowException(f'Pod Launching failed: {ex}')
    def handle_pod_overlap(
        self, labels: dict, try_numbers_match: bool, launcher: Any, pod: k8s.V1Pod
    ) -> Tuple[State, Optional[str]]:
        """
        In cases where the Scheduler restarts while a KubernetesPodOperator task is running,
        this function will either continue to monitor the existing pod or launch a new pod
        based on the `reattach_on_restart` parameter.
        :param labels: labels used to determine if a pod is repeated
        :type labels: dict
        :param try_numbers_match: do the try numbers match? Only needed for logging purposes
        :type try_numbers_match: bool
        :param launcher: PodLauncher
        :param pod_list: list of pods found
        """
        if try_numbers_match:
            log_line = f"found a running pod with labels {labels} and the same try_number."
        else:
            log_line = f"found a running pod with labels {labels} but a different try_number."
        # In case of failed pods, should reattach the first time, but only once
        # as the task will have already failed.
        if self.reattach_on_restart and not pod.metadata.labels.get("already_checked"):
            log_line += " Will attach to this pod and monitor instead of starting new one"
            self.log.info(log_line)
            self.pod = pod
            final_state, result = self.monitor_launched_pod(launcher, pod)
        else:
            log_line += f"creating pod with labels {labels} and launcher {launcher}"
            self.log.info(log_line)
            final_state, _, result = self.create_new_pod_for_operator(labels, launcher)
        return final_state, result
    @staticmethod
    def _get_pod_identifying_label_string(labels) -> str:
        """Build a k8s label selector from ``labels``, excluding the volatile try_number."""
        filtered_labels = {label_id: label for label_id, label in labels.items() if label_id != 'try_number'}
        return ','.join([label_id + '=' + label for label_id, label in sorted(filtered_labels.items())])
    @staticmethod
    def _try_numbers_match(context, pod) -> bool:
        """Return True if the existing pod was created by this same task try."""
        return pod.metadata.labels['try_number'] == context['ti'].try_number
    def _set_name(self, name):
        """Sanitize ``name`` into a DNS-1123-safe pod name; None if a template/full spec supplies it."""
        if self.pod_template_file or self.full_pod_spec:
            return None
        validate_key(name, max_length=220)
        return re.sub(r'[^a-z0-9.-]+', '-', name.lower())
    def create_pod_request_obj(self) -> k8s.V1Pod:
        """
        Creates a V1Pod based on user parameters. Note that a `pod` or `pod_template_file`
        will supersede all other values.
        """
        self.log.debug("Creating pod for K8sPodOperator task %s", self.task_id)
        if self.pod_template_file:
            self.log.debug("Pod template file found, will parse for base pod")
            pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
        else:
            pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))
        pod = k8s.V1Pod(
            api_version="v1",
            kind="Pod",
            metadata=k8s.V1ObjectMeta(
                namespace=self.namespace,
                labels=self.labels,
                name=self.name,
                annotations=self.annotations,
            ),
            spec=k8s.V1PodSpec(
                node_selector=self.node_selectors,
                affinity=self.affinity,
                tolerations=self.tolerations,
                init_containers=self.init_containers,
                containers=[
                    k8s.V1Container(
                        image=self.image,
                        name="base",
                        command=self.cmds,
                        ports=self.ports,
                        resources=self.k8s_resources,
                        volume_mounts=self.volume_mounts,
                        args=self.arguments,
                        env=self.env_vars,
                        env_from=self.env_from,
                    )
                ],
                image_pull_secrets=self.image_pull_secrets,
                service_account_name=self.service_account_name,
                host_network=self.hostnetwork,
                security_context=self.security_context,
                dns_policy=self.dnspolicy,
                scheduler_name=self.schedulername,
                restart_policy='Never',
                priority_class_name=self.priority_class_name,
                volumes=self.volumes,
            )
        )
        # Operator arguments override any conflicting values from the template.
        pod = PodGenerator.reconcile_pods(pod_template, pod)
        for secret in self.secrets:
            self.log.debug("Adding secret to task %s", self.task_id)
            pod = secret.attach_to_pod(pod)
        if self.do_xcom_push:
            self.log.debug("Adding xcom sidecar to task %s", self.task_id)
            pod = PodGenerator.add_xcom_sidecar(pod)
        return pod
    def create_new_pod_for_operator(self, labels, launcher) -> Tuple[State, k8s.V1Pod, Optional[str]]:
        """
        Creates a new pod and monitors for duration of task
        :param labels: labels used to track pod
        :param launcher: pod launcher that will manage launching and monitoring pods
        :return:
        """
        if not (self.full_pod_spec or self.pod_template_file):
            # Add Airflow Version to the label
            # And a label to identify that pod is launched by KubernetesPodOperator
            self.log.debug("Adding k8spodoperator labels to pod before launch for task %s", self.task_id)
            self.labels.update(
                {
                    'airflow_version': airflow_version.replace('+', '-'),
                    'kubernetes_pod_operator': 'True',
                }
            )
            self.labels.update(labels)
            self.pod.metadata.labels = self.labels
        self.log.debug("Starting pod:\n%s", yaml.safe_dump(self.pod.to_dict()))
        try:
            launcher.start_pod(
                self.pod,
                startup_timeout=self.startup_timeout_seconds)
            final_state, result = launcher.monitor_pod(pod=self.pod, get_logs=self.get_logs)
        except AirflowException:
            if self.log_events_on_failure:
                for event in launcher.read_pod_events(self.pod).items:
                    self.log.error("Pod Event: %s - %s", event.reason, event.message)
            raise
        finally:
            if self.is_delete_operator_pod:
                self.log.debug("Deleting pod for task %s", self.task_id)
                launcher.delete_pod(self.pod)
        return final_state, self.pod, result
    def patch_already_checked(self, pod: k8s.V1Pod):
        """Add an "already tried annotation to ensure we only retry once"""
        pod.metadata.labels["already_checked"] = "True"
        body = PodGenerator.serialize_pod(pod)
        self.client.patch_namespaced_pod(pod.metadata.name, pod.metadata.namespace, body)
    def monitor_launched_pod(self, launcher, pod) -> Tuple[State, Optional[str]]:
        """
        Monitors a pod to completion that was created by a previous KubernetesPodOperator
        :param launcher: pod launcher that will manage launching and monitoring pods
        :param pod: podspec used to find pod using k8s API
        :return:
        """
        try:
            (final_state, result) = launcher.monitor_pod(pod, get_logs=self.get_logs)
        finally:
            if self.is_delete_operator_pod:
                launcher.delete_pod(pod)
        if final_state != State.SUCCESS:
            if self.log_events_on_failure:
                for event in launcher.read_pod_events(pod).items:
                    self.log.error("Pod Event: %s - %s", event.reason, event.message)
            # Mark the pod so a later scheduler restart will not reattach to
            # this already-failed pod a second time.
            self.patch_already_checked(self.pod)
            raise AirflowException(
                f'Pod returned a failure: {final_state}'
            )
        return final_state, result
    def on_kill(self) -> None:
        """Delete the running pod when the task is externally killed (honours termination_grace_period)."""
        if self.pod:
            pod: k8s.V1Pod = self.pod
            namespace = pod.metadata.namespace
            name = pod.metadata.name
            kwargs = {}
            if self.termination_grace_period is not None:
                kwargs = {"grace_period_seconds": self.termination_grace_period}
            self.client.delete_namespaced_pod(name=name, namespace=namespace, **kwargs)
| 47.152749 | 109 | 0.644134 |
8c8181d260a8982e99301cec6e4a0ecc71eee293 | 17,221 | py | Python | test/box-py/iproto.test.py | artembo/tarantool | 5f0c04820b2429c004fbd28a838e291ca2833ca3 | [
"BSD-2-Clause"
] | 1 | 2022-02-17T06:03:48.000Z | 2022-02-17T06:03:48.000Z | test/box-py/iproto.test.py | artembo/tarantool | 5f0c04820b2429c004fbd28a838e291ca2833ca3 | [
"BSD-2-Clause"
] | null | null | null | test/box-py/iproto.test.py | artembo/tarantool | 5f0c04820b2429c004fbd28a838e291ca2833ca3 | [
"BSD-2-Clause"
] | 2 | 2021-09-07T02:13:20.000Z | 2022-01-29T22:11:35.000Z | from __future__ import print_function
import os
import sys
import struct
import socket
import msgpack
from tarantool.const import *
from tarantool import Connection
from tarantool.request import Request, RequestInsert, RequestSelect, RequestUpdate, RequestUpsert
from tarantool.response import Response
from lib.tarantool_connection import TarantoolConnection
# FIXME: Remove after the new constants are added to the Python connector.
if not 'REQUEST_TYPE_ID' in locals():
REQUEST_TYPE_ID = 73
IPROTO_VERSION = 0x54
IPROTO_FEATURES = 0x55
if not 'REQUEST_TYPE_WATCH' in locals():
REQUEST_TYPE_WATCH = 74
REQUEST_TYPE_UNWATCH = 75
REQUEST_TYPE_EVENT = 76
IPROTO_EVENT_KEY = 0x57
IPROTO_EVENT_DATA = 0x58
admin("box.schema.user.grant('guest', 'read,write,execute', 'universe')")
print("""
#
# iproto packages test
#
""")
# opening new connection to tarantool/box
conn = TarantoolConnection(server.iproto.host, server.iproto.port)
conn.connect()
s = conn.socket
print("""
# Test bug #899343 (server assertion failure on incorrect packet)
""")
print("# send the package with invalid length")
invalid_request = struct.pack("<LLL", 1, 4294967290, 1)
print(s.send(invalid_request))
print("# check that is server alive")
print(iproto.py_con.ping() > 0)
# closing connection
s.close()
# Build a reverse map from IPROTO_* numeric codes to their symbolic names so
# responses can be printed with readable keys. IPROTO_SQL_INFO_* names are
# excluded: those cannot appear in a response map at the top level but share
# numeric codes with other IPROTO_* constants.
key_names = {
    value: name
    for (name, value) in list(globals().items())
    if type(name) == str and name.startswith("IPROTO_")
    and not name.startswith("IPROTO_SQL_INFO_") and type(value) == int
}
def repr_dict(todump):
    # Pretty-print a request/response map, substituting the symbolic IPROTO_*
    # name for each known numeric key (unknown keys pass through unchanged)
    # and sorting the entries for deterministic output.
    renamed = {key_names.get(code, code): value for (code, value) in todump.items()}
    return repr(sorted(renamed.items()))
def test(header, body):
    """
    Send one raw iproto packet built from the ``header``/``body`` maps and
    print whether the server is still alive afterwards (regression driver
    for gh-206: packets missing the KEY field must not crash the server).
    """
    # Connect and authenticate
    c = Connection("localhost", server.iproto.port)
    c.connect()
    print("query", repr_dict(header), repr_dict(body))
    header = msgpack.dumps(header)
    body = msgpack.dumps(body)
    # Packet layout: msgpack-encoded total length, then header, then body.
    query = msgpack.dumps(len(header) + len(body)) + header + body
    # Send raw request using connected socket
    s = c._socket
    try:
        s.send(query)
    except OSError as e:
        print(" => ", "Failed to send request")
    c.close()
    # The server must survive the malformed request.
    print(iproto.py_con.ping() > 0)
print("""
# Test gh-206 "Segfault if sending IPROTO package without `KEY` field"
""")
print("IPROTO_SELECT")
test({ IPROTO_CODE : REQUEST_TYPE_SELECT }, { IPROTO_SPACE_ID: 280 })
print("\n")
print("IPROTO_DELETE")
test({ IPROTO_CODE : REQUEST_TYPE_DELETE }, { IPROTO_SPACE_ID: 280 })
print("\n")
print("IPROTO_UPDATE")
test({ IPROTO_CODE : REQUEST_TYPE_UPDATE }, { IPROTO_SPACE_ID: 280 })
test({ IPROTO_CODE : REQUEST_TYPE_UPDATE },
{ IPROTO_SPACE_ID: 280, IPROTO_KEY: (1, )})
print("\n")
print("IPROTO_REPLACE")
test({ IPROTO_CODE : REQUEST_TYPE_REPLACE }, { IPROTO_SPACE_ID: 280 })
print("\n")
print("IPROTO_CALL")
test({ IPROTO_CODE : REQUEST_TYPE_CALL }, {})
test({ IPROTO_CODE : REQUEST_TYPE_CALL }, { IPROTO_KEY: ("procname", )})
print("\n")
# gh-434 Tarantool crashes on multiple iproto requests with WAL enabled
admin("box.cfg.wal_mode")
admin("space = box.schema.space.create('test', { id = 567 })")
admin("index = space:create_index('primary', { type = 'hash' })")
admin("box.schema.user.grant('guest', 'read,write,execute', 'space', 'test')")
c = Connection("localhost", server.iproto.port)
c.connect()
request1 = RequestInsert(c, 567, [1, "baobab"])
request2 = RequestInsert(c, 567, [2, "obbaba"])
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print(" => ", "Failed to send request")
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print(response1.__str__())
print(response2.__str__())
request1 = RequestInsert(c, 567, [3, "occama"])
request2 = RequestSelect(c, 567, 0, [1], 0, 1, 0)
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print(" => ", "Failed to send request")
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print(response1.__str__())
print(response2.__str__())
request1 = RequestSelect(c, 567, 0, [2], 0, 1, 0)
request2 = RequestInsert(c, 567, [4, "ockham"])
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print(" => ", "Failed to send request")
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print(response1.__str__())
print(response2.__str__())
request1 = RequestSelect(c, 567, 0, [1], 0, 1, 0)
request2 = RequestSelect(c, 567, 0, [2], 0, 1, 0)
s = c._socket
try:
s.send(bytes(request1) + bytes(request2))
except OSError as e:
print(" => ", "Failed to send request")
response1 = Response(c, c._read_response())
response2 = Response(c, c._read_response())
print(response1.__str__())
print(response2.__str__())
c.close()
admin("space:drop()")
#
# gh-522: Broken compatibility with msgpack-python for strings of size 33..255
#
admin("space = box.schema.space.create('test')")
admin("index = space:create_index('primary', { type = 'hash', parts = {1, 'string'}})")
class RawInsert(Request):
    """Hand-crafted INSERT iproto request carrying a pre-encoded msgpack blob.

    Used to test specific msgpack string headers (fixstr/str8/str16/str32)
    that the regular client encoder would normalize away.
    """
    request_type = REQUEST_TYPE_INSERT
    def __init__(self, conn, space_no, blob):
        super(RawInsert, self).__init__(conn)
        # Fix: encode the space_no argument instead of silently relying on
        # the module-level `space_id` global (the original ignored space_no).
        request_body = b'\x82' + msgpack.dumps(IPROTO_SPACE_ID) + \
            msgpack.dumps(space_no) + msgpack.dumps(IPROTO_TUPLE) + blob
        self._body = request_body
class RawSelect(Request):
    """Hand-crafted SELECT iproto request carrying a pre-encoded msgpack key.

    Companion of RawInsert: reads tuples back using a specific msgpack
    string header for the key, with a fixed LIMIT of 100.
    """
    request_type = REQUEST_TYPE_SELECT
    def __init__(self, conn, space_no, blob):
        super(RawSelect, self).__init__(conn)
        # Fix: encode the space_no argument instead of silently relying on
        # the module-level `space_id` global (the original ignored space_no).
        request_body = b'\x83' + msgpack.dumps(IPROTO_SPACE_ID) + \
            msgpack.dumps(space_no) + msgpack.dumps(IPROTO_KEY) + blob + \
            msgpack.dumps(IPROTO_LIMIT) + msgpack.dumps(100);
        self._body = request_body
c = iproto.py_con
space = c.space("test")
space_id = space.space_no
TESTS = [
(1, b'\xa1', b'\xd9\x01', b'\xda\x00\x01', b'\xdb\x00\x00\x00\x01'),
(31, b'\xbf', b'\xd9\x1f', b'\xda\x00\x1f', b'\xdb\x00\x00\x00\x1f'),
(32, b'\xd9\x20', b'\xda\x00\x20', b'\xdb\x00\x00\x00\x20'),
(255, b'\xd9\xff', b'\xda\x00\xff', b'\xdb\x00\x00\x00\xff'),
(256, b'\xda\x01\x00', b'\xdb\x00\x00\x01\x00'),
(65535, b'\xda\xff\xff', b'\xdb\x00\x00\xff\xff'),
(65536, b'\xdb\x00\x01\x00\x00'),
]
for test in TESTS:
it = iter(test)
size = next(it)
print("STR", size)
print("--")
for fmt in it:
if sys.version[0] == "2":
print("0x" + fmt.encode("hex"), "=>", end=" ")
else:
print("0x" + fmt.hex(), "=>", end=" ")
field = "*" * size
c._send_request(RawInsert(c, space_id, b'\x91' + fmt + field.encode("utf-8")))
tuple = space.select(field)[0]
print(len(tuple[0])== size and "ok" or "fail", end=" ")
it2 = iter(test)
next(it2)
for fmt2 in it2:
tuple = c._send_request(RawSelect(c, space_id,
b'\x91' + fmt2 + field.encode("utf-8")))[0]
print(len(tuple[0]) == size and "ok" or "fail", end=" ")
tuple = space.delete(field)[0]
print(len(tuple[0]) == size and "ok" or "fail", end="")
print()
print()
print("Test of schema_id in iproto.")
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
def receive_response():
    """Read one iproto packet from the global socket `s`.

    Returns a dict {"header": <map>, "body": <map>}; on a receive error both
    maps are empty and a diagnostic is printed instead of raising.
    """
    resp_len = ""
    resp_headerbody = ""
    resp_header = {}
    resp_body = {}
    try:
        # assumes the packet length is always msgpack-encoded in exactly
        # 5 bytes (0xce + uint32) — TODO confirm the server guarantees this
        resp_len = s.recv(5)
        resp_len = msgpack.loads(resp_len)
        resp_headerbody = s.recv(resp_len)
        # wait for the whole data
        while len(resp_headerbody) < resp_len:
            chunk = s.recv(resp_len - len(resp_headerbody))
            resp_headerbody = resp_headerbody + chunk
        # The payload is two consecutive msgpack maps: header, then body.
        unpacker = msgpack.Unpacker(use_list = True)
        unpacker.feed(resp_headerbody)
        resp_header = unpacker.unpack()
        resp_body = unpacker.unpack()
    except (OSError, socket.timeout) as e:
        print(" => ", "Failed to recv response")
    res = {}
    res["header"] = resp_header
    res["body"] = resp_body
    return res
def send_request(req_header, req_body):
    """Encode header and body as msgpack and send one iproto packet on `s`.

    Send failures are reported to stdout rather than raised, so a broken
    connection does not abort the whole test script.
    """
    header_bytes = msgpack.dumps(req_header)
    body_bytes = msgpack.dumps(req_body)
    # Wire format: msgpack-encoded total payload length, then header, then body.
    packet = msgpack.dumps(len(header_bytes) + len(body_bytes)) \
        + header_bytes + body_bytes
    try:
        s.send(packet)
    except (OSError, socket.timeout):
        print(" => ", "Failed to send request")
def test_request(req_header, req_body):
    """Send one request and synchronously return its parsed response."""
    send_request(req_header, req_body)
    return receive_response()
def resp_status(resp):
    """Render a parsed response as "ok" or a decoded "error: ..." string."""
    # Guard-clause form: bail out with the decoded error message unless the
    # header code signals success.
    if resp["header"][IPROTO_CODE] != REQUEST_TYPE_OK:
        return "error: {}".format(resp["body"][IPROTO_ERROR].decode("utf-8"))
    return "ok"
header = { IPROTO_CODE : REQUEST_TYPE_SELECT}
body = { IPROTO_SPACE_ID: space_id,
IPROTO_INDEX_ID: 0,
IPROTO_KEY: [],
IPROTO_ITERATOR: 2,
IPROTO_OFFSET: 0,
IPROTO_LIMIT: 1 }
resp = test_request(header, body)
print("Normal connect done w/o errors:", resp["header"][0] == 0)
print("Got schema_id:", resp["header"][5] > 0)
schema_id = resp["header"][5]
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : 0 }
resp = test_request(header, body)
print("Zero-schema_id connect done w/o errors:", resp["header"][0] == 0)
print("Same schema_id:", resp["header"][5] == schema_id)
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id }
resp = test_request(header, body)
print("Normal connect done w/o errors:", resp["header"][0] == 0)
print("Same schema_id:", resp["header"][5] == schema_id)
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id + 1 }
resp = test_request(header, body)
print("Wrong schema_id leads to error:", resp["header"][0] != 0)
print("Same schema_id:", resp["header"][5] == schema_id)
admin("space2 = box.schema.create_space('test2')")
header = { IPROTO_CODE : REQUEST_TYPE_SELECT, 5 : schema_id }
resp = test_request(header, body)
print("Schema changed -> error:", resp["header"][0] != 0)
print("Got another schema_id:", resp["header"][5] != schema_id)
#
# gh-2334 Lost SYNC in JOIN response.
#
uuid = "0d5bd431-7f3e-4695-a5c2-82de0a9cbc95"
header = { IPROTO_CODE: REQUEST_TYPE_JOIN, IPROTO_SYNC: 2334 }
body = { IPROTO_SERVER_UUID: uuid }
resp = test_request(header, body)
if resp["header"][IPROTO_SYNC] == 2334:
i = 1
while i < 3:
resp = receive_response()
if resp["header"][IPROTO_SYNC] != 2334:
print("Bad sync on response with number ", i)
break
if resp["header"][IPROTO_CODE] == REQUEST_TYPE_OK:
i += 1
else:
print("Sync ok")
else:
print("Bad first sync")
#
# Try incorrect JOIN. SYNC must be also returned.
#
body[IPROTO_SERVER_UUID] = "unknown"
resp = test_request(header, body)
if resp["header"][IPROTO_SYNC] == 2334:
print("Sync on error is ok")
else:
print("Sync on error is not ok")
c.close()
admin("space:drop()")
admin("space2:drop()")
admin("box.space._cluster:delete{2} ~= nil")
#
# gh-1280 Segmentation fault on space.select(tuple()) or space.select([2])
#
admin("space = box.schema.create_space('gh1280', { engine = 'vinyl' })")
admin("index = space:create_index('primary')")
admin("space:insert({1})")
admin("space:insert({2, 'Music'})")
admin("space:insert({3, 'Length', 93})")
iproto.py_con.space("gh1280").select([])
iproto.py_con.space("gh1280").select(list())
admin("space:drop()")
admin("box.schema.user.revoke('guest', 'read,write,execute', 'universe')")
#
# gh-272 if the packet was incorrect, respond with an error code
# gh-1654 do not close connnection on invalid request
#
print("""
# Test bugs gh-272, gh-1654 if the packet was incorrect, respond with
# an error code and do not close connection
""")
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
header = { "hello": "world"}
body = { "bug": 272 }
resp = test_request(header, body)
print("sync={}, {}".format(resp["header"][IPROTO_SYNC],
resp["body"].get(IPROTO_ERROR).decode("utf-8")))
header = { IPROTO_CODE : REQUEST_TYPE_SELECT }
header[IPROTO_SYNC] = 1234
resp = test_request(header, body)
print("sync={}, {}".format(resp["header"][IPROTO_SYNC],
resp["body"].get(IPROTO_ERROR).decode("utf-8")))
header[IPROTO_SYNC] = 5678
body = { IPROTO_SPACE_ID: 304, IPROTO_KEY: [], IPROTO_LIMIT: 1 }
resp = test_request(header, body)
print("sync={}, {}".format(resp["header"][IPROTO_SYNC],
resp["body"].get(IPROTO_ERROR).decode("utf-8")))
c.close()
admin("space = box.schema.space.create('test_index_base', { id = 568 })")
admin("index = space:create_index('primary', { type = 'hash' })")
admin("box.schema.user.grant('guest', 'read,write,execute', 'space', 'test_index_base')")
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
request = RequestInsert(c, 568, [1, 0, 0, 0])
try:
s.send(bytes(request))
except OSError as e:
print(" => ", "Failed to send request")
response = Response(c, c._read_response())
print(response.__str__())
request = RequestUpdate(c, 568, 0, [1], [["+", 2, 1], ["-", 3, 1]])
try:
s.send(bytes(request))
except OSError as e:
print(" => ", "Failed to send request")
response = Response(c, c._read_response())
print(response.__str__())
request = RequestUpsert(c, 568, 0, [1, 0, 0, 0], [["+", 2, 1], ["-", 3, 1]])
try:
s.send(bytes(request))
except OSError as e:
print(" => ", "Failed to send request")
response = Response(c, c._read_response())
request = RequestSelect(c, 568, 0, [1], 0, 1, 0)
try:
s.send(bytes(request))
except OSError as e:
print(" => ", "Failed to send request")
response = Response(c, c._read_response())
print(response.__str__())
c.close()
#
# gh-2619 follow up: allow empty args for call/eval.
#
admin("function kek() return 'kek' end")
admin("box.schema.user.grant('guest', 'read,write,execute', 'universe')")
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
header = { IPROTO_CODE: REQUEST_TYPE_CALL, IPROTO_SYNC: 100 }
body = { IPROTO_FUNCTION_NAME: "kek" }
resp = test_request(header, body)
print("Sync: ", resp["header"][IPROTO_SYNC])
body = resp["body"][IPROTO_DATA]
if sys.version[0] == "3":
body = [body[0].decode("utf-8")]
print("Retcode: ", body)
c.close()
admin("box.schema.user.revoke('guest', 'read,write,execute', 'universe')")
admin("space:drop()")
print("""
#
# gh-6253 IPROTO_ID
#
""")
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
header = { IPROTO_CODE: REQUEST_TYPE_ID }
print("# Invalid version")
resp = test_request(header, { IPROTO_VERSION: "abc" })
print(str(resp["body"][IPROTO_ERROR].decode("utf-8")))
print("# Invalid features")
resp = test_request(header, { IPROTO_FEATURES: ["abc"] })
print(str(resp["body"][IPROTO_ERROR].decode("utf-8")))
print("# Empty request body")
resp = test_request(header, {})
print("version={}, features={}".format(
resp["body"][IPROTO_VERSION], resp["body"][IPROTO_FEATURES]))
print("# Unknown version and features")
resp = test_request(header, { IPROTO_VERSION: 99999999,
IPROTO_FEATURES: [99999999] })
print("version={}, features={}".format(
resp["body"][IPROTO_VERSION], resp["body"][IPROTO_FEATURES]))
c.close()
print("""
#
# gh-6257 Watchers
#
""")
def watch(key):
    """Subscribe to (or acknowledge) notifications for `key` via IPROTO_WATCH."""
    print("# Watch key '{}'".format(key))
    send_request({IPROTO_CODE: REQUEST_TYPE_WATCH}, {IPROTO_EVENT_KEY: key})
def unwatch(key):
    """Cancel the subscription for `key` via IPROTO_UNWATCH."""
    print("# Unwatch key '{}'".format(key))
    send_request({IPROTO_CODE: REQUEST_TYPE_UNWATCH}, {IPROTO_EVENT_KEY: key})
def receive_event():
    """Read one packet and print it as a watcher event (or a diagnostic).

    Note: the printed strings (including the "Recieve" misspelling) are part
    of the test's expected output and must not be altered.
    """
    print("# Recieve event")
    resp = receive_response()
    code = resp["header"].get(IPROTO_CODE)
    # A failed/timed-out receive yields an empty header -> no code.
    if code is None:
        print("<no event received>")
        return
    if code == REQUEST_TYPE_EVENT:
        print("key='{}', value={}".format(
            resp["body"].get(IPROTO_EVENT_KEY, '').decode('utf-8'),
            resp["body"].get(IPROTO_EVENT_DATA)))
    else:
        print("Unexpected packet: {}".format(resp))
def check_no_event():
    """Assert (by expected output) that no event arrives within 10 ms.

    Temporarily puts the global socket into timeout mode so receive_event
    prints "<no event received>" instead of blocking forever.
    """
    s.settimeout(0.01)
    receive_event()
    s.settimeout(None)
c = Connection("localhost", server.iproto.port)
c.connect()
s = c._socket
print("# Missing key")
resp = test_request({IPROTO_CODE: REQUEST_TYPE_WATCH}, {})
print(resp_status(resp))
print("# Invalid key type")
resp = test_request({IPROTO_CODE: REQUEST_TYPE_WATCH},
{IPROTO_EVENT_KEY: 123})
print(resp_status(resp))
# Register a watcher
watch("foo")
receive_event()
# Register a watcher for another key
watch("bar")
receive_event()
# Unregister and register watcher
unwatch("bar")
watch("bar")
receive_event()
# No notification without ack
admin("box.broadcast('foo', {1, 2, 3})")
check_no_event()
# Notification after ack
watch("foo")
receive_event()
watch("bar")
admin("box.broadcast('bar', 123)")
receive_event()
# No notification after unregister
admin("box.broadcast('bar', 456)")
unwatch("bar")
check_no_event()
# Cleanup
c.close()
admin("box.broadcast('foo', nil)")
admin("box.broadcast('bar', nil)")
| 29.949565 | 97 | 0.657511 |
7b8bdada91bdcc2513115fa11f4b4296ff2f9da7 | 6,415 | py | Python | sympy/testing/benchmarking.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | sympy/testing/benchmarking.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | sympy/testing/benchmarking.py | Michal-Gagala/sympy | 3cc756c2af73b5506102abaeefd1b654e286e2c8 | [
"MIT"
] | null | null | null | """benchmarking through py.test"""
import py
from py.__.test.item import Item
from py.__.test.terminal.terminal import TerminalSession
from math import ceil as _ceil, floor as _floor, log10
import timeit
from inspect import getsource
# from IPython.Magic.magic_timeit
# Time units and their multipliers for pretty-printing benchmark durations;
# scaling[i] converts a value in seconds into units[i].
units = ["s", "ms", "us", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
# Reverse lookup: unit string -> its index into `units`/`scaling`.
unitn = {s: i for i, s in enumerate(units)}
# Significant digits shown when formatting benchmark times.
precision = 3
# like py.test Directory but scan for 'bench_<smth>.py'
class Directory(py.test.collect.Directory):
def filefilter(self, path):
b = path.purebasename
ext = path.ext
return b.startswith('bench_') and ext == '.py'
# like py.test Module but scane for 'bench_<smth>' and 'timeit_<smth>'
class Module(py.test.collect.Module):
    """Collector that only picks up functions named ``bench_*`` or ``timeit_*``."""
    def funcnamefilter(self, name):
        # startswith accepts a tuple of prefixes — one call instead of an
        # or-chain; semantics are identical.
        return name.startswith(('bench_', 'timeit_'))
# Function level benchmarking driver
class Timer(timeit.Timer):
    """timeit.Timer variant that compiles the statement in caller globals.

    NOTE(review): this copies private, Python-2-era timeit internals
    (``timeit.reindent``, ``timeit.template`` used with %-formatting,
    ``timeit.dummy_src_name``) — confirm against the running interpreter's
    timeit module before relying on it.
    """
    # NOTE(review): the ``globals=globals()`` default is evaluated once at
    # class-definition time, so it captures *this module's* globals — confirm
    # that is intentional for callers that omit the argument.
    def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
        # copy of timeit.Timer.__init__
        # similarity index 95%
        self.timer = timer
        stmt = timeit.reindent(stmt, 8)
        setup = timeit.reindent(setup, 4)
        src = timeit.template % {'stmt': stmt, 'setup': setup}
        self.src = src # Save for traceback display
        code = compile(src, timeit.dummy_src_name, "exec")
        ns = {}
        #exec(code, globals(), ns) -- original timeit code
        exec(code, globals, ns) # -- we use caller-provided globals instead
        self.inner = ns["inner"]
class Function(py.__.test.item.Function):
    """py.test item that runs a benchmark function and records its best time.

    ``timeit_*`` functions get an auto-calibrated repeat count (IPython-style);
    ``bench_*`` functions are executed exactly once.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.benchtime = None   # best measured time in seconds, or None
        self.benchtitle = None  # human-readable title for the results table
    def execute(self, target, *args):
        # get func source without first 'def func(...):' line
        src = getsource(target)
        src = '\n'.join( src.splitlines()[1:] )
        # extract benchmark title: docstring if present, else first code line.
        # Fix: use __doc__/__globals__ (valid on Python 2.6+ and Python 3)
        # instead of the Python-2-only func_doc/func_globals attributes.
        if target.__doc__ is not None:
            self.benchtitle = target.__doc__
        else:
            self.benchtitle = src.splitlines()[0].strip()
        # XXX we ignore args
        timer = Timer(src, globals=target.__globals__)
        if self.name.startswith('timeit_'):
            # from IPython.Magic.magic_timeit: grow `number` until one run
            # takes roughly 0.2 s, then time `repeat` batches of that size.
            repeat = 3
            number = 1
            for i in range(1, 10):
                t = timer.timeit(number)
                if t >= 0.2:
                    number *= (0.2 / t)
                    number = int(_ceil(number))
                    break
                if t <= 0.02:
                    # we are not close enough to that 0.2s
                    number *= 10
                else:
                    # since we are very close to be > 0.2s we'd better adjust number
                    # so that timing time is not too high
                    number *= (0.2 / t)
                    number = int(_ceil(number))
                    break
            self.benchtime = min(timer.repeat(repeat, number)) / number
        # 'bench_<smth>'
        else:
            self.benchtime = timer.timeit(1)
class BenchSession(TerminalSession):
    """py.test terminal session that appends a benchmark results table."""
    def header(self, colitems):
        super().header(colitems)
    def footer(self, colitems):
        # Print the standard footer, then our benchmark summary.
        super().footer(colitems)
        self.out.write('\n')
        self.print_bench_results()
    def print_bench_results(self):
        """Format collected benchmark times into an aligned three-column table."""
        self.out.write('==============================\n')
        self.out.write(' *** BENCHMARKING RESULTS *** \n')
        self.out.write('==============================\n')
        self.out.write('\n')
        # benchname, time, benchtitle
        results = []
        for item, outcome in self._memo:
            if isinstance(item, Item):
                best = item.benchtime
                if best is None:
                    # skipped or failed benchmarks
                    tstr = '---'
                else:
                    # from IPython.Magic.magic_timeit: pick the largest unit
                    # (s/ms/us/ns) that keeps the mantissa >= 1.
                    if best > 0.0:
                        order = min(-int(_floor(log10(best)) // 3), 3)
                    else:
                        order = 3
                    tstr = "%.*g %s" % (
                        precision, best * scaling[order], units[order])
                results.append( [item.name, tstr, item.benchtitle] )
        # dot/unit align second column
        # FIXME simpler? this is crappy -- shame on me...
        # NOTE(review): a '---' entry (skipped bench) has no unit token, so
        # the `n, u = tstr.split()` below would raise ValueError — confirm
        # skipped benches never reach this point.
        # First pass: per unit, record the widest mantissa (wm) and widest
        # fractional part (we) so every number of that unit can be padded.
        wm = [0]*len(units)
        we = [0]*len(units)
        for s in results:
            tstr = s[1]
            n, u = tstr.split()
            # unit n
            un = unitn[u]
            try:
                m, e = n.split('.')
            except ValueError:
                m, e = n, ''
            wm[un] = max(len(m), wm[un])
            we[un] = max(len(e), we[un])
        # Second pass: pad each number to its unit's column widths and place
        # it in a fixed slot so values with different units still line up.
        for s in results:
            tstr = s[1]
            n, u = tstr.split()
            un = unitn[u]
            try:
                m, e = n.split('.')
            except ValueError:
                m, e = n, ''
            m = m.rjust(wm[un])
            e = e.ljust(we[un])
            if e.strip():
                n = '.'.join((m, e))
            else:
                n = ' '.join((m, e))
            # let's put the number into the right place
            txt = ''
            for i in range(len(units)):
                if i == un:
                    txt += n
                else:
                    txt += ' '*(wm[i] + we[i] + 1)
            s[1] = '%s %s' % (txt, u)
        # align all columns besides the last one
        for i in range(2):
            w = max(len(s[i]) for s in results)
            for s in results:
                s[i] = s[i].ljust(w)
        # show results
        for s in results:
            self.out.write('%s | %s | %s\n' % tuple(s))
def main(args=None):
    """Run py.test with the benchmark collectors and session installed."""
    # hook our Directory/Module/Function as defaults
    from py.__.test import defaultconftest
    defaultconftest.Directory = Directory
    defaultconftest.Module = Module
    defaultconftest.Function = Function
    # hook BenchSession as py.test session
    config = py.test.config
    config._getsessionclass = lambda: BenchSession
    py.test.cmdline.main(args)
| 28.896396 | 91 | 0.486516 |
78345e0e02cd7245a6e788f41e4952f0df5c08c4 | 939 | py | Python | htmllinklist.py | eyepod101/datagen | 25b13058f641fca1892232b1d968fdd4dbf7d62f | [
"MIT"
] | null | null | null | htmllinklist.py | eyepod101/datagen | 25b13058f641fca1892232b1d968fdd4dbf7d62f | [
"MIT"
] | null | null | null | htmllinklist.py | eyepod101/datagen | 25b13058f641fca1892232b1d968fdd4dbf7d62f | [
"MIT"
] | null | null | null | import os
import re
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-id", "--inputdir", required=True, help="Input existing directory that contains all cleaned html files.")
parser.add_argument("-of", "--outputfile", required=True, help="Output a new file that will contain URL links from all cleaned html filepaths.")
args = vars(parser.parse_args())
source = args["inputdir"]
destination = args["outputfile"]
def natural_key(string_):
    """Sort key that orders embedded integers numerically ("f2" before "f10").

    Splits on runs of digits (the capturing group keeps them in the result)
    and converts each digit run to int, leaving other chunks as strings.
    """
    chunks = re.split(r'(\d+)', string_)
    return [int(chunk) if chunk.isdigit() else chunk for chunk in chunks]
def linkList(dirpath, pathname):
    """Walk `dirpath` and write one HTML anchor per file into `pathname`.

    Directories and files are visited in natural sort order (so page2
    precedes page10). Each output line has the form
    ``<a href="/root/file">/root/file</a>``.
    """
    # Fix: `with` guarantees the output file is closed (and flushed) even if
    # the walk or a write raises; the original relied on a bare close().
    with open(pathname, "w") as fout:
        for root, dirs, filenames in os.walk(dirpath):
            # In-place sort of `dirs` controls os.walk's descent order.
            dirs.sort(key=natural_key)
            for f in sorted(filenames, key=natural_key):
                p = os.path.join(root, f)
                b = "<a href=" + "\"" + "/" + p + "\">" + "/" + p + "</a>"
                fout.write(b + '\n')
linkList(source, destination)
| 32.37931 | 144 | 0.644302 |
aef05bd3614d005c520c9fff9c98480184fa61bb | 5,611 | py | Python | model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | 42 | 2021-10-31T10:17:49.000Z | 2022-03-21T08:51:46.000Z | model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | 6 | 2021-10-31T15:06:03.000Z | 2022-03-31T10:32:53.000Z | model_compression_toolkit/core/keras/graph_substitutions/substitutions/input_scaling.py | reuvenperetz/model_optimization | 40de02d56750ee4cc20e693da63bc2e70b4d20e6 | [
"Apache-2.0"
] | 18 | 2021-11-01T12:16:43.000Z | 2022-03-25T16:52:37.000Z | # Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.keras.layers import InputLayer, Dense, DepthwiseConv2D, Conv2D, Conv2DTranspose, ZeroPadding2D
from typing import List
from model_compression_toolkit.core import common
from model_compression_toolkit.core.common.framework_info import FrameworkInfo
from model_compression_toolkit.core.common.graph.base_graph import Graph
from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher, EdgeMatcher, WalkMatcher
from model_compression_toolkit.core.common.graph.base_node import BaseNode
from model_compression_toolkit.core.common.quantization.quantization_config import QuantizationConfig
from model_compression_toolkit.core.common.constants import THRESHOLD
from model_compression_toolkit.core.keras.constants import KERNEL
input_node = NodeOperationMatcher(InputLayer)
zeropad_node = NodeOperationMatcher(ZeroPadding2D)
op2d_node = NodeOperationMatcher(Dense) | \
NodeOperationMatcher(Conv2D) | \
NodeOperationMatcher(DepthwiseConv2D) | \
NodeOperationMatcher(Conv2DTranspose)
INPUT_MATCHER = WalkMatcher([input_node, op2d_node])
INPUT_MATCHER_WITH_PAD = WalkMatcher([input_node, zeropad_node, op2d_node])
class BaseInputScaling(common.BaseSubstitution):
    """
    General scale activation threshold for input layers, if they are followed by linear nodes. We first
    scale their thresholds to a constrained threshold, and then fix it by scaling the linear op weights
    correspondingly.
    The matcher instance of type WalkMatcher may include intermediate nodes that don't affect scaling
    (such as ZeroPadding), but only the first and last nodes are used for scaling
    """
    def __init__(self,
                 matcher_instance):
        """
        Matches: InputLayer -> (optional nodes) -> (Dense,Conv2D,DepthwiseConv2D,Conv2DTranspose)
        note: the optional nodes are nodes that don't affect the scaling (such as ZeroPadding)
        Create a substitution using different params which may affect the way this substitution is made.
        The substitution is looking for edges in the graph which are input layers connected to linear layers.
        Args:
            matcher_instance: matcher instance of type WalkMatcher
        """
        super().__init__(matcher_instance=matcher_instance)
    def substitute(self,
                   graph: Graph,
                   nodes_list: List[BaseNode]) -> Graph:
        """
        Scale activation threshold for input layers, if they are followed by linear nodes. We first
        scale their thresholds to a constrained threshold, and then fix it by scaling the linear op weights
        correspondingly.
        Args:
            graph: Graph to apply the substitution on.
            nodes_list: Nodes matched by this substitution's WalkMatcher; the first node is the
                input layer and the last is the linear op (intermediate nodes are ignored).
        Returns:
            Graph after applying the substitution.
        """
        input_layer = nodes_list[0]
        linear_layer = nodes_list[-1]
        if not input_layer.is_all_activation_candidates_equal():
            raise Exception("Input scaling is not supported for more than one activation quantization configuration "
                            "candidate")
        # all candidates have same activation config, so taking the first candidate for calculations
        threshold = input_layer.candidates_quantization_cfg[0].activation_quantization_cfg.activation_quantization_params.get(THRESHOLD)
        if threshold is None:
            return graph
        min_value, max_value = graph.get_out_stats_collector(input_layer).get_min_max_values()
        threshold_float = max(abs(min_value), max_value)
        # Only shrink the threshold; if the observed range already exceeds the
        # quantization threshold there is nothing to gain from scaling.
        if threshold > threshold_float:
            scale_factor = threshold_float / threshold
            graph.user_info.set_input_scale(1 / scale_factor)
            # Compensate the activation scaling by scaling the linear weights.
            w1_fixed = linear_layer.get_weights_by_keys(KERNEL) * scale_factor
            linear_layer.set_weights_by_keys(KERNEL, w1_fixed)
            graph.scale_stats_collector(input_layer, 1 / scale_factor)
            # After scaling weights may have different thresholds so it needs to be recalculated
            for nqc in linear_layer.candidates_quantization_cfg:
                nqc.weights_quantization_cfg.calculate_and_set_weights_params(w1_fixed)
        return graph
class InputScaling(BaseInputScaling):
    """
    Substitution extends BaseInputScaling to the case of Input-->Linear
    """
    def __init__(self):
        """
        Initialize an InputScaling substitution (matches Input -> Linear).
        """
        super().__init__(matcher_instance=INPUT_MATCHER)
class InputScalingWithPad(BaseInputScaling):
    """
    Substitution extends BaseInputScaling to the case of Input-->ZeroPadding-->Linear
    """
    def __init__(self):
        """
        Initialize an InputScalingWithPad substitution (matches Input -> ZeroPadding -> Linear).
        """
        super().__init__(matcher_instance=INPUT_MATCHER_WITH_PAD)
32fa2ab7925073c8045c193c57d329dee1348f78 | 79 | py | Python | etest_test/fixtures_test/__init__.py | alunduil/etest | e5f06d7e8c83be369576976f239668545bcbfffd | [
"MIT"
] | 6 | 2015-01-08T13:56:50.000Z | 2018-01-08T00:53:08.000Z | etest_test/fixtures_test/__init__.py | alunduil/etest | e5f06d7e8c83be369576976f239668545bcbfffd | [
"MIT"
] | 64 | 2015-01-01T23:10:07.000Z | 2021-06-12T06:55:31.000Z | etest_test/fixtures_test/__init__.py | alunduil/etest | e5f06d7e8c83be369576976f239668545bcbfffd | [
"MIT"
] | 2 | 2015-05-08T01:32:48.000Z | 2015-05-30T15:38:31.000Z | """Test fixtures."""
import os
# Directory containing this fixtures package (derived from __file__; may be
# a relative path depending on how the package was imported).
FIXTURES_DIRECTORY = os.path.dirname(__file__)
| 15.8 | 46 | 0.746835 |
44aece94cc06a059cc2c3842180813f640a90a10 | 1,009 | py | Python | python/runtime/db_writer/__init__.py | awsl-dbq/sqlflow | 6684ac4b4f26774bd10e437bc52080fdbae5ce49 | [
"Apache-2.0"
] | null | null | null | python/runtime/db_writer/__init__.py | awsl-dbq/sqlflow | 6684ac4b4f26774bd10e437bc52080fdbae5ce49 | [
"Apache-2.0"
] | null | null | null | python/runtime/db_writer/__init__.py | awsl-dbq/sqlflow | 6684ac4b4f26774bd10e437bc52080fdbae5ce49 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runtime.db_writer.clickhouse import ClickhouseDBWriter
from runtime.db_writer.hive import HiveDBWriter
from runtime.db_writer.maxcompute import MaxComputeDBWriter
from runtime.db_writer.mysql import MySQLDBWriter
from runtime.db_writer.pai_maxcompute import PAIMaxComputeDBWriter
# Public DB writer implementations re-exported by runtime.db_writer.
__all__ = [
    "MySQLDBWriter", "HiveDBWriter", "MaxComputeDBWriter",
    "PAIMaxComputeDBWriter", "ClickhouseDBWriter"
]
| 42.041667 | 74 | 0.799802 |
a78856847238424ef74cedfd6c601fe5bee0e6af | 8,785 | py | Python | models/vggf.py | IBM/energy-efficient-resilience | 13dfcac143df218abe20ed8d8752a0bd7e5a424b | [
"Apache-2.0"
] | 4 | 2022-03-01T16:57:12.000Z | 2022-03-22T09:22:35.000Z | models/vggf.py | IBM/energy-efficient-resilience | 13dfcac143df218abe20ed8d8752a0bd7e5a424b | [
"Apache-2.0"
] | null | null | null | models/vggf.py | IBM/energy-efficient-resilience | 13dfcac143df218abe20ed8d8752a0bd7e5a424b | [
"Apache-2.0"
] | 2 | 2022-03-04T19:45:14.000Z | 2022-03-23T13:17:27.000Z | # Adapted vgg16 from pytorch repo to include my fault injection operators and for cifar10
import torch
import torch.nn as nn
import sys
sys.path.append('../quantized_ops')
sys.path.append('../faultinjection_ops')
from quantized_ops import zs_quantized_ops
from faultinjection_ops import zs_faultinjection_ops
import pdb
# Per layer clamping currently based on manual values set
weight_clamp_values = [0.2, 0.2, 0.15, 0.13, 0.1, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
fc_weight_clamp = 0.1
class VGG(nn.Module):
    """VGG backbone adapted for CIFAR-sized inputs with pluggable layers.

    `features` and `classifier` are constructed externally (see make_layers /
    make_classifier) so quantized or fault-injecting ops can be swapped in
    without changing this class.
    """
    def __init__(self, features, classifier, classes=10, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        # 1x1 / stride-1 average pool is effectively a no-op; kept for
        # structural parity with the torchvision reference model.
        self.avgpool = nn.AvgPool2d(kernel_size=1, stride=1)
        self.classifier = classifier
        if init_weights:
            self._initialize_weights()
    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        # Flatten everything but the batch dimension before the classifier.
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        """Kaiming init for convs, constant init for BN, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_classifier(classes, precision, ber, position, BitErrorMap0to1, BitErrorMap1to0, faulty_layers):
    """Build the final 512->classes linear layer, fault-injected or quantized.

    If 'linear' is listed in `faulty_layers`, the layer perturbs its weights
    using the provided bit-error maps; otherwise a plain symmetric-quantized
    linear layer is used. Weights are clamped to `fc_weight_clamp` (module
    global) in both cases.
    NOTE(review): `ber` and `position` are accepted but unused here —
    presumably the error maps are precomputed upstream; confirm.
    """
    if ('linear' in faulty_layers):
        classifier = zs_faultinjection_ops.nnLinearPerturbWeight_op(512, classes, precision, fc_weight_clamp, BitErrorMap0to1=BitErrorMap0to1, BitErrorMap1to0=BitErrorMap1to0)
    else:
        classifier = zs_quantized_ops.nnLinearSymQuant_op(512, classes, precision, fc_weight_clamp)
    return classifier
def make_layers(cfg, in_channels, batch_norm, precision, ber, position, BitErrorMap0to1, BitErrorMap1to0, faulty_layers):
    """Build the VGG feature extractor from a config list (see `cfgs`).

    Each int entry becomes a 3x3 conv (fault-injected if 'conv' is in
    `faulty_layers`, else symmetric-quantized) followed by optional BatchNorm
    and ReLU; each 'M' entry becomes a 2x2 max-pool. Per-conv weight clamps
    come from the module-level `weight_clamp_values` list, indexed by `cl`.
    NOTE(review): `ber` and `position` are accepted but unused here —
    presumably the error maps are precomputed upstream; confirm.
    """
    layers = []
    # `cl` indexes weight_clamp_values; incremented once per conv layer.
    cl = 0
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            if ('conv' in faulty_layers):
                conv2d = zs_faultinjection_ops.nnConv2dPerturbWeight_op(in_channels, v, kernel_size=3, stride=1, padding=1, bias=True, precision=precision, clamp_val=weight_clamp_values[cl], BitErrorMap0to1=BitErrorMap0to1, BitErrorMap1to0=BitErrorMap1to0)
            else:
                conv2d = zs_quantized_ops.nnConv2dSymQuant_op(in_channels, v, kernel_size=3, stride=1, padding=1, bias=True, precision=precision, clamp_val=weight_clamp_values[cl])
            cl = cl + 1
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
# VGG layer configurations (torchvision convention): ints are conv output
# channel counts, 'M' is a 2x2 max-pool. A=vgg11, B=vgg13, D=vgg16, E=vgg19.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
#def vgg(cfg,batch_norm,**kwargs):
# kwargs['num_classes'] = 10
# model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
# return model
def vggf(cfg,input_channels,classes,batch_norm,precision, ber, position, BitErrorMap0to1, BitErrorMap1to0, faulty_layers):
    """Assemble a full VGG model from a config key ('A'/'B'/'D'/'E').

    Builds the feature extractor via make_layers and the classifier via
    make_classifier, threading through the quantization precision and the
    bit-error maps used for fault injection in `faulty_layers`.
    """
    model = VGG(make_layers(cfgs[cfg], in_channels = input_channels, batch_norm=batch_norm, precision=precision, ber=ber, position=position, BitErrorMap0to1=BitErrorMap0to1, BitErrorMap1to0=BitErrorMap1to0, faulty_layers=faulty_layers), make_classifier(classes, precision,ber, position, BitErrorMap0to1, BitErrorMap1to0, faulty_layers), classes, True)
    return model
#def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
# if pretrained:
# kwargs['init_weights'] = False
# model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
# return model
#
#def vgg11(pretrained=False, progress=True, **kwargs):
# r"""VGG 11-layer model (configuration "A") from
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
#
#
#def vgg11_bn(pretrained=False, progress=True, **kwargs):
# r"""VGG 11-layer model (configuration "A") with batch normalization
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
#
#
#def vgg13(pretrained=False, progress=True, **kwargs):
# r"""VGG 13-layer model (configuration "B")
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
#
#
#def vgg13_bn(pretrained=False, progress=True, **kwargs):
# r"""VGG 13-layer model (configuration "B") with batch normalization
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
#
#
#def vgg16(pretrained=False, progress=True, **kwargs):
# r"""VGG 16-layer model (configuration "D")
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
#
#
#def vgg16_bn(pretrained=False, progress=True, **kwargs):
# r"""VGG 16-layer model (configuration "D") with batch normalization
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
#
#
#def vgg19(pretrained=False, progress=True, **kwargs):
# r"""VGG 19-layer model (configuration "E")
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
#
#
#def vgg19_bn(pretrained=False, progress=True, **kwargs):
# r"""VGG 19-layer model (configuration 'E') with batch normalization
# `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
# Args:
# pretrained (bool): If True, returns a model pre-trained on ImageNet
# progress (bool): If True, displays a progress bar of the download to stderr
# """
# return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
#
| 45.283505 | 351 | 0.654411 |
91a8e90d2142f54a4339aa8c38d32b467f34e290 | 1,921 | py | Python | converter/path.py | irori/wpfont | 22536d257a953f5edb997368fd58505620f923ed | [
"MIT"
] | 66 | 2018-08-11T04:16:37.000Z | 2022-03-17T06:59:24.000Z | converter/path.py | irori/wpfont | 22536d257a953f5edb997368fd58505620f923ed | [
"MIT"
] | null | null | null | converter/path.py | irori/wpfont | 22536d257a953f5edb997368fd58505620f923ed | [
"MIT"
class PathBuilder:
    """Accumulate directed unit segments and stitch them into point paths.

    Segments are stored as an adjacency map ``{start: {ends}}``.  Adding a
    segment that is the exact reverse of a stored one cancels both, so the
    interior edges of adjacent pixels annihilate and only outline edges
    survive.
    """

    def __init__(self):
        # point -> set of points reachable by one stored segment
        self._segments = {}

    def add_segment(self, p1, p2):
        """Record the directed segment p1 -> p2, cancelling a stored p2 -> p1."""
        reverse = self._segments.get(p2)
        if reverse and p1 in reverse:
            # The opposite segment already exists: the pair cancels out.
            reverse.discard(p1)
            if not reverse:
                del self._segments[p2]
        else:
            self._segments.setdefault(p1, set()).add(p2)

    def optimize(self):
        """Merge chains of collinear segments into single longer segments.

        Relies on the module-level ``is_straight`` predicate.
        """
        for start in list(self._segments.keys()):
            if start not in self._segments:
                continue  # this key was consumed by an earlier merge
            for mid in list(self._segments[start]):
                while mid:
                    outgoing = self._segments[mid]
                    extensions = [p for p in outgoing if is_straight(start, mid, p)]
                    if extensions:
                        nxt = extensions[0]
                        # Replace start->mid and mid->nxt with start->nxt.
                        self._segments[start].add(nxt)
                        self._segments[start].remove(mid)
                        outgoing.remove(nxt)
                        if not outgoing:
                            del self._segments[mid]
                        mid = nxt
                    else:
                        mid = None

    def generate_paths(self):
        """Consume the stored segments and return them as lists of points.

        For a closed contour the start point is not repeated at the end.
        """
        paths = []
        while self._segments:
            cursor = min(self._segments)  # lowest start point (== sorted(...)[0])
            path = []
            while cursor in self._segments:
                path.append(cursor)
                targets = self._segments[cursor]
                nxt = targets.pop()
                if not targets:
                    del self._segments[cursor]
                cursor = nxt
            paths.append(path)
        return paths
class Pen:
    """Minimal drawing pen that records straight strokes into a PathBuilder."""

    def __init__(self, pb):
        self.pb = pb
        # Last pen position, or None before the first move_to.
        self.current = None

    def move_to(self, x, y):
        """Lift the pen and place it at (x, y) without drawing."""
        self.current = (x, y)

    def line_to(self, x, y):
        """Draw a segment from the current position to (x, y) and advance."""
        end = (x, y)
        self.pb.add_segment(self.current, end)
        self.current = end
def is_straight(p1, p2, p3):
    """Return True when p2 lies on the straight run from p1 to p3.

    Per axis, the three coordinates must be all equal, strictly increasing,
    or strictly decreasing — i.e. segments p1->p2 and p2->p3 continue in the
    same direction and may be merged into one.
    """
    def _monotone(a, b, c):
        return a == b == c or a < b < c or a > b > c

    return _monotone(p1[0], p2[0], p3[0]) and _monotone(p1[1], p2[1], p3[1])
df3e914cb7103f846e34d87ce4168176a9973aee | 1,398 | py | Python | model/contact.py | BorodinaO/python_training | 637148bc2d0ef9534515939272a0bba2b5874e8d | [
"Apache-2.0"
] | null | null | null | model/contact.py | BorodinaO/python_training | 637148bc2d0ef9534515939272a0bba2b5874e8d | [
"Apache-2.0"
] | null | null | null | model/contact.py | BorodinaO/python_training | 637148bc2d0ef9534515939272a0bba2b5874e8d | [
"Apache-2.0"
] | null | null | null | from sys import maxsize
class Contact:
    """Plain data holder for an address-book entry.

    Equality treats a missing id as a wildcard: two contacts are equal when
    their ids match (or either id is None) and both names match.
    """

    def __init__(self, firstname=None, lastname=None, id=None, all_phones_from_home_page=None, home=None,
                 work=None, mobile=None, phone2=None, email=None, email2=None, email3=None,
                 all_emails_from_home_page=None, address=None):
        # Bulk-assign the constructor arguments as same-named attributes,
        # in the same order the original assignments created them.
        values = {
            'firstname': firstname,
            'lastname': lastname,
            'id': id,
            'home': home,
            'work': work,
            'mobile': mobile,
            'phone2': phone2,
            'all_phones_from_home_page': all_phones_from_home_page,
            'email': email,
            'email2': email2,
            'email3': email3,
            'all_emails_from_home_page': all_emails_from_home_page,
            'address': address,
        }
        for attr, value in values.items():
            setattr(self, attr, value)

    def __repr__(self):
        fields = (self.id, self.lastname, self.firstname, self.email,
                  self.email2, self.email3, self.home, self.work,
                  self.phone2, self.address, self.mobile)
        return ":".join(str(f) for f in fields)

    def __eq__(self, other):
        ids_compatible = (self.id is None or other.id is None
                          or self.id == other.id)
        return (ids_compatible
                and self.lastname == other.lastname
                and self.firstname == other.firstname)

    def id_or_max(self):
        """Sort key: the numeric id when present, else a value above any id."""
        return int(self.id) if self.id else maxsize
| 37.783784 | 156 | 0.578684 |
fdae8fe0f01aa5d104c0c6dbc5d4bf4f8dfa98de | 7,766 | py | Python | docs/conf.py | SandeepUpadhyaya/mlflow_main | 5f4d2a7437ea79a56669e52c049ccf71fb1cef8e | [
"MIT"
] | null | null | null | docs/conf.py | SandeepUpadhyaya/mlflow_main | 5f4d2a7437ea79a56669e52c049ccf71fb1cef8e | [
"MIT"
] | null | null | null | docs/conf.py | SandeepUpadhyaya/mlflow_main | 5f4d2a7437ea79a56669e52c049ccf71fb1cef8e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# mlops_main documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# NOTE(review): os and sys are only needed by the sys.path tweak below,
# which is currently commented out — they are unused as the file stands.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mlops_main'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mlops_maindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'mlops_main.tex',
     u'mlops_main Documentation',
     u"Sandeep", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'mlops_main', u'mlops_main Documentation',
     [u"Sandeep"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'mlops_main', u'mlops_main Documentation',
     u"Sandeep", 'mlops_main',
     'its a wafer project using mlops', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.697959 | 80 | 0.706928 |
9ab8621bc24667c45e8a2f8d3c26512da981298c | 1,073 | py | Python | rgram/contrib/sites/migrations/0003_set_site_domain_and_name.py | dyjung123/rgram | 56223d08b2cd46fe73842ba3b5c817cd6b4e3fe4 | [
"MIT"
] | null | null | null | rgram/contrib/sites/migrations/0003_set_site_domain_and_name.py | dyjung123/rgram | 56223d08b2cd46fe73842ba3b5c817cd6b4e3fe4 | [
"MIT"
] | 7 | 2020-06-05T17:08:40.000Z | 2021-09-07T23:49:22.000Z | rgram/contrib/sites/migrations/0003_set_site_domain_and_name.py | dyjung123/rgram | 56223d08b2cd46fe73842ba3b5c817cd6b4e3fe4 | [
"MIT"
] | null | null | null | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Point the default Site record at this project (data migration)."""
    # Use the historical model, as required inside migrations.
    site_model = apps.get_model('sites', 'Site')
    site_model.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={'domain': 'example.com', 'name': 'rgram'},
    )
def update_site_backward(apps, schema_editor):
    """Restore the default Site record to Django's stock values."""
    # Use the historical model, as required inside migrations.
    site_model = apps.get_model('sites', 'Site')
    site_model.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={'domain': 'example.com', 'name': 'example.com'},
    )
class Migration(migrations.Migration):
    # Must run after the unique-domain constraint migration shipped with
    # django.contrib.sites.
    dependencies = [
        ('sites', '0002_alter_domain_unique'),
    ]

    operations = [
        # Data migration: forward sets this project's site record,
        # backward restores the stock defaults.
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
| 24.953488 | 129 | 0.640261 |
91ecd343c0416cd1814985e3b64df5a263621780 | 1,043 | py | Python | grace_dl/tensorflow/compressor/packing.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 88 | 2020-05-07T15:36:10.000Z | 2022-03-13T06:13:31.000Z | grace_dl/tensorflow/compressor/packing.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 21 | 2020-05-25T08:37:03.000Z | 2022-03-30T10:08:14.000Z | grace_dl/tensorflow/compressor/packing.py | aoranwu/grace | 1e28915f6f6e8189ef33c0c7d8d3ce314e0a493e | [
"BSD-2-Clause"
] | 33 | 2020-05-07T23:11:39.000Z | 2022-03-25T03:33:49.000Z | import tensorflow as tf
def encode_byte(a):
    """Pack an int32 tensor of 2-bit values (0..3) into uint8, 4 values/byte.

    The flattened input is split into four contiguous quarters q1..q4, and
    byte i stores (q1[i], q2[i], q3[i], q4[i]) in bit pairs from LSB to MSB.
    decode_byte is the inverse.
    """
    flat = tf.reshape(a, [-1])
    # Pad so the length is divisible by 4.  NOTE(review): when the length is
    # already a multiple of 4 this still appends 4 extra elements; decode_byte
    # truncates them away via real_size, so the round trip stays correct.
    n_pad = 4 - tf.mod(tf.size(flat), 4)
    padded = tf.concat([flat, tf.range(0, n_pad)], 0)
    q1, q2, q3, q4 = tf.split(padded, 4)
    # Pack four 2-bit values into one byte: q1 | q2<<2 | q3<<4 | q4<<6.
    packed = q1 + q2 * 4 + q3 * 16 + q4 * 64
    return tf.cast(packed, tf.uint8)
def decode_byte(encoded, real_size):
    """Inverse of encode_byte: unpack uint8 bytes into int32 values in 0..3.

    Recovers the four 2-bit fields of every byte, reassembles the original
    quarter ordering with a concat, and truncates the padding added by
    encode_byte using real_size (the pre-padding element count).
    """
    packed = tf.cast(encoded, tf.int32)
    # Extract bit pairs (LSB first); float division + mod mirrors the encoder.
    q1 = tf.mod(packed, 4)
    q2 = tf.cast(tf.mod(packed / 4, 4), tf.int32)
    q3 = tf.cast(tf.mod(packed / 16, 4), tf.int32)
    q4 = tf.cast(tf.mod(packed / 64, 4), tf.int32)
    decoded = tf.concat([q1, q2, q3, q4], 0)
    return decoded[:real_size]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.