content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# Interactive tip calculator: asks for the bill, a tip percentage, and the
# number of people, then prints the even per-person share (two ways).
print("Welcome to tip calculator! ")
total_bill_amount = float(input('What was the total bill ? ₹'))
tip_percent = int(input("How much tip would you like to give ? 10 , 12, 15 : "))
num_people = int(input('How many people would like to split the bill ? '))
# Same arithmetic order as a hand calculation: tip first, then add, then split
tip_value = (tip_percent / 100) * total_bill_amount
grand_total = tip_value + total_bill_amount
final_amount = grand_total / num_people
print(f'Each person should pay : ₹{round(final_amount,2)}')
print('Each person should pay :- ₹{:.2f}'.format(final_amount))
| [
4798,
7203,
14618,
284,
8171,
28260,
0,
366,
8,
201,
198,
35546,
796,
12178,
7,
15414,
10786,
2061,
373,
262,
2472,
2855,
5633,
2343,
224,
117,
6,
4008,
201,
198,
22504,
796,
493,
7,
15414,
7203,
2437,
881,
8171,
561,
345,
588,
284,... | 2.559585 | 193 |
from dataclasses import dataclass
from ..config import ObisDataSetConfig
from .data_block import DataBlock, DataSet
from .obis_data_set import (
ObisDataSet,
ObisId,
ObisStringDataSet,
UnknownObisDataSet,
parse_obis_id_from_address,
)
# OBIS ids whose data sets carry the metering point identifier.
# NOTE(review): the first two entries have 5 elements but the last has 6 —
# confirm the arity that `ObisId` is declared with (not visible here).
METERING_POINT_ID_OBIS_IDS: list[ObisId] = [
    (1, 0, 0, 0, 0),
    (1, 1, 0, 0, 0),
    (1, 0, 96, 1, 0, 255),
]
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
11485,
11250,
1330,
1835,
271,
6601,
7248,
16934,
198,
6738,
764,
7890,
62,
9967,
1330,
6060,
12235,
11,
6060,
7248,
198,
6738,
764,
672,
271,
62,
7890,
62,
2617,
1330,
3... | 2.198864 | 176 |
"""
This module contains tools for calculating the completeness of cluster samples and handling area masks.
"""
import os
import sys
import resource
import glob
import numpy as np
import pylab as plt
import astropy.table as atpy
from astLib import *
from scipy import stats
from scipy import interpolate
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
from scipy import ndimage
from scipy import optimize
import nemo
from . import signals
from . import maps
from . import MockSurvey
from . import plotSettings
from . import startUp
from collections import OrderedDict
import colorcet
import types
import pickle
import astropy.io.fits as pyfits
import time
import shutil
import yaml
from decimal import Decimal
# If want to catch warnings as errors...
#import warnings
#warnings.filterwarnings('error')
#------------------------------------------------------------------------------------------------------------
class SelFn(object):
    """An object that describes the survey selection function. It uses the output in the ``selFn/`` directory
    (produced by the :ref:`nemoCommand` command) to calculate the survey completeness for a given
    signal-to-noise cut on a (log\ :sub:`10` mass, z) grid for a given set of cosmological and scaling
    relation parameters.

    Args:
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        SNRCut (:obj:`float`): Completeness will be computed relative to this signal-to-noise selection
            cut (labelled as `fixed_SNR` in the catalogs produced by :ref:`nemoCommand`).
        configFileName (:obj:`str`, optional): Path to a Nemo configuration file. If not given, this
            will be read from ``selFnDir/config.yml``, which is the config file for the
            :ref:`nemoCommand` run that produced the ``selFn`` directory.
        footprintLabel (:obj:`str`, optional): Use this to specify a footprint, if any are defined in the
            Nemo config used to produce the ``selFn`` dir (e.g., 'DES', 'HSC', 'KiDS' etc.). The default
            value of ``None`` uses the whole survey footprint.
        zStep (:obj:`float`, optional): Use this to set the binning in redshift for completeness
            calculations.
        tileNames (:obj:`list`, optional): If given, restrict the :class:`SelFn` object to use only these
            tiles.
        enableDrawSample (:obj:`bool`, optional): This only needs to be set to `True` for generating mock
            catalogs.
        mockOversampleFactor (:obj:`float`, optional): Used only by :func:`generateMockSample`. Sets
            the oversampling level for the generated mock sample.
        downsampleRMS (:obj:`float`, optional): Downsample the resolution of the RMS (noise) tables by
            this factor. The RMS tables are generated from the noise maps, and are just a listing of noise
            level versus survey area. Downsampling speeds up completeness calculations considerably.
        applyMFDebiasCorrection (:obj:`bool`, optional): Set to `False` to disable the Eddington bias
            correction of mass estimates. Probably only useful for debugging.
        applyRelativisticCorrection (:obj:`bool`, optional): Set to `False` to disable inclusion of
            the relativistic correction in completeness calculations.
        setupAreaMask (:obj:`bool`, optional): If `True`, read in the area masks so that quick position
            checks can be done (e.g., by :meth:`SelFn.checkCoordsInAreaMask`).
        enableCompletenessCalc (:obj:`bool`, optional): If `True`, set up the machinery needed to do
            completeness calculations.

    Attributes:
        SNRCut (:obj:`float`): Completeness will be computed relative to this signal-to-noise selection cut
            (labelled as `fixed_SNR` in the catalogs produced by :ref:`nemoCommand`).
        footprintLabel (:obj:`str`): Use this to specify a footprint, if any are defined in the Nemo config
            used to produce the ``selFn`` dir (e.g., 'DES', 'HSC', 'KiDS' etc.). The default of ``None``
            uses the whole survey footprint.
        applyMFDebiasCorrection (:obj:`bool`): Set to `False` to disable the Eddington bias correction of
            mass estimates. Probably only useful for debugging.
        zStep (:obj:`float`): Use this to set the binning in redshift for completeness calculations.
        tileNames (:obj:`list`): The list of tiles used by the SelFn object (default of None uses all tiles).
        WCSDict (:obj:`dict`): A dictionary indexed by `tileName`, containing :obj:`astWCS.WCS` objects
            that describe the mapping between pixel coords and (RA, dec) coords in each tile.
        areaMaskDict (:obj:`dict`): A dictionary containing the survey area masks, indexed by tileName.
            Values > 0 in these masks define the cluster or source search area.
        scalingRelationDict (:obj:`dict`): A dictionary of scaling relation parameters (see example Nemo
            config files for the format).
        Q (:class:`nemo.signals.QFit`): An object for calculating the filter mismatch function, referred
            to as `Q` in the ACT papers from `Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_
            onwards.
        RMSDict (:obj:`dict`): A dictionary of RMS tables, indexed by tileName. Each RMSTable contains
            the noise level by area, as returned by :meth:`getRMSTab`.
        totalAreaDeg2 (:obj:`float`): The total area in square degrees, as measured from the survey mask,
            for the given set of tiles and footprint.
        fRelDict (:obj:`dict`): A dictionary of weights used for relativistic corrections, indexed by
            `tileName`.
        mockSurvey (:class:`nemo.MockSurvey.MockSurvey`): A :class:`MockSurvey` object, used for halo mass function
            calculations and generating mock catalogs.

    Note:
        Some of the methods of this class are experimental and not necessarily well tested.
    """
    # NOTE(review): the constructor is not visible in this chunk — attributes such as
    # self.tileTab, self.selFnDir, self.fracArea, self.y0NoiseAverageDict etc. are
    # presumably set there; confirm against the full module.
    def _setUpAreaMask(self):
        """Sets-up WCS info and loads area masks - needed for quick position checks etc.
        """
        # This takes ~20 sec to set-up - we could cache, or it's overhead when initialising SelFn
        # We could do a lot of this by just making the area mask, mass limit maps etc. MEFs
        # But then we wouldn't want to lazily load them (probably)
        # Build a per-tile table of RA/dec bounding boxes, used for fast point-in-tile tests
        self.tileTab=atpy.Table()
        self.tileTab.add_column(atpy.Column(list(self.tileNames), 'tileName'))
        self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileNames)), 'RAMin'))
        self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileNames)), 'RAMax'))
        self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileNames)), 'decMin'))
        self.tileTab.add_column(atpy.Column(np.zeros(len(self.tileNames)), 'decMax'))
        self.WCSDict={}
        self.areaMaskDict={}
        for row in self.tileTab:
            areaMap, wcs=loadAreaMask(row['tileName'], self.selFnDir)
            self.WCSDict[row['tileName']]=wcs.copy()
            self.areaMaskDict[row['tileName']]=areaMap
            # Bounding box from the two opposite corners of the tile
            ra0, dec0=self.WCSDict[row['tileName']].pix2wcs(0, 0)
            ra1, dec1=self.WCSDict[row['tileName']].pix2wcs(wcs.header['NAXIS1'], wcs.header['NAXIS2'])
            # Handle tiles that wrap across RA = 0 by mapping to negative RA
            if ra1 > ra0:
                ra1=-(360-ra1)
            row['RAMin']=min([ra0, ra1])
            row['RAMax']=max([ra0, ra1])
            row['decMin']=min([dec0, dec1])
            row['decMax']=max([dec0, dec1])
    def checkCoordsInAreaMask(self, RADeg, decDeg):
        """Checks if the given RA, dec coords are in valid regions of the map.

        Args:
            RADeg (:obj:`float` or :obj:`np.ndarray`): RA in decimal degrees.
            decDeg (:obj:`float` or :obj:`np.ndarray`): Dec in decimal degrees.

        Returns:
            `True` if the coordinates are in the area mask mask, `False` if not.
        """
        # Lazily build the tile bounding-box table / masks on first use
        if self.tileTab is None:
            self._setUpAreaMask()
        RADeg=np.array(RADeg)
        decDeg=np.array(decDeg)
        # Promote scalar inputs to length-1 sequences so the loop below works
        if RADeg.shape == ():
            RADeg=[RADeg]
        if decDeg.shape == ():
            decDeg=[decDeg]
        inMaskList=[]
        for ra, dec in zip(RADeg, decDeg):
            inMask=False
            # Inside footprint check
            # NOTE: Tiles may have -ve RAMin coords
            raMask=np.logical_and(np.greater_equal(ra, self.tileTab['RAMin']), np.less(ra, self.tileTab['RAMax']))
            negRAMask=np.logical_and(np.greater_equal(-(360-ra), self.tileTab['RAMin']), np.less(-(360-ra), self.tileTab['RAMax']))
            raMask=np.logical_or(raMask, negRAMask)
            decMask=np.logical_and(np.greater_equal(dec, self.tileTab['decMin']), np.less(dec, self.tileTab['decMax']))
            tileMask=np.logical_and(raMask, decMask)
            # This is just dealing with bytes versus strings in python3
            matchTilesList=[]
            for item in self.tileTab['tileName'][tileMask].tolist():
                if type(item) == bytes:
                    matchTilesList.append(item.decode('utf-8'))
                else:
                    matchTilesList.append(str(item))
            # Check the pixel value in each candidate tile's area mask (> 0 means valid area)
            for tileName in matchTilesList:
                x, y=self.WCSDict[tileName].wcs2pix(ra, dec)
                x=int(round(x)); y=int(round(y))
                if x < self.WCSDict[tileName].header['NAXIS1'] and y < self.WCSDict[tileName].header['NAXIS2'] \
                and self.areaMaskDict[tileName][y, x] > 0:
                    inMask=True
            inMaskList.append(inMask)
        # Scalar in, scalar out; array in, array out
        if len(inMaskList) > 1:
            return np.array(inMaskList)
        else:
            return inMaskList[0]
    def update(self, H0, Om0, Ob0, sigma8, ns, scalingRelationDict = None):
        """Re-calculates the survey-average selection function for a given set of cosmological and scaling
        relation parameters.

        Returns:
            None - attributes such as :attr:`compMz`, the (log\ :sub:`10` mass, z) completeness grid, are
            updated in-place.
        """
        if scalingRelationDict is not None:
            self.scalingRelationDict=scalingRelationDict
        self.mockSurvey.update(H0, Om0, Ob0, sigma8, ns)
        #---
        # New - as well as completeness, get y0 grid to map between cluster counts <-> y0
        zRange=self.mockSurvey.z
        y0GridCube=[]
        compMzCube=[]
        for tileName in self.RMSDict.keys():
            tenToA0, B0, Mpivot, sigma_int=[self.scalingRelationDict['tenToA0'], self.scalingRelationDict['B0'],
                                            self.scalingRelationDict['Mpivot'], self.scalingRelationDict['sigma_int']]
            # Predicted true y0 on the (z, log10 M) grid for this tile
            y0Grid=np.zeros([zRange.shape[0], self.mockSurvey.clusterCount.shape[1]])
            for i in range(len(zRange)):
                zk=zRange[i]
                k=np.argmin(abs(self.mockSurvey.z-zk))
                # Convert to M500c if the mock survey uses a different mass definition
                if self.mockSurvey.delta != 500 or self.mockSurvey.rhoType != "critical":
                    log10M500s=np.log10(self.mockSurvey.mdef.translate_mass(self.mockSurvey.cosmoModel,
                                                                            self.mockSurvey.M,
                                                                            self.mockSurvey.a[k],
                                                                            self.mockSurvey._M500cDef))
                else:
                    log10M500s=self.mockSurvey.log10M
                theta500s_zk=interpolate.splev(log10M500s, self.mockSurvey.theta500Splines[k])
                Qs_zk=self.Q.getQ(theta500s_zk, zk, tileName = tileName)
                # Scaling relation: y0 = 10^A0 * E(z)^2 * (M/Mpivot)^(1+B0) * Q
                true_y0s_zk=tenToA0*np.power(self.mockSurvey.Ez[k], 2)*np.power(np.power(10, self.mockSurvey.log10M)/Mpivot,
                                                                                1+B0)*Qs_zk
                if self.applyRelativisticCorrection == True:
                    fRels_zk=interpolate.splev(log10M500s, self.mockSurvey.fRelSplines[k])
                    true_y0s_zk=true_y0s_zk*fRels_zk
                y0Grid[i]=true_y0s_zk
            # For some cosmological parameters, we can still get the odd -ve y0
            y0Grid[y0Grid <= 0] = 1e-9
            # Calculate completeness using area-weighted average
            # NOTE: RMSTab that is fed in here can be downsampled in noise resolution for speed
            RMSTab=self.RMSDict[tileName]
            areaWeights=RMSTab['areaDeg2']/RMSTab['areaDeg2'].sum()
            log_y0Lim=np.log(self.SNRCut*RMSTab['y0RMS'])
            log_y0=np.log(y0Grid)
            compMz=np.zeros(log_y0.shape)
            for i in range(len(RMSTab)):
                SNRGrid=y0Grid/RMSTab['y0RMS'][i]
                SNRGrid=SNRGrid  # no-op; presumably left over from an edit
                # Fractional error on y0 = 1/SNR, floored at the selection cut
                log_y0Err=1/SNRGrid
                log_y0Err[SNRGrid < self.SNRCut]=1/self.SNRCut
                log_totalErr=np.sqrt(log_y0Err**2 + sigma_int**2)
                # Survival function of a log-normal gives P(detected) at this noise level
                compMz=compMz+stats.norm.sf(log_y0Lim[i], loc = log_y0, scale = log_totalErr)*areaWeights[i]
            compMzCube.append(compMz)
            y0GridCube.append(y0Grid)
        compMzCube=np.array(compMzCube)
        y0GridCube=np.array(y0GridCube)
        # Area-weighted average over tiles
        self.compMz=np.average(compMzCube, axis = 0, weights = self.fracArea)
        self.y0Grid=np.average(y0GridCube, axis = 0, weights = self.fracArea)
        #---
        # Old
        #compMzCube=[]
        #for tileName in self.RMSDict.keys():
            #compMzCube.append(calcCompleteness(self.RMSDict[tileName], self.SNRCut, tileName,
                                               #self.mockSurvey, self.scalingRelationDict, self.tckQFitDict))
            #if np.any(np.isnan(compMzCube[-1])) == True:
                #raise Exception("NaNs in compMz for tile '%s'" % (tileName))
        #compMzCube=np.array(compMzCube)
        #self.compMz=np.average(compMzCube, axis = 0, weights = self.fracArea)
    def projectCatalogToMz(self, tab):
        """Project a Nemo cluster catalog (an astropy Table) into the (log\ :sub:`10` mass, z) grid, taking
        into account the uncertainties on y0, and redshift. Note that if the redshift error is non-zero, this
        is a lot slower.

        Args:
            tab (:obj:`astropy.table.Table`): A Nemo cluster catalog, containing `redshift` and `redshiftErr`
                columns.

        Returns:
            A 2d array containing the projection of the catalog on the (log\ :sub:`10` mass, z) grid.
        """
        catProjectedMz=np.zeros(self.mockSurvey.clusterCount.shape)
        tenToA0, B0, Mpivot, sigma_int=self.scalingRelationDict['tenToA0'], self.scalingRelationDict['B0'], \
                                       self.scalingRelationDict['Mpivot'], self.scalingRelationDict['sigma_int']
        for row in tab:
            tileName=row['tileName']
            z=row['redshift']
            zErr=row['redshiftErr']
            # Catalog stores fixed_y_c in units of 1e-4
            y0=row['fixed_y_c']*1e-4
            y0Err=row['fixed_err_y_c']*1e-4
            P=signals.calcPMass(y0, y0Err, z, zErr, self.Q, self.mockSurvey,
                                tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot, sigma_int = sigma_int,
                                applyMFDebiasCorrection = self.applyMFDebiasCorrection,
                                fRelWeightsDict = self.fRelDict[tileName],
                                return2D = True, tileName = tileName)
            # Paste into (M, z) grid
            catProjectedMz=catProjectedMz+P # For return2D = True, P is normalised such that 2D array sum is 1
        return catProjectedMz
    def projectCatalogToMz_simple(self, tab):
        """Project a Nemo cluster catalog (an astropy Table) into the (log\ :sub:`10` mass, z) grid. This version
        doesn't take into account any uncertainties (which may be okay if your binning is coarse enough).

        Args:
            tab (:obj:`astropy.table.Table`): A Nemo cluster catalog, containing `redshift` and `redshiftErr`
                columns.

        Returns:
            A 2d array containing the projection of the catalog on the (log\ :sub:`10` mass, z) grid.
        """
        tenToA0, B0, Mpivot, sigma_int=self.scalingRelationDict['tenToA0'], self.scalingRelationDict['B0'], \
                                       self.scalingRelationDict['Mpivot'], self.scalingRelationDict['sigma_int']
        obs_log10Ms=[]
        for row in tab:
            tileName=row['tileName']
            z=row['redshift']
            zErr=row['redshiftErr']
            # Catalog stores fixed_y_c in units of 1e-4
            y0=row['fixed_y_c']*1e-4
            y0Err=row['fixed_err_y_c']*1e-4
            massDict=signals.calcMass(y0, y0Err, z, zErr, self.Q, tenToA0 = tenToA0, B0 = B0, Mpivot = Mpivot,
                                      sigma_int = sigma_int, mockSurvey = self.mockSurvey,
                                      applyMFDebiasCorrection = self.applyMFDebiasCorrection,
                                      fRelWeightsDict = self.fRelDict[tileName],
                                      calcErrors = False, tileName = tileName)
            # M500 is in units of 1e14 MSun, hence the +14
            obs_log10Ms.append(14+np.log10(massDict['M500']))
        # Simple 2d histogram on the same bin edges as the mock survey grid
        obsGrid, obs_log10MBinEdges, obs_zBinEdges=np.histogram2d(obs_log10Ms, tab['redshift'],
                                                                  bins = [self.mockSurvey.log10MBinEdges,
                                                                          self.mockSurvey.zBinEdges])
        obsGrid=obsGrid.transpose()
        return obsGrid
    def addPDetToCatalog(self, tab):
        """Given a catalog, add a column named `Pdet`, containing the detection probability.

        Args:
            tab (:obj:`astropy.table.Table`): A Nemo cluster catalog.

        Returns:
            Catalog with `Pdet` column added (:obj:`astropy.table.Table`)
        """
        # Detection probability = survival function of a log-normal at the selection limit
        log_y0Lim=np.log(self.SNRCut*tab['fixed_err_y_c']*1e-4)
        log_y0=np.log(tab['fixed_y_c']*1e-4)
        log_y0Err=1/tab['fixed_SNR']
        sigma_int=self.scalingRelationDict['sigma_int']
        log_totalErr=np.sqrt(log_y0Err**2 + sigma_int**2)
        Pdet=np.zeros(len(tab))
        for i in range(len(Pdet)):
            Pdet[i]=stats.norm.sf(log_y0Lim[i], loc = log_y0[i], scale = log_totalErr[i])
        tab['Pdet']=Pdet
        return tab
    def generateMockSample(self):
        """Returns a mock catalog (but with no object coordinate information).

        Note:
            This currently uses the average noise level in each tile, rather than the full noise
            distribution in each tile.
        """
        mockTabsList=[]
        for tileName, areaDeg2 in zip(self.tileNames, self.tileAreas):
            mockTab=self.mockSurvey.drawSample(self.y0NoiseAverageDict[tileName], self.scalingRelationDict,
                                               self.Q, wcs = None,
                                               photFilterLabel = self.photFilterLabel, tileName = tileName,
                                               makeNames = False,
                                               SNRLimit = self.SNRCut, applySNRCut = True,
                                               areaDeg2 = areaDeg2*self.mockOversampleFactor,
                                               applyPoissonScatter = True,
                                               applyIntrinsicScatter = True,
                                               applyNoiseScatter = True)
            # drawSample may return None for tiles with no objects
            if mockTab is not None:
                mockTabsList.append(mockTab)
        tab=atpy.vstack(mockTabsList)
        return tab
    def getMassLimit(self, completenessFraction, zBinEdges = None):
        """Return the mass limit (units of 10\ :sup:`14` MSun) as a function of redshift, for the given
        completeness level.

        Args:
            completenessFraction (:obj:`float`): The completeness fraction (a number between 0 and 1) at
                which to return the mass limit.
            zBinEdges (:obj:`np.ndarray`, optional): The redshifts at which the completeness will be
                evaluated. If not given, :attr:`self.mockSurvey.z` will be used.

        Returns:
            Mass completeness by redshift (:obj:`np.ndarray`).
        """
        if zBinEdges is None:
            zBinEdges=self.mockSurvey.zBinEdges
        # calcMassLimit is a module-level helper (defined elsewhere in this file)
        return calcMassLimit(completenessFraction, self.compMz, self.mockSurvey, zBinEdges)
#------------------------------------------------------------------------------------------------------------
def loadAreaMask(tileName, selFnDir):
    """Loads the survey area mask, i.e., the area searched for sources and clusters, for the given tile.

    Args:
        tileName (:obj:`str`): The name of the tile for which the area mask will be loaded.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.

    Returns:
        Map array (2d :obj:`np.ndarray`), WCS object (:obj:`astWCS.WCS`)
    """
    # Thin wrapper around the generic tile loader
    return _loadTile(tileName, selFnDir, "areaMask", extension = 'fits')
#------------------------------------------------------------------------------------------------------------
def loadRMSMap(tileName, selFnDir, photFilter):
    """Loads the RMS (noise) map for the given tile.

    Args:
        tileName (:obj:`str`): The name of the tile for which the RMS (noise) map will be loaded.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        photFilter (:obj:`str`): Name of the reference filter, as specified in, e.g., a :ref:`nemoCommand`
            config file (see :ref:`ConfigReference`).

    Returns:
        Map array (2d :obj:`np.ndarray`), WCS object (:obj:`astWCS.WCS`)
    """
    # Thin wrapper around the generic tile loader; noise maps are stored per reference filter
    return _loadTile(tileName, selFnDir, "RMSMap_%s" % (photFilter), extension = 'fits')
#------------------------------------------------------------------------------------------------------------
def loadMassLimitMap(tileName, diagnosticsDir, z):
    """Loads the mass limit map for the given tile at the given redshift.

    Args:
        tileName (:obj:`str`): The name of the tile for which the mass limit map will be loaded.
        diagnosticsDir (:obj:`str`): Path to the ``diagnostics/`` directory, as produced by the
            :ref:`nemoCommand` command.
        z (:obj:`float`): Redshift at which the mass limit map was made (should match an entry in the
            :ref:`nemoCommand` config file).

    Returns:
        Map array (2d :obj:`np.ndarray`), WCS object (:obj:`astWCS.WCS`)
    """
    # File names encode the redshift with '.' replaced by 'p' (e.g., z0p5)
    return _loadTile(tileName, diagnosticsDir, "massLimitMap_z%s" % (str(z).replace(".", "p")),
                     extension = 'fits')
#------------------------------------------------------------------------------------------------------------
def loadIntersectionMask(tileName, selFnDir, footprint):
    """Loads the intersection mask for the given tile and footprint.

    Args:
        tileName (:obj:`str`): The name of the tile for which the intersection mask will be loaded.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        footprint (:obj:`str`): The name of the footprint for which the intersection will be calculated,
            as defined in the :ref:`nemoCommand` config file (see :ref:`selFnFootprints`).

    Returns:
        Map array (2d :obj:`np.ndarray`), WCS object (:obj:`astWCS.WCS`)
    """
    # Thin wrapper around the generic tile loader; masks are cached per footprint label
    return _loadTile(tileName, selFnDir, "intersect_%s" % (footprint), extension = 'fits')
#------------------------------------------------------------------------------------------------------------
def _loadTile(tileName, baseDir, baseFileName, extension = 'fits'):
    """Generic function to load a tile image from either a multi-extension FITS file, or a file with
    #tileName.fits type extension, whichever is found.

    Args:
        tileName (:obj:`str`): The name of the tile (used as a FITS extension name, if present).
        baseDir (:obj:`str`): Directory in which to look for the file.
        baseFileName (:obj:`str`): File name stem, e.g., "areaMask".
        extension (:obj:`str`, optional): File name extension (default: 'fits').

    Returns:
        Map array (2d :obj:`np.ndarray`), WCS object (:obj:`astWCS.WCS`)
    """
    # After tidyUp is run, this will be a MEF file... during first run, it won't be (written under MPI)
    perTileFileName=baseDir+os.path.sep+"%s#%s.%s" % (baseFileName, tileName, extension)
    if os.path.exists(perTileFileName):
        fileName=perTileFileName
    else:
        fileName=baseDir+os.path.sep+"%s.%s" % (baseFileName, extension)
    with pyfits.open(fileName) as img:
        # If we find the tile - great. If not, fall back to the first extension with data,
        # as the image may be stored compressed under a different extension name.
        if tileName in img:
            extName=tileName
            data=img[extName].data
        else:
            data=None
        if data is None:
            # NOTE: iterating an HDUList yields HDU objects (not extension names), so we
            # inspect each HDU directly here - the previous code indexed img with an HDU,
            # which fails; it also redundantly re-read img[extName].data afterwards.
            for hdu in img:
                if hdu.data is not None:
                    extName=hdu.name
                    data=hdu.data
                    break
        if data is None:
            raise Exception("No extension with image data found in '%s'" % (fileName))
        wcs=astWCS.WCS(img[extName].header, mode = 'pyfits')
    return data, wcs
#------------------------------------------------------------------------------------------------------------
def getTileTotalAreaDeg2(tileName, selFnDir, masksList = None, footprintLabel = None):
    """Returns the total area of the tile given by `tileName` (taking into account masked regions).

    Args:
        tileName (:obj:`str`): The name of the tile for which the area will be calculated.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        masksList (:obj:`list`, optional): A list of paths to FITS-format mask images, which contain pixels
            with values of 1 to indicate valid survey area, 0 otherwise. If given, the area is calculated
            for the intersection of these masks with the survey area mask.
        footprintLabel (:obj:`str`, optional): The name of the footprint for which the intersection will
            be calculated, as defined in the :ref:`nemoCommand` config file (see :ref:`selFnFootprints`).

    Returns:
        Tile area, after masking, in square degrees.
    """
    # Avoid the mutable default argument pitfall - normalise None to a fresh list per call
    if masksList is None:
        masksList=[]
    areaMap, wcs=loadAreaMask(tileName, selFnDir)
    # Per-pixel area in arcmin^2, zeroed outside the survey mask, converted to deg^2
    areaMapSqDeg=(maps.getPixelAreaArcmin2Map(areaMap.shape, wcs)*areaMap)/(60**2)
    totalAreaDeg2=areaMapSqDeg.sum()
    if footprintLabel is not None:
        # Restrict to the intersection with the requested footprint
        intersectMask=makeIntersectionMask(tileName, selFnDir, footprintLabel, masksList = masksList)
        totalAreaDeg2=(areaMapSqDeg*intersectMask).sum()
    return totalAreaDeg2
#------------------------------------------------------------------------------------------------------------
def makeIntersectionMask(tileName, selFnDir, label, masksList = None):
    """Creates an intersection mask between the survey mask, and the mask files given in `masksList`.

    Args:
        tileName (:obj:`str`): The name of the tile for which the intersection mask will be made (or loaded
            from disk, if cached).
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        label (:obj:`str`): The name of the footprint for which the intersection will be calculated,
            as defined in the :ref:`nemoCommand` config file (see :ref:`selFnFootprints`).
        masksList (:obj:`list`, optional): A list of paths to FITS-format mask images, which contain pixels
            with values of 1 to indicate valid survey area, 0 otherwise. If given, the area is calculated
            for the intersection of these masks with the survey area mask.

    Returns:
        Intersection mask (1 = valid area, 0 = outside of intersection area; 2d :obj:`np.ndarray`)

    Note:
        For speed, it is assumed that the declination axis is aligned with the vertical axis in each
        mask image. This routine caches the intersection masks in `selFnDir`.
    """
    # Avoid the mutable default argument pitfall - normalise None to a fresh list per call
    if masksList is None:
        masksList=[]
    # After tidyUp has run, there will be intersection mask MEF files
    intersectFileName=selFnDir+os.path.sep+"intersect_%s.fits" % (label)
    if os.path.exists(intersectFileName):
        intersectMask, wcs=loadIntersectionMask(tileName, selFnDir, label)
        return intersectMask
    # Otherwise, we may have a per-tile intersection mask
    intersectFileName=selFnDir+os.path.sep+"intersect_%s#%s.fits" % (label, tileName)
    if os.path.exists(intersectFileName):
        intersectMask, wcs=loadIntersectionMask(tileName, selFnDir, label)
        return intersectMask
    # Otherwise... make it
    areaMap, wcs=loadAreaMask(tileName, selFnDir)
    RAMin, RAMax, decMin, decMax=wcs.getImageMinMaxWCSCoords()
    if masksList == []:
        raise Exception("didn't find previously cached intersection mask but makeIntersectionMask called with empty masksList")
    print("... creating %s intersection mask (%s) ..." % (label, tileName))
    intersectMask=np.zeros(areaMap.shape)
    # NOTE(review): previous version also pre-computed output coords and RAToX/DecToY
    # interpolators here, but they were unconditionally recomputed inside the loop below
    # before first use - that dead code has been removed.
    for fileName in masksList:
        with pyfits.open(fileName) as maskImg:
            # Use the first image HDU found in the mask file
            for hdu in maskImg:
                if type(hdu) == pyfits.ImageHDU:
                    break
            maskWCS=astWCS.WCS(hdu.header, mode = 'pyfits')
            maskData=hdu.data
        # From sourcery tileDir stuff
        # Sample the mask's WCS along the row/column through the tile centre to build
        # 1d RA -> x and dec -> y mappings (assumes dec axis is vertical - see Note)
        RAc, decc=wcs.getCentreWCSCoords()
        xc, yc=maskWCS.wcs2pix(RAc, decc)
        xc, yc=int(xc), int(yc)
        xIn=np.arange(maskData.shape[1])
        yIn=np.arange(maskData.shape[0])
        inRACoords=np.array(maskWCS.pix2wcs(xIn, [yc]*len(xIn)))
        inDecCoords=np.array(maskWCS.pix2wcs([xc]*len(yIn), yIn))
        inRA=inRACoords[:, 0]
        inDec=inDecCoords[:, 1]
        RAToX=interpolate.interp1d(inRA, xIn, fill_value = 'extrapolate')
        DecToY=interpolate.interp1d(inDec, yIn, fill_value = 'extrapolate')
        # Output (tile) pixel coords along the first row / first column
        # (previously "[0]*np.arange(n)" - a list x ndarray broadcast that happened to
        # give an all-zeros array; "[0]*n" expresses the intent directly)
        outRACoords=np.array(wcs.pix2wcs(np.arange(intersectMask.shape[1]), [0]*intersectMask.shape[1]))
        outDecCoords=np.array(wcs.pix2wcs([0]*intersectMask.shape[0], np.arange(intersectMask.shape[0])))
        outRA=outRACoords[:, 0]
        outDec=outDecCoords[:, 1]
        xIn=np.array(RAToX(outRA), dtype = int)
        yIn=np.array(DecToY(outDec), dtype = int)
        # Keep only output pixels that land inside the mask image
        xMask=np.logical_and(xIn >= 0, xIn < maskData.shape[1])
        yMask=np.logical_and(yIn >= 0, yIn < maskData.shape[0])
        xOut=np.arange(intersectMask.shape[1])
        yOut=np.arange(intersectMask.shape[0])
        for i in yOut[yMask]:
            intersectMask[i][xMask]=maskData[yIn[i], xIn[xMask]]
    # Binarise and cache for next time
    intersectMask=np.array(np.greater(intersectMask, 0.5), dtype = int)
    maps.saveFITS(intersectFileName, intersectMask, wcs, compressed = True, compressionType = 'PLIO_1')
    return intersectMask
#------------------------------------------------------------------------------------------------------------
def getRMSTab(tileName, photFilterLabel, selFnDir, footprintLabel = None):
    """Makes a table containing map area in the tile refered to by `tileName` against RMS (noise level)
    values, compressing the information in the RMS maps. Results are cached under `selFnDir`, and read from
    disk if found.

    Args:
        tileName (:obj:`str`): The name of the tile.
        photFilterLabel (:obj:`str`): Name of the reference filter, as specified in, e.g., a
            :ref:`nemoCommand` config file (see :ref:`ConfigReference`).
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        footprintLabel (:obj:`str`, optional): The name of the footprint in which the calculation will be
            done, as defined in the :ref:`nemoCommand` config file (see :ref:`selFnFootprints`).

    Returns:
        A table of RMS (noise level) values versus area in square degrees (:obj:`astropy.table.Table`).
    """
    # After tidyUp has run, we may have one global RMS table with an extra tileName column we can use
    RMSTabFileName=selFnDir+os.path.sep+"RMSTab.fits"
    if footprintLabel is not None:
        RMSTabFileName=RMSTabFileName.replace(".fits", "_%s.fits" % (footprintLabel))
    if os.path.exists(RMSTabFileName):
        tab=atpy.Table().read(RMSTabFileName)
        # Select just this tile's rows from the global table
        return tab[np.where(tab['tileName'] == tileName)]
    # Otherwise, we may have a per-tile RMS table
    RMSTabFileName=selFnDir+os.path.sep+"RMSTab_%s.fits" % (tileName)
    if footprintLabel != None:
        RMSTabFileName=RMSTabFileName.replace(".fits", "_%s.fits" % (footprintLabel))
    if os.path.exists(RMSTabFileName) == True:
        return atpy.Table().read(RMSTabFileName)
    # Table doesn't exist, so make it...
    print(("... making %s ..." % (RMSTabFileName)))
    RMSMap, wcs=loadRMSMap(tileName, selFnDir, photFilterLabel)
    areaMap, wcs=loadAreaMask(tileName, selFnDir)
    # Per-pixel area in deg^2, zeroed outside the survey mask
    areaMapSqDeg=(maps.getPixelAreaArcmin2Map(areaMap.shape, wcs)*areaMap)/(60**2)
    if footprintLabel != None:
        # Restrict both area and noise maps to the requested footprint
        intersectMask=makeIntersectionMask(tileName, selFnDir, footprintLabel)
        areaMapSqDeg=areaMapSqDeg*intersectMask
        RMSMap=RMSMap*intersectMask
    # Total up the area that has each distinct noise level
    RMSValues=np.unique(RMSMap[np.nonzero(RMSMap)])
    totalAreaDeg2=areaMapSqDeg.sum()
    tileArea=np.zeros(len(RMSValues))
    for i in range(len(RMSValues)):
        tileArea[i]=areaMapSqDeg[np.equal(RMSMap, RMSValues[i])].sum()
    RMSTab=atpy.Table()
    RMSTab.add_column(atpy.Column(tileArea, 'areaDeg2'))
    RMSTab.add_column(atpy.Column(RMSValues, 'y0RMS'))
    # Checks - these should be impossible but we have seen (e.g., when messed up masks)
    tol=0.003
    if abs(RMSTab['areaDeg2'].sum()-areaMapSqDeg.sum()) > tol:
        raise Exception("Mismatch between area map and area in RMSTab for tile '%s'" % (tileName))
    if np.less(RMSTab['areaDeg2'], 0).sum() > 0:
        raise Exception("Negative area in tile '%s' - check your survey mask (and delete/remake tileDir files if necessary)." % (tileName))
    # Cache to disk, stamped with the nemo version that made it
    RMSTab.meta['NEMOVER']=nemo.__version__
    RMSTab.write(RMSTabFileName)
    return RMSTab
#------------------------------------------------------------------------------------------------------------
def downsampleRMSTab(RMSTab, stepSize = 0.001*1e-4):
    """Downsamples `RMSTab` (see :meth:`getRMSTab`) in terms of noise resolution, binning by `stepSize`.

    Args:
        RMSTab (:obj:`astropy.table.Table`): An RMS table, as produced by :meth:`getRMSTab`.
        stepSize (:obj:`float`, optional): Sets the re-binning in terms of y0.

    Returns:
        A table of RMS (noise level) values versus area in square degrees (:obj:`astropy.table.Table`).
    """
    binEdges=np.arange(RMSTab['y0RMS'].min(), RMSTab['y0RMS'].max()+stepSize, stepSize)
    rebinnedNoise=[]
    rebinnedArea=[]
    # Walk adjacent pairs of bin edges; keep only occupied bins
    for loEdge, hiEdge in zip(binEdges[:-1], binEdges[1:]):
        sel=np.logical_and(RMSTab['y0RMS'] >= loEdge, RMSTab['y0RMS'] < hiEdge)
        if sel.sum() > 0:
            # Area-weighted mean noise within the bin, and the bin's total area
            rebinnedNoise.append(np.average(RMSTab['y0RMS'][sel], weights = RMSTab['areaDeg2'][sel]))
            rebinnedArea.append(np.sum(RMSTab['areaDeg2'][sel]))
    downTab=atpy.Table()
    downTab.add_column(atpy.Column(rebinnedNoise, 'y0RMS'))
    downTab.add_column(atpy.Column(rebinnedArea, 'areaDeg2'))
    return downTab
#------------------------------------------------------------------------------------------------------------
def calcTileWeightedAverageNoise(tileName, photFilterLabel, selFnDir, footprintLabel = None):
    """Returns the area-weighted mean ỹ\ :sub:`0` noise level over the given tile.

    Args:
        tileName (:obj:`str`): The name of the tile.
        photFilterLabel (:obj:`str`): Name of the reference filter, as specified in, e.g., a
            :ref:`nemoCommand` config file (see :ref:`ConfigReference`).
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        footprintLabel (:obj:`str`, optional): The name of the footprint in which the calculation will be
            done, as defined in the :ref:`nemoCommand` config file (see :ref:`selFnFootprints`).

    Returns:
        Area-weighted average noise in ỹ\ :sub:`0` (``fixed_y_c`` in Nemo cluster catalogs).

    """
    # Each row of the RMS table gives a noise level and the sky area at that level;
    # the tile average is simply the area-weighted mean of the noise column
    noiseTab=getRMSTab(tileName, photFilterLabel, selFnDir, footprintLabel = footprintLabel)
    noiseLevels=np.array(noiseTab['y0RMS'])
    areaWeights=np.array(noiseTab['areaDeg2'])
    return np.average(noiseLevels, weights = areaWeights)
#------------------------------------------------------------------------------------------------------------
def completenessByFootprint(selFnCollection, mockSurvey, diagnosticsDir, additionalLabel = ""):
    """Write out the average (log\ :sub:`10` mass, z) grid over all survey footprints provided in
    `selFnCollection`, weighted by fraction of total survey area within the footprint. Also prints some
    useful statistics and produces some plots that are written to the `diagnosticsDir` directory.

    Args:
        selFnCollection (:obj:`dict`): A dictionary where each key points to a :class:`SelFn` object that
            describes a given survey footprint. Here, "full" corresponds to the whole survey, while "DES"
            may represent the DES footprint, if it has been defined in the :ref:`nemoCommand` config file
            (see :ref:`selFnFootprints`).
        mockSurvey (:class:`nemo.MockSurvey.MockSurvey`): A :class:`MockSurvey` object, used for halo mass
            function calculations and generating mock catalogs.
        diagnosticsDir (:obj:`str`): The path to the diagnostics directory, where the output from this
            routine will be written.
        additionalLabel (:obj:`str`, optional): This will be added to the output filenames (use this for
            e.g., tagging with the :meth:`calcCompleteness` method used).

    Returns:
        None

    Note:
        Output is written to files named e.g. ``diagnosticsDir/MzCompleteness_label.npz``, where `label`
        is the footprint name (i.e., a key in `selFnCollection`); 'full' is the default (survey-wide
        average).

    """
    # Fixed redshift binning for quoting mass limits, independent of mockSurvey's own z grid
    zBinEdges=np.arange(0.05, 2.1, 0.1)
    zBinCentres=(zBinEdges[:-1]+zBinEdges[1:])/2.
    for footprintLabel in selFnCollection.keys():
        print(">>> Survey-averaged results inside footprint: %s ..." % (footprintLabel))
        selFnDictList=selFnCollection[footprintLabel]
        tileAreas=[]
        compMzCube=[]
        completeness=[]
        for selFnDict in selFnDictList:
            tileAreas.append(selFnDict['tileAreaDeg2'])
            # Per-tile 90% completeness mass limit, evaluated on the binning above
            massLimit_90Complete=calcMassLimit(0.9, selFnDict['compMz'], mockSurvey, zBinEdges = zBinEdges)
            completeness.append(massLimit_90Complete)
            compMzCube.append(selFnDict['compMz'])
        tileAreas=np.array(tileAreas)
        completeness=np.array(completeness)
        # Footprints that do not intersect the map contribute zero area - nothing to average
        if np.sum(tileAreas) == 0:
            print("... no overlapping area with %s ..." % (footprintLabel))
            continue
        fracArea=tileAreas/np.sum(tileAreas)
        compMzCube=np.array(compMzCube)
        # Combine tile-by-tile completeness grids, weighted by each tile's share of footprint area
        compMz_surveyAverage=np.average(compMzCube, axis = 0, weights = fracArea)
        outFileName=diagnosticsDir+os.path.sep+"MzCompleteness_%s%s.npz" % (footprintLabel, additionalLabel)
        np.savez(outFileName, z = mockSurvey.z, log10M500c = mockSurvey.log10M,
                 M500Completeness = compMz_surveyAverage)
        makeMzCompletenessPlot(compMz_surveyAverage, mockSurvey.log10M, mockSurvey.z, footprintLabel,
                               diagnosticsDir+os.path.sep+"MzCompleteness_%s%s.pdf" % (footprintLabel, additionalLabel))
        # 90% mass completeness limit and plots
        massLimit_90Complete=np.average(completeness, axis = 0, weights = fracArea) # agrees with full mass limit map
        makeMassLimitVRedshiftPlot(massLimit_90Complete, zBinCentres, diagnosticsDir+os.path.sep+"completeness90Percent_%s%s.pdf" % (footprintLabel, additionalLabel), title = "footprint: %s" % (footprintLabel))
        # Headline number is quoted over 0.2 < z < 1.0 only
        zMask=np.logical_and(zBinCentres >= 0.2, zBinCentres < 1.0)
        averageMassLimit_90Complete=np.average(massLimit_90Complete[zMask])
        print("... total survey area (after masking) = %.1f sq deg" % (np.sum(tileAreas)))
        print("... survey-averaged 90%% mass completeness limit (z = 0.5) = %.1f x 10^14 MSun" % (massLimit_90Complete[np.argmin(abs(zBinCentres-0.5))]))
        print("... survey-averaged 90%% mass completeness limit (0.2 < z < 1.0) = %.1f x 10^14 MSun" % (averageMassLimit_90Complete))
#------------------------------------------------------------------------------------------------------------
def calcCompletenessContour(compMz, log10M, z, level = 0.90):
    """Calculates a completeness contour on the (log\ :sub:`10` mass, z) plane.

    Args:
        compMz (:obj:`np.ndarray`): Map (2d array) of completeness on the (log\ :sub:`10` mass, z) plane.
        log10M (:obj:`np.ndarray`): One dimensional array of log\ :sub:`10` mass values corresponding to
            `compMz`.
        z (:obj:`np.ndarray`): One dimensional array of redshifts corresponding to `compMz`.
        level (:obj:`float`, optional): Fractional completeness level (e.g., 0.90 is 90% completeness).

    Returns:
        Contour values for the given completeness level (a pair of arrays - redshifts, and log\ :sub:`10`
        mass values).

    """
    # Easiest way to get at contour for plotting later
    # The smoothing may only be necessary if compMz is made by montecarlo method
    contours=plt.contour(z, log10M, compMz.transpose(), levels = [level])
    # NOTE: ContourSet.collections was deprecated in Matplotlib 3.8 and removed in 3.10;
    # allsegs exposes the same (x, y) vertex arrays and exists on old and new versions
    cont_z=[]
    cont_log10M=[]
    for seg in contours.allsegs[0]:
        cont_z=cont_z+seg[:, 0].tolist()
        cont_log10M=cont_log10M+seg[:, 1].tolist()
    plt.close()
    contTab=atpy.Table()
    contTab.add_column(atpy.Column(cont_z, 'z'))
    contTab.add_column(atpy.Column(cont_log10M, 'log10M'))
    contTab.sort('z')
    # Reduce to one mass value per redshift grid point (median over all contour crossings)
    cont_z=[]
    cont_log10M=[]
    for zi in z:
        mask=np.equal(contTab['z'], zi)
        if mask.sum() > 0:
            cont_z.append(zi)
            cont_log10M.append(np.median(contTab['log10M'][mask]))
    cont_z=np.array(cont_z)
    cont_log10M=np.array(cont_log10M)
    return cont_z, cont_log10M
#------------------------------------------------------------------------------------------------------------
def makeMzCompletenessPlot(compMz, log10M, z, title, outFileName):
    """Makes a (log\ :sub:`10` mass, z) completeness plot.

    Args:
        compMz (:obj:`np.ndarray`): Map (2d array) of completeness on the (log\ :sub:`10` mass, z) plane.
        log10M (:obj:`np.ndarray`): One dimensional array of log\ :sub:`10` mass values corresponding to
            `compMz`.
        z (:obj:`np.ndarray`): One dimensional array of redshifts corresponding to `compMz`.
        title (:obj:`str`): Title that will be written at the top of the plot.
        outFileName (:obj:`str`): Path where the plot will be written to a file, with the format being
            determined by the file extension.

    Returns:
        None

    """
    # 90% completeness contour, overplotted as a dotted line further below
    cont_z, cont_log10M=calcCompletenessContour(compMz, log10M, z, level = 0.90)
    # Actual plot
    plotSettings.update_rcParams()
    fig, ax = plt.subplots(figsize=(9.5,6.5))
    # NOTE(review): colorcet is used here but is not among the imports visible in this
    # chunk - presumably imported at module level; confirm
    plt.imshow((compMz*100).transpose(), cmap = colorcet.m_rainbow, origin = 'lower', aspect = 'auto')
    # The image is in pixel coordinates - splines map data values (log10M, z) to pixel
    # positions so that ticks and the contour can be drawn in the right places
    y_tck=interpolate.splrep(log10M, np.arange(log10M.shape[0]))
    plot_log10M=np.linspace(13.5, 15.5, 9)
    coords_log10M=interpolate.splev(plot_log10M, y_tck)
    labels_log10M=[]
    for lm in plot_log10M:
        labels_log10M.append("%.2f" % (lm))
    plt.yticks(interpolate.splev(plot_log10M, y_tck), labels_log10M)
    plt.ylim(coords_log10M.min(), coords_log10M.max())
    plt.ylabel("log$_{10}$ ($M_{\\rm 500c} / M_{\odot}$)")
    x_tck=interpolate.splrep(z, np.arange(z.shape[0]))
    plot_z=np.linspace(0.0, 2.0, 11)
    coords_z=interpolate.splev(plot_z, x_tck)
    labels_z=[]
    for lz in plot_z:
        labels_z.append("%.1f" % (lz))
    plt.xticks(interpolate.splev(plot_z, x_tck), labels_z)
    plt.xlim(coords_z.min(), coords_z.max())
    plt.xlabel("$z$")
    # Draw the 90% completeness contour, also converted to pixel coordinates
    coords_cont_z=interpolate.splev(cont_z, x_tck)
    coords_cont_log10M=interpolate.splev(cont_log10M, y_tck)
    plt.plot(coords_cont_z, coords_cont_log10M, 'k:', lw = 3)
    plt.colorbar(pad = 0.03)
    cbLabel="Completeness (%)"
    plt.figtext(0.96, 0.52, cbLabel, ha="center", va="center", family = "sans-serif", rotation = "vertical")
    plt.tight_layout()
    # 'full' denotes the whole survey - no title needed in that case
    if title != 'full':
        plt.title(title)
    plt.savefig(outFileName)
    plt.close()
#------------------------------------------------------------------------------------------------------------
def calcMassLimit(completenessFraction, compMz, mockSurvey, zBinEdges = []):
    """Given a completeness (log\ :sub:`10` mass, z) grid as made by :meth:`calcCompleteness`, return the
    mass limit (units of 10\ :sup:`14` M\ :sub:`Sun`) as a function of redshift at the given completeness
    level. By default, the same binning as the given `mockSurvey` object is used - this can be overridden by
    giving `zBinEdges`.

    Args:
        completenessFraction (:obj:`float`): Fractional completeness level (e.g., 0.90 is 90% completeness).
        compMz (:obj:`np.ndarray`): Map (2d array) of completeness on the (log\ :sub:`10` mass, z) plane.
        mockSurvey (:class:`nemo.MockSurvey.MockSurvey`): A :class:`MockSurvey` object, used for halo mass
            function calculations and generating mock catalogs.
        zBinEdges (:obj:`np.ndarray`, optional): Redshifts at which the mass limit is evaluated.

    Returns:
        The mass limit (units of 10\ :sup:`14` M\ :sub:`Sun`, 1d array) at each requested redshift.

    """
    # For each redshift row, pick the log10M column whose completeness is closest to the
    # requested fraction, then convert to mass in units of 1e14 MSun
    massLimit=np.power(10, mockSurvey.log10M[np.argmin(abs(compMz-completenessFraction), axis = 1)])/1e14
    # NOTE: test the length rather than `zBinEdges != []` - the latter triggers fragile
    # elementwise ndarray-vs-list comparison when an array is passed in
    if len(zBinEdges) > 0:
        # Re-bin onto the requested redshift binning (mean of grid points in each bin)
        binnedMassLimit=np.zeros(len(zBinEdges)-1)
        for i in range(len(zBinEdges)-1):
            inBin=np.logical_and(mockSurvey.z > zBinEdges[i], mockSurvey.z <= zBinEdges[i+1])
            binnedMassLimit[i]=np.average(massLimit[inBin])
        massLimit=binnedMassLimit
    return massLimit
#------------------------------------------------------------------------------------------------------------
def calcCompleteness(RMSTab, SNRCut, tileName, mockSurvey, scalingRelationDict, QFit,
                     plotFileName = None, z = None, method = "fast", numDraws = 2000000, numIterations = 100):
    """Calculate completeness as a function of (log\ :sub:`10` mass, z) on the mockSurvey grid at the given
    `SNRCut`. Intrinsic scatter in the scaling relation is taken into account.

    Args:
        RMSTab (:obj:`astropy.table.Table`): Table containing noise level by area, as returned by
            :meth:`getRMSTab`.
        SNRCut (:obj:`float`): Completeness will be calculated for objects relative to this cut in
            ``fixed_SNR``.
        tileName (:obj:`str`): Name of the map tile.
        mockSurvey (:class:`nemo.MockSurvey.MockSurvey`): A :class:`MockSurvey` object, used for halo mass
            function calculations and generating mock catalogs.
        scalingRelationDict (:obj:`dict`): A dictionary of scaling relation parameters (see example Nemo
            config files for the format).
        QFit (:class:`nemo.signals.QFit`): An object for calculating the filter mismatch function, referred
            to as `Q` in the ACT papers from `Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_
            onwards.
        plotFileName (:obj:`str`, optional): If given, write a plot showing 90% completeness limit to this
            path.
        z (:obj:`np.ndarray`, optional): Redshifts at which the completeness calculation will be performed.
            Alternatively, a single redshift can be specified as a :obj:`float` instead.
        method (:obj:`str`, optional): Two methods for doing the calculation are available: "fast" (applies
            the measurement errors and scatter to 'true' ỹ\ :sub:`0` values on a grid) and "montecarlo" (uses
            samples drawn from a mock catalog, generated on the fly, to estimate the completeness). Both
            methods should give consistent results.
        numDraws (:obj:`int`, optional): Used by the "montecarlo" method - sets the number of draws from the
            halo mass function on each iteration.
        numIterations (:obj:`int`, optional): Used by the "montecarlo" method - sets the number of iterations,
            i.e., the number of mock catalogs from which the completeness is estimated.

    Returns:
        A 2d array of (log\ :sub:`10` mass, z) completeness.

    """
    # If a redshift is given, restrict the calculation to the closest slice of the
    # mockSurvey z grid; otherwise use the whole grid
    if z is not None:
        zIndex=np.argmin(abs(mockSurvey.z-z))
        zRange=mockSurvey.z[zIndex:zIndex+1]
    else:
        zRange=mockSurvey.z
    trueMassCol="true_M%d%s" % (mockSurvey.delta, mockSurvey.rhoType[0])
    if method == "montecarlo":
        # Need area-weighted average noise in the tile - we could change this to use entire RMS map instead
        areaWeights=RMSTab['areaDeg2'].data/RMSTab['areaDeg2'].data.sum()
        if areaWeights.sum() > 0:
            y0Noise=np.average(RMSTab['y0RMS'].data, weights = areaWeights)
            # Monte-carlo sims approach: slow - but can use to verify the other approach below
            halfBinWidth=(mockSurvey.log10M[1]-mockSurvey.log10M[0])/2.0
            binEdges_log10M=(mockSurvey.log10M-halfBinWidth).tolist()+[np.max(mockSurvey.log10M)+halfBinWidth]
            halfBinWidth=(mockSurvey.z[1]-mockSurvey.z[0])/2.0
            binEdges_z=(zRange-halfBinWidth).tolist()+[np.max(zRange)+halfBinWidth]
            allMz=np.zeros([mockSurvey.clusterCount.shape[1], mockSurvey.clusterCount.shape[0]])
            detMz=np.zeros([mockSurvey.clusterCount.shape[1], mockSurvey.clusterCount.shape[0]])
            for i in range(numIterations):
                tab=mockSurvey.drawSample(y0Noise, scalingRelationDict, QFit, tileName = tileName,
                                          SNRLimit = SNRCut, applySNRCut = False, z = z, numDraws = numDraws)
                # Histogram all draws and the detected subset on the same (log10M, z) binning
                allMz=allMz+np.histogram2d(np.log10(tab[trueMassCol]*1e14), tab['redshift'], [binEdges_log10M, binEdges_z])[0]
                detMask=np.greater(tab['fixed_y_c']*1e-4, y0Noise*SNRCut)
                detMz=detMz+np.histogram2d(np.log10(tab[trueMassCol][detMask]*1e14), tab['redshift'][detMask], [binEdges_log10M, binEdges_z])[0]
            # Completeness = detected fraction per cell; cells with no draws default to 1
            mask=np.not_equal(allMz, 0)
            compMz=np.ones(detMz.shape)
            compMz[mask]=detMz[mask]/allMz[mask]
            compMz=compMz.transpose()
        else:
            # Zero usable survey area (e.g., fully masked tile) - nothing can be detected
            compMz=np.zeros([mockSurvey.clusterCount.shape[0], mockSurvey.clusterCount.shape[1]])
    elif method == "fast":
        # Using full noise distribution, weighted by fraction of area
        # NOTE: removed recMassBias and div parameters
        tenToA0, B0, Mpivot, sigma_int=[scalingRelationDict['tenToA0'], scalingRelationDict['B0'],
                                        scalingRelationDict['Mpivot'], scalingRelationDict['sigma_int']]
        # Expected (noiseless) y0 for every (z, log10M) grid cell from the scaling relation
        y0Grid=np.zeros([zRange.shape[0], mockSurvey.clusterCount.shape[1]])
        for i in range(len(zRange)):
            zk=zRange[i]
            k=np.argmin(abs(mockSurvey.z-zk))
            theta500s_zk=interpolate.splev(mockSurvey.log10M, mockSurvey.theta500Splines[k])
            Qs_zk=QFit.getQ(theta500s_zk, z = zk, tileName = tileName)
            fRels_zk=interpolate.splev(mockSurvey.log10M, mockSurvey.fRelSplines[k])
            true_y0s_zk=tenToA0*np.power(mockSurvey.Ez[k], 2)*np.power(np.power(10, mockSurvey.log10M)/Mpivot, 1+B0)*Qs_zk*fRels_zk
            y0Grid[i]=true_y0s_zk
        # For some cosmological parameters, we can still get the odd -ve y0
        y0Grid[y0Grid <= 0] = 1e-9
        # Calculate completeness using area-weighted average
        # NOTE: RMSTab that is fed in here can be downsampled in noise resolution for speed
        areaWeights=RMSTab['areaDeg2']/RMSTab['areaDeg2'].sum()
        log_y0Lim=np.log(SNRCut*RMSTab['y0RMS'])
        log_y0=np.log(y0Grid)
        compMz=np.zeros(log_y0.shape)
        for i in range(len(RMSTab)):
            # Fractional y0 measurement error ~ 1/SNR, floored at 1/SNRCut below the cut
            SNRGrid=y0Grid/RMSTab['y0RMS'][i]
            log_y0Err=1/SNRGrid
            log_y0Err[SNRGrid < SNRCut]=1/SNRCut
            # Total scatter in ln(y0): measurement error and intrinsic scatter in quadrature
            log_totalErr=np.sqrt(log_y0Err**2 + sigma_int**2)
            # Survival function gives P(observed y0 > selection limit) per grid cell
            compMz=compMz+stats.norm.sf(log_y0Lim[i], loc = log_y0, scale = log_totalErr)*areaWeights[i]
    else:
        # Error message now matches the accepted method names checked above
        raise Exception("calcCompleteness only has 'fast' and 'montecarlo' methods available")
    if plotFileName is not None:
        # Calculate 90% completeness as function of z
        zBinEdges=np.arange(0.05, 2.1, 0.1)
        zBinCentres=(zBinEdges[:-1]+zBinEdges[1:])/2.
        massLimit_90Complete=calcMassLimit(0.9, compMz, mockSurvey, zBinEdges = zBinEdges)
        zMask=np.logical_and(zBinCentres >= 0.2, zBinCentres < 1.0)
        averageMassLimit_90Complete=np.average(massLimit_90Complete[zMask])
        makeMassLimitVRedshiftPlot(massLimit_90Complete, zBinCentres, plotFileName,
                                   title = "%s: $M_{\\rm %d%s}$ / $10^{14}$ M$_{\odot}$ > %.2f (0.2 < $z$ < 1)" % (tileName,
                                   mockSurvey.delta, mockSurvey.rhoType[0], averageMassLimit_90Complete))
    return compMz
#------------------------------------------------------------------------------------------------------------
def makeMassLimitMap(SNRCut, z, tileName, photFilterLabel, mockSurvey, scalingRelationDict, QFit,
                     diagnosticsDir, selFnDir):
    """Makes a map of 90% mass completeness (for now, this fraction is fixed and not adjustable). The map
    is written as a FITS file in the `diagnosticsDir` directory.

    Args:
        SNRCut (:obj:`float`): Completeness will be calculated for objects relative to this cut in
            ``fixed_SNR``.
        z (:obj:`float`): The redshift at which the mass limit map will be produced.
        tileName (:obj:`str`): The name of the tile.
        photFilterLabel (:obj:`str`): Name of the reference filter, as specified in, e.g., a
            :ref:`nemoCommand` config file (see :ref:`ConfigReference`).
        mockSurvey (:class:`nemo.MockSurvey.MockSurvey`): A :class:`MockSurvey` object, used for halo mass function
            calculations and generating mock catalogs.
        scalingRelationDict (:obj:`dict`): A dictionary of scaling relation parameters (see example Nemo
            config files for the format).
        QFit (:class:`nemo.signals.QFit`): An object for calculating the filter mismatch function, referred
            to as `Q` in the ACT papers from `Hasselfield et al. (2013) <http://adsabs.harvard.edu/abs/2013JCAP...07..008H>`_
            onwards.
        diagnosticsDir (:obj:`str`): Path to the ``diagnostics/`` directory, as produced by the
            :ref:`nemoCommand` command.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.

    Returns:
        None

    """
    RMSMap, wcs=loadRMSMap(tileName, selFnDir, photFilterLabel)
    RMSTab=getRMSTab(tileName, photFilterLabel, selFnDir)
    # Downsampling for speed?
    # If this is done, also need the bin edges and then need to modify code below accordingly
    #RMSTab=downsampleRMSTab(RMSTab)
    # Fill in blocks in map for each RMS value
    outFileName=diagnosticsDir+os.path.sep+tileName+os.path.sep+"massLimitMap_z%s#%s.fits" % (str(z).replace(".", "p"), tileName)
    # Skip work if the map was already made on a previous run
    if os.path.exists(outFileName) == False:
        massLimMap=np.zeros(RMSMap.shape)
        count=0
        # NOTE(review): `time` is used here but is not among the imports visible in this
        # chunk - presumably imported at module level; confirm
        t0=time.time()
        for y0Noise in RMSTab['y0RMS']:
            count=count+1
            #print(("... %d/%d (%.3e) ..." % (count, len(RMSTab), y0Noise)))
            # Completeness grid for this single noise level, at the requested redshift only
            compMz=calcCompleteness(RMSTab[np.where(RMSTab['y0RMS'] == y0Noise)], SNRCut, tileName, mockSurvey,
                                    scalingRelationDict, QFit, z = z)
            # All pixels at this noise level share the same 90% completeness mass limit
            massLimMap[RMSMap == y0Noise]=mockSurvey.log10M[np.argmin(abs(compMz-0.9))]
        t1=time.time()
        # Convert from log10(M) to mass in units of 1e14 MSun; masked pixels stay zero
        mask=np.not_equal(massLimMap, 0)
        massLimMap[mask]=np.power(10, massLimMap[mask])/1e14
        maps.saveFITS(outFileName, massLimMap, wcs, compressed = True)
#------------------------------------------------------------------------------------------------------------
def makeMassLimitVRedshiftPlot(massLimit_90Complete, zRange, outFileName, title = None):
    """Plot the 90%-completeness mass limit as a function of redshift.

    A smooth curve is drawn through the binned mass limits using spline interpolation,
    with the binned values themselves overplotted as points.

    Args:
        massLimit_90Complete (:obj:`np.ndarray`): Mass limit at each redshift, corresponding to 90%
            completeness.
        zRange (:obj:`np.ndarray`): Redshifts at which mass completeness was evaluated.
        outFileName (:obj:`str`): Path to which the plot file will be written. The format is determined by
            the file extension.
        title (:obj:`str`): The title that will be written at the top of the plot.

    Returns:
        None

    """
    plotSettings.update_rcParams()
    plt.figure(figsize=(9,6.5))
    ax=plt.axes([0.10, 0.11, 0.87, 0.86])
    if title is not None:
        plt.figtext(0.15, 0.2, title, ha="left", va="center")
    # Spline through the binned limits gives a smooth curve for display
    limitSpline=interpolate.splrep(zRange, massLimit_90Complete)
    fineZ=np.linspace(0, 2, 100)
    plt.plot(fineZ, interpolate.splev(fineZ, limitSpline), 'k-')
    plt.plot(zRange, massLimit_90Complete, 'D', ms = 8)
    plt.xlabel("$z$")
    plt.ylim(0.5, 8)
    plt.xticks(np.arange(0, 2.2, 0.2))
    plt.xlim(0, 2)
    plt.ylabel("$M_{\\rm 500c}$ (10$^{14}$ M$_{\odot}$) [90% complete]")
    plt.savefig(outFileName)
    # Also write a PNG version alongside any PDF output
    if ".pdf" in outFileName:
        plt.savefig(outFileName.replace(".pdf", ".png"))
    plt.close()
#------------------------------------------------------------------------------------------------------------
def cumulativeAreaMassLimitPlot(z, diagnosticsDir, selFnDir, tileNames):
    """Makes cumulative plots of the 90%-completeness mass limit versus survey area, at the given redshift.

    Args:
        z (:obj:`float`): The redshift at which the mass completeness is evaluated.
        diagnosticsDir (:obj:`str`): Path to the ``diagnostics/`` directory, as produced by the
            :ref:`nemoCommand` command.
        selFnDir (:obj:`str`): Path to a ``selFn/`` directory, as produced by the :ref:`nemoCommand`
            command. This directory contains information such as the survey noise maps, area masks,
            and information needed to construct the filter mismatch function, `Q`, used in mass
            modeling.
        tileNames (:obj:`list`): List of tiles to use.

    Note:
        This routine writes plots into the `diagnosticsDir` directory (one for the cumulative mass
        completeness limit over the whole survey area, and one for the deepest 20% only).

    Returns:
        None

    """
    # NOTE: We truncate mass limits to 0.1 level here - differences beyond that are due to compression
    allLimits=[]
    allAreas=[]
    for tileName in tileNames:
        massLimMap, wcs=loadMassLimitMap(tileName, diagnosticsDir+os.path.sep+tileName, z)
        areaMap, wcs=loadAreaMask(tileName, selFnDir)
        # Per-pixel area in square degrees, zeroed outside the survey mask
        areaMapSqDeg=(maps.getPixelAreaArcmin2Map(areaMap.shape, wcs)*areaMap)/(60**2)
        limits=np.unique(massLimMap).tolist()
        # Zero marks masked pixels in the mass limit map, not a real limit
        if limits[0] == 0:
            limits=limits[1:]
        areas=[]
        truncLim=[]
        for l in limits:
            truncLim.append(round(l, 1))
            #truncLim.append(float(Decimal(l).quantize(Decimal('0.1'))))
            areas.append(areaMapSqDeg[np.where(massLimMap == l)].sum())
        allLimits=allLimits+truncLim
        allAreas=allAreas+areas
    # Reduce redundant mass limits
    allLimits=np.array(allLimits)
    allAreas=np.array(allAreas)
    uniqLimits=np.unique(allLimits)
    uniqAreas=[]
    for u in uniqLimits:
        uniqAreas.append(allAreas[allLimits == u].sum())
    uniqAreas=np.array(uniqAreas)
    tab=atpy.Table()
    tab.add_column(atpy.Column(uniqLimits, 'MLim'))
    tab.add_column(atpy.Column(uniqAreas, 'areaDeg2'))
    tab.sort('MLim')
    plotSettings.update_rcParams()
    # Full survey plot
    plt.figure(figsize=(9,6.5))
    ax=plt.axes([0.155, 0.12, 0.82, 0.86])
    plt.minorticks_on()
    plt.plot(tab['MLim'], np.cumsum(tab['areaDeg2']), 'k-')
    #plt.plot(plotMRange, plotCumArea, 'k-')
    plt.ylabel("survey area < $M_{\\rm 500c}$ limit (deg$^2$)")
    plt.xlabel("$M_{\\rm 500c}$ (10$^{14}$ M$_{\odot}$) [90% complete]")
    labelStr="total survey area = %.0f deg$^2$" % (np.cumsum(tab['areaDeg2']).max())
    plt.ylim(0.0, 1.2*np.cumsum(tab['areaDeg2']).max())
    plt.xlim(tab['MLim'].min(), tab['MLim'].max())
    plt.figtext(0.2, 0.9, labelStr, ha="left", va="center")
    plt.savefig(diagnosticsDir+os.path.sep+"cumulativeArea_massLimit_z%s.pdf" % (str(z).replace(".", "p")))
    plt.savefig(diagnosticsDir+os.path.sep+"cumulativeArea_massLimit_z%s.png" % (str(z).replace(".", "p")))
    plt.close()
    # Deepest 20% - we show a bit beyond this
    totalAreaDeg2=tab['areaDeg2'].sum()
    # 0.25 cutoff (not 0.20) so the plot extends a little past the deepest 20%
    deepTab=tab[np.where(np.cumsum(tab['areaDeg2']) < 0.25 * totalAreaDeg2)]
    plt.figure(figsize=(9,6.5))
    ax=plt.axes([0.155, 0.12, 0.82, 0.86])
    plt.minorticks_on()
    # NOTE(review): the full `tab` is plotted here, with the axis limits clipped to the
    # deepTab range below - presumably intentional ("show a bit beyond"); confirm
    plt.plot(tab['MLim'], np.cumsum(tab['areaDeg2']), 'k-')
    plt.ylabel("survey area < $M_{\\rm 500c}$ limit (deg$^2$)")
    plt.xlabel("$M_{\\rm 500c}$ (10$^{14}$ M$_{\odot}$) [90% complete]")
    labelStr="area of deepest 20%% = %.0f deg$^2$" % (0.2 * totalAreaDeg2)
    plt.ylim(0.0, 1.2*np.cumsum(deepTab['areaDeg2']).max())
    plt.xlim(deepTab['MLim'].min(), deepTab['MLim'].max())
    plt.figtext(0.2, 0.9, labelStr, ha="left", va="center")
    plt.savefig(diagnosticsDir+os.path.sep+"cumulativeArea_massLimit_z%s_deepest20Percent.pdf" % (str(z).replace(".", "p")))
    plt.savefig(diagnosticsDir+os.path.sep+"cumulativeArea_massLimit_z%s_deepest20Percent.png" % (str(z).replace(".", "p")))
    plt.close()
#------------------------------------------------------------------------------------------------------------
def makeFullSurveyMassLimitMapPlot(z, config):
    """Makes full area mass limit map by reprojecting the tile mass limit maps onto the full map pixelisation.
    Creates both a plot and a FITS image.

    Args:
        z (:obj:`float`): The redshift at which the mass completeness is evaluated.
        config (:class:`nemo.startUp.NemoConfig`): A NemoConfig object.

    Note:
        Output is written to the `diagnostics` directory, as produced by the :ref:`nemoCommand` command.

    Returns:
        None

    """
    # Fall back to a default quick-look downsampling scale if not configured
    if 'makeQuickLookMaps' not in config.parDict.keys():
        config.quicklookScale=0.25
        config.quicklookShape, config.quicklookWCS=maps.shrinkWCS(config.origShape, config.origWCS, config.quicklookScale)
    outFileName=config.diagnosticsDir+os.path.sep+"reproj_massLimitMap_z%s.fits" % (str(z).replace(".", "p"))
    # Stitch the per-tile mass limit maps into one downsampled full-survey image
    maps.stitchTilesQuickLook(config.diagnosticsDir+os.path.sep+"*"+os.path.sep+"massLimitMap_z%s#*.fits" % (str(z).replace(".", "p")),
                              outFileName, config.quicklookWCS, config.quicklookShape,
                              fluxRescale = config.quicklookScale)
    # Make plot
    if os.path.exists(outFileName) == True:
        # NOTE(review): pyfits and colorcet are used here but are not among the imports
        # visible in this chunk - presumably imported at module level; confirm
        with pyfits.open(outFileName) as img:
            # Compressed FITS files can have an empty primary HDU - take data from the
            # first HDU that actually has an image
            for hdu in img:
                if hdu.shape != ():
                    reproj=np.nan_to_num(hdu.data)
            # Mask empty/near-zero pixels so they render as blank rather than zero mass
            reproj=np.ma.masked_where(reproj <1e-6, reproj)
            wcs=astWCS.WCS(hdu.header, mode = 'pyfits')
            plotSettings.update_rcParams()
            fontSize=20.0
            figSize=(16, 5.7)
            axesLabels="sexagesimal"
            axes=[0.08,0.15,0.91,0.88]
            # Colour stretch: 2 up to the median non-zero mass limit + 2
            cutLevels=[2, int(np.median(reproj[np.nonzero(reproj)]))+2]
            colorMapName=colorcet.m_rainbow
            fig=plt.figure(figsize = figSize)
            p=astPlots.ImagePlot(reproj, wcs, cutLevels = cutLevels, title = None, axes = axes,
                                 axesLabels = axesLabels, colorMapName = colorMapName, axesFontFamily = 'sans-serif',
                                 RATickSteps = {'deg': 30.0, 'unit': 'h'}, decTickSteps = {'deg': 20.0, 'unit': 'd'},
                                 axesFontSize = fontSize)
            cbLabel="$M_{\\rm 500c}$ (10$^{14}$ M$_{\odot}$) [90% complete]"
            cbShrink=0.7
            cbAspect=40
            cb=plt.colorbar(p.axes.images[0], ax = p.axes, orientation="horizontal", fraction = 0.05, pad = 0.18,
                            shrink = cbShrink, aspect = cbAspect)
            plt.figtext(0.53, 0.04, cbLabel, size = 20, ha="center", va="center", fontsize = fontSize, family = "sans-serif")
            plt.savefig(outFileName.replace(".fits", ".pdf"), dpi = 300)
            plt.savefig(outFileName.replace(".fits", ".png"), dpi = 300)
            plt.close()
#------------------------------------------------------------------------------------------------------------
def tidyUp(config):
    """Tidies up the `selFn` directory, constructing multi-extension FITS files from individual tile images
    and tables, deleting the individual tile files when complete. This routine also copies the given Nemo
    configuration into the `selFn` directory, and writes a plain text file that lists the tile names, and the
    areas covered by each tile.

    Args:
        config (:class:`nemo.startUp.NemoConfig`): A NemoConfig object.

    Returns:
        None

    """
    shutil.copy(config.configFileName, config.selFnDir+os.path.sep+"config.yml")

    # Delete single tile Q fits (combined Q file should be made before this)
    if 'photFilter' in config.parDict.keys() and config.parDict['photFilter'] is not None and config.parDict['fitQ'] == True:
        for tileName in config.allTileNames:
            QFileName=config.selFnDir+os.path.sep+"QFit#%s.fits" % (tileName)
            if os.path.exists(QFileName):
                os.remove(QFileName)

    # Make MEFs
    MEFsToBuild=["areaMask", "RMSMap_%s" % (config.parDict['photFilter'])]
    compressionTypes=["PLIO_1", "RICE_1"]
    # NOTE: np.float was removed in NumPy 1.24; np.float64 is equivalent (the old alias
    # pointed at the 64-bit builtin float), so output files are unchanged
    dtypes=[np.int32, np.float64]
    if 'selFnFootprints' in config.parDict.keys():
        for footprintDict in config.parDict['selFnFootprints']:
            MEFsToBuild.append("intersect_%s" % footprintDict['label'])
            compressionTypes.append("PLIO_1")
            dtypes.append(np.int32)
    for MEFBaseName, compressionType, dtype in zip(MEFsToBuild, compressionTypes, dtypes):
        outFileName=config.selFnDir+os.path.sep+MEFBaseName+".fits"
        newImg=pyfits.HDUList()
        filesToRemove=[]
        for tileName in config.allTileNames:
            fileName=config.selFnDir+os.path.sep+MEFBaseName+"#"+tileName+".fits"
            if os.path.exists(fileName):
                with pyfits.open(fileName) as img:
                    # Use the first extension that actually carries data (compressed FITS
                    # files can have an empty primary HDU)
                    for extName in img:
                        if img[extName].data is not None:
                            break
                    hdu=pyfits.CompImageHDU(np.array(img[extName].data, dtype = dtype), img[extName].header,
                                            name = tileName, compression_type = compressionType)
                    filesToRemove.append(fileName)
                newImg.append(hdu)
        if len(newImg) > 0:
            newImg.writeto(outFileName, overwrite = True)
            # Only remove per-tile files after the combined file was written successfully
            for f in filesToRemove:
                os.remove(f)

    # Combine RMSTab files (we can downsample further later if needed)
    # We add a column for the tileName just in case want to select on this later
    footprints=['']
    if 'selFnFootprints' in config.parDict.keys():
        for footprintDict in config.parDict['selFnFootprints']:
            footprints.append(footprintDict['label'])
    # Width of the fixed-length string column that will hold tile names
    strLen=0
    for tileName in config.allTileNames:
        if len(tileName) > strLen:
            strLen=len(tileName)
    for footprint in footprints:
        if footprint != "":
            label="_"+footprint
        else:
            label=""
        outFileName=config.selFnDir+os.path.sep+"RMSTab"+label+".fits"
        tabList=[]
        filesToRemove=[]
        for tileName in config.allTileNames:
            fileName=config.selFnDir+os.path.sep+"RMSTab_"+tileName+label+".fits"
            if os.path.exists(fileName):
                tileTab=atpy.Table().read(fileName)
                tileTab.add_column(atpy.Column(np.array([tileName]*len(tileTab), dtype = '<U%d' % (strLen)),
                                               "tileName"))
                tabList.append(tileTab)
                filesToRemove.append(fileName)
        if len(tabList) > 0:
            tab=atpy.vstack(tabList)
            tab.sort('y0RMS')
            tab.meta['NEMOVER']=nemo.__version__
            tab.write(outFileName, overwrite = True)
            for f in filesToRemove:
                os.remove(f)

    # Write a table of tile areas for those that want it
    with open(config.selFnDir+os.path.sep+"tileAreas.txt", "w") as outFile:
        outFile.write("#tileName areaDeg2\n")
        for tileName in config.allTileNames:
            tileAreaDeg2=getTileTotalAreaDeg2(tileName, config.selFnDir)
            outFile.write("%s %.6f\n" % (tileName, tileAreaDeg2))
| [
37811,
198,
198,
1212,
8265,
4909,
4899,
329,
26019,
262,
1224,
43205,
286,
13946,
8405,
290,
9041,
1989,
20680,
13,
198,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
8271,
198,
11748,
15095,
198,
11748,
299,
32152,
... | 2.218726 | 33,151 |
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from enum import IntEnum
from gridWorld import StandardGrid
from gridWorld import NegativeGrid
from gridWorld import GridWorldMove
from gridWorld import RandomPolicy
from printHelpers import print_values
from printHelpers import print_policy
from printHelpers import print_policy_beautifly
if __name__ == '__main__':
##test_td0_approximation_method(itterations = 50000)
test_td0_sarsa_approximated()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
33829,
1330,
2558,
4834,
388,
198,
6738,
10706,
10603,
1330,
8997,
41339,
198,
6... | 3.441379 | 145 |
import torch
from torchvision.utils import make_grid
import numpy as np
from base import BaseTrainer
from models import Generator, Discriminator
from losses import *
from data_loaders import CartoonDataLoader, DiffAugment
from utils import MetricTracker
| [
11748,
28034,
198,
6738,
28034,
10178,
13,
26791,
1330,
787,
62,
25928,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2779,
1330,
7308,
2898,
10613,
198,
6738,
4981,
1330,
35986,
11,
8444,
3036,
20900,
198,
6738,
9089,
1330,
1635,
198,
... | 4.112903 | 62 |
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2021-present VincentRPS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
"""The V4 Voice Gateway Impl"""
import asyncio
import json
import logging
import struct
import time
import aiohttp
from ..state import ConnectionState
# Module-level logger named after this module, per stdlib convention.
_log = logging.getLogger(__name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
3075,
400,
261,
25,
3303,
62,
5715,
28,
18,
198,
2,
15069,
357,
66,
8,
33448,
12,
25579,
18653,
49,
3705,
198,
198,
2,
2448,
3411,
318,
29376,
7520,
11,
1479,
2... | 3.708333 | 360 |
import taichi as ti
@ti.all_archs
@ti.all_archs
@ti.all_archs
| [
11748,
20486,
16590,
355,
46668,
628,
198,
31,
20259,
13,
439,
62,
34592,
628,
198,
31,
20259,
13,
439,
62,
34592,
628,
198,
31,
20259,
13,
439,
62,
34592,
198
] | 2.266667 | 30 |
import numpy as np
import datetime
from matplotlib.dates import num2date, date2num
def get_sunrise_and_sunset_times_utc(t_start, t_end):
    """
    Return all sunrise and sunset times between `t_start` and `t_end` at BCO as
    `(time, kind)` where `time` will be a `np.datetime64` and kind is either
    `"sunrise"` or `"sunset"`

    # sunrise and sunset (local time): 0630, 1750
    # sunrise and sunset (UTC): 1030, 2150
    """
    dt_start = npdt64_to_dt(t_start)
    dt_end = npdt64_to_dt(t_end)

    # Walk day by day from midnight of the first day; each day's events are
    # fixed offsets from midnight (BCO sunrise 10:30 UTC, sunset 21:50 UTC).
    dt_current = datetime.datetime(
        year=dt_start.year, month=dt_start.month, day=dt_start.day
    )

    while dt_current < dt_end:
        dt_sunrise = dt_current + datetime.timedelta(hours=10, minutes=30)
        dt_sunset = dt_current + datetime.timedelta(hours=21, minutes=50)
        # Compare against dt_start (not midnight dt_current) so events on the
        # first day that fall before `t_start` are excluded, as documented.
        if dt_start < dt_sunrise < dt_end:
            yield (np.datetime64(dt_sunrise), "sunrise")
        if dt_start < dt_sunset < dt_end:
            yield (np.datetime64(dt_sunset), "sunset")
        dt_current += datetime.timedelta(days=1)
def annotate_with_sunrise_and_sunset(ax, da_time):
    """
    Mark every sunrise and sunset inside the time span of `da_time` on the
    x-axis of `ax` with a dashed red vertical line and a red text label.
    """
    t_min = da_time.min().values
    t_max = da_time.max().values

    for (t, kind) in get_sunrise_and_sunset_times_utc(t_min, t_max):
        ax.axvline(t, color="red", linestyle="--")
        # Place the label at 80% of the current axis height, nudged 2 minutes
        # to the right of the event line.
        y_top = ax.get_ylim()[1]
        label = ax.text(
            t + np.timedelta64(2, "m"),
            0.8 * y_top,
            kind,
            color="red",
            fontsize=14,
            ha="center",
        )
        label.set_bbox(dict(facecolor="white", alpha=0.8, edgecolor="grey"))
# NOTE(review): these two stacked ``np.vectorize`` decorators apply to
# ``add_approx_distance_axis`` below, which returns a matplotlib axis object.
# Vectorizing it (twice) would break any real call -- this looks like an
# extraction artefact; they presumably belonged to the
# ``time_to_distance``/``distance_to_time`` helpers referenced below. Confirm
# against the original module.
@np.vectorize
@np.vectorize
def add_approx_distance_axis(ax, t0, posn=1.2, v=10.0, units="m"):
    """
    Add extra x-axis to axes `ax` at position `posn` (can either be a float
    representing the position in figure coordinates, or "top" or "bottom") with
    constant velocity `v` in units `units` using reference time `t0` (expected
    to a `np.datetime64`)
    """
    # `s` is the metres-per-unit scale factor. It is computed but never used
    # below, and `t0`/`v` are likewise unused -- presumably the
    # ``time_to_distance``/``distance_to_time`` helpers (defined elsewhere in
    # the file) are meant to close over them; TODO confirm.
    if units == "m":
        s = 1.0
    elif units == "km":
        s = 1000.0
    else:
        raise NotImplementedError(units)

    ax2 = ax.secondary_xaxis(posn, functions=(time_to_distance, distance_to_time))
    ax2.set_xlabel(f"approximate distance [{units}]")

    return ax2
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4818,
8079,
198,
6738,
2603,
29487,
8019,
13,
19581,
1330,
997,
17,
4475,
11,
3128,
17,
22510,
628,
198,
4299,
651,
62,
19155,
17163,
62,
392,
62,
19155,
2617,
62,
22355,
62,
315,
66,
7,
83... | 2.168353 | 1,087 |
# Copy the first line of n.txt to n1.txt, dropping every full-width
# alphabet/number character and echoing each dropped character to stdout.
line = []
with open('./mycode/n.txt', 'r', encoding='utf8') as f1:
    line = f1.readlines()

# NOTE: only the first line of the input is processed (as before); an empty
# input file now produces an empty output instead of raising IndexError.
first_line = line[0] if line else ''

# Context manager ensures the output file is closed even if the filter
# helpers raise (the original leaked the handle on error).
with open('./mycode/n1.txt', 'w+', encoding='utf8') as f:
    for char in first_line:
        if is_quan_alphabet(char) or is_quan_number(char):
            print(char)
        else:
            f.write(char)
| [
628,
198,
198,
1370,
28,
21737,
198,
4480,
1280,
7,
4458,
14,
1820,
8189,
14,
77,
13,
14116,
41707,
81,
3256,
12685,
7656,
11639,
40477,
23,
11537,
355,
277,
16,
25,
198,
220,
220,
220,
1627,
28,
69,
16,
13,
961,
6615,
3419,
198,
... | 2.014493 | 138 |
import sys
# stdlib
from operator import itemgetter
from functools import lru_cache
# 3rd party
import arrow
import meilisearch
from django.apps import apps
from django.db.models import Q, Case, When, Model, Manager, QuerySet
from wagtail.search.index import (
FilterField, SearchField, RelatedFields, AutocompleteField, class_is_indexed,
get_indexed_models
)
from wagtail.search.utils import OR
from django.utils.encoding import force_text
from wagtail.search.backends.base import (
BaseSearchBackend, BaseSearchResults, EmptySearchResults, BaseSearchQueryCompiler
)
from .settings import STOP_WORDS
# Optional django-cacheops integration. Default the flag to False *before*
# the import attempt: previously, when cacheops was not installed, the
# ImportError path left USING_CACHEOPS unbound and add_items() later raised
# NameError instead of simply skipping cache invalidation.
USING_CACHEOPS = False
try:
    from cacheops import invalidate_model
except ImportError:
    pass
else:
    USING_CACHEOPS = True

# Field-name suffixes used when mapping Wagtail search fields to index fields.
AUTOCOMPLETE_SUFFIX = '_ngrams'
FILTER_SUFFIX = '_filter'
class MeiliSearchModelIndex:
    """Creates a working index for each model sent to it.

    NOTE(review): ``_set_index`` and ``_has_date_fields`` are called below but
    defined elsewhere in this module/class (not visible in this chunk) --
    confirm they exist.
    """

    def __init__(self, backend, model):
        """Initialise an index for `model`

        Args:
            backend (MeiliSearchBackend): A backend instance
            model (django.db.Model): Should be able to pass any model here but it's most
                likely to be a subclass of wagtail.core.models.Page
        """
        self.backend = backend
        self.client = backend.client
        self.query_limit = backend.query_limit
        self.model = model
        self.name = model._meta.label
        self.index = self._set_index(model)
        self.search_params = {
            'limit': self.query_limit,
            'matches': True
        }
        self.update_strategy = backend.update_strategy
        self.update_delta = backend.update_delta
        # Timestamp fields consulted by the 'delta' update strategy.
        self.delta_fields = [
            'created_at', 'updated_at', 'first_published_at', 'last_published_at'
        ]

    def prepare_value(self, value):
        """Makes sure `value` is something we can save in the index.

        Args:
            value (UNKNOWN): This could be anything.

        Returns:
            str: A String representation of whatever `value` was
        """
        if not value:
            return ''
        if isinstance(value, str):
            return value
        if isinstance(value, list):
            return ', '.join(self.prepare_value(item) for item in value)
        if isinstance(value, dict):
            return ', '.join(self.prepare_value(item)
                             for item in value.values())
        if callable(value):
            return force_text(value())
        return force_text(value)

    def _get_document_fields(self, model, item):
        """Borrowed from Wagtail-Whoosh
        Walks through the model's search fields and returns stuff the way the index is
        going to want it.

        Todo:
            * Make sure all of this is usable by MeiliSearch

        Args:
            model (db.Model): The model class we want the fields for
            item (db.Model): The model instance we want the fields for

        Yields:
            TYPE: Description
        """
        for field in model.get_search_fields():
            if isinstance(field, (SearchField, FilterField, AutocompleteField)):
                # Deliberate best-effort: a field that cannot be read or
                # prepared is skipped rather than aborting the whole document.
                try:
                    yield _get_field_mapping(field), self.prepare_value(field.get_value(item))
                except Exception:
                    pass
            if isinstance(field, RelatedFields):
                value = field.get_value(item)
                if isinstance(value, (Manager, QuerySet)):
                    qs = value.all()
                    for sub_field in field.fields:
                        sub_values = qs.values_list(sub_field.field_name, flat=True)
                        try:
                            yield '{0}__{1}'.format(
                                field.field_name, _get_field_mapping(sub_field)), \
                                self.prepare_value(list(sub_values))
                        except Exception:
                            pass
                if isinstance(value, Model):
                    for sub_field in field.fields:
                        try:
                            yield '{0}__{1}'.format(
                                field.field_name, _get_field_mapping(sub_field)),\
                                self.prepare_value(sub_field.get_value(value))
                        except Exception:
                            pass

    # NOTE: this was previously wrapped in ``functools.lru_cache``. Caching a
    # bound method keeps ``self`` and every ``item`` alive for the process
    # lifetime, and -- because Django models hash by primary key -- re-indexing
    # an updated item returned the *stale* cached document. The document build
    # is cheap, so the cache is dropped.
    def _create_document(self, model, item):
        """Create a dict containing the fields we want to send to MeiliSearch

        Args:
            model (db.Model): The model class we're indexing
            item (db.Model): The model instance we're indexing

        Returns:
            dict: A dict representation of the model
        """
        doc_fields = dict(self._get_document_fields(model, item))
        doc_fields.update(id=item.id)
        document = {}
        document.update(doc_fields)
        return document

    def add_items(self, item_model, items):
        """Adds items in bulk to the index. If we're adding stuff through the `update_index`
        management command, we'll receive these in chunks of 1000.
        We're then splitting those chunks into smaller chunks of 100, I think that helps
        not overload stuff, but it would be good TODO tests to verify this.

        Args:
            item_model (db.Model): The model class we're indexing
            items (list): A list containing a bunch of items to index.

        Returns:
            bool: True
        """
        prepared = []

        # Ensure we're not indexing something stale from the cache
        # This also stops redis from overloading during the indexing
        if USING_CACHEOPS is True:
            try:
                invalidate_model(item_model)
            except Exception:
                pass

        # split items into chunks of 100
        chunks = [items[x:x + 100] for x in range(0, len(items), 100)]

        for chunk in chunks:
            if self.update_strategy == 'delta':
                chunk = self._check_deltas(chunk)
            prepared = []
            for item in chunk:
                doc = self._create_document(self.model, item)
                prepared.append(doc)
            # 'soft'/'delta' update existing documents in place; the default
            # strategy replaces them wholesale.
            if self.update_strategy == 'soft' or self.update_strategy == 'delta':
                self.index.update_documents(prepared)
            else:
                self.index.add_documents(prepared)
            del(chunk)
        return True

    def _check_deltas(self, objects: list) -> list:
        """Takes a list of objects and removes any where the last_published_at, first_published_at,
        created_at or updated_at are outside of the time delta.

        TODO: This looks ugly, and is probably slow.

        Args:
            objects (list): A list of model instances
        """
        filtered = []
        since = arrow.now().shift(**self.update_delta).datetime
        for obj in objects:
            if self._has_date_fields(obj):
                for field in self.delta_fields:
                    if hasattr(obj, field):
                        val = getattr(obj, field)
                        try:
                            if val and val > since:
                                filtered.append(obj)
                                continue
                        except TypeError:
                            # Naive vs aware datetime comparison -- treat as
                            # "outside the delta" and skip the object.
                            pass
        return filtered
class DummyModelIndex:
    """This class enables the SKIP_MODELS feature by providing a
    dummy model index that we can add things to without it actually
    doing anything.
    """

    def add_items(self, item_model, items):
        """No-op stand-in for ``MeiliSearchModelIndex.add_items``.

        Without this method the class could not actually accept items, which
        is the whole point stated in its docstring.

        Args:
            item_model (db.Model): Ignored.
            items (list): Ignored.

        Returns:
            bool: True, mirroring the real index's return value.
        """
        return True
@lru_cache()
def get_descendant_models(model):
    """
    Borrowed from Wagtail-Whoosh
    Return every registered model that is `model` itself or a subclass of it,
    e.g. for a search on Page, return [HomePage, ContentPage, Page] etc.
    """
    return [
        candidate for candidate in apps.get_models()
        if issubclass(candidate, model)
    ]
# Public alias so Wagtail's WAGTAILSEARCH_BACKENDS setting can point at this
# module. NOTE(review): ``MeiliSearchBackend`` is presumably defined earlier
# in this module (not visible in this chunk) -- confirm.
SearchBackend = MeiliSearchBackend
| [
11748,
25064,
198,
198,
2,
14367,
8019,
198,
6738,
10088,
1330,
2378,
1136,
353,
198,
6738,
1257,
310,
10141,
1330,
300,
622,
62,
23870,
198,
198,
2,
513,
4372,
2151,
198,
11748,
15452,
198,
11748,
502,
346,
271,
3679,
198,
6738,
4262... | 2.158653 | 3,681 |
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: Remove stripe artefacts
:platform: Unix
:synopsis: A plugin working in sinogram space to remove large stripe artefacts
.. moduleauthor:: Nghia Vo <scientificsoftware@diamond.ac.uk>
"""
from savu.plugins.plugin import Plugin
from savu.plugins.driver.cpu_plugin import CpuPlugin
from savu.plugins.utils import register_plugin
from savu.data.plugin_list import CitationInformation
import numpy as np
from scipy.ndimage import median_filter
from scipy.ndimage import binary_dilation
from scipy.ndimage import uniform_filter1d
from scipy import interpolate
@register_plugin
class RemoveUnresponsiveAndFluctuatingRings(Plugin, CpuPlugin):
    """
    Method to remove unresponsive and fluctuating stripe artefacts in a\
    sinogram (<-> ring artefacts in a reconstructed image).

    :param size: Size of the median filter window. Greater is stronger\
        . Default: 71.
    :param snr: Ratio used to detect locations of stripes. Greater is\
        less sensitive. Default: 3.0.
    """
    # NOTE(review): savu parses the class docstring above to generate the
    # plugin parameters -- do not edit its wording/format casually.
    # ``self.size``, ``self.snr``, ``self.height1`` and ``self.matindex`` are
    # presumably assigned in a framework ``setup``/``pre_process`` method not
    # visible in this chunk -- confirm.

    def detect_stripe(self, listdata, snr):
        """
        Algorithm 4 in the paper. Used to locate stripe positions.

        ---------
        Parameters: - listdata: 1D normalized array.
                    - snr: ratio used to discriminate between useful
                        information and noise.
        ---------
        Return:     - 1D binary mask.
        """
        numdata = len(listdata)
        # Sort descending and fit a line through the middle 50% of the sorted
        # values: the line models the "normal" intensity trend, its residual
        # the noise level.
        listsorted = np.sort(listdata)[::-1]
        xlist = np.arange(0, numdata, 1.0)
        ndrop = np.int16(0.25 * numdata)
        (_slope, _intercept) = np.polyfit(
            xlist[ndrop:-ndrop-1], listsorted[ndrop:-ndrop - 1], 1)
        numt1 = _intercept + _slope * xlist[-1]
        noiselevel = np.abs(numt1 - _intercept)
        # Normalised deviation of the largest / smallest value from the fit.
        val1 = np.abs(listsorted[0] - _intercept) / noiselevel
        val2 = np.abs(listsorted[-1] - numt1) / noiselevel
        listmask = np.zeros_like(listdata)
        # Values deviating more than snr noise-levels are flagged as stripes.
        if (val1 >= snr):
            upper_thresh = _intercept + noiselevel * snr * 0.5
            listmask[listdata > upper_thresh] = 1.0
        if (val2 >= snr):
            lower_thresh = numt1 - noiselevel * snr * 0.5
            listmask[listdata <= lower_thresh] = 1.0
        return listmask

    def remove_large_stripe(self, matindex, sinogram, snr, size):
        """
        Algorithm 5 in the paper. Use to remove residual stripes

        ---------
        Parameters: - sinogram: 2D array.
                    - snr: ratio used to discriminate between useful
                        information and noise.
                    - size: window size of the median filter.
        ---------
        Return:     - stripe-removed sinogram.
        """
        # Fraction of extreme rows ignored when averaging column intensities.
        badpixelratio = 0.05
        (nrow, ncol) = sinogram.shape
        ndrop = np.int16(badpixelratio * nrow)
        sinosorted = np.sort(sinogram, axis=0)
        sinosmoothed = median_filter(sinosorted, (1, size))
        list1 = np.mean(sinosorted[ndrop:nrow - ndrop], axis=0)
        list2 = np.mean(sinosmoothed[ndrop:nrow - ndrop], axis=0)
        # Ratio of raw to smoothed column means; stripes stand out here.
        listfact = list1 / list2
        listmask = self.detect_stripe(listfact, snr)
        listmask = binary_dilation(listmask, iterations=1).astype(listmask.dtype)
        matfact = np.tile(listfact,(nrow,1))
        # Normalise out the stripe factor before sort/unsort correction.
        sinogram = sinogram / matfact
        sinogram1 = np.transpose(sinogram)
        # Pair each pixel with its original row index so the smoothed values
        # can be mapped back to their original positions after sorting.
        matcombine = np.asarray(np.dstack((matindex, sinogram1)))
        matsort = np.asarray(
            [row[row[:, 1].argsort()] for row in matcombine])
        matsort[:, :, 1] = np.transpose(sinosmoothed)
        matsortback = np.asarray(
            [row[row[:, 0].argsort()] for row in matsort])
        sino_corrected = np.transpose(matsortback[:, :, 1])
        # Only columns flagged as stripes receive the corrected values.
        listxmiss = np.where(listmask > 0.0)[0]
        sinogram[:, listxmiss] = sino_corrected[:, listxmiss]
        return sinogram

    def process_frames(self, data):
        """
        Algorithm 6 and 5 in the paper. Remove unresponsive and fluctuating\
        stripes using algorithm 6. Then using algorithm 5 to clean residual\
        stripes.
        """
        sinogram = np.copy(data[0])
        # Detect columns whose values fluctuate strongly against a smoothed
        # version of themselves (unresponsive / fluctuating detector pixels).
        sinosmoothed = np.apply_along_axis(uniform_filter1d, 0, sinogram, 10)
        listdiff = np.sum(np.abs(sinogram - sinosmoothed), axis=0)
        nmean = np.mean(listdiff)
        listdiffbck = median_filter(listdiff, self.size)
        # Avoid division by zero in the factor below.
        listdiffbck[listdiffbck == 0.0] = nmean
        listfact = listdiff / listdiffbck
        listmask = self.detect_stripe(listfact, self.snr)
        listmask = binary_dilation(listmask, iterations=1).astype(listmask.dtype)
        # Never treat the outermost columns as stripes (needed as
        # interpolation anchors).
        listmask[0:2] = 0.0
        listmask[-2:] = 0.0
        listx = np.where(listmask < 1.0)[0]
        listy = np.arange(self.height1)
        matz = sinogram[:, listx]
        # Interpolate defective columns from their good neighbours.
        finter = interpolate.interp2d(listx, listy, matz, kind='linear')
        listxmiss = np.where(listmask > 0.0)[0]
        if len(listxmiss) > 0:
            matzmiss = finter(listxmiss, listy)
            sinogram[:, listxmiss] = matzmiss
        # Use algorithm 5 to remove residual stripes
        sinogram = self.remove_large_stripe(self.matindex, sinogram, self.snr, self.size)
        return sinogram
| [
2,
15069,
1946,
13566,
4401,
8090,
12052,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198,
... | 2.359016 | 2,440 |
import unittest
import os
import sys
import tempfile
import shutil
import importlib
import traceback
from contextlib import contextmanager
from gmc.conf import settings, ENVIRONMENT_VARIABLE | [
11748,
555,
715,
395,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
20218,
7753,
198,
11748,
4423,
346,
198,
11748,
1330,
8019,
198,
11748,
12854,
1891,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
6738,
308,
23209,
13,
10414,
1330,... | 3.653846 | 52 |
from re import T
import my_log
import os
import glob
from tqdm import tqdm
from datetime import datetime as dt
import ast
import csv
import sys
import math
# Raise the CSV field size limit to the platform maximum so very long fields
# do not raise "_csv.Error: field larger than field limit".
csv.field_size_limit(sys.maxsize)

if __name__ == "__main__":
    # NOTE(review): main() is presumably defined earlier in this file
    # (not visible in this chunk).
    main()
    print("\n\n\033[32m...........all tasks done!!...........\033[0m\n")
| [
6738,
302,
1330,
309,
198,
11748,
616,
62,
6404,
198,
11748,
28686,
198,
11748,
15095,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
11748,
6468,
198,
11748,
269,
21370,
1... | 2.72973 | 111 |
"""
Test Swagger Generation
~~~~~~~~~~~~~~~~~~~~~~~
Tests for converting a handler registry to a Swagger specification.
:copyright: Copyright 2018 PlanGrid, Inc., see AUTHORS.
:license: MIT, see LICENSE for details.
"""
import json
import marshmallow as m
import pytest
from flask_rebar.rebar import Rebar
from flask_rebar.swagger_generation import ExternalDocumentation
from flask_rebar.swagger_generation import SwaggerV2Generator
from flask_rebar.swagger_generation import SwaggerV3Generator
from flask_rebar.swagger_generation import Server
from flask_rebar.swagger_generation import ServerVariable
from flask_rebar.swagger_generation import Tag
from flask_rebar.testing import validate_swagger
from flask_rebar.testing.swagger_jsonschema import (
SWAGGER_V2_JSONSCHEMA,
SWAGGER_V3_JSONSCHEMA,
)
from tests.swagger_generation.registries import hidden_api
@pytest.mark.parametrize(
"registry, swagger_generator, expected_swagger",
[
(
hidden_api.registry,
hidden_api.swagger_v2_generator,
hidden_api.EXPECTED_SWAGGER_V2,
),
(
hidden_api.registry,
hidden_api.normal_swagger_v3_generator,
hidden_api.SWAGGER_V3_WITHOUT_HIDDEN,
),
(
hidden_api.registry,
hidden_api.swagger_v3_generator_with_hidden,
hidden_api.SWAGGER_V3_WITH_HIDDEN,
),
],
)
| [
37811,
198,
220,
220,
220,
6208,
2451,
7928,
16588,
198,
220,
220,
220,
220,
27156,
8728,
4907,
93,
628,
220,
220,
220,
30307,
329,
23202,
257,
21360,
20478,
284,
257,
2451,
7928,
20855,
13,
628,
220,
220,
220,
1058,
22163,
4766,
25,
... | 2.430252 | 595 |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import pyi_testmod_dynamic
if __name__ == "__main__":
    foo = pyi_testmod_dynamic.foo
    # 'foo' must have been (re)assigned by the dynamic module machinery.
    print("'foo' value: %s" % foo)
    assert foo is not None
    assert foo == 'A new value!'
| [
2,
10097,
32501,
198,
2,
15069,
357,
66,
8,
5075,
12,
4626,
11,
9485,
15798,
263,
7712,
4816,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
22961,
3611,
5094,
13789,
351,
6631,
198,
2,
329,
25950,
6297,
29356,
13,
198,... | 3.83815 | 173 |
import argparse
import logging
import os
import pickle
import sys
import time
from datetime import datetime
import constants
from config import Config
from controller import Controller
from exceptions import *
from gameinstancemanager import GameInstanceManager
from helpers import is_responding_pid, find_window_by_title, taskkill_pid, init_pytesseract
# Command line interface: credentials, target server, install paths and
# behaviour toggles for the spectator instance.
parser = argparse.ArgumentParser(description='Launch and control a Battlefield 2 spectator instance')
parser.add_argument('--version', action='version', version=f'{constants.APP_NAME} v{constants.APP_VERSION}')
parser.add_argument('--player-name', help='Account name of spectating player', type=str, required=True)
parser.add_argument('--player-pass', help='Account password of spectating player', type=str, required=True)
# Fixed user-facing help typo: "sever" -> "server" (three occurrences).
parser.add_argument('--server-ip', help='IP of server to join for spectating', type=str, required=True)
parser.add_argument('--server-port', help='Port of server to join for spectating', type=str, default='16567')
parser.add_argument('--server-pass', help='Password of server to join for spectating', type=str)
parser.add_argument('--game-path', help='Path to BF2 install folder',
                    type=str, default='C:\\Program Files (x86)\\EA Games\\Battlefield 2\\')
parser.add_argument('--game-res', help='Resolution to use for BF2 window', choices=['720p', '900p'], type=str, default='720p')
parser.add_argument('--tesseract-path', help='Path to Tesseract install folder',
                    type=str, default='C:\\Program Files\\Tesseract-OCR\\')
parser.add_argument('--instance-rtl', help='How many rounds to use a game instance for (rounds to live)', type=int, default=6)
parser.add_argument('--use-controller', dest='use_controller', action='store_true')
parser.add_argument('--controller-base-uri', help='Base uri of web controller', type=str)
parser.add_argument('--controller-app-key', help='App key for web controller', type=str)
parser.add_argument('--controller-timeout', help='Timeout to use for requests to controller (in seconds)', type=int,
                    default=2)
parser.add_argument('--no-start', dest='start_game', action='store_false')
parser.add_argument('--no-rtl-limit', dest='limit_rtl', action='store_false')
parser.add_argument('--debug-log', dest='debug_log', action='store_true')
parser.add_argument('--debug-screenshot', dest='debug_screenshot', action='store_true')
parser.set_defaults(start_game=True, limit_rtl=True, debug_log=False, debug_screenshot=False, use_controller=False)
args = parser.parse_args()

logging.basicConfig(level=logging.DEBUG if args.debug_log else logging.INFO, format='%(asctime)s %(message)s')
# Transfer argument values to config
config = Config()
config.set_options(
    player_name=args.player_name,
    player_pass=args.player_pass,
    server_ip=args.server_ip,
    server_port=args.server_port,
    server_pass=args.server_pass,
    game_path=args.game_path,
    tesseract_path=args.tesseract_path,
    limit_rtl=args.limit_rtl,
    instance_rtl=args.instance_rtl,
    use_controller=args.use_controller,
    controller_base_uri=args.controller_base_uri,
    controller_app_key=args.controller_app_key,
    controller_timeout=args.controller_timeout,
    resolution=args.game_res,
    debug_screenshot=args.debug_screenshot,
    # NOTE(review): hard-coded rather than exposed as a CLI flag --
    # presumably intentional; confirm before making it configurable.
    max_iterations_on_player=5
)
# Make sure provided paths are valid
if not os.path.isfile(os.path.join(config.get_tesseract_path(), constants.TESSERACT_EXE)):
    sys.exit(f'Could not find {constants.TESSERACT_EXE} in given install folder: {args.tesseract_path}')
elif not os.path.isfile(os.path.join(config.get_game_path(), constants.BF2_EXE)):
    sys.exit(f'Could not find {constants.BF2_EXE} in given game install folder: {config.get_game_path()}')

# Init pytesseract
init_pytesseract(config.get_tesseract_path())

# Load pickles
# NOTE(review): pickle.load executes arbitrary code from the file. Fine here
# because histograms.pickle ships with the application -- never point this at
# untrusted data.
logging.info('Loading pickles')
with open(os.path.join(config.ROOT_DIR, 'pickle', 'histograms.pickle'), 'rb') as histogramFile:
    histograms = pickle.load(histogramFile)
# Init debug directory if debugging is enabled
if config.debug_screenshot():
    # Create debug output dir if needed
    if not os.path.isdir(config.DEBUG_DIR):
        # Access DEBUG_DIR through the instance, consistent with the check
        # above (the original mixed `config.DEBUG_DIR` and `Config.DEBUG_DIR`).
        os.mkdir(config.DEBUG_DIR)
# Init game instance state store
gim = GameInstanceManager(config.get_game_path(), config.get_player_name(), config.get_player_pass(), histograms)
gis = gim.get_state()

controller = Controller(
    config.get_controller_base_uri(),
    config.get_controller_app_key(),
    config.get_controller_timeout()
)

# Check whether the controller has a server join
if config.use_controller():
    logging.info('Checking for join server on controller')
    joinServer = controller.get_join_server()
    if joinServer is not None and \
            (joinServer['ip'] != config.get_server_ip() or
             str(joinServer['gamePort']) != config.get_server_port()):
        # Spectator is supposed to be on different server
        logging.info('Controller has a server to join, updating config')
        config.set_server(joinServer['ip'], str(joinServer['gamePort']), joinServer['password'])

# Init game instance if requested
gotInstance = False
if args.start_game:
    logging.info('Initializing spectator game instance')
    gotInstance = gim.launch_instance(config.get_resolution())
else:
    # Attach to an already-running instance instead of spawning a new one.
    logging.info('"Attaching" to existing game instance')
    gotInstance = gim.find_instance(config.get_resolution())

# Schedule restart if no instance was started/found
if not gotInstance:
    gis.set_error_restart_required(True)

# Start with max to switch away from dead spectator right away
iterationsOnPlayer = config.get_max_iterations_on_player()
while True:
bf2Window = gim.get_game_window()
# Try to bring BF2 window to foreground
if not gis.error_restart_required():
try:
gim.bring_to_foreground()
except Exception as e:
logging.error('BF2 window is gone, restart required')
logging.error(str(e))
gis.set_error_restart_required(True)
# Check if game froze
if not gis.error_restart_required() and not is_responding_pid(bf2Window.pid):
logging.info('Game froze, checking unresponsive count')
# Game will temporarily freeze when map load finishes or when joining server, so don't restart right away
if gis.get_error_unresponsive_count() < 3:
logging.info('Unresponsive count below limit, giving time to recover')
# Increase unresponsive count
gis.increase_error_unresponsive_count()
# Check again in 2 seconds
time.sleep(2)
continue
else:
logging.error('Unresponsive count exceeded limit, scheduling restart')
gis.set_error_restart_required(True)
elif not gis.error_restart_required() and gis.get_error_unresponsive_count() > 0:
logging.info('Game recovered from temp freeze, resetting unresponsive count')
# Game got it together, reset unresponsive count
gis.reset_error_unresponsive_count()
# Check for (debug assertion and Visual C++ Runtime) error window
if not gis.error_restart_required() and \
(find_window_by_title('BF2 Error') is not None or
find_window_by_title('Microsoft Visual C++ Runtime Library') is not None):
logging.error('BF2 Error window present, scheduling restart')
gis.set_error_restart_required(True)
# Check if a game restart command was issued to the controller
forceNextPlayer = False
if config.use_controller():
commands = controller.get_commands()
if commands.get('game_restart') is True:
logging.info('Game restart requested via controller, queueing game restart')
# Reset command to false
commandReset = controller.post_commands({'game_restart': False})
if commandReset:
# Set restart required flag
gis.set_error_restart_required(True)
if commands.get('rotation_pause') is True:
logging.info('Player rotation pause requested via controller, pausing rotation')
# Reset command to false
commandReset = controller.post_commands({'rotation_pause': False})
if commandReset:
# Set pause via config
config.pause_player_rotation(constants.PLAYER_ROTATION_PAUSE_DURATION)
if commands.get('rotation_resume') is True:
logging.info('Player rotation resume requested via controller, resuming rotation')
# Reset command flag
commandReset = controller.post_commands({'rotation_resume': False})
if commandReset:
# Unpause via config
config.unpause_player_rotation()
if commands.get('next_player') is True:
logging.info('Manual switch to next player requested via controller, queueing switch')
# Reset command to false
commandReset = controller.post_commands({'next_player': False})
if commandReset:
forceNextPlayer = True
# Start a new game instance if required
if gis.rtl_restart_required() or gis.error_restart_required():
if bf2Window is not None and gis.rtl_restart_required():
# Quit out of current instnace
logging.info('Quitting existing game instance')
quitSuccessful = gim.quit_instance()
logging.debug(f'Quit successful: {quitSuccessful}')
gis.set_rtl_restart_required(False)
# If quit was not successful, switch to error restart
if not quitSuccessful:
logging.error('Quitting existing game instance failed, switching to error restart')
gis.set_error_restart_required(True)
# Don't use elif here so error restart can be executed right after a failed quit attempt
if bf2Window is not None and gis.error_restart_required():
# Kill any remaining instance by pid
logging.info('Killing existing game instance')
killed = taskkill_pid(bf2Window.pid)
logging.debug(f'Instance killed: {killed}')
# Give Windows time to actually close the window
time.sleep(3)
# Init game new game instance
gim.launch_instance(config.get_resolution())
# Bring window to foreground
try:
gim.bring_to_foreground()
except Exception as e:
logging.error('BF2 window is gone, restart required')
logging.error(str(e))
continue
# Connect to server
logging.info('Connecting to server')
serverIp, serverPort, serverPass = config.get_server()
connected = gim.connect_to_server(serverIp, serverPort, serverPass)
# Reset state
gis.restart_reset()
gis.set_spectator_on_server(connected)
gis.set_map_loading(connected)
if connected:
gis.set_server(serverIp, serverPort, serverPass)
continue
# Make sure we are still in the game
gameMessagePresent = gim.check_for_game_message()
if gameMessagePresent:
logging.info('Game message present, ocr-ing message')
gameMessage = gim.ocr_game_message()
# Close game message to enable actions
gim.close_game_message()
if 'full' in gameMessage:
logging.info('Server full, trying to rejoin in 30 seconds')
# Update state
gis.set_spectator_on_server(False)
# Connect to server waits 10, wait another 20 = 30
time.sleep(20)
elif 'kicked' in gameMessage:
logging.info('Got kicked, trying to rejoin')
# Update state
gis.set_spectator_on_server(False)
elif 'banned' in gameMessage:
sys.exit('Got banned, contact server admin')
elif 'connection' in gameMessage and 'lost' in gameMessage or \
'failed to connect' in gameMessage:
logging.info('Connection lost, trying to reconnect')
# Update state
gis.set_spectator_on_server(False)
elif 'modified content' in gameMessage:
logging.info('Got kicked for modified content, trying to rejoin')
# Update state
gis.set_spectator_on_server(False)
elif 'invalid ip address' in gameMessage:
logging.info('Join by ip dialogue bugged, restart required')
# Set restart flag
gis.set_error_restart_required(True)
else:
sys.exit(gameMessage)
continue
# If we are using a controller, check if server switch is required and possible
# (spectator not on server or fully in game)
if config.use_controller() and (not gis.spectator_on_server() or
(not gis.map_loading() and
iterationsOnPlayer == config.get_max_iterations_on_player())):
logging.info('Checking for join server on controller')
joinServer = controller.get_join_server()
# Update server and switch if spectator is supposed to be on a different server of password was updated
if joinServer is not None and \
(joinServer['ip'] != config.get_server_ip() or
str(joinServer['gamePort']) != config.get_server_port() or
joinServer['password'] != config.get_server_pass()):
# Spectator is supposed to be on different server
logging.info('Controller has a server to join, updating config')
config.set_server_ip(joinServer['ip'])
config.set_server_port(str(joinServer['gamePort']))
config.set_server_pass(joinServer['password'])
elif gis.spectator_on_server():
controller.post_current_server(
gis.get_server_ip(),
gis.get_server_port(),
gis.get_server_password()
)
# Queue server switch if spectator is supposed to be on a different server (or the password changed)
if gis.spectator_on_server() and \
(config.get_server_ip() != gis.get_server_ip() or
config.get_server_port() != gis.get_server_port() or
config.get_server_pass() != gis.get_server_password()):
logging.info('Queued server switch, disconnecting from current server')
gis.set_spectator_on_server(False)
gim.disconnect_from_server()
# If game instance is about to replaced, add one more round on the new server
if gis.get_round_num() + 1 >= config.get_instance_trl():
logging.info('Extending instance lifetime by one round on the new server')
gis.decrease_round_num()
# Player is not on server, check if rejoining is possible and makes sense
if not gis.spectator_on_server():
# Check number of free slots
# TODO
# (Re-)connect to server
logging.info('(Re-)Connecting to server')
serverIp, serverPort, serverPass = config.get_server()
connected = gim.connect_to_server(serverIp, serverPort, serverPass)
# Treat re-connecting as map rotation (state wise)
gis.map_rotation_reset()
# Update state
gis.set_spectator_on_server(connected)
gis.set_map_loading(connected)
if connected:
gis.set_server(serverIp, serverPort, serverPass)
# Update controller
if connected and config.use_controller():
controller.post_current_server(serverIp, serverPort, serverPass)
continue
onRoundFinishScreen = gim.check_if_round_ended()
mapIsLoading = gim.check_if_map_is_loading()
mapBriefingPresent = gim.check_for_map_briefing()
# Update instance state if any map load/eor screen is present
# (only _set_ map loading state here, since it should only be _unset_ when attempting to spawn
if (onRoundFinishScreen or mapIsLoading or mapBriefingPresent) and not gis.map_loading():
gis.set_map_loading(True)
if config.limit_rtl() and onRoundFinishScreen and gis.get_round_num() >= config.get_instance_trl():
logging.info('Game instance has reached rtl limit, restart required')
gis.set_rtl_restart_required(True)
elif mapIsLoading:
logging.info('Map is loading')
# Reset state once if it still reflected to be on the (same) map
if gis.rotation_on_map():
logging.info('Performing map rotation reset')
gis.map_rotation_reset()
iterationsOnPlayer = config.get_max_iterations_on_player()
time.sleep(3)
elif mapBriefingPresent:
logging.info('Map briefing present, checking map')
currentMapName = gim.get_map_name()
currentMapSize = gim.get_map_size()
# Update map state if relevant and required
if currentMapName is not None and currentMapSize != -1 and \
(currentMapName != gis.get_rotation_map_name() or
currentMapSize != gis.get_rotation_map_size()):
logging.debug(f'Updating map state: {currentMapName}; {currentMapSize}')
gis.set_rotation_map_name(currentMapName)
gis.set_rotation_map_size(currentMapSize)
# Give go-ahead for active joining
logging.info('Enabling active joining')
gis.set_active_join_possible(True)
if gis.active_join_possible():
# Check if join game button is present
logging.info('Could actively join, checking for button')
joinGameButtonPresent = gim.check_for_join_game_button()
if joinGameButtonPresent:
# TODO
pass
time.sleep(3)
elif onRoundFinishScreen:
logging.info('Game is on round finish screen')
# Reset state
gis.round_end_reset()
# Set counter to max again to skip spectator
iterationsOnPlayer = config.get_max_iterations_on_player()
time.sleep(3)
elif not onRoundFinishScreen and not gis.round_spawned():
# Loaded into map, now trying to start spectating
gis.set_map_loading(False)
# Re-enable hud if required
if gis.hud_hidden():
# Give game time to swap teams
time.sleep(3)
# Re-enable hud
logging.info('Enabling hud')
gim.toggle_hud(1)
# Update state
gis.set_hud_hidden(False)
time.sleep(1)
spawnMenuVisible = gim.check_if_spawn_menu_visible()
if not spawnMenuVisible:
logging.info('Spawn menu not visible, opening with enter')
gim.open_spawn_menu()
# Force another attempt re-enable hud
gis.set_hud_hidden(True)
continue
logging.info('Determining team')
currentTeam = gim.get_player_team()
if currentTeam is not None and \
gis.get_rotation_map_name() is not None and \
gis.get_rotation_map_size() != -1:
gis.set_round_team(currentTeam)
logging.debug(f'Current team: {"USMC" if gis.get_round_team() == 0 else "MEC/CHINA"}')
logging.info('Spawning once')
try:
spawnSucceeded = gim.spawn_suicide()
logging.info('Spawn succeeded' if spawnSucceeded else 'Spawn failed, retrying')
gis.set_round_spawned(spawnSucceeded)
except UnsupportedMapException as e:
logging.error('Spawning not supported on current map/size')
# Wait map out by "faking" spawn
gis.set_round_spawned(True)
elif gis.get_rotation_map_name() is not None and \
gis.get_rotation_map_size() != -1:
logging.error('Failed to determine current team, retrying')
# Force another attempt re-enable hud
gis.set_hud_hidden(True)
time.sleep(2)
continue
else:
# Map detection failed, force reconnect
logging.error('Map detection failed, disconnecting')
gim.disconnect_from_server()
# Update state
gis.set_spectator_on_server(False)
continue
elif not onRoundFinishScreen and not gis.hud_hidden():
logging.info('Hiding hud')
gim.toggle_hud(0)
gis.set_hud_hidden(True)
# Increase round number/counter
gis.increase_round_num()
logging.debug(f'Entering round #{gis.get_round_num()} using this instance')
# Spectator has "entered" map, update state accordingly
gis.set_rotation_on_map(True)
elif not onRoundFinishScreen and iterationsOnPlayer < config.get_max_iterations_on_player() and \
not config.player_rotation_paused() and not forceNextPlayer:
# Check if player is afk
if not gim.is_sufficient_action_on_screen():
logging.info('Insufficient action on screen')
iterationsOnPlayer = config.get_max_iterations_on_player()
else:
logging.info('Nothing to do, stay on player')
iterationsOnPlayer += 1
time.sleep(2)
elif not onRoundFinishScreen and config.player_rotation_paused() and not forceNextPlayer:
logging.info(f'Player rotation is paused until {config.get_player_rotation_paused_until().isoformat()}')
# If rotation pause flag is still set even though the pause expired, remove the flag
if config.get_player_rotation_paused_until() < datetime.now():
logging.info('Player rotation pause expired, re-enabling rotation')
config.unpause_player_rotation()
else:
time.sleep(2)
elif not onRoundFinishScreen:
logging.info('Rotating to next player')
gim.rotate_to_next_player()
iterationsOnPlayer = 0
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
25064,
198,
11748,
640,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
198,
11748,
38491,
198,
6738,
4566,
1330,
17056,
198,
6738,
10444,
1330,
22... | 2.481431 | 8,805 |
#!/usr/bin/env python
import yaml
import sys
import os
import json
import argparse
# Paths where the rendered OpenShift config files / configmaps are written.
ocp_node_configmap_path = "/tmp/node-config.yaml"
ocp_webconsole_configmap_path = "/tmp/webconsole-config.yaml"
ocp_cluster_info_configmap_path = "/tmp/id"
ocp_config_dir = "/tmp"
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Fixed the "follwing" typo in the user-facing help text.
    parser.add_argument(
        "config_type",
        help="config_type can be one of the following: node-config-compute, "
             "node-config-master, node-config-infra, webconsole-config, "
             "cluster-info")
    args = parser.parse_args()
    # main() is defined elsewhere in this file; it returns a process exit
    # status which is propagated to the shell.
    status = main(args.config_type)
    sys.exit(status)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
331,
43695,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
1822,
29572,
198,
198,
420,
79,
62,
17440,
62,
11250,
8899,
62,
6978,
796,
12813,
22065,
14,
... | 2.73516 | 219 |
"""Resource mapper for
[`GatewayResource`][optimade_gateway.models.gateways.GatewayResource]."""
from optimade_gateway.models import GatewayResource
from optimade_gateway.mappers.base import BaseResourceMapper
class GatewaysMapper(BaseResourceMapper):
    """[`GatewayResource`][optimade_gateway.models.gateways.GatewayResource] mapper."""

    # URL endpoint name under which gateway resources are served.
    ENDPOINT = "gateways"
    # Resource model used to (de)serialize entries for this endpoint.
    ENTRY_RESOURCE_CLASS = GatewayResource
| [
37811,
26198,
285,
11463,
329,
198,
58,
63,
22628,
1014,
26198,
63,
7131,
40085,
671,
62,
10494,
1014,
13,
27530,
13,
10494,
1322,
13,
22628,
1014,
26198,
29225,
15931,
198,
6738,
6436,
671,
62,
10494,
1014,
13,
27530,
1330,
29916,
2619... | 3.169231 | 130 |
import numpy as np
import torch
from miso.metrics.continuous_metrics import ContinuousMetric
from miso.modules.linear.bilinear import BiLinear
from miso.losses.loss import MSECrossEntropyLoss
| [
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
198,
198,
6738,
2984,
78,
13,
4164,
10466,
13,
18487,
5623,
62,
4164,
10466,
1330,
45012,
9171,
1173,
198,
6738,
2984,
78,
13,
18170,
13,
29127,
13,
33473,
259,
451,
1330,
8436,
14993,
... | 3.213115 | 61 |
from __future__ import division # uncomment this if using Python 2.7
import numpy as np
from scipy import spatial
from scipy.ndimage import gaussian_filter, sobel
import matplotlib.pyplot as plt
from matplotlib import cm
from skimage import data, color, img_as_float, io
from skimage.morphology import erosion, disk
import time
from math import *
def get_patches(img, points, size_w):
    """Extract flattened square patches around the given image positions.

    The image is edge-padded by ``size_w // 2`` on every side, then for each
    (row, col) in *points* a ``size_w x size_w`` window anchored at that
    position in the padded image (i.e. centred on the original pixel for odd
    ``size_w``) is flattened to a 1-D array.

    - img: 2-D array (single-channel image) -- NOT an RGB image as the old
      docstring claimed
    - points: iterable of (row, col) integer index pairs
    - size_w: integer patch side length
    """
    half = size_w // 2
    # np.pad is the public spelling of the np.lib.pad alias used previously.
    padded = np.pad(img, ((half, half), (half, half)), 'edge')
    # The original slice used `i[0] + 2*size_w//2`; by left-to-right
    # precedence that is (2*size_w)//2 == size_w, so the window side is
    # exactly size_w.
    return [padded[r:r + size_w, c:c + size_w].flatten()
            for (r, c) in points]
def spectral_matching(sim, patches1, patches2, maskinv, corner_pos1, corner_pos2):
    """Match masked query patches against masked candidate patches.

    Both the query patch(es) and every candidate are multiplied elementwise
    by ``maskinv`` (1 on valid pixels, 0 on masked ones) so masked pixels do
    not contribute to the distance.  ``sim`` is any metric accepted by
    ``scipy.spatial.distance.cdist`` (e.g. 'euclidean').

    Returns a list with one ``(corner_pos1, best_candidate_position)`` pair
    per query row, where the best candidate minimises the chosen distance.
    """
    masked_candidates = [patch * maskinv for patch in patches2]
    masked_query = patches1 * maskinv
    distances = spatial.distance.cdist(masked_query, masked_candidates, sim)
    return [(corner_pos1, corner_pos2[np.argmin(row)]) for row in distances]
if __name__ == "__main__":
    # Exemplar-based (Criminisi-style) inpainting driver: fills the masked
    # region of the input image by repeatedly copying the best-matching
    # source patch onto the highest-priority pixel of the fill front.
    img_names = "test_im3.bmp"
    input_path = "images/"
    output_path = "results/"
    output_name = ["result1.bmp"]
    mask_name = ["mask1.bmp"]
    patchSize = [11]
    for imi in range(1):
        for size_w in patchSize:
            # print() call for consistency with the rest of the file (the
            # original line used Python 2 print-statement syntax, which is a
            # SyntaxError under Python 3).
            print(mask_name[imi])
            RGB_img1 = data.imread(str(input_path + img_names))
            img1 = img_as_float(color.rgb2gray(RGB_img1))
            RGB_mask = data.imread(str(input_path + mask_name[imi]))
            mask = img_as_float(color.rgb2gray(RGB_mask))
            '''
            plt.figure(figsize=(12,6))
            plt.subplot(121)
            io.imshow(RGB_img1, cmap=cm.gist_gray)
            plt.show()
            plt.title('Shrub L')
            plt.subplot(122)
            io.imshow(RGB_mask, cmap=cm.gist_gray)
            plt.show()
            plt.title('Shrub R');
            '''
            half_size_w = size_w // 2
            masknew = mask.copy()
            newImg = img1.copy()
            imgblur = gaussian_filter(newImg, sigma=1, order=0)
            max_x_shape, max_y_shape = newImg.shape
            # io.imsave(output_path + output_name, newImg)
            start_time = time.time()
            # Candidate source-patch centres: on a half-patch grid, at least
            # size_w away from every border, with no masked pixel inside.
            patchesIndex = [
                (ix, iy) for ix, row in enumerate(newImg) for iy, i in enumerate(row) if (ix > size_w and iy > size_w and ix < newImg.shape[0] - size_w and iy < newImg.shape[1] - size_w and ix % half_size_w == 0 and iy % half_size_w == 0) and (1 not in get_patches(mask, [(ix, iy)], size_w)[0])
            ]
            patches = get_patches(newImg, patchesIndex, size_w)
            # Confidence term: 1 on known pixels, 0 inside the mask.
            confidence = 1. - masknew.copy()
            # Fill front = mask minus its erosion (one-pixel-wide boundary).
            # (The unused border_C accumulators from the original were
            # removed -- they were assigned but never read.)
            border = masknew - erosion(masknew, disk(1))
            border_I = [(ix, iy) for ix, row in enumerate(border) for iy, i in enumerate(row) if i == 1]
            # Seed the confidence of the initial fill front with the mean
            # confidence over the surrounding patch.
            for i in border_I:
                confidence[i[0], i[1]] = sum(get_patches(confidence, [i], size_w)[0]) / size_w**2
            while (masknew == 1).sum() > 0:
                border = masknew - erosion(masknew, disk(1))
                border_I = [(ix, iy) for ix, row in enumerate(border) for iy, i in enumerate(row) if i == 1]
                # compute_Pivot (defined elsewhere in this file) picks the
                # fill-front pixel with the highest priority.
                pivotI = compute_Pivot(border_I, confidence, newImg)
                print("Pixels remain to solve: ")
                print((masknew == 1).sum())
                pivotPatch = get_patches(newImg, [pivotI], size_w)
                # 1 where the pivot patch is known, 0 where it is masked.
                maskpatch = -(get_patches(masknew, [pivotI], size_w)[0] - 1)
                match = spectral_matching('euclidean', pivotPatch, patches, maskpatch, pivotI, patchesIndex)
                pivotI_xmin = match[0][0][0] - half_size_w
                pivotI_xmax = match[0][0][0] + half_size_w
                pivotI_ymin = match[0][0][1] - half_size_w
                pivotI_ymax = match[0][0][1] + half_size_w
                match_xmin = match[0][1][0] - half_size_w
                match_xmax = match[0][1][0] + half_size_w
                match_ymin = match[0][1][1] - half_size_w
                match_ymax = match[0][1][1] + half_size_w
                # Clamp the target window to the image, shifting the source
                # window by the same amount so both keep the same size.
                # NOTE(review): the elif means x- and y-overflow are never
                # both corrected in one pass -- preserved as in the original.
                if pivotI_xmin < 0:
                    match_xmin -= pivotI_xmin
                    pivotI_xmin = 0
                elif pivotI_ymin < 0:
                    match_ymin -= pivotI_ymin
                    pivotI_ymin = 0
                if pivotI_xmax > max_x_shape:
                    match_xmax = match_xmax - pivotI_xmax + max_x_shape
                    pivotI_xmax = max_x_shape
                elif pivotI_ymax > max_y_shape:
                    match_ymax = match_ymax - pivotI_ymax + max_y_shape
                    pivotI_ymax = max_y_shape
                # Copy the best-matching source patch onto the target window
                # and mark those pixels as solved.
                newImg[pivotI_xmin:pivotI_xmax, pivotI_ymin:pivotI_ymax] = newImg[match_xmin:match_xmax, match_ymin:match_ymax]
                masknew[pivotI_xmin:pivotI_xmax, pivotI_ymin:pivotI_ymax] = 0
                # Re-derive and re-clamp the same bounds before propagating
                # the confidence values (kept exactly as the original flow).
                pivotI_xmin = match[0][0][0] - half_size_w
                pivotI_xmax = match[0][0][0] + half_size_w
                pivotI_ymin = match[0][0][1] - half_size_w
                pivotI_ymax = match[0][0][1] + half_size_w
                match_xmin = match[0][1][0] - half_size_w
                match_xmax = match[0][1][0] + half_size_w
                match_ymin = match[0][1][1] - half_size_w
                match_ymax = match[0][1][1] + half_size_w
                if pivotI_xmin < 0:
                    match_xmin -= pivotI_xmin
                    pivotI_xmin = 0
                elif pivotI_ymin < 0:
                    match_ymin -= pivotI_ymin
                    pivotI_ymin = 0
                if pivotI_xmax > max_x_shape:
                    match_xmax = match_xmax - pivotI_xmax + max_x_shape
                    pivotI_xmax = max_x_shape
                elif pivotI_ymax > max_y_shape:
                    match_ymax = match_ymax - pivotI_ymax + max_y_shape
                    pivotI_ymax = max_y_shape
                confidence[pivotI_xmin:pivotI_xmax, pivotI_ymin:pivotI_ymax] = confidence[match_xmin:match_xmax, match_ymin:match_ymax]
            print("--- %s seconds ---" % (time.time() - start_time))
            '''
            plt.figure(figsize=(12,6))
            io.imshow(newImg, cmap='gray')
            plt.show()
            plt.title('Superimage')
            '''
            io.imsave(output_path + output_name[imi], newImg)
6738,
11593,
37443,
834,
1330,
7297,
1303,
8820,
434,
428,
611,
1262,
11361,
362,
13,
22,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
629,
541,
88,
1330,
21739,
198,
6738,
629,
541,
88,
13,
358,
9060,
1330,
31986,
31562,
62,
24455,... | 1.809135 | 3,678 |
from dry_rest_permissions.generics import DRYPermissions
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.mixins import ListModelMixin
from rest_framework.permissions import IsAuthenticated
from care.users.models import User
from rest_framework.viewsets import GenericViewSet
from care.facility.api.serializers.patient_search import PatientScopedSearchSerializer
from care.facility.models import PatientSearch
from django_filters import rest_framework as filters
| [
6738,
5894,
62,
2118,
62,
525,
8481,
13,
8612,
873,
1330,
10560,
56,
5990,
8481,
198,
6738,
1334,
62,
30604,
13,
12501,
273,
2024,
1330,
2223,
198,
6738,
1334,
62,
30604,
13,
8612,
873,
1330,
651,
62,
15252,
62,
273,
62,
26429,
198,... | 3.870504 | 139 |
import base64
from io import BytesIO
import numpy as np
from PIL import Image, ImageOps
| [
11748,
2779,
2414,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
41472,
198
] | 3.384615 | 26 |
import random
import numpy as np
import os
import copy
import time
import dice_battle_seq as dice_seq
import dice_battle_sim as dice_sim
import print_dice
# ANSI/VT100 SGR escape sequences used to colour terminal output.
# (The ";6" parameter presumably enables an extra SGR attribute alongside
# the colour -- TODO confirm against the terminal's SGR support.)
bold = '\x1b[;1m'
blue = '\x1b[34;6m'
green = '\x1b[32;6m'
red = '\x1b[31;6m'
reset = '\x1b[m'
# Entry point: main() is defined earlier in this file (not visible in this
# chunk).  NOTE(review): called unconditionally on import -- consider a
# __main__ guard.
main()
| [
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
4866,
198,
11748,
640,
198,
11748,
17963,
62,
38471,
62,
41068,
355,
17963,
62,
41068,
198,
11748,
17963,
62,
38471,
62,
14323,
355,
17963,
62,
14323,
198,
... | 2.222222 | 117 |
#!/bin/python3
import os
import sys
import soco
from Xlib import X, display
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GObject, GLib, Gdk
import zonesPage
from zonesPage import ZonesPage, Zones
from MusicPage import MusicPage
from MusicPlayingPage import MusicPlayingPage
from MusicAlbumArtPage import MusicAlbumArtPage
from QueuePage import QueuePage
from musicLibraryPage import MusicLibraryPage
from mediaListItemsPage import MediaListItemsPage
from mediaListArtistsPage import MediaListArtistsPage
from mediaListAlbumsPage import MediaListAlbumsPage
from systemSettingsPage import SystemSettingsPage
from Zone import Zone
from threading import Thread, Event
import time
from mediaListTracksPage import MediaListTracksPage
from zoneListener import ZoneListener
from I2C import I2CListener, CRi2c
import I2C
import imageManager
import CSS
# Module-level handle for the volume dialog; None until one is created
# (presumably by the UI code not visible in this chunk -- verify).
volumeDialog = None
# self.pageInView.show()
# self.pageInView.show()
###############################################################################
###############################################################################
###############################################################################
# if self.eventThread.is_alive():
# self.eventThread.join()
try:
    # Log the working directory and argument count for on-device debugging.
    print("CWD: ", os.getcwd())
    # Need a better way to inform the CR101 code on the path of its
    # resources, such as images...
    os.chdir('/home/pi/CR101py/')
    print("Argv: ", len(sys.argv))
    hideUI = False
    hideDecorations = False
    # Optional CLI flags: -hui hides the UI, -hdec hides window decorations.
    for a in sys.argv:
        if a == "-hui":
            hideUI = True
        elif a == "-hdec":
            hideDecorations = True
    # Apply the global CSS, build the application window (PyApp is defined
    # elsewhere in this file) and enter the GTK main loop.
    CSS.mainStyle()
    app = PyApp(hideUI, hideDecorations)
    Gtk.main()
except KeyboardInterrupt:
    # Ctrl-C: release the I2C bus before exiting.
    app.i2c.Close()
| [
2,
48443,
8800,
14,
29412,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
1307,
78,
198,
6738,
1395,
8019,
1330,
1395,
11,
3359,
198,
11748,
308,
72,
198,
12397,
13,
46115,
62,
9641,
10786,
38,
30488,
3256,
705,
18,
13,
15,
1... | 3.229508 | 549 |
"""
Konversi html ke image (PNG atau JPEG).
"""
from requests import get as http_get
from io import BytesIO
from imgkit import from_url, from_string
from PIL import Image, ImageFile
from .tools import get_base_url, convert_static_link_to_absolute
# Raise PIL's encoder block size to 1 MiB -- presumably so saving large
# images (e.g. optimized JPEGs) does not fail; TODO confirm against the
# Pillow ImageFile documentation.
ImageFile.MAXBLOCK = 2 ** 20
# Output formats supported by the converter.  Set literal instead of
# set([...]) -- same contents, idiomatic construction (flake8 C405).
IMAGE_FORMATS = {
    "PNG",
    "JPEG",
}
| [
37811,
198,
42,
261,
690,
72,
27711,
885,
2939,
357,
47,
10503,
379,
559,
48561,
737,
198,
37811,
198,
198,
6738,
7007,
1330,
651,
355,
2638,
62,
1136,
198,
198,
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
33705,
15813,
1330,
422,... | 2.536765 | 136 |
from __future__ import unicode_literals
from django.db import connections
from django.db.models import sql
from django.db.models.query import RawQuerySet
from sqlserver_ado.dbapi import FetchFailedError
# Public names exported by ``from <module> import *``.
__all__ = [
    'RawStoredProcedureQuery',
    'RawStoredProcedureQuerySet',
]
class RawStoredProcedureQuery(sql.RawQuery):
    """A single raw SQL query that invokes a stored procedure."""

    def _execute_query(self):
        """Run the query via ``callproc`` rather than ``execute``.

        ``self.sql`` holds the stored-procedure name and ``self.params``
        its arguments; the open cursor is kept on ``self.cursor`` so the
        base class can consume the results.
        """
        connection = connections[self.using]
        self.cursor = connection.cursor()
        self.cursor.callproc(self.sql, self.params)
class RawStoredProcedureQuerySet(RawQuerySet):
    """Iterator converting stored-procedure results into model instances.

    ``raw_query`` should only be the name of the stored procedure.
    """

    @property
    def columns(self):
        """Model field names in the order they appear in the query results.

        Computed once and cached on ``self._columns``.  When the stored
        procedure returns no result set, ``get_columns()`` raises
        ``TypeError`` ("'NoneType' object is not iterable"); in that case
        the model's primary-key column is used instead.  Any names listed
        in ``self.translations`` are then renamed in place; translations
        for columns that are not present are ignored.
        """
        if not hasattr(self, '_columns'):
            try:
                self._columns = self.query.get_columns()
            except TypeError:
                # No result set -> no cursor description; fall back to the
                # model's pk column.
                self._columns = [self.model._meta.pk.db_column]
            for query_name, model_name in self.translations.items():
                if query_name in self._columns:
                    position = self._columns.index(query_name)
                    self._columns[position] = model_name
        return self._columns
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
201,
198,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
8787,
201,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
44161,
201,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,... | 2.281609 | 870 |
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
# Two-letter USPS codes for the US states, DC, and outlying territories /
# freely associated states (AS, FM, GU, MH, MP, PR, PW, VI).
STATES = [
    "AL", "AK", "AS", "AZ", "AR", "CA", "CO", "CT", "DE", "DC",
    "FM", "FL", "GA", "GU", "HI", "ID", "IL", "IN", "IA", "KS",
    "KY", "LA", "ME", "MH", "MD", "MA", "MI", "MN", "MS", "MO",
    "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND", "MP",
    "OH", "OK", "OR", "PW", "PA", "PR", "RI", "SC", "SD", "TN",
    "TX", "UT", "VT", "VI", "VA", "WA", "WV", "WI", "WY",
]
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
15881,
88,
198,
11748,
33918,
198,
198,
6738,
7064,
13,
23814,
1330,
2269,
13210,
1559,
12727,
7449,
198,
198,
2257,
29462,
796,
685,
198,
220,
220,
220,
366,
184... | 1.533917 | 457 |
"""
Copyright(c) 2022 SoftFish
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# c.py: ANSI Escape Sequence colors
# 'constants' -- feel free to change at runtime ;P
# SGR "reset": clears all active attributes (colour, bold, ...) back to the
# terminal default.
RESET = "\x1b[0m"
# ANSI Colors
# RGB6*6*6 colors
# 24-bit color in the terminal!
# 0x bg rr gg bb
# use c24 -> it will handle backwards-compat
# Reference 8-colour implementation, kept commented out for documentation:
#def c8(color):
#    bg = color >> 12 & 0x01
#    r = color >> 8 & 0x07
#    g = color >> 4 & 0x07
#    b = color >> 0 & 0x07
#    print (r, g, b, bg)
#    return rgb8(r if r < 6 else 5, g if g < 6 else 5, b if b < 6 else 5, bg)
| [
37811,
198,
15269,
7,
66,
8,
33160,
8297,
39428,
198,
198,
5990,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
597,
1048,
16727,
257,
4866,
286,
428,
3788,
290,
3917,
10314,
3696,
7,
1169,
366,
25423,
12340,
284,
1730,
287,
... | 3.295259 | 464 |
""" CS206 Spring 2017 ludobots -- Project Phototaxis
https://www.reddit.com/r/ludobots/wiki/pyrosim/phototaxis
"""
import pyrosim
import constants as c
from robot import ROBOT
import numpy as np
import random
import math
| [
37811,
9429,
22136,
8225,
2177,
300,
463,
672,
1747,
1377,
4935,
5919,
313,
22704,
198,
220,
220,
220,
3740,
1378,
2503,
13,
10748,
13,
785,
14,
81,
14,
75,
463,
672,
1747,
14,
15466,
14,
9078,
4951,
320,
14,
38611,
313,
22704,
198,... | 2.876543 | 81 |
import numpy as np
# Make heatmaps using the utility functions from the centernet repo
# Wrapped heatmap function
| [
11748,
299,
32152,
355,
45941,
198,
198,
2,
6889,
4894,
31803,
1262,
262,
10361,
5499,
422,
262,
1247,
1142,
316,
29924,
198,
198,
2,
27323,
1496,
4894,
8899,
2163,
198
] | 3.866667 | 30 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instance commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from googlecloudsdk.api_lib.sql import constants
from googlecloudsdk.api_lib.sql import instance_prop_reducers as reducers
from googlecloudsdk.api_lib.sql import instances as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.util.args import labels_util
# Release track assumed when a caller does not pass one explicitly.
DEFAULT_RELEASE_TRACK = base.ReleaseTrack.GA
# PD = Persistent Disk. This is prefixed to all storage type payloads.
STORAGE_TYPE_PREFIX = 'PD_'
class _BaseInstances(object):
  """Common utility functions for sql instance commands."""

  @classmethod
  def _ConstructBaseSettingsFromArgs(cls,
                                     sql_messages,
                                     args,
                                     instance=None,
                                     release_track=DEFAULT_RELEASE_TRACK):
    """Constructs instance settings from the command line arguments.
    Args:
      sql_messages: module, The messages module that should be used.
      args: argparse.Namespace, The arguments that this command was invoked
          with.
      instance: sql_messages.DatabaseInstance, The original instance, for
          settings that depend on the previous state.
      release_track: base.ReleaseTrack, the release track that this was run
          under.
    Returns:
      A settings object representing the instance settings.
    Raises:
      ToolException: An error other than http error occurred while executing the
          command.
    """
    settings = sql_messages.Settings(
        tier=reducers.MachineType(instance, args.tier, args.memory, args.cpu),
        pricingPlan=args.pricing_plan,
        replicationType=args.replication,
        activationPolicy=args.activation_policy)
    if args.authorized_gae_apps:
      settings.authorizedGaeApplications = args.authorized_gae_apps
    # Only build an IpConfiguration when at least one related flag was given.
    if any([
        args.assign_ip is not None, args.require_ssl is not None,
        args.authorized_networks
    ]):
      settings.ipConfiguration = sql_messages.IpConfiguration()
      if args.assign_ip is not None:
        cls.SetIpConfigurationEnabled(settings, args.assign_ip)
      if args.authorized_networks:
        cls.SetAuthorizedNetworks(settings, args.authorized_networks,
                                  sql_messages.AclEntry)
      if args.require_ssl is not None:
        settings.ipConfiguration.requireSsl = args.require_ssl
    if any([args.follow_gae_app, args.gce_zone]):
      settings.locationPreference = sql_messages.LocationPreference(
          followGaeApplication=args.follow_gae_app, zone=args.gce_zone)
    if args.storage_size:
      # args.storage_size is in bytes; the API expects whole gigabytes.
      settings.dataDiskSizeGb = int(args.storage_size / constants.BYTES_TO_GB)
    if args.storage_auto_increase is not None:
      settings.storageAutoResize = args.storage_auto_increase
    # BETA args.
    if release_track == base.ReleaseTrack.BETA:
      if args.IsSpecified('storage_auto_increase_limit'):
        # Resize limit should be settable if the original instance has resize
        # turned on, or if the instance to be created has resize flag.
        if (instance and instance.settings.storageAutoResize) or (
            args.storage_auto_increase):
          # If the limit is set to None, we want it to be set to 0. This is a
          # backend requirement.
          settings.storageAutoResizeLimit = (args.storage_auto_increase_limit or
                                             0)
        else:
          raise exceptions.RequiredArgumentException(
              '--storage-auto-increase', 'To set the storage capacity limit '
              'using [--storage-auto-increase-limit], '
              '[--storage-auto-increase] must be enabled.')
      if args.IsSpecified('availability_type'):
        settings.availabilityType = args.availability_type
    return settings

  @classmethod
  def _ConstructCreateSettingsFromArgs(cls,
                                       sql_messages,
                                       args,
                                       instance=None,
                                       release_track=DEFAULT_RELEASE_TRACK):
    """Constructs create settings object from base settings and args."""
    original_settings = instance.settings if instance else None
    settings = cls._ConstructBaseSettingsFromArgs(sql_messages, args, instance,
                                                  release_track)
    if args.on_premises_host_port:
      # On-premises sources cannot be combined with SSL enforcement.
      if args.require_ssl:
        raise exceptions.ToolException('Argument --on-premises-host-port not '
                                       'allowed with --require_ssl')
      settings.onPremisesConfiguration = sql_messages.OnPremisesConfiguration(
          hostPort=args.on_premises_host_port)
    backup_configuration = (reducers.BackupConfiguration(
        sql_messages,
        instance,
        backup=args.backup,
        backup_start_time=args.backup_start_time,
        enable_bin_log=args.enable_bin_log))
    if backup_configuration:
      cls.AddBackupConfigToSettings(settings, backup_configuration)
    settings.databaseFlags = (reducers.DatabaseFlags(
        sql_messages, original_settings, database_flags=args.database_flags))
    settings.maintenanceWindow = (reducers.MaintenanceWindow(
        sql_messages,
        instance,
        maintenance_release_channel=args.maintenance_release_channel,
        maintenance_window_day=args.maintenance_window_day,
        maintenance_window_hour=args.maintenance_window_hour))
    if args.storage_type:
      settings.dataDiskType = STORAGE_TYPE_PREFIX + args.storage_type
    # BETA args.
    if release_track == base.ReleaseTrack.BETA:
      settings.userLabels = labels_util.ParseCreateArgs(
          args, sql_messages.Settings.UserLabelsValue)
      # Check that availability type is only specified if this is Postgres.
      if (args.IsSpecified('availability_type') and
          not api_util.InstancesV1Beta4.IsPostgresDatabaseVersion(
              args.database_version)):
        raise exceptions.InvalidArgumentException(
            '--availability-type', 'Cannot set [--availability-type] on a '
            'non-Postgres instance.')
    return settings

  @classmethod
  def _ConstructPatchSettingsFromArgs(cls,
                                      sql_messages,
                                      args,
                                      instance,
                                      release_track=DEFAULT_RELEASE_TRACK):
    """Constructs create settings object from base settings and args."""
    original_settings = instance.settings
    settings = cls._ConstructBaseSettingsFromArgs(sql_messages, args, instance,
                                                  release_track)
    if args.clear_gae_apps:
      settings.authorizedGaeApplications = []
    if any([args.follow_gae_app, args.gce_zone]):
      settings.locationPreference = sql_messages.LocationPreference(
          followGaeApplication=args.follow_gae_app, zone=args.gce_zone)
    if args.clear_authorized_networks:
      # The base settings may not have created an ipConfiguration yet.
      if not settings.ipConfiguration:
        settings.ipConfiguration = sql_messages.IpConfiguration()
      settings.ipConfiguration.authorizedNetworks = []
    if args.enable_database_replication is not None:
      settings.databaseReplicationEnabled = args.enable_database_replication
    backup_configuration = (reducers.BackupConfiguration(
        sql_messages,
        instance,
        no_backup=args.no_backup,
        backup_start_time=args.backup_start_time,
        enable_bin_log=args.enable_bin_log))
    if backup_configuration:
      cls.AddBackupConfigToSettings(settings, backup_configuration)
    settings.databaseFlags = (reducers.DatabaseFlags(
        sql_messages,
        original_settings,
        database_flags=args.database_flags,
        clear_database_flags=args.clear_database_flags))
    settings.maintenanceWindow = (reducers.MaintenanceWindow(
        sql_messages,
        instance,
        maintenance_release_channel=args.maintenance_release_channel,
        maintenance_window_day=args.maintenance_window_day,
        maintenance_window_hour=args.maintenance_window_hour))
    # BETA args.
    if release_track == base.ReleaseTrack.BETA:
      # Only send userLabels when the diff against the existing labels is
      # non-empty.
      labels_diff = labels_util.ExplicitNullificationDiff.FromUpdateArgs(args)
      labels_update = labels_diff.Apply(
          sql_messages.Settings.UserLabelsValue, instance.settings.userLabels)
      if labels_update.needs_update:
        settings.userLabels = labels_update.labels
    return settings

  @classmethod
  def _ConstructBaseInstanceFromArgs(cls,
                                     sql_messages,
                                     args,
                                     original=None,
                                     instance_ref=None,
                                     release_track=DEFAULT_RELEASE_TRACK):
    """Construct a Cloud SQL instance from command line args.
    Args:
      sql_messages: module, The messages module that should be used.
      args: argparse.Namespace, The CLI arg namespace.
      original: sql_messages.DatabaseInstance, The original instance, if some of
          it might be used to fill fields in the new one.
      instance_ref: reference to DatabaseInstance object, used to fill project
          and instance information.
      release_track: base.ReleaseTrack, the release track that this was run
          under.
    Returns:
      sql_messages.DatabaseInstance, The constructed (and possibly partial)
      database instance.
    Raises:
      ToolException: An error other than http error occurred while executing the
          command.
    """
    del args, original, release_track  # Currently unused in base function.
    instance_resource = sql_messages.DatabaseInstance()
    if instance_ref:
      cls.SetProjectAndInstanceFromRef(instance_resource, instance_ref)
    return instance_resource

  @classmethod
  def ConstructCreateInstanceFromArgs(cls,
                                      sql_messages,
                                      args,
                                      original=None,
                                      instance_ref=None,
                                      release_track=DEFAULT_RELEASE_TRACK):
    """Constructs Instance for create request from base instance and args."""
    instance_resource = cls._ConstructBaseInstanceFromArgs(
        sql_messages, args, original, instance_ref)
    instance_resource.region = args.region
    instance_resource.databaseVersion = args.database_version
    instance_resource.masterInstanceName = args.master_instance_name
    instance_resource.settings = cls._ConstructCreateSettingsFromArgs(
        sql_messages, args, original, release_track)
    if args.master_instance_name:
      # This is a read replica: replication from the master is asynchronous,
      # and a FAILOVER replica is additionally marked as a failover target.
      replication = 'ASYNCHRONOUS'
      if args.replica_type == 'FAILOVER':
        instance_resource.replicaConfiguration = (
            sql_messages.ReplicaConfiguration(failoverTarget=True))
    else:
      replication = 'SYNCHRONOUS'
    # An explicit --replication flag (already applied in the base settings)
    # takes precedence over the derived default.
    if not args.replication:
      instance_resource.settings.replicationType = replication
    if args.failover_replica_name:
      instance_resource.failoverReplica = (
          sql_messages.DatabaseInstance.FailoverReplicaValue(
              name=args.failover_replica_name))
    return instance_resource

  @classmethod
  def ConstructPatchInstanceFromArgs(cls,
                                     sql_messages,
                                     args,
                                     original,
                                     instance_ref=None,
                                     release_track=DEFAULT_RELEASE_TRACK):
    """Constructs Instance for patch request from base instance and args."""
    instance_resource = cls._ConstructBaseInstanceFromArgs(
        sql_messages, args, original, instance_ref)
    instance_resource.settings = cls._ConstructPatchSettingsFromArgs(
        sql_messages, args, original, release_track)
    return instance_resource
class InstancesV1Beta3(_BaseInstances):
"""Common utility functions for sql instances V1Beta3."""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
class InstancesV1Beta4(_BaseInstances):
"""Common utility functions for sql instances V1Beta4."""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
| [
2,
15069,
2177,
3012,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
... | 2.468302 | 5,300 |
# 🚨 Don't change the code below 👇
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# 🚨 Don't change the code above 👆

# Combine both names and uppercase them so letter counting is
# case-insensitive.
merged_name = (name1 + name2).upper()
# First digit: total occurrences of the letters T, R, U, E; second digit:
# total occurrences of L, O, V, E.  sum() over a generator -- no need to
# build intermediate lists (flake8 C419).
firs_d = sum(merged_name.count(l_f) for l_f in 'TRUE')
second_d = sum(merged_name.count(l_f) for l_f in 'LOVE')
love_score = int(f'{firs_d}{second_d}')
if love_score < 10 or love_score > 90:
    msg = f'Your score is {love_score}, you go together like coke and mentos.'
elif 40 <= love_score <= 50:
    msg = f'Your score is {love_score}, you are alright together.'
else:
    msg = f'Your score is {love_score}.'
print(msg)
#First *fork* your copy. Then copy-paste your code below this line 👇
#Finally click "Run" to execute the tests
#SOLUTION
# combined_names = name1 + name2
# lower_names = combined_names.lower()
# t = lower_names.count("t")
# r = lower_names.count("r")
# u = lower_names.count("u")
# e = lower_names.count("e")
# first_digit = t + r + u + e
#
# l = lower_names.count("l")
# o = lower_names.count("o")
# v = lower_names.count("v")
# e = lower_names.count("e")
# second_digit = l + o + v + e
#
# score = int(str(first_digit) + str(second_digit))
#
# if (score < 10) or (score > 90):
# print(f"Your score is {score}, you go together like coke and mentos.")
# elif (score >= 40) and (score <= 50):
# print(f"Your score is {score}, you are alright together.")
# else:
# print(f"Your score is {score}.")
#SOLUTION
#Write your code above this line 👆
# 🚨 Do NOT modify the code below this line 👇
with open('testing_copy.py', 'w') as file:
file.write('def test_func():\n')
with open('main.py', 'r') as original:
f2 = original.readlines()[0:60]
for x in f2:
file.write(" " + x)
import testing_copy
import unittest
from unittest.mock import patch
from io import StringIO
import os
print('\n\n\n.\n.\n.')
print('Checking if your print statements match the instructions. \nFor "Mario" and "Princess Peach" your program should print this line *exactly*:\n')
print('Your score is 43, you are alright together.\n')
print('\nRunning some tests on your code with different name combinations:')
print('.\n.\n.')
unittest.main(verbosity=1, exit=False)
os.remove('testing_copy.py')
| [
2,
12520,
248,
101,
2094,
470,
1487,
262,
2438,
2174,
50169,
229,
198,
4798,
7203,
14618,
284,
262,
5896,
43597,
2474,
8,
198,
3672,
16,
796,
5128,
7203,
2061,
318,
534,
1438,
30,
3467,
77,
4943,
198,
3672,
17,
796,
5128,
7203,
2061... | 2.653318 | 874 |
#!/usr/bin/python
import csv
import os
import tempfile
import time
if __name__ == "__main__":
	# Entry point: run the pipeline with no argument.
	# NOTE(review): `bigquery` is not defined in this excerpt — presumably
	# defined elsewhere in the original file; confirm before running.
	bigquery(None)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
11748,
640,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
197,
14261,
22766,
7,
14202,
8,
19... | 2.666667 | 42 |
import argparse
import numpy as np
from sklearn.model_selection import KFold


def _write_pair_lines(txt_path, split, id_values, dataset):
    """Write one "<image_path> <mask_path>" line per id to *txt_path*.

    Paths follow the layout datasets/<dataset>/<split>/{images,masks}/<id>.{jpg,png}.
    Matches the original output exactly: newline-separated, no trailing newline.
    """
    with open(txt_path, 'w') as txt_file:
        lines = []
        for idx in id_values:
            image_path = 'datasets/{}/{}/images/{}.jpg'.format(dataset, split, idx)
            mask_path = 'datasets/{}/{}/masks/{}.png'.format(dataset, split, idx)
            lines.append('{} {}'.format(image_path, mask_path))
        txt_file.write('\n'.join(lines))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # BUG FIX: required expects a bool; the string 'true' was merely truthy.
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--num_folds', type=int, required=True)
    args = parser.parse_args()

    ######################################## Train and Valid ########################################
    # Read the full id list of the training split and materialize all
    # (image, mask) pairs once.
    dataset_ids = 'datasets/{}/train/ids.txt'.format(args.dataset)
    with open(dataset_ids, 'r') as txt_file:
        ids = txt_file.read().splitlines()
    _write_pair_lines('datasets/{}/train/ids_all.txt'.format(args.dataset),
                      'train', ids, args.dataset)

    # Shuffle and split the training ids into num_folds train/valid folds.
    ids = np.array(ids)
    kf = KFold(n_splits=args.num_folds, shuffle=True)
    for cont, (train, valid) in enumerate(kf.split(ids)):
        _write_pair_lines('datasets/{}/train/train_ids{}.txt'.format(args.dataset, cont),
                          'train', ids[train], args.dataset)
        _write_pair_lines('datasets/{}/train/valid_ids{}.txt'.format(args.dataset, cont),
                          'train', ids[valid], args.dataset)

    ######################################## Test ########################################
    # The held-out "valid" directory is used as the test set.
    dataset_ids = 'datasets/{}/valid/ids.txt'.format(args.dataset)
    with open(dataset_ids, 'r') as txt_file:
        ids = np.array(txt_file.read().splitlines())
    _write_pair_lines('datasets/{}/valid/test_ids.txt'.format(args.dataset),
                      'valid', ids, args.dataset)
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
509,
37,
727,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
30751,
796,
1822,
29572,
... | 2.044053 | 1,589 |
# -*- coding: utf-8 -*-
"""
a pytorch tensor is conceptually similar to numpy array
tensor is an n-dimensional array
to run pytorch tensor on GPU, just need to cast it to a new datatype

Manual two-layer fully-connected network (in -> hidden -> ReLU -> out)
trained on random data for 500 iterations.
"""
import torch
# CPU float tensors by default; swap the dtype below for CUDA.
dtype = torch.FloatTensor
## for GPU ## dtype = torch.cuda.FloatTensor
## initializing dimensions
BatchSize, InDimension, HiddenDimension, OutDimension = 64, 1000, 100, 10
## randomly initialize input and output data
in_data = torch.randn(BatchSize, InDimension).type(dtype)
out_data = torch.randn(BatchSize, OutDimension).type(dtype)
## randomly initialize weights
weight01 = torch.randn(InDimension, HiddenDimension).type(dtype)
weight02 = torch.randn(HiddenDimension, OutDimension).type(dtype)
learning_rate = 1e-6
# NOTE(review): check_loss, calculate_gradients and recalculate_weight are
# not defined in this excerpt — presumably defined elsewhere in the original
# file (loss reporting, manual backprop, and SGD step respectively); confirm.
for tensor in range(500):
    ## forward pass: compute predicted out_data
    hid = in_data.mm(weight01)
    # ReLU implemented via clamp at zero.
    hidden_relu = hid.clamp(min=0)
    out_data_prediction = hidden_relu.mm(weight02)
    check_loss(out_data_prediction, out_data)
    gradient_weight01, gradient_weight02 = calculate_gradients(hid, hidden_relu, out_data_prediction)
    ## update weights
    weight01 = recalculate_weight(weight01, gradient_weight01)
    weight02 = recalculate_weight(weight02, gradient_weight02)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
64,
12972,
13165,
354,
11192,
273,
318,
3721,
935,
2092,
284,
299,
32152,
7177,
198,
83,
22854,
318,
281,
299,
12,
19577,
7177,
198,
1462,
1057,
12972,
13165... | 2.930788 | 419 |
# Read the promotion inputs from stdin, one integer per line:
# number of active days, sweets bought per day, and the per-sweet
# counts of cakes, waffles and pancakes.
active_days = int(input())
sweets_per_day = int(input())
cakes = int(input())
waffles = int(input())
pancakes = int(input())
# Unit prices: cake 45.00, waffle 5.80, pancake 3.20.
daily_cost = (cakes * 45 + waffles * 5.80 + pancakes * 3.20) * sweets_per_day
period_total = daily_cost * active_days
# 12.5% of the grand total is deducted (products used).
net_total = period_total - 0.125 * period_total
print(f'{net_total:.2f}')
12545,
13739,
796,
493,
7,
15414,
28955,
198,
9127,
40783,
1039,
796,
493,
7,
15414,
28955,
198,
9127,
34,
1124,
796,
493,
7,
15414,
28955,
198,
9127,
54,
48501,
796,
493,
7,
15414,
28955,
198,
9127,
47,
1192,
1124,
796,
493,
7,
154... | 2.768212 | 151 |
"""
Write a program that takes an array denoting the daily stock price, and
returns the maximum profit that could be made by buying and then selling one
share of that stock. - [EPI: 5.6].
"""
#==============================================================================
| [
37811,
198,
16594,
257,
1430,
326,
2753,
281,
7177,
2853,
10720,
262,
4445,
4283,
2756,
11,
290,
198,
7783,
82,
262,
5415,
7630,
326,
714,
307,
925,
416,
7067,
290,
788,
6301,
530,
198,
20077,
286,
326,
4283,
13,
532,
685,
8905,
40,... | 4.807018 | 57 |
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import python libs
from __future__ import absolute_import
import os
import grp
import pwd
from xml.dom import minidom
import platform
import socket
# Import salt libs
import salt.utils
from salt.modules.inspectlib.exceptions import InspectorKiwiProcessorException
# Import third party libs
try:
from lxml import etree
except ImportError:
from salt._compat import ElementTree as etree
class KiwiExporter(object):
    '''
    Exports system description as Kiwi configuration.

    The description is loaded attribute-by-attribute onto ``self._data``
    (see :meth:`load`) and serialized to a Kiwi ``config.xml`` document by
    :meth:`export`. Relies on module-level ``etree`` (lxml, with an
    ElementTree fallback), salt ``__grains__``/``__salt__`` dunders, and the
    stdlib ``grp``/``pwd``/``socket``/``platform`` modules imported above.
    '''
    def load(self, **descr):
        '''
        Load data by keys.

        Each keyword becomes an attribute on ``self._data`` (e.g.
        ``system=...``, ``software=...``).

        :param descr: description sections keyed by name
        :return: self, for chaining
        '''
        for obj, data in descr.items():
            setattr(self._data, obj, data)
        return self
    def export(self, name):
        '''
        Export to the Kiwi config.xml as text.

        Builds the full document (description, preferences, repositories,
        users, packages), then pretty-prints it and strips blank lines.

        :param name: image name recorded in the root element
        :return: XML text of the Kiwi configuration
        '''
        self.name = name
        root = self._create_doc()
        self._set_description(root)
        self._set_preferences(root)
        self._set_repositories(root)
        self._set_users(root)
        self._set_packages(root)
        # NOTE(review): pretty_print is an lxml-only keyword; with the
        # ElementTree fallback import this call would fail — confirm lxml
        # is effectively required here.
        return '\n'.join([line for line in minidom.parseString(
            etree.tostring(root, encoding='UTF-8', pretty_print=True)).toprettyxml(indent="  ").split("\n")
                          if line.strip()])
    def _get_package_manager(self):
        '''
        Get package manager.

        Maps the os_family grain to a package manager name; raises for
        anything outside Kali/Debian, Suse and redhat.

        :return: 'apt-get', 'zypper' or 'yum'
        :raises InspectorKiwiProcessorException: on unsupported platforms
        '''
        ret = None
        if self.__grains__.get('os_family') in ('Kali', 'Debian'):
            ret = 'apt-get'
        elif self.__grains__.get('os_family', '') == 'Suse':
            ret = 'zypper'
        elif self.__grains__.get('os_family', '') == 'redhat':
            ret = 'yum'
        if ret is None:
            raise InspectorKiwiProcessorException('Unsupported platform: {0}'.format(self.__grains__.get('os_family')))
        return ret
    def _set_preferences(self, node):
        '''
        Set preferences.

        Emits the <preferences> element: package manager, version, and a
        vmx-type image whose filesystem is taken from the first /dev disk
        found in the system description (default ext3).

        :param node: parent XML element
        :return: the created <preferences> element
        '''
        pref = etree.SubElement(node, 'preferences')
        pacman = etree.SubElement(pref, 'packagemanager')
        pacman.text = self._get_package_manager()
        p_version = etree.SubElement(pref, 'version')
        p_version.text = '0.0.1'
        p_type = etree.SubElement(pref, 'type')
        p_type.set('image', 'vmx')
        # Use the filesystem of the first /dev* disk; stop at the first hit.
        for disk_id, disk_data in self._data.system.get('disks', {}).items():
            if disk_id.startswith('/dev'):
                p_type.set('filesystem', disk_data.get('type') or 'ext3')
                break
        p_type.set('installiso', 'true')
        p_type.set('boot', "vmxboot/suse-leap42.1")
        p_type.set('format', self.format)
        p_type.set('bootloader', 'grub2')
        # Timezone and hwclock are read live from the minion via __salt__.
        p_type.set('timezone', __salt__['timezone.get_zone']())
        p_type.set('hwclock', __salt__['timezone.get_hwclock']())
        return pref
    def _get_user_groups(self, user):
        '''
        Get user groups.

        Combines the supplementary groups that list the user with the
        user's primary group (resolved via pwd/grp).

        :param user: login name
        :return: list of group names
        '''
        return [g.gr_name for g in grp.getgrall()
                if user in g.gr_mem] + [grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name]
    def _set_users(self, node):
        '''
        Create existing local users.

        <users group="root">
          <user password="$1$wYJUgpM5$RXMMeASDc035eX.NbYWFl0" home="/root" name="root"/>
        </users>

        Reads /etc/shadow for real (non-locked) password hashes, then
        /etc/passwd for home directories, shells and groups.

        :param node: parent XML element
        :return: the created <users> element
        '''
        # Get real local users with the local passwords
        shadow = {}
        with salt.utils.fopen('/etc/shadow') as rfh:
            for sh_line in rfh.read().split(os.linesep):
                if sh_line.strip():
                    # NOTE(review): local `pwd` shadows the pwd module inside
                    # this method (harmless here, but worth renaming).
                    login, pwd = sh_line.split(":")[:2]
                    # Skip locked/disabled accounts ('!' or '*' prefix).
                    if pwd and pwd[0] not in '!*':
                        shadow[login] = {'p': pwd}
        with salt.utils.fopen('/etc/passwd') as rfh:
            for ps_line in rfh.read().split(os.linesep):
                if ps_line.strip():
                    ps_line = ps_line.strip().split(':')
                    if ps_line[0] in shadow:
                        # h=home dir, s=shell, g=group names
                        shadow[ps_line[0]]['h'] = ps_line[5]
                        shadow[ps_line[0]]['s'] = ps_line[6]
                        shadow[ps_line[0]]['g'] = self._get_user_groups(ps_line[0])
        users_groups = []
        users_node = etree.SubElement(node, 'users')
        for u_name, u_data in shadow.items():
            user_node = etree.SubElement(users_node, 'user')
            user_node.set('password', u_data['p'])
            user_node.set('home', u_data['h'])
            user_node.set('name', u_name)
            users_groups.extend(u_data['g'])
        # All users' groups are flattened into one comma-separated attribute.
        users_node.set('group', ','.join(users_groups))
        return users_node
    def _set_repositories(self, node):
        '''
        Create repositories.

        Emits one <repository> per enabled repo in the software description,
        with descending priority starting at 99. Debian-family repos carry
        alias + distribution; others carry their own alias (and type=yast2
        on Suse).

        :param node: parent XML element
        :return: None
        '''
        priority = 99
        for repo_id, repo_data in self._data.software.get('repositories', {}).items():
            # Some sources wrap the repo description in a one-element list.
            if type(repo_data) == list:
                repo_data = repo_data[0]
            if repo_data.get('enabled') or not repo_data.get('disabled'):  # RPM and Debian, respectively
                uri = repo_data.get('baseurl', repo_data.get('uri'))
                if not uri:
                    continue
                repo = etree.SubElement(node, 'repository')
                if self.__grains__.get('os_family') in ('Kali', 'Debian'):
                    repo.set('alias', repo_id)
                    repo.set('distribution', repo_data['dist'])
                else:
                    repo.set('alias', repo_data['alias'])
                    if self.__grains__.get('os_family', '') == 'Suse':
                        repo.set('type', 'yast2')  # TODO: Check for options!
                repo.set('priority', str(priority))
                source = etree.SubElement(repo, 'source')
                source.set('path', uri)  # RPM and Debian, respectively
                priority -= 1
    def _set_packages(self, node):
        '''
        Set packages and collections.

        Emits one <package> per installed package (sorted by name), and on
        Suse additionally one <namedCollection> per installed pattern.

        :param node: parent XML element
        :return: the created <packages> element
        '''
        pkgs = etree.SubElement(node, 'packages')
        for pkg_name, pkg_version in sorted(self._data.software.get('packages', {}).items()):
            pkg = etree.SubElement(pkgs, 'package')
            pkg.set('name', pkg_name)
        # Add collections (SUSE)
        if self.__grains__.get('os_family', '') == 'Suse':
            for ptn_id, ptn_data in self._data.software.get('patterns', {}).items():
                if ptn_data.get('installed'):
                    ptn = etree.SubElement(pkgs, 'namedCollection')
                    ptn.set('name', ptn_id)
        return pkgs
    def _set_description(self, node):
        '''
        Create a system description.

        Author/contact/specification are derived from the local hostname.

        :param node: parent XML element
        :return: the created <description> element
        '''
        hostname = socket.getfqdn() or platform.node()
        descr = etree.SubElement(node, 'description')
        author = etree.SubElement(descr, 'author')
        author.text = "salt.modules.node on {0}".format(hostname)
        contact = etree.SubElement(descr, 'contact')
        contact.text = 'root@{0}'.format(hostname)
        specs = etree.SubElement(descr, 'specification')
        specs.text = 'Rebuild of {0}, based on Salt inspection.'.format(hostname)
        return descr
    def _create_doc(self):
        '''
        Create document.

        :return: the <image> root element (schemaversion 6.3, named after
            the image passed to :meth:`export`)
        '''
        root = etree.Element('image')
        root.set('schemaversion', '6.3')
        root.set('name', self.name)
        return root
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
1584,
311,
19108,
11419,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407... | 2.056589 | 3,923 |
from gromozeka.backends.base import BackendAdapter, get_backend_factory
from gromozeka.backends.redis import RedisAioredisAdaptee
__all__ = ['BackendAdapter', 'RedisAioredisAdaptee', 'get_backend_factory']
| [
6738,
308,
398,
8590,
38001,
13,
1891,
2412,
13,
8692,
1330,
5157,
437,
47307,
11,
651,
62,
1891,
437,
62,
69,
9548,
198,
6738,
308,
398,
8590,
38001,
13,
1891,
2412,
13,
445,
271,
1330,
2297,
271,
32,
72,
1850,
271,
48003,
1453,
... | 2.797297 | 74 |
import logging
from .scriptable_strategy import ScriptableStrategy
| [
11748,
18931,
198,
198,
6738,
764,
12048,
540,
62,
2536,
4338,
1330,
12327,
540,
13290,
4338,
628
] | 4.058824 | 17 |
# -*- coding: utf-8 -*-
"""A4_Part_1_Preprocessing.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/112zmgKh5ZpSPwkUI7fki3vp0EcKmPH7h
# Drive
"""
# https://drive.google.com/file/d/19MIw3ZI-Z91NLTDMc8GI4OlIOwN_Vv-t/view?usp=sharing
# gdown https://drive.google.com/uc?id=19MIw3ZI-Z91NLTDMc8GI4OlIOwN_Vv-t
from google.colab import drive
drive.mount('/content/drive')
"""# Import statements"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import json
import datetime
import copy
from PIL import Image
from PIL import Image as im
import joblib
from sklearn.model_selection import train_test_split
# import math as Math
import random
import torch.optim
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import torchvision
import cv2
import joblib
# !pip install scipy==1.1.0
import os
import numpy as np
import h5py
import json
import torch
from scipy.misc import imread, imresize
from tqdm import tqdm
from collections import Counter
from random import seed, choice, sample
#Import libraries
import pandas as pd
import numpy as np
from google.colab import drive
import glob
import string
import nltk
import joblib
import pickle
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
# Fetch the NLTK resources used by the text-preprocessing helpers
# (lemmatizer data, tokenizer models, stopword lists).
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('stopwords')
# #Reading Files
# drive.mount('/content/drive')
"""# Saving and Loading functions """
# Saving and Loading models using joblib
"""# Paths """
# Final folder where all the data saved
# output_folder = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Method2/data/"
# NOTE(review): the Drive path above is commented out, so outputs land in the
# current working directory.
output_folder = "./"
"""# Data Downloading """
# # !wget https://drive.google.com/file/d/1lbqTV-u8xmZ3eBuQ4tUSjq0mOjaIIp_P
# !unzip "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Data/Dataset.zip" -d /content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Data/
# !du -sh "/content/drive/MyDrive/A4/Data/"
# !unzip "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Data/Dataset.zip" -d "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Data/myData/"
"""# Data Preprocessing
## Loading Data Files
"""
# IITD
# p = "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/"
# gmail
# Base path of the mounted Drive copy of the assignment data.
p = "/content/drive/MyDrive/A4/"
# !ls "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Data/myData/Data/Val"
# NOTE(review): `load` is not defined in this excerpt — presumably a pickle
# loader defined in the "Saving and Loading functions" section; confirm.
# f1/f2/f3 are per-split dicts of image-key -> list of caption strings.
f1 = load(p + "Data/myData/Data/Val/val_captions.pkl")
f2 = load(p + "Data/myData/Data/Test/test_captions.pkl")
f3 = load(p + "Data/myData/Data/Train/train_captions.pkl")
# """
# Creates input files for training, validation, and test data.
# :param image_folder: folder with downloaded images
# :param captions_per_image: number of captions to sample per image
# :param min_word_freq: words occuring less frequently than this threshold are binned as <unk>s
# :param output_folder: folder to save files
# :param max_len: don't sample captions longer than this length
# """
"""## word frequency counter """
# calculates the total word frequency for all train,test and val
word_freq = Counter()
for file in (f1,f2,f3):
for k in file.keys():
for sentence in file[k]:
word_freq.update(inputPreprocess(sentence))
# word_freq
"""## Creating a word map """
# mapping each word to a particular index
min_word_freq = 1
# Create word map
words = [w for w in word_freq.keys() if word_freq[w] > min_word_freq]
word_map = {k: v + 4 for v, k in enumerate(words)}
word_map['<start>'] = 0
word_map['<eos>'] = 1
word_map['<unk>'] = 2
word_map['<pad>'] = 3
word_map_reverse = {str(v + 4) : k for v, k in enumerate(words)}
word_map_reverse['0'] = '<start>'
word_map_reverse['1'] = '<eos>'
word_map_reverse['2'] = '<unk>'
word_map_reverse['3'] = '<pad>'
# word_map_reverse
"""## Train, Test, Val paths and captions """
# Read image paths and captions for each image
train_image_paths = []
train_image_captions = []
val_image_paths = []
val_image_captions = []
test_image_paths = []
test_image_captions = []
# Not giving absolute path but variable path or relative path here
image_folder = "Data"
base_folder = ["Val", "Test","Train"]
# With maximum length 50 we get 5 captions for each image in Train,Test,Val
max_len = 50
for idx,file in enumerate((f1,f2,f3)):
for k in file.keys():
path = os.path.join(image_folder,base_folder[idx],'Images/'+ k)
for sentence in file[k]:
if len(inputPreprocess(sentence)) <= max_len:
captions = inputPreprocess(sentence)
# if len(captions) == 0:
# continue
if idx == 2:
train_image_paths.append(path)
train_image_captions.append(captions)
elif idx == 0:
val_image_paths.append(path)
val_image_captions.append(captions)
elif idx == 1:
test_image_paths.append(path)
test_image_captions.append(captions)
# Got paths of all images and their array of captions for each Train, Test and Val
# check if lengths equal or not
# Sanity check
assert len(train_image_paths) == len(train_image_captions)
assert len(val_image_paths) == len(val_image_captions)
assert len(test_image_paths) == len(test_image_captions)
# remove half of the images and captions in training
train_image_paths = train_image_paths[:10000]
train_image_captions = train_image_captions[:10000]
len(train_image_paths)
train_image_paths[0]
"""## Saving word map """
# # # Save word map to a JSON
# with open(output_folder + '/word_dict.json', 'w') as f:
# json.dump(word_map, f)
save('./word_dict.json',word_map)
save('./word_dict_reverse.json',word_map_reverse)
"""## Saving Image, Captions, Caption lengths """
# Sample captions for each image, save images to HDF5 file, and captions and their lengths to JSON files
seed(123)
max_length = 50
train_image_captions = process_caption_tokens(train_image_captions, word_map, max_length)
val_image_captions = process_caption_tokens(val_image_captions, word_map, max_length)
test_image_captions = process_caption_tokens(test_image_captions, word_map, max_length)
# with open(output_folder + '/train_img_paths.json', 'w') as f:
#   json.dump(train_image_paths, f)
# with open(output_folder + '/val_img_paths.json', 'w') as f:
#   json.dump(val_image_paths, f)
# with open(output_folder + '/test_img_paths.json', 'w') as f:
#   json.dump(test_image_paths, f)
# with open(output_folder + '/train_captions.json', 'w') as f:
#   json.dump(train_image_captions, f)
# with open(output_folder + '/val_captions.json', 'w') as f:
#   json.dump(val_image_captions, f)
# with open(output_folder + '/test_captions.json', 'w') as f:
#   json.dump(test_image_captions, f)
# Persist the preprocessed path lists and encoded captions for each split.
save('./train_img_paths.json',train_image_paths)
save('./val_img_paths.json',val_image_paths)
save('./test_img_paths.json',test_image_paths)
save('./train_captions.json',train_image_captions)
save('./val_captions.json',val_image_captions)
save('./test_captions.json',test_image_captions)
# downloading all preprocessed files from collab
from google.colab import files
files.download('train_img_paths.json')
files.download('val_img_paths.json')
files.download('test_img_paths.json')
files.download('train_captions.json')
files.download('val_captions.json')
files.download('test_captions.json')
files.download('word_dict.json')
files.download('word_dict_reverse.json')
"""# Preprocessed Files """
# !ls "/content/drive/MyDrive/SEM-2/05-DL /Assignments/A4/Method2/data/"
# !ls | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
32,
19,
62,
7841,
62,
16,
62,
6719,
36948,
13,
541,
2047,
65,
198,
198,
38062,
4142,
7560,
416,
1623,
4820,
2870,
13,
198,
198,
20556,
2393,
318,
5140,
379,
1... | 2.628175 | 2,953 |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlVerkeersregelaarCoordinatiewijze(KeuzelijstField):
    """Choice list (Keuzelijst) of the common ways a traffic-light controller
    ("verkeersregelaar") is coordinated."""
    # Generated metadata: Dutch name/label and the canonical OTL URIs; the
    # literal values are part of the published data model and must not change.
    naam = 'KlVerkeersregelaarCoordinatiewijze'
    label = 'Verkeersregelaar coordinatiewijze'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlVerkeersregelaarCoordinatiewijze'
    definition = 'Keuzelijst met de voorkomende manieren van coordinatie voor verkeersregelaars.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlVerkeersregelaarCoordinatiewijze'
    # Allowed values: centraal (central), geen (none), its-app, klok (clock),
    # master, pulsen (pulses), slave.
    options = {
        'centraal': KeuzelijstWaarde(invulwaarde='centraal',
                                     label='centraal',
                                     objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/centraal'),
        'geen': KeuzelijstWaarde(invulwaarde='geen',
                                 label='geen',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/geen'),
        'its-app': KeuzelijstWaarde(invulwaarde='its-app',
                                    label='ITS-app',
                                    definitie='Coordinatie door ITS-app.',
                                    objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/its-app'),
        'klok': KeuzelijstWaarde(invulwaarde='klok',
                                 label='klok',
                                 objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/klok'),
        'master': KeuzelijstWaarde(invulwaarde='master',
                                   label='master',
                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/master'),
        'pulsen': KeuzelijstWaarde(invulwaarde='pulsen',
                                   label='pulsen',
                                   objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/pulsen'),
        'slave': KeuzelijstWaarde(invulwaarde='slave',
                                  label='slave',
                                  objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlVerkeersregelaarCoordinatiewijze/slave')
    }
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43,
17633,
13,
27354,
265,
9497,
13,
8896,
10277,
417,
2926,
301,
15878,
1330,
3873,
10277,
417,
2926,
301,
15878,
198,
6738,
440,
14990,
44,
3913,
13,
2394,
43... | 1.86695 | 1,413 |
from __future__ import unicode_literals
from django.contrib import admin
from Hostel.models import *
# Register your models here.
# Expose every hostel model in the Django admin (same registration order
# as before).
for _hostel_model in (
    Hostel_Details,
    Hostel_Room,
    Hostel_Register,
    Hostel_Allocation,
    Hostel_Visitor,
):
    admin.site.register(_hostel_model)
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
14504,
417,
13,
27530,
1330,
1635,
198,
2,
17296,
534,
4981,
994,
13,
628,
198,
28482,
13,
15654,
13,
30238,
7... | 3.193878 | 98 |
/anaconda/lib/python3.6/token.py | [
14,
272,
330,
13533,
14,
8019,
14,
29412,
18,
13,
21,
14,
30001,
13,
9078
] | 2.133333 | 15 |
# 7.	Maximum Multiple
# Given a Divisor and a Bound, find the largest integer N, such that:
# N is divisible by divisor
# N is less than or equal to bound
# N is greater than 0.
# Notes: The divisor and bound are only positive values. It's guaranteed that a divisor is found


def largest_multiple(divisor, bound):
    """Return the largest positive multiple of *divisor* that is <= *bound*.

    Replaces the original O(bound) countdown loop with O(1) arithmetic.
    Assumes divisor <= bound (guaranteed by the task statement); the old
    loop raised a NameError in the impossible no-multiple case.
    """
    return divisor * (bound // divisor)


if __name__ == "__main__":
    divisor = int(input())
    bound = int(input())
    print(largest_multiple(divisor, bound))
| [
2,
767,
13,
197,
40541,
20401,
198,
2,
11259,
257,
4777,
271,
273,
290,
257,
30149,
11,
1064,
262,
4387,
18253,
399,
11,
884,
326,
25,
198,
2,
399,
318,
2659,
12843,
416,
2659,
271,
273,
198,
2,
399,
318,
1342,
621,
393,
4961,
2... | 2.848684 | 152 |
import numpy as np
import subprocess
import glob as gb
import shutil
import time
import os
import logging
import ccx2paraview

# NOTE(review): change_model_inp (swaps the mesh into the .inp file) and
# run_calculix (launches the CalculiX solver and copies results) are not
# defined in this excerpt — presumably defined elsewhere in the original
# file; confirm before running.

# Collect results in a dedicated folder. os.mkdir deliberately raises if
# "Results" already exists, which prevents silently mixing old and new runs.
os.mkdir("Results")

# Check the meshes in Mesh folder
mesh_file_names = gb.glob("Mesh/*.inp")

# Name of the input inp file for CalculiX (without extension).
ccx_input_file = "model"

# Run the solver once per discovered mesh.
for mesh_file_name in mesh_file_names:
    change_model_inp(ccx_input_file, mesh_file_name)
    run_calculix(ccx_input_file, mesh_file_name)

print("All simulations are done!")  # fixed typo: "simualations"
| [
11748,
299,
32152,
355,
45941,
198,
11748,
850,
14681,
198,
11748,
15095,
355,
308,
65,
198,
11748,
4423,
346,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
18931,
198,
11748,
36624,
87,
17,
1845,
615,
769,
628,
628,
198,
1303,
15553,... | 2.849515 | 206 |
from scripts import settings, streaming
# Query keys categorize tweets
# Each key or category corresponds to an array of keywords
# Five cryptocurrencies are tracked: tweets matching either the ticker or
# the full name are binned under the ticker key.
queries = {'ETH': ['ETH', 'Ethereum'],
           'LTC': ['LTC', 'Litecoin'],
           'BTC': ['BTC', 'Bitcoin'],
           'XRP': ['XRP', 'Ripple'],
           'XLM': ['XLM', 'Stellar']}
# Aggregate volume and sentiment every 15 minutes
refresh = 15*60  # seconds
# Start the streaming collector with sentiment scoring and debug output on.
# (streamer/credentials come from the project-local scripts package.)
streaming.streamer(settings.credentials,
                   queries,
                   refresh,
                   sentiment=True,
                   debug=True)
| [
6738,
14750,
1330,
6460,
11,
11305,
198,
198,
2,
43301,
8251,
17851,
1096,
12665,
198,
2,
5501,
1994,
393,
6536,
24866,
284,
281,
7177,
286,
26286,
198,
421,
10640,
796,
1391,
6,
20702,
10354,
37250,
20702,
3256,
705,
36,
17733,
6,
43... | 2.125954 | 262 |
"""Tests for module_utils.py."""
import os
from pytype import file_utils
from pytype import module_utils
import unittest
class ModuleUtilsTest(unittest.TestCase):
  """Test module utilities."""
  # Because TestInferModule expands a lot of paths:
  # (class-level shortcut so test methods can call self.expand(...))
  expand = file_utils.expand_path
  # NOTE(review): the individual test methods are not visible in this
  # excerpt — they appear to have been stripped from this copy.
class TestInferModule(unittest.TestCase):
  """Test module_utils.infer_module."""
  # NOTE(review): test methods likewise not visible in this excerpt.
if __name__ == "__main__":
  unittest.main()
| [
37811,
51,
3558,
329,
8265,
62,
26791,
13,
9078,
526,
15931,
198,
198,
11748,
28686,
198,
198,
6738,
12972,
4906,
1330,
2393,
62,
26791,
198,
6738,
12972,
4906,
1330,
8265,
62,
26791,
198,
198,
11748,
555,
715,
395,
628,
198,
4871,
19... | 3.021898 | 137 |
# -*- coding: utf-8 -*-
import logging
import logging.config
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
628
] | 2.73913 | 23 |
import os, json, math, pprint
if __name__ == "__main__":
    # Run the parser for goals of length 5.
    # NOTE(review): `parse` is not defined in this excerpt — presumably
    # defined elsewhere in the original file; confirm before running.
    goalLength = 5
    parse(goalLength)
11748,
28686,
11,
33918,
11,
10688,
11,
279,
4798,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
3061,
24539,
796,
642,
198,
220,
220,
220,
21136,
7,
35231,
24539,
8
] | 2.605263 | 38 |
from abc import ABC, abstractmethod
from concurrent import futures
import datetime as dt
import logging
import pathlib
import shutil
from typing import Optional, List, Tuple
from dateutil.relativedelta import relativedelta, FR
import pandas as pd
from tqdm import tqdm
from opensignals import features
logger = logging.getLogger(__name__)
AWS_BASE_URL = 'https://numerai-signals-public-data.s3-us-west-2.amazonaws.com'
SIGNALS_UNIVERSE = f'{AWS_BASE_URL}/latest_universe.csv'
SIGNALS_TICKER_MAP = f'{AWS_BASE_URL}/signals_ticker_map_w_bbg.csv'
SIGNALS_TARGETS = f'{AWS_BASE_URL}/signals_train_val_bbg.csv'
class Provider(ABC):
"""Common base class for (daily) stock price data"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def get_train_test_data(ticker_data: pd.DataFrame,
feature_names: List[str],
targets: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""merge our feature data with Numerai targets"""
ml_data = pd.merge(
ticker_data, targets,
on=['date', 'bloomberg_ticker'],
how='left'
)
logger.info(f'Found {ml_data.target.isna().sum()}'
'rows without target, filling with 0.5')
ml_data['target'] = ml_data['target'].fillna(0.5)
ml_data = ml_data.set_index('date')
# for training and testing we want clean, complete data only
ml_data = ml_data.dropna(subset=feature_names)
# ensure we have only fridays
ml_data = ml_data[ml_data.index.weekday == 4]
# drop eras with under 50 observations per era
ml_data = ml_data[ml_data.index.value_counts() > 50]
# train test split
train_data = ml_data[ml_data['data_type'] == 'train']
test_data = ml_data[ml_data['data_type'] == 'validation']
return train_data, test_data
    def get_data(self,
                 db_dir: pathlib.Path,
                 features_generators: Optional[List[features.FeatureGenerator]] = None,
                 last_friday: Optional[dt.datetime] = None,
                 target: str = 'target_20d',
                 feature_prefix: Optional[str] = None) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, List[str]]:
        """Generate the full data set.

        Loads ticker data from db_dir, restricts it to the Numerai signals
        universe, attaches targets, runs each feature generator, and returns
        (train_data, test_data, live_data, feature_names).

        :param db_dir: directory holding the downloaded ticker database
        :param features_generators: feature generators to apply (default none)
        :param last_friday: as-of date for live data; defaults to the most
            recent Friday before today
        :param target: column of the targets file to use as 'target'
        :param feature_prefix: optional prefix passed to the generators
        """
        if last_friday is None:
            last_friday = dt.datetime.today() - relativedelta(weekday=FR(-1))
        if features_generators is None:
            features_generators = []
        ticker_data = self.get_ticker_data(db_dir)
        # Keep only tickers in the published Numerai signals universe.
        ticker_universe = pd.read_csv(SIGNALS_UNIVERSE)
        ticker_data = ticker_data[ticker_data.bloomberg_ticker.isin(
            ticker_universe['bloomberg_ticker'])]
        # Targets ship with a YYYYMMDD integer 'friday_date' column.
        targets = pd.read_csv(SIGNALS_TARGETS)
        targets['date'] = pd.to_datetime(
            targets['friday_date'],
            format='%Y%m%d'
        )
        targets['target'] = targets[target]
        # Each generator may transform ticker_data and reports the feature
        # columns it added.
        feature_names = []
        for features_generator in features_generators:
            ticker_data, feature_names_aux = features_generator.generate_features(ticker_data, feature_prefix)
            feature_names.extend(feature_names_aux)
        train_data, test_data = Provider.get_train_test_data(ticker_data, feature_names, targets)
        # generate live data
        live_data = Provider.get_live_data(ticker_data, last_friday)
        return train_data, test_data, live_data, feature_names
@abstractmethod
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
6738,
24580,
1330,
25650,
198,
11748,
4818,
8079,
355,
288,
83,
198,
11748,
18931,
198,
11748,
3108,
8019,
198,
11748,
4423,
346,
198,
6738,
19720,
1330,
32233,
11,
7343,
11,
309,
29291,... | 2.237179 | 1,560 |
# https://leetcode.com/problems/reshape-the-matrix/
import numpy as np
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
3447,
1758,
12,
1169,
12,
6759,
8609,
14,
628,
198,
11748,
299,
32152,
355,
45941,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
... | 1.978261 | 46 |
import torch
import bionetwork
import numpy
import time
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
networkSize = numpy.array([1000, 3000, 5000, 7000, 9000, 11000, 13000, 15000, 17000, 19000])
batchsize = 3
repeats = 5
parameters = bionetwork.trainingParameters(iterations=150, clipping=1)
criterion = torch.nn.MSELoss()
resultsF = numpy.zeros((len(networkSize), repeats))
resultsB = numpy.zeros((len(networkSize), repeats))
numberOfWeights = numpy.zeros((len(networkSize), repeats))
for i in range(len(networkSize)):
for j in range(repeats):
net = makeNetwork(networkSize[i])
net.preScaleWeights(0.9)
start = time.time()
input1 = torch.randn(batchsize, net.A.shape[0], dtype=torch.double, requires_grad=True)
start = time.time()
prediction1 = net(input1)
resultsF[i, j] = time.time() - start
predictionForLoss = torch.randn(input1.shape).double()
predictionForLoss.requires_grad = False
start = time.time()
loss1 = criterion(prediction1, predictionForLoss)
a = loss1.backward()
resultsB[i, j] = time.time() - start
numberOfWeights[i, j] = net.A.data.shape[0]
print(networkSize[i], resultsF[i, :], resultsB[i, :])
#%%
meanTimeF = numpy.mean(resultsF, axis=1)
stdTimeF = numpy.std(resultsF, axis=1)
meanTimeB = numpy.mean(resultsB, axis=1)
stdTimeB = numpy.std(resultsB, axis=1)
X = networkSize.reshape(-1,1).repeat(repeats, axis=1).flatten().reshape(-1, 1)
Y = 0.5*(resultsF+resultsB).flatten().reshape(-1, 1)
reg = LinearRegression(fit_intercept=False).fit(X, Y)
print(reg.score(X, Y))
plt.errorbar(networkSize, meanTimeF, yerr=stdTimeF)
plt.errorbar(networkSize, meanTimeB, yerr=stdTimeB)
plt.legend({'Forward', 'Backward'}, frameon=False)
plt.xlabel('Number of nodes')
plt.ylabel('Time [s]')
plt.xlim([0, max(networkSize)+1000])
X = numpy.array([0, max(networkSize)])
Y = reg.coef_.flatten() * X
plt.plot(X, Y, 'k-')
plt.ylim(bottom=0)
# plt.figure()
# plt.scatter(numberOfWeights.flatten(), resultsF.flatten())
# plt.scatter(numberOfWeights.flatten(), resultsB.flatten())
# plt.legend({'Forward', 'Backward'})
# plt.xlabel('Number of interactions')
# plt.ylabel('Time [s]') | [
11748,
28034,
198,
11748,
275,
295,
316,
1818,
198,
11748,
299,
32152,
198,
11748,
640,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
44800,
8081,
2234,
628,
198,
27349... | 2.445287 | 923 |
EPSILON = 1e-8
| [
36,
3705,
4146,
1340,
796,
352,
68,
12,
23,
198
] | 1.5 | 10 |
# generated file
from .interface import Mmc
| [
2,
7560,
2393,
198,
6738,
764,
39994,
1330,
337,
23209,
198
] | 4 | 11 |
import pytest
from PyTradier.account import Account
| [
11748,
12972,
9288,
198,
6738,
9485,
2898,
38868,
13,
23317,
1330,
10781,
628
] | 4.076923 | 13 |
import pytest
from dlkit.primordium.locale.objects import InitializableLocale
from dlkit.primordium.type.primitives import Type
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("initializable_locale_test_wrapper")
| [
11748,
12972,
9288,
198,
198,
6738,
288,
75,
15813,
13,
19795,
585,
1505,
13,
17946,
1000,
13,
48205,
1330,
20768,
13821,
33711,
1000,
198,
6738,
288,
75,
15813,
13,
19795,
585,
1505,
13,
4906,
13,
19795,
20288,
1330,
5994,
628,
198,
... | 3.053333 | 75 |
#!/usr/bin/env python
# coding: utf-8
"""Attribute-based dictionary."""
# native
from typing import Any, Dict, Type
ADict = Dict[Any, Any]
"""`Dict` with `Any` key and `Any` value."""
def dict_merge(dest: ADict, *sources: ADict, default: Type[ADict] = dict) -> ADict:
"""Recursively merge dictionaries into the first dictionary.
Args:
dest (dict): dict into which source dicts are merged
*sources (dict): dicts to merge
default (dict): constructor for default dict, default: `dict`
Returns:
dest (dict): the resulting merged dict
Examples:
>>> a = {"b": {"c": 1}, "d": 2}
>>> b = {"b": {"c": 2, "e": 3}, "d": 2}
>>> c = {"d": 4}
>>> dict_merge(a, b, c)
{'b': {'c': 2, 'e': 3}, 'd': 4}
"""
for src in sources:
for key, value in src.items():
if isinstance(value, dict):
node = dest.setdefault(key, default())
dict_merge(node, default(**value), default=default)
else:
dest[key] = value
return dest
def dict_set(dest: ADict, path: str, val: Any, sep: str = ".") -> ADict:
"""Set a path in a dict to a value.
Args:
dest (dict): dict to update
path (str): location to update
val (any): value to set
sep (str): path separator (default: .)
Examples:
>>> items = {"a": {"b": {"c": 1, "d": 2}}}
>>> dict_set(items, "a.b.c", 5)
{'a': {'b': {'c': 5, 'd': 2}}}
>>> dict_set(items, "a.b.d.e", 5)
{'a': {'b': {'c': 5, 'd': {'e': 5}}}}
>>> dict_set(items, "", "") == items
True
You can use a different path separator:
>>> dict_set(items, "a/b/d/e", 6, sep="/")
{'a': {'b': {'c': 5, 'd': {'e': 6}}}}
"""
if not path:
return dest
original = dest
parts = path.split(sep)
for part in parts[:-1]:
if part not in dest or not isinstance(dest[part], dict):
dest[part] = {}
dest = dest[part]
dest[parts[-1]] = val
return original
def dict_get(src: ADict, path: str, default: Any = None, sep: str = ".") -> Any:
"""Get a path value from a dict.
Examples:
>>> items = {"a": {"b": {"c": 1, "d": 2}}}
>>> dict_get(items, "a.b.c")
1
>>> dict_get(items, "x") is None
True
>>> dict_get(items, "") == items
True
"""
if not path:
return src
parts = path.split(sep)
for part in parts[:-1]:
src = src.get(part, {})
return src.get(parts[-1], default)
class AttrDict(Dict[str, Any]):
"""Like a `dict`, but with attribute syntax."""
def __setattr__(self, name: str, value: Any) -> None:
"""Set the value of an attribute.
Args:
name (str): name of the attribute
value (any): value to set
Examples:
>>> item = AttrDict()
>>> item.a = 1
>>> item['a']
1
>>> object.__setattr__(item, 'b', 2)
>>> item.b = 3
>>> item.b
3
"""
try:
super().__getattribute__(name)
super().__setattr__(name, value)
except AttributeError:
self[name] = value
def __getattr__(self, name: str) -> Any:
"""Return the value of the attribute.
Args:
name (str): name of the attribute
Returns:
(any): value of the attribute, or None if it is missing
Examples:
>>> item = AttrDict(a=1)
>>> item.a
1
"""
return self[name]
def __delattr__(self, name: str) -> None:
"""Delete the attribute.
Args:
name (str): name of the attribute
Examples:
>>> item = AttrDict(a=1, b=2)
>>> del item.a
>>> item.a is None
True
"""
del self[name]
def __getitem__(self, name: Any) -> Any:
"""Return the value of the key.
Args:
name (str): name of the key
Returns:
(any): value of the key, or None if it is missing
Examples:
>>> item = AttrDict(a=1)
>>> item['a']
1
"""
result = None
try:
result = dict.__getitem__(self, name)
except KeyError:
pass
return result
def __lshift__(self, other: ADict) -> ADict:
"""Merge `other` into this dict.
NOTE: Any nested dictionaries will be converted to `AttrDict` objects.
Args:
other (dict): other dictionary to merge
Returns:
self (AttrDict): merged dictionary
Examples:
>>> item = AttrDict(a=1, b=2)
>>> item <<= {"b": 3}
>>> item.b
3
>>> item << {"b": 2, "c": {"d": 4}} << {"c": {"d": 5}}
{'a': 1, 'b': 2, 'c': {'d': 5}}
>>> item.c.d
5
"""
return dict_merge(self, other, default=self.__class__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
37811,
33682,
12,
3106,
22155,
526,
15931,
198,
198,
2,
6868,
198,
6738,
19720,
1330,
4377,
11,
360,
713,
11,
5994,
198,
198,
2885,
713,
796,
3... | 2.001173 | 2,557 |
#!/usr/local/bin/python2.3
from io import StringIO
from PyZ3950 import z3950, oids
# We need "\"\"" to be one token
from PyZ3950.CQLParser import CQLshlex
from PyZ3950.CQLUtils import ZCQLConfig
from PyZ3950.zdefs import make_attr
zconfig = ZCQLConfig()
"""
http://cheshire.berkeley.edu/cheshire2.html#zfind
top ::= query ['resultsetid' name]
query ::= query boolean clause | clause
clause ::= '(' query ')'
| attributes [relation] term
| resultset
attributes ::= '[' { [set] type '=' value } ']' | name
boolean ::= 'and' | 'or' | 'not' | (synonyms)
prox ::= ('!PROX' | (synonyms)) {'/' name}
relation ::= '>' | '<' | ...
[bib1 1=5, bib1 3=6] > term and title @ fish
"""
booleans = {'AND' : 'and',
'.AND.' : 'and',
'&&' : 'and',
'OR' : 'or',
'.OR.' : 'or',
'||' : 'or',
'NOT' : 'and-not',
'.NOT.' : 'and-not',
'ANDNOT' : 'and-not',
'.ANDNOT.' : 'and-not',
'!!' : 'and-not'
}
relations = {'<' : 1,
'LT' : 1,
'.LT.' : 1,
'<=' : 2,
'LE' : 2,
'.LE.' : 2,
'=' : 3,
'>=' : 4,
'GE' : 4,
'.GE.' : 4,
'>' : 5,
'GT' : 5,
'.GT.' : 5,
'<>' : 6,
'!=' : 6,
'NE' : 6,
'.NE.' : 6,
'?' : 100,
'PHON' : 100,
'.PHON.' : 100,
'%' : 101,
'STEM' : 101,
'.STEM.' : 101,
'@' : 102,
'REL' : 102,
'.REL.' : 102,
'<=>' : 104,
'WITHIN' : 104,
'.WITHIN.' : 104}
geoRelations = {'>=<' : 7,
'.OVERLAPS.' : 7,
'>#<' : 8,
'.FULLY_ENCLOSED_WITHIN.' : 8,
'<#>' : 9,
'.ENCLOSES.' : 9,
'<>#' : 10,
'.OUTSIDE_OF.' : 10,
'+-+' : 11,
'.NEAR.' : 11,
'.#.' : 12,
'.MEMBERS_CONTAIN.' : 12,
'!.#.' : 13,
'.MEMBERS_NOT_CONTAIN.' : 13,
':<:' : 14,
'.BEFORE.' : 14,
':<=:' : 15,
'.BEFORE_OR_DURING.' : 15,
':=:' : 16,
'.DURING.' : 16,
':>=:' : 17,
'.DURING_OR_AFTER.' : 17,
':>:' : 18,
'.AFTER.' : 18}
proxBooleans = {'!PROX' : (2, 0, 2),
'!ADJ' : (2, 0, 2),
'!NEAR' : (20, 0, 2),
'!FAR' : (20, 0, 4),
'!OPROX' : (2, 1, 2),
'!OADJ' : (2, 1, 2),
'!ONEAR' : (20, 1, 2),
'!OFAR' : (20, 1, 4)}
proxUnits = {'C' : 1,
'CHAR' : 1,
'W' : 2,
'WORD' : 2,
'S' : 3,
'SENT' : 3,
'SENTENCE' : 3,
'P' : 4,
'PARA' : 4,
'PARAGRAPH' : 4,
'SECTION' : 5,
'CHAPTER' : 6,
'DOCUMENT' : 7,
'ELEMENT' : 8,
'SUBELEMENT' : 9,
'ELEMENTTYPE' : 10,
'BYTE' : 11}
privateBooleans = {'!FUZZY_AND' : 1,
'!FUZZY_OR' : 2,
'!FUZZY_NOT' : 3,
'!RESTRICT_FROM' : 4,
'!RESTRICT_TO' : 5,
'!MERGE_SUM' : 6,
'!MERGE_MEAN' : 7,
'!MERGE_NORM' : 8}
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
17,
13,
18,
198,
6738,
33245,
1330,
10903,
9399,
198,
198,
6738,
9485,
57,
2670,
1120,
1330,
1976,
2670,
1120,
11,
267,
2340,
198,
2,
775,
761,
366,
7879,
7879,
1,
220,
284,
307,
53... | 1.469355 | 2,480 |
import os
import math
import random
import numpy
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms as T, datasets, models
import pytorch_lightning as pl
| [
11748,
28686,
198,
11748,
10688,
198,
11748,
4738,
198,
11748,
299,
32152,
198,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
28034,
13,
20471,
1330,
10345,
355,
376,
198,
6738,
28034,
13,
26791,
13,
7890,
13... | 3.627907 | 86 |
import platform
first=" Sistem :" ,platform.system()
second="İsim :", platform.node()
three="İşletim Sistemi :", platform.platform()
four="İşlemci :" ,platform.processor()
five="Bit :" ,platform.machine()
six="Genel :" ,platform.uname()
print("""
- WarriorTurks Python3 Örnekleri -
1 - Sistem
2- İsim
3 - İşletim Sistemi
4 -İşlemci
5 - Bit
6 - Genel
7 - HEPSİ
""")
örnek = "hatalı seçim yaptın dostum"
seçim = input("Neyi Öğrenmek istersin : ")
if seçim == "1":
print(first)
elif seçim == "2":
print(second)
elif seçim == "3":
print(three)
elif seçim == "4":
print(four)
elif seçim == "5":
print(five)
elif seçim == "6":
print(six)
elif seçim == "7":
print(first , "\n" , second , "\n" , three , "\n" , four , "\n" , five , "\n" , six )
else :
print(örnek) | [
11748,
3859,
201,
198,
11085,
2625,
311,
396,
368,
220,
220,
1058,
1,
837,
24254,
13,
10057,
3419,
201,
198,
12227,
2625,
128,
108,
14323,
220,
220,
1058,
1600,
3859,
13,
17440,
3419,
201,
198,
15542,
2625,
128,
108,
46481,
1616,
320,... | 2.033097 | 423 |
from typing import List
import pytest
from aoc.day_06 import Day06
from resources import read_as_string_list
aoc_input = Day06(read_as_string_list('test/day06.in'))
@pytest.mark.parametrize("test_input, expected", [
(['turn on 0,0 through 0,0'], 1),
(['toggle 0,0 through 999,999'], 2_000_000),
])
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
12972,
9288,
198,
6738,
257,
420,
13,
820,
62,
3312,
1330,
3596,
3312,
198,
198,
6738,
4133,
1330,
1100,
62,
292,
62,
8841,
62,
4868,
198,
198,
64,
420,
62,
15414,
796,
3596,
3312,
7,
961,
... | 2.552846 | 123 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os.path
import yaml
Z = KF2_ZED
class KF2_EndlessUtility(object):
"""
Utility class with routines to compute zed count,
zed count multipliers, spawn rates etc. in KF2 Endless mode.
Numbers as per 01.05.2018
References
----------
* https://wiki.tripwireinteractive.com/index.php?title=Endless_Mode
"""
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
KF2 = KF2_EndlessUtility
def make_line_const_interp((x0, y0), (x1, y1)):
"""Same as `make_line_interp`, but extrapolated constantly beyond [x0; x1]."""
m = min(x0, x1)
M = max(x0, x1)
return f
class KF2_CustomEndlessWaves(object):
"""Class encapsulating custom zed waves in KF2 Endless mode."""
@staticmethod
@staticmethod
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Generate `kfzedvarient.ini` file from given YAML config '
'and save it to the same directory.')
parser.add_argument('config_path', metavar='PATH', type=str, help='path to YAML config')
parser.add_argument('--txt', action='store_true', help='display wave names')
parser.add_argument('--markdown', action='store_true', help='display wave names in Markdown format')
args = parser.parse_args()
main(args)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
1822,
29572,
198,
11748,
28686,
13,
6978,
198,
11748,
331,
43695,
628,
198,
57,
796,
509,
37,
17,
62,
57,
1961... | 2.466967 | 666 |
from indicators.SingleValueIndicator import SingleValueIndicator | [
6738,
21337,
13,
28008,
11395,
5497,
26407,
1330,
14206,
11395,
5497,
26407
] | 5.333333 | 12 |
import cPickle, time, unittest
from numpy.testing import dec
from theano.gof import Variable, Op
from theano import gof
from theano.scalar import *
from theano import tensor
from theano.compile.mode import get_default_mode
from theano.tensor.elemwise import *
from theano.tests import unittest_tools
if __name__ == '__main__':
#unittest.main()
suite = unittest.TestSuite([test_Prod('test_mul_without_zeros_zeros')])
#suite.addTest(test_Prod('test_verify_grad_with_zeros'))
#suite.addTest(test_Prod('test_prod_without_zeros'))
#suite.addTest(test_Prod('test_other_grad_tests'))
unittest.TextTestRunner().run(suite)
| [
11748,
269,
31686,
293,
11,
640,
11,
555,
715,
395,
198,
198,
6738,
299,
32152,
13,
33407,
1330,
875,
198,
198,
6738,
262,
5733,
13,
70,
1659,
1330,
35748,
11,
8670,
198,
6738,
262,
5733,
1330,
467,
69,
198,
198,
6738,
262,
5733,
... | 2.545098 | 255 |
import pandas as pd
import logging
from pathlib import Path
if __name__ == "__main__":
QLocationFactory.load("locations.csv")
names = QLocationFactory.get_available_objects()
for name in names:
r = QLocationFactory.get_object_by_name(name)
print(r) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
18931,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
628,
220,
220,
220,
1195,
14749,
22810,
13,
2220,
7203,
17946,
602,
13,
40664,
... | 2.718447 | 103 |
import os
import sys
from unittest import mock
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
with mock.patch.dict(os.environ, {"AWS_REGION": "us-west-1", "DYNAMO_TABLE": "fake-table"}):
import app
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
555,
715,
395,
1330,
15290,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,... | 2.414141 | 99 |
"""Purge old data helper."""
from datetime import timedelta
import logging
import homeassistant.util.dt as dt_util
from .util import session_scope
_LOGGER = logging.getLogger(__name__)
def purge_old_data(instance, purge_days, repack):
"""Purge events and states older than purge_days ago."""
from .models import States, Events
from sqlalchemy.exc import SQLAlchemyError
purge_before = dt_util.utcnow() - timedelta(days=purge_days)
_LOGGER.debug("Purging events before %s", purge_before)
try:
with session_scope(session=instance.get_session()) as session:
deleted_rows = (
session.query(States)
.filter((States.last_updated < purge_before))
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s states", deleted_rows)
deleted_rows = (
session.query(Events)
.filter((Events.time_fired < purge_before))
.delete(synchronize_session=False)
)
_LOGGER.debug("Deleted %s events", deleted_rows)
# Execute sqlite vacuum command to free up space on disk
if repack and instance.engine.driver == "pysqlite":
_LOGGER.debug("Vacuuming SQLite to free space")
instance.engine.execute("VACUUM")
except SQLAlchemyError as err:
_LOGGER.warning("Error purging history: %s.", err)
| [
37811,
30026,
469,
1468,
1366,
31904,
526,
15931,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
18931,
198,
198,
11748,
1363,
562,
10167,
13,
22602,
13,
28664,
355,
288,
83,
62,
22602,
198,
198,
6738,
764,
22602,
1330,
6246,
62... | 2.36755 | 604 |
import random
import pickle
import numpy as np
import metaworld
from metaworld import Task
SEED = 1
EPISODE_LEN = 200
NUM_EPISODES = 10
DECIMAL_PRECISION = 3
# Set random seed.
random.seed(SEED)
np.random.seed(SEED)
# Create kwargs list for ML45_train and ML_45 test.
kwargs_list = []
benchmark = metaworld.ML45()
kwargs_list.append(
{
"env_dict": benchmark.train_classes,
"tasks": benchmark.train_tasks,
"resample_tasks": True,
"add_observability": True,
}
)
kwargs_list.append(
{
"env_dict": benchmark.test_classes,
"tasks": benchmark.test_tasks,
"resample_tasks": True,
"add_observability": True,
}
)
# Get list of goals, initial hand positions, and initial object positions for each task.
goals = {}
hand_poses = {}
obj_poses = {}
for kwargs in kwargs_list:
benchmark_goals, benchmark_hand, benchmark_obj = check_obs(**kwargs)
goals.update(benchmark_goals)
hand_poses.update(benchmark_hand)
obj_poses.update(benchmark_obj)
# Find environments that violate assumptions about observation info.
goal_violating_envs = []
hand_violating_envs = []
obj_violating_envs = []
for env_idx, env_name in enumerate(goals.keys()):
# Check that goals aren't identical across episodes.
task_goals = np.round(np.array(goals[env_name]), decimals=DECIMAL_PRECISION)
if len(np.unique(task_goals, axis=0)) == 1:
goal_violating_envs.append((env_idx, env_name))
# Check that initial hand positions are identical across episodes.
task_hand_poses = np.round(
np.array(hand_poses[env_name]), decimals=DECIMAL_PRECISION
)
if len(np.unique(task_hand_poses, axis=0)) > 1:
hand_violating_envs.append((env_idx, env_name))
# Check that initial object positions aren't identical across episodes.
task_obj_poses = np.round(np.array(obj_poses[env_name]), decimals=DECIMAL_PRECISION)
if len(np.unique(task_obj_poses, axis=0)) == 1:
obj_violating_envs.append((env_idx, env_name))
# Print violating environments.
print("Goal violating environments: %s" % goal_violating_envs)
print("Hand violating environments: %s" % hand_violating_envs)
print("Object violating environments: %s" % obj_violating_envs)
| [
11748,
4738,
198,
11748,
2298,
293,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
1138,
707,
1764,
198,
6738,
1138,
707,
1764,
1330,
15941,
628,
198,
5188,
1961,
796,
352,
198,
8905,
1797,
16820,
62,
43,
1677,
796,
939,
198,
41... | 2.579611 | 873 |
#!/usr/bin/env python3
from collections import defaultdict
from heapq import heappush, heappop
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.buffer.readline
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
def _test():
"""
>>> solve(5, 3, [10, 30, 40, 50, 20])
30
>>> solve(3, 1, [10, 20, 10])
20
>>> solve(2, 100, [10, 10])
0
>>> solve(10, 4, [40, 10, 20, 70, 80, 10, 20, 70, 80, 60])
40
"""
import doctest
doctest.testmod()
def as_input(s):
"use in test, use given string as input file"
import io
global read, input
f = io.StringIO(s.strip())
input = f.readline
read = f.read
USE_NUMBA = False
if (USE_NUMBA and sys.argv[-1] == 'ONLINE_JUDGE') or sys.argv[-1] == '-c':
print("compiling")
from numba.pycc import CC
cc = CC('my_module')
cc.export('solve', solve.__doc__.strip().split()[0])(solve)
cc.compile()
exit()
else:
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if (USE_NUMBA and sys.argv[-1] != '-p') or sys.argv[-1] == "--numba":
# -p: pure python mode
# if not -p, import compiled module
from my_module import solve # pylint: disable=all
elif sys.argv[-1] == "-t":
_test()
sys.exit()
elif sys.argv[-1] != '-p' and len(sys.argv) == 2:
# input given as file
input_as_file = open(sys.argv[1])
input = input_as_file.buffer.readline
read = input_as_file.buffer.read
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
24575,
80,
1330,
339,
1324,
1530,
11,
339,
1324,
404,
198,
11748,
25064,
198,
198,
17597,
13,
2617,
8344,
24197,
32374,
7,
940,
1174... | 2.175287 | 696 |
# coding: utf-8
from __future__ import unicode_literals
from itertools import chain
from django import forms
from django.forms.formsets import TOTAL_FORM_COUNT
from django.forms.models import fields_for_model
from django.utils.translation import ungettext
from modeltranslation_rosetta.settings import DEFAULT_FROM_LANG, DEFAULT_TO_LANG, LANGUAGES
from .import_translation import parse_po, parse_xlsx, parse_xml
from .utils import build_localized_fieldname
from .utils import get_model, build_model_name, get_models
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
340,
861,
10141,
1330,
6333,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
23914,
13,
23914,
103... | 3.208589 | 163 |
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
import time
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
11321,
13,
1069,
11755,
1330,
1400,
16678,
20180,
16922,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
1525,
1330,
2750,
198,
6738,
384,
11925,
... | 3.694444 | 72 |
"""
Signin With Apple
Token Module
author: hugh@blinkybeach.com
"""
import jwt
from jwt.exceptions import PyJWTError
from typing import TypeVar, Type, Any, Dict, Union, Optional
from siwa.library.data import Data
import json
from siwa.library.key_cache import KeyCache
from siwa.library.token.header import Header
from siwa.library.token.payload import Payload
T = TypeVar('T', bound='IdentityToken')
| [
37811,
198,
11712,
259,
2080,
4196,
198,
30642,
19937,
198,
9800,
25,
289,
6724,
31,
2436,
29246,
1350,
620,
13,
785,
198,
37811,
198,
11748,
474,
46569,
198,
6738,
474,
46569,
13,
1069,
11755,
1330,
9485,
41,
39386,
12331,
198,
6738,
... | 3.25 | 124 |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pysteps.utils import transformation
# boxcox_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "BoxCox",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
None,
True,
np.array([np.exp(1)]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
1.0,
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "BoxCox",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
1.0,
None,
None,
True,
np.array([2.0]),
),
]
@pytest.mark.parametrize(
"R, metadata, Lambda, threshold, zerovalue, inverse, expected", test_data
)
def test_boxcox_transform(R, metadata, Lambda, threshold, zerovalue, inverse, expected):
"""Test the boxcox_transform."""
assert_array_almost_equal(
transformation.boxcox_transform(
R, metadata, Lambda, threshold, zerovalue, inverse
)[0],
expected,
)
# dB_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
True,
np.array([1.25892541]),
),
]
@pytest.mark.parametrize(
"R, metadata, threshold, zerovalue, inverse, expected", test_data
)
def test_dB_transform(R, metadata, threshold, zerovalue, inverse, expected):
"""Test the dB_transform."""
assert_array_almost_equal(
transformation.dB_transform(R, metadata, threshold, zerovalue, inverse)[0],
expected,
)
# NQ_transform
test_data = [
(
np.array([1, 2]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
False,
np.array([-0.4307273, 0.4307273]),
)
]
@pytest.mark.parametrize("R, metadata, inverse, expected", test_data)
def test_NQ_transform(R, metadata, inverse, expected):
"""Test the NQ_transform."""
assert_array_almost_equal(
transformation.NQ_transform(R, metadata, inverse)[0], expected
)
# sqrt_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
False,
np.array([1]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
True,
np.array([1]),
),
]
@pytest.mark.parametrize("R, metadata, inverse, expected", test_data)
def test_sqrt_transform(R, metadata, inverse, expected):
"""Test the sqrt_transform."""
assert_array_almost_equal(
transformation.sqrt_transform(R, metadata, inverse)[0], expected
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
28177,
62,
40496,
198,
198,
6738,
12972,
20214,
... | 1.797657 | 2,219 |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
from pants.backend.codegen.grpcio.python.python_grpcio_library import PythonGrpcioLibrary
from pants_test.backend.codegen.grpcio.grpcio_test_base import GrpcioTestBase
| [
2,
15069,
13130,
41689,
1628,
20420,
357,
3826,
27342,
9865,
3843,
20673,
13,
9132,
737,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
3826,
38559,
24290,
737,
198,
198,
6738,
2420,
37150,
1330,
4648,
298,
198,
1... | 3.3 | 100 |
import argparse
from gopro_overlay.gpx import load_timeseries
from gopro_overlay.journey import Journey
from gopro_overlay.units import units
# ## -0.29363,51.39235,-0.26822,51.39963
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Various random utilities for GPX files",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--action", choices=["extents"], default="extents")
parser.add_argument("gpx", help="GPX file")
args = parser.parse_args()
timeseries = load_timeseries(args.gpx, units)
if args.action == "extents":
journey = Journey()
timeseries.process(journey.accept)
bbox = journey.bounding_box
print(f"{bbox[0].lon},{bbox[0].lat},{bbox[1].lon},{bbox[1].lat}")
print(bbox)
| [
11748,
1822,
29572,
198,
198,
6738,
308,
404,
305,
62,
2502,
10724,
13,
70,
8416,
1330,
3440,
62,
22355,
10640,
198,
6738,
308,
404,
305,
62,
2502,
10724,
13,
73,
5604,
1330,
15120,
198,
6738,
308,
404,
305,
62,
2502,
10724,
13,
416... | 2.486486 | 333 |
class APIRequestError(APIError):
"""Problem with connection, parsing json, timeout or any other response with
non 200 HTTP code
"""
class SurveyMonkeyAPIError(APIError):
""""""
error = "Unknown Error"
error_code = None
error_description = None
| [
628,
220,
220,
220,
220,
198,
4871,
7824,
18453,
12331,
7,
17614,
12331,
2599,
198,
220,
220,
220,
37227,
40781,
351,
4637,
11,
32096,
33918,
11,
26827,
393,
597,
584,
2882,
351,
198,
220,
220,
220,
1729,
939,
14626,
2438,
628,
220,
... | 3.031915 | 94 |
__version__='0.11.1'
| [
834,
9641,
834,
11639,
15,
13,
1157,
13,
16,
6,
628
] | 2 | 11 |
import telebot
from telebot import types
import logging
TOKEN = 'TOKEN_HERE'
# Enable Logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
tb = telebot.TeleBot(TOKEN)
updates = tb.get_updates(1234,100,20)
last_chat_id = 0
user_dict = {}
def any_message(bot, update):
""" Print to console """
# Save last chat_id to use in reply handler
global last_chat_id
last_chat_id = update.message.chat_id
logger.info("New message\nFrom: %s\nchat_id: %d\nText: %s" %
(update.message.from_user,
update.message.chat_id,
update.message.text))
@tb.message_handler(commands=['saludo'])
@tb.message_handler(commands=['guardia'])
@tb.message_handler(commands=['status'])
@tb.message_handler(commands=['help'])
@tb.message_handler(commands=['jugar'])
tb.polling(none_stop=True) | [
11748,
5735,
13645,
198,
6738,
5735,
13645,
1330,
3858,
198,
11748,
18931,
628,
198,
10468,
43959,
796,
705,
10468,
43959,
62,
39,
9338,
6,
198,
198,
2,
27882,
5972,
2667,
198,
6404,
2667,
13,
35487,
16934,
7,
198,
220,
220,
220,
220,... | 2.267606 | 426 |
# -*- coding: utf-8 -*-
"""
@author: Viet Nguyen <nhviet1009@gmail.com>
"""
import sys
import csv
import numpy as np
from torch.utils.data import Dataset
csv.field_size_limit(sys.maxsize)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
31,
9800,
25,
8730,
42379,
1279,
77,
71,
85,
1155,
3064,
24,
31,
14816,
13,
785,
29,
198,
37811,
198,
11748,
25064,
198,
11748,
269,
21370,
198,
11748,
299... | 2.533333 | 75 |
import logging
import os
import tempfile
from contextlib import ExitStack
from typing import Text, Optional, Dict
import yaml
from rasa.constants import DEFAULT_CONFIG_PATH, DEFAULT_DATA_PATH, DEFAULT_DOMAIN_PATH
from rasa.importers.importer import TrainingDataImporter
from rasa.train import DEFAULT_MODELS_PATH
from rasa.train import _train_async_internal, handle_domain_if_not_exists, train
from rasa.utils.common import TempDirectoryPath
from bot_trainer.data_processor.constant import MODEL_TRAINING_STATUS
from bot_trainer.data_processor.importer import MongoDataImporter
from bot_trainer.data_processor.processor import AgentProcessor, ModelProcessor
from bot_trainer.data_processor.processor import MongoProcessor
from bot_trainer.exceptions import AppException
from bot_trainer.utils import Utility
async def train_model(
data_importer: TrainingDataImporter,
output_path: Text,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
):
""" Trains the rasa model internally, using functions from the rasa modules """
with ExitStack() as stack:
train_path = stack.enter_context(TempDirectoryPath(tempfile.mkdtemp()))
domain = await data_importer.get_domain()
if domain.is_empty():
return await handle_domain_if_not_exists(
data_importer, output_path, fixed_model_name
)
return await _train_async_internal(
data_importer,
train_path,
output_path,
force_training,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
)
async def train_model_from_mongo(
bot: str,
force_training: bool = False,
fixed_model_name: Optional[Text] = None,
persist_nlu_training_data: bool = False,
additional_arguments: Optional[Dict] = None,
):
""" Trains the rasa model, using the data that is loaded onto
Mongo, through the bot files """
data_importer = MongoDataImporter(bot)
output = os.path.join(DEFAULT_MODELS_PATH, bot)
return await train_model(
data_importer,
output,
force_training,
fixed_model_name,
persist_nlu_training_data,
additional_arguments,
)
def train_model_for_bot(bot: str):
    """Train a rasa model for *bot* from the data stored in Mongo.

    Loads nlu/domain/stories/config through MongoProcessor, materialises
    them as files on disk, runs rasa's ``train``, and returns the trained
    model path.

    Args:
        bot: bot identifier whose training data should be used.

    Raises:
        AppException: if the bot has no NLU training examples.
    """
    processor = MongoProcessor()
    nlu = processor.load_nlu(bot)
    if not nlu.training_examples:
        raise AppException("Training data does not exists!")
    domain = processor.load_domain(bot)
    stories = processor.load_stories(bot)
    config = processor.load_config(bot)
    directory = Utility.save_files(
        nlu.nlu_as_markdown().encode(),
        domain.as_yaml().encode(),
        stories.as_story_string().encode(),
        yaml.dump(config).encode(),
    )
    output = os.path.join(DEFAULT_MODELS_PATH, bot)
    try:
        model = train(
            domain=os.path.join(directory, DEFAULT_DOMAIN_PATH),
            config=os.path.join(directory, DEFAULT_CONFIG_PATH),
            training_files=os.path.join(directory, DEFAULT_DATA_PATH),
            output=output,
        )
    finally:
        # Remove the materialised training files even when training fails;
        # previously a failed train() leaked the temporary directory.
        Utility.delete_directory(directory)
    return model
def start_training(bot: str, user: str):
    """Train *bot*, recording training-status transitions for *user*.

    Marks the bot INPROGRESS before training, then DONE or FAIL afterwards
    (the final status is persisted in a ``finally`` block so it is recorded
    even when training raises), and reloads the in-memory agent on success.

    Args:
        bot: bot identifier to train.
        user: user who initiated the training.

    Returns:
        Path of the trained model file.

    Raises:
        AppException: wrapping any error raised during training.
    """
    exception = None
    model_file = None
    training_status = None
    ModelProcessor.set_training_status(
        bot=bot,
        user=user,
        status=MODEL_TRAINING_STATUS.INPROGRESS.value,
    )
    try:
        model_file = train_model_for_bot(bot)
        training_status = MODEL_TRAINING_STATUS.DONE.value
    except Exception as e:
        logging.exception(e)
        training_status = MODEL_TRAINING_STATUS.FAIL.value
        exception = str(e)
        # Chain the original error so its traceback is preserved for callers.
        raise AppException(exception) from e
    finally:
        # Persist the final status (and failure reason, if any) whether or
        # not the exception above propagates.
        ModelProcessor.set_training_status(
            bot=bot,
            user=user,
            status=training_status,
            model_path=model_file,
            exception=exception,
        )
    AgentProcessor.reload(bot)
    return model_file
| [
11748,
18931,
198,
11748,
28686,
198,
11748,
20218,
7753,
198,
6738,
4732,
8019,
1330,
29739,
25896,
198,
6738,
19720,
1330,
8255,
11,
32233,
11,
360,
713,
198,
198,
11748,
331,
43695,
198,
6738,
374,
15462,
13,
9979,
1187,
1330,
5550,
... | 2.45667 | 1,754 |
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.model import SuiteVisitor
| [
2,
220,
15069,
3648,
12,
6999,
26182,
45196,
641,
27862,
39447,
73,
198,
2,
198,
2,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
220,
345,
743,
407,
779,
428,
2393,
2845,
287,
1... | 3.685714 | 175 |
import argparse
import glob
import os
import sys
import time
import pickle
from itertools import product
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import utils
from bias_beta import BETA_BIAS
# PATHS
SRC_DIR = '/home/jmcbride/Scales/Toy_model/Src'
DATA_DIR = '/home/jmcbride/Scales/Toy_model/Data/Raw_tmp4'
DATA_DIR_2 = '/home/jmcbride/Scales/Toy_model/Data/Raw4'
# Model Parameters
# MIarr/MAarr: candidate minimum/maximum interval sizes (cents); Narr: scale sizes.
MIarr = np.array([0.] + list(np.arange(50., 110., 10.)))
MAarr = np.array(list(np.arange(400., 600., 50.)) + [1200.])
Narr = np.array([5, 7])
TEMP_MIN = 50.
TEMP_MAX = 300.
TEMP_LOW_MARGIN = 0.50
TEMP_HI_MARGIN = 1.50
N_TRIALS = 50
ALPHA_W = 0.1
ALPHA_HS = 1.
#MIarr = [50.]
#MAarr = [550.]
#Narr = [5]
# Names of the sampling-bias models evaluated by this script.
biases = ['none',
          'distI_1_0', 'distI_2_0', 'distI_0_1', 'distI_0_2',
          'distI_1_1', 'distI_2_1', 'distI_1_2', 'distI_2_2',
          'opt_c', 'opt_c_I1', 'opt_c_I2', 'opt_c_s2', 'opt_c_s3',
          'hs_n1_w05', 'hs_n1_w10', 'hs_n1_w15', 'hs_n1_w20',
          'hs_n2_w05', 'hs_n2_w10', 'hs_n2_w15', 'hs_n2_w20']
#beta_params = pickle.load(open(os.path.join(SRC_DIR, 'beta_param_biases.pickle'), 'rb'))
# Parallel parameters
N_PROC = 28
CHUNKSIZE = 5
# Initialisation parameters
N_SCALES = 10000
# NOTE(review): load_norm_const and get_attractors are not defined or imported
# in this chunk -- presumably defined elsewhere in the module; verify.
NORM_CONST = load_norm_const()
# Cache the attractor tables on disk: computing them is expensive.
if os.path.exists('hs_attractors.pickle'):
    ATTRACTORS = pickle.load(open('hs_attractors.pickle', 'rb'))
else:
    ATTRACTORS = {f"hs_n{n}_w{w:02d}":get_attractors(n, diff=w) for n in [1,2,3] for w in [5,10,15,20]}
    pickle.dump(ATTRACTORS, open('hs_attractors.pickle', 'wb'))
FILES = sorted(glob.glob("/home/jmcbride/Scales/Toy_model/Src/Alternative_attractors/*"))
# Keyed by a 6-character slice of each filename; values are the loaded arrays.
ALT_ATT = {os.path.split(f)[1][3:9]: np.load(f) for f in FILES}
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some integers.')
    # -t selects one (n, INT_MIN, INT_MAX, bias) configuration; 1-based index.
    parser.add_argument('-t', action='store', default=0, type=int)
    args = parser.parse_args()
    inputs = np.load('new_inputs_7.npy')
    n, INT_MIN, INT_MAX, bias = inputs[args.t-1]
    # Biases whose name contains 'B' are swept over a denser, lower BETA grid.
    if 'B' in bias:
        BETAarr = [0.001, 0.002, 0.004, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.2, 0.3, 0.4, 0.6, 0.8, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 24, 28, 32, 36, 40, 45, 50, 55, 60, 70, 80, 90, 100, 110, 120, 140, 160, 180, 200, 225, 250, 275, 300, 350, 400, 500]
    else:
        BETAarr = [1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 18, 20, 24, 28, 32, 36, 40, 45, 50, 55, 60, 70, 80, 90, 100, 110, 120, 140, 160, 180, 200, 225, 250, 275, 300, 350, 400, 500]
    # inputs rows come back as strings/objects from np.load; coerce types.
    n = int(n)
    INT_MIN = float(INT_MIN)
    INT_MAX = float(INT_MAX)
    print(f'INPUT T#{args.t}')
    count = 0
    for BETA in BETAarr:
        count += 1
        print(n, INT_MIN, INT_MAX, bias)
        fName = f"{DATA_DIR}/n{n}_{bias}_MI{int(INT_MIN):d}_MA{int(INT_MAX):d}_BETA_{BETA:07.3f}.feather"
        fName_2 = f"{DATA_DIR_2}/n{n}_{bias}_MI{int(INT_MIN):d}_MA{int(INT_MAX):d}_BETA_{BETA:07.3f}.feather"
        # Skip BETA values whose output file already exists (resume support).
        if os.path.exists(fName): # or os.path.exists(fName_2):
            continue
        print(f"\nRound {count}")
        print(f"\tINT_MIN = {INT_MIN}\tINT_MAX = {INT_MAX}\t{bias}\tBETA = {BETA}")
        timeS = time.time()
        # NOTE(review): generate_new_scale is not defined in this chunk --
        # presumably defined elsewhere in the module; verify.
        with mp.Pool(N_PROC) as pool:
            output = list(pool.imap_unordered(generate_new_scale, [(n, INT_MIN, INT_MAX, BETA, bias)]*N_SCALES, CHUNKSIZE))
#       output = [ generate_new_scale( (n, INT_MIN, INT_MAX, BETA, bias) ) for i in range(N_SCALES) ]
        # Each worker returns (scale, n_attempts).
        scales = [o[0] for o in output]
        n_samp = [o[1] for o in output]
        print(f"{len(scales)} scales accepted out of {sum(n_samp)} scales generated")
        print(f"Acceptance rate = {len(scales)/sum(n_samp)}")
        print('Time taken: ', (time.time()-timeS)/60.)
        # Serialise each scale's intervals as a ';'-joined string of ints.
        str_ints = [';'.join([str(int(round(x))) for x in sc]) for sc in scales]
        df = pd.DataFrame(data={'pair_ints':str_ints, 'n_att':n_samp})
        df['n_notes'] = n
        df = utils.get_scale_from_pair_ints(df)
        df = utils.get_all_ints_from_pair_ints(df)
        df = utils.get_harmonic_similarity_score_df(df)
#       df = utils.get_attractors_in_scale(df)
        df.to_feather(fName)
        print('Time taken: ', (time.time()-timeS)/60.)
#       if sum(n_samp) > 5000000:
#           break
#       if ((time.time()-timeS)/60.) > 100:
#           break
| [
11748,
1822,
29572,
198,
11748,
15095,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
2298,
293,
198,
198,
6738,
340,
861,
10141,
1330,
1720,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
... | 1.979857 | 2,234 |
# Copyright 2010 Google Inc.
# All Rights Reserved.
# Author: thaloun@google.com (Tim Haloun)
"""Noop tool that defines builder functions for non-default platforms to
avoid errors when scanning sconsscripts."""
import SCons.Builder
def generate(env):
  """SCons entry point: register no-op RES/Grit builders off Windows.

  On non-Windows platforms sconscripts may still reference these builders,
  so bind them to an empty action to avoid scanning errors.
  """
  if env.Bit('windows'):
    return
  noop = SCons.Builder.Builder(
      action=''
  )
  env.Append(BUILDERS={'RES': noop, 'Grit': noop})
| [
2,
15069,
3050,
3012,
3457,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
6434,
25,
294,
282,
977,
31,
13297,
13,
785,
357,
14967,
11023,
977,
8,
198,
198,
37811,
2949,
404,
2891,
326,
15738,
27098,
5499,
329,
1729,
12,
12286,
9554,
... | 3.028369 | 141 |
""" This module contains list of constant variables. """
ANGLE = 2
ACTION_INTERVAL = 0.05
DEFAULT_LIFE_COUNT = 5
DEFAULT_SPEED = 35
DEFAULT_SPEED_BULLETS = 120
MAX_ANGLE = 360
MAX_SPEED = 220
ROTATION_ANGLE = 0.015
SPEED = 8
STEP_INTERVAL = 1 # 1 second, can be changed to 0.5
UNIT_PROPERTIES = ['x', 'y', 'x1', 'y1', 'angle', 'hits', 'speed', 'id', 'life_count', 'type', 'width', 'height', 'name']
UNITS = {'invader': [{'type': 'invader1', 'dimension': 28},
{'type': 'invader2', 'dimension': 28},
{'type': 'invader3', 'dimension': 28}],
'hero': [{'type': 'hero_1_black', 'dimension': 28},
{'type': 'hero_1_green', 'dimension': 28},
{'type': 'hero_1_blue', 'dimension': 28},
{'type': 'hero_1_pink', 'dimension': 28},
{'type': 'hero_1_white', 'dimension': 28},
{'type': 'hero_1_red', 'dimension': 28},
{'type': 'hero_2_black', 'dimension': 28},
{'type': 'hero_2_green', 'dimension': 28},
{'type': 'hero_2_blue', 'dimension': 28},
{'type': 'hero_2_pink', 'dimension': 28},
{'type': 'hero_2_white', 'dimension': 28},
{'type': 'hero_2_red', 'dimension': 28}],
'bullet_hero': {'type': 'bullet_hero', 'dimension': 5},
'bullet_invader': {'type': 'bullet_invader', 'dimension': 10}}
| [
37811,
770,
8265,
4909,
1351,
286,
6937,
9633,
13,
37227,
628,
198,
15567,
2538,
796,
362,
198,
44710,
62,
41358,
23428,
796,
657,
13,
2713,
198,
7206,
38865,
62,
43,
29150,
62,
34,
28270,
796,
642,
198,
7206,
38865,
62,
4303,
41841,
... | 1.997238 | 724 |
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import sys
# Make the shared Kratos benchmarking helpers importable from this test dir.
kratos_benchmarking_path = '../../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
import benchmarking
# Regenerate the reference data file for the cantilever 2D dynamic benchmark.
benchmarking.BuildReferenceData("cantilever2ddynamic_benchmarking.py", "cantilever2ddynamic.txt")
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
11,
7297,
1303,
49123,
509,
10366,
418,
15205,
13323,
23154,
19528,
11670,
351,
21015,
362,
13,
21,
290,
362,
13,
22,
198,
11748,
25064,
198,
74,
10366,
418,
62,
2696... | 3.19469 | 113 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Background processes made simple
---------------------------------
"""
USAGE = """
## Example
For any existing app
Create File: app/models/scheduler.py ======
from gluon.scheduler import Scheduler
def demo1(*args,**vars):
print 'you passed args=%s and vars=%s' % (args, vars)
return 'done!'
def demo2():
1/0
scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2))
## run worker nodes with:
cd web2py
python web2py.py -K myapp
or
python gluon/scheduler.py -u sqlite://storage.sqlite \
-f applications/myapp/databases/ \
-t mytasks.py
(-h for info)
python scheduler.py -h
## schedule jobs using
http://127.0.0.1:8000/myapp/appadmin/insert/db/scheduler_task
## monitor scheduled jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_task.id>0
## view completed jobs
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_run.id>0
## view workers
http://127.0.0.1:8000/myapp/appadmin/select/db?query=db.scheduler_worker.id>0
## To install the scheduler as a permanent daemon on Linux (w/ Upstart), put
## the following into /etc/init/web2py-scheduler.conf:
## (This assumes your web2py instance is installed in <user>'s home directory,
## running as <user>, with app <myapp>, on network interface eth0.)
description "web2py task scheduler"
start on (local-filesystems and net-device-up IFACE=eth0)
stop on shutdown
respawn limit 8 60 # Give up if restart occurs 8 times in 60 seconds.
exec sudo -u <user> python /home/<user>/web2py/web2py.py -K <myapp>
respawn
## You can then start/stop/restart/check status of the daemon with:
sudo start web2py-scheduler
sudo stop web2py-scheduler
sudo restart web2py-scheduler
sudo status web2py-scheduler
"""
import os
import time
import multiprocessing
import sys
import threading
import traceback
import signal
import socket
import datetime
import logging
import optparse
import types
import Queue
# Default WEB2PY_PATH to the current working directory if not already set.
path = os.getcwd()
if 'WEB2PY_PATH' not in os.environ:
    os.environ['WEB2PY_PATH'] = path
# Pick the fastest available JSON implementation (Python 2 era fallbacks).
try:
    # try external module
    from simplejson import loads, dumps
except ImportError:
    try:
        # try stdlib (Python >= 2.6)
        from json import loads, dumps
    except:
        # fallback to pure-Python module
        from gluon.contrib.simplejson import loads, dumps
# Worker identity: "hostname#pid", also used to name this worker's logger.
IDENTIFIER = "%s#%s" % (socket.gethostname(), os.getpid())
logger = logging.getLogger('web2py.scheduler.%s' % IDENTIFIER)
from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET, IS_NOT_IN_DB
from gluon import IS_INT_IN_RANGE, IS_DATETIME, IS_IN_DB
from gluon.utils import web2py_uuid
from gluon.storage import Storage
# Task lifecycle states.
QUEUED = 'QUEUED'
ASSIGNED = 'ASSIGNED'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
TIMEOUT = 'TIMEOUT'
STOPPED = 'STOPPED'
# Worker states / commands.
ACTIVE = 'ACTIVE'
TERMINATE = 'TERMINATE'
DISABLED = 'DISABLED'
KILL = 'KILL'
PICK = 'PICK'
STOP_TASK = 'STOP_TASK'
EXPIRED = 'EXPIRED'
# Timing: heartbeat every 3 seconds; DISABLED workers back off up to
# MAXHIBERNATION seconds (see adj_hibernation).
SECONDS = 1
HEARTBEAT = 3 * SECONDS
MAXHIBERNATION = 10
# Marker emitted in task output to truncate previously captured output.
CLEAROUT = '!clear!'
# Types accepted as schedulable task callables.
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)
class Task(object):
    """Defines a "task" object that gets passed from the main thread to the
    executor's one
    """
    # NOTE(review): instances are built with keyword args (see pop_task),
    # but no __init__ is visible in this chunk -- body likely stripped.
class TaskReport(object):
    """Defines a "task report" object that gets passed from the executor's
    thread to the main one
    """
    # NOTE(review): instances are built as TaskReport(status, result=..., tb=...)
    # (see executor), but no __init__ is visible in this chunk -- body likely stripped.
class JobGraph(object):
    """Experimental: with JobGraph you can specify
    dependencies amongs tasks"""
    # NOTE(review): self.db and self.job_name are read below but set in an
    # __init__ that is not visible in this chunk -- verify.
    def add_deps(self, task_parent, task_child):
        """Creates a dependency between task_parent and task_child"""
        self.db.scheduler_task_deps.insert(task_parent=task_parent,
                                           task_child=task_child,
                                           job_name=self.job_name)
    def validate(self, job_name):
        """Validates if all tasks job_name can be completed, i.e. there
        are no mutual dependencies among tasks.
        Commits at the end if successfull, or it rollbacks the entire
        transaction. Handle with care!"""
        db = self.db
        sd = db.scheduler_task_deps
        if job_name:
            q = sd.job_name == job_name
        else:
            q = sd.id > 0
        edges = db(q).select()
        # Build adjacency: parent task -> set of child tasks.
        nested_dict = {}
        for row in edges:
            k = row.task_parent
            if k in nested_dict:
                nested_dict[k].add(row.task_child)
            else:
                nested_dict[k] = set((row.task_child,))
        try:
            # Kahn-style topological sort: peel off layers of tasks with no
            # remaining dependencies; anything left over implies a cycle.
            # (`reduce` is the Python 2 builtin -- this module targets Py2.)
            rtn = []
            for k, v in nested_dict.items():
                v.discard(k)  # Ignore self dependencies
            extra_items_in_deps = reduce(set.union, nested_dict.values()) - set(nested_dict.keys())
            nested_dict.update(dict((item, set()) for item in extra_items_in_deps))
            while True:
                ordered = set(item for item, dep in nested_dict.items() if not dep)
                if not ordered:
                    break
                rtn.append(ordered)
                nested_dict = dict(
                    (item, (dep - ordered)) for item, dep in nested_dict.items()
                    if item not in ordered
                )
            assert not nested_dict, "A cyclic dependency exists amongst %r" % nested_dict
            db.commit()
            return rtn
        except:
            db.rollback()
            return None
def demo_function(*argv, **kwargs):
    """ test function """
    # Demo task: prints 'click <i>' once per second, argv[0] times.
    # (Python 2 print statement -- this module targets Python 2.)
    for i in range(argv[0]):
        print 'click', i
        time.sleep(1)
    return 'done'
#The two helper functions below deal with simplejson decoding strings as unicode,
#especially for the dict decode whose result is then used as function keyword
#arguments: unicode variable names won't work there!
#borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python
def executor(queue, task, out):
    """The function used to execute tasks in the background process

    Args:
        queue: multiprocessing.Queue receiving the final TaskReport
        task: the `Task` to run
        out: multiprocessing.Queue receiving incremental task output
    """
    logger.debug('    task started')
    class LogOutput(object):
        """Facility to log output at intervals"""
        # NOTE(review): LogOutput's body is not visible in this chunk; as
        # written LogOutput(out) would fail -- implementation likely stripped.
    # Identity of the current task, exposed to the task's environment.
    W2P_TASK = Storage({'id': task.task_id, 'uuid': task.uuid})
    stdout = LogOutput(out)
    try:
        if task.app:
            os.chdir(os.environ['WEB2PY_PATH'])
            from gluon.shell import env, parse_path_info
            from gluon import current
            # Silence model-import noise while building the app environment.
            level = logging.getLogger().getEffectiveLevel()
            logging.getLogger().setLevel(logging.WARN)
            # Get controller-specific subdirectory if task.app is of
            # form 'app/controller'
            (a, c, f) = parse_path_info(task.app)
            _env = env(a=a, c=c, import_models=True)
            logging.getLogger().setLevel(level)
            f = task.function
            functions = current._scheduler.tasks
            if not functions:
                #look into env
                _function = _env.get(f)
            else:
                _function = functions.get(f)
            if not isinstance(_function, CALLABLETYPES):
                raise NameError(
                    "name '%s' not found in scheduler's environment" % f)
            #Inject W2P_TASK into environment
            _env.update({'W2P_TASK': W2P_TASK})
            #Inject W2P_TASK into current
            from gluon import current
            current.W2P_TASK = W2P_TASK
            globals().update(_env)
            # args/vars are stored as JSON strings on the task record.
            args = loads(task.args)
            vars = loads(task.vars, object_hook=_decode_dict)
            result = dumps(_function(*args, **vars))
        else:
            ### for testing purpose only
            result = eval(task.function)(
                *loads(task.args, object_hook=_decode_dict),
                **loads(task.vars, object_hook=_decode_dict))
        queue.put(TaskReport('COMPLETED', result=result))
    except BaseException, e:
        # Python 2 except syntax; report the failure with its traceback.
        tb = traceback.format_exc()
        queue.put(TaskReport('FAILED', tb=tb))
    del stdout
class MetaScheduler(threading.Thread):
    """Base class documenting scheduler's base methods"""
    # NOTE(review): attributes such as self.db, self.have_heartbeat,
    # self.empty_runs, self.max_empty_runs and methods start_heartbeats/
    # send_heartbeat/sleep are provided by code not visible in this chunk.
    # NOTE(review): `async` is a reserved word in Python 3.7+; this module
    # is Python 2 only.
    def async(self, task):
        """Starts the background process
        Args:
            task : a `Task` object
        Returns:
            tuple: containing::
                ('ok',result,output)
                ('error',exception,None)
                ('timeout',None,None)
                ('terminated',None,None)
        """
        db = self.db
        sr = db.scheduler_run
        out = multiprocessing.Queue()
        queue = multiprocessing.Queue(maxsize=1)
        p = multiprocessing.Process(target=executor, args=(queue, task, out))
        self.process = p
        logger.debug('    task starting')
        p.start()
        task_output = ""
        tout = ""
        try:
            # sync_output>0 makes join() wake periodically to flush output.
            if task.sync_output > 0:
                run_timeout = task.sync_output
            else:
                run_timeout = task.timeout
            start = time.time()
            while p.is_alive() and (not task.timeout or time.time() - start < task.timeout):
                if tout:
                    try:
                        logger.debug('    partial output saved')
                        db(sr.id == task.run_id).update(run_output=task_output)
                        db.commit()
                    except:
                        pass
                p.join(timeout=run_timeout)
                tout = ""
                while not out.empty():
                    tout += out.get()
                if tout:
                    logger.debug('    partial output: "%s"' % str(tout))
                    # CLEAROUT marker discards everything captured before it.
                    if CLEAROUT in tout:
                        task_output = tout[
                            tout.rfind(CLEAROUT) + len(CLEAROUT):]
                    else:
                        task_output += tout
        except:
            p.terminate()
            p.join()
            self.have_heartbeat = False
            logger.debug('    task stopped by general exception')
            tr = TaskReport(STOPPED)
        else:
            if p.is_alive():
                # Still running past its timeout: kill it and report TIMEOUT.
                p.terminate()
                logger.debug('    task timeout')
                try:
                    # we try to get a traceback here
                    tr = queue.get(timeout=2)
                    tr.status = TIMEOUT
                    tr.output = task_output
                except Queue.Empty:
                    tr = TaskReport(TIMEOUT)
            elif queue.empty():
                # Process died without reporting: treat as externally stopped.
                self.have_heartbeat = False
                logger.debug('    task stopped')
                tr = TaskReport(STOPPED)
            else:
                logger.debug('    task completed or failed')
                tr = queue.get()
                tr.output = task_output
        return tr
    def die(self):
        """Forces termination of the worker process along with any running
        task"""
        logger.info('die!')
        self.have_heartbeat = False
        self.terminate_process()
    def give_up(self):
        """Waits for any running task to be executed, then exits the worker
        process"""
        logger.info('Giving up as soon as possible!')
        self.have_heartbeat = False
    def terminate_process(self):
        """Terminates any running tasks (internal use only)"""
        try:
            self.process.terminate()
        except:
            pass # no process to terminate
    def run(self):
        """This is executed by the main thread to send heartbeats"""
        counter = 0
        while self.have_heartbeat:
            self.send_heartbeat(counter)
            counter += 1
    def pop_task(self):
        """Fetches a task ready to be executed"""
        # Base-class stub: returns a fixed demo task; overridden by Scheduler.
        return Task(
            app=None,
            function='demo_function',
            timeout=7,
            args='[2]',
            vars='{}')
    def report_task(self, task, task_report):
        """Creates a task report"""
        # Base-class stub (Python 2 print); overridden by Scheduler.
        print 'reporting task'
        pass
    def loop(self):
        """Main loop, fetching tasks and starting executor's background
        processes"""
        try:
            self.start_heartbeats()
            while True and self.have_heartbeat:
                logger.debug('looping...')
                task = self.pop_task()
                if task:
                    self.empty_runs = 0
                    self.report_task(task, self.async(task))
                else:
                    self.empty_runs += 1
                    logger.debug('sleeping...')
                    if self.max_empty_runs != 0:
                        logger.debug('empty runs %s/%s',
                                     self.empty_runs, self.max_empty_runs)
                        if self.empty_runs >= self.max_empty_runs:
                            logger.info(
                                'empty runs limit reached, killing myself')
                            self.die()
                    self.sleep()
        except KeyboardInterrupt:
            self.die()
# Allowed value sets for the status columns defined in define_tables.
TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED, EXPIRED)
RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED)
WORKER_STATUS = (ACTIVE, PICK, DISABLED, TERMINATE, KILL, STOP_TASK)
class TYPE(object):
    """
    Validator that checks whether field is valid json and validates its type.
    Used for `args` and `vars` of the scheduler_task table
    """
    # NOTE(review): used as TYPE(list)/TYPE(dict) in define_tables, but no
    # __init__/__call__ is visible in this chunk -- body likely stripped.
class Scheduler(MetaScheduler):
"""Scheduler object
Args:
db: DAL connection where Scheduler will create its tables
tasks(dict): either a dict containing name-->func or None.
If None, functions will be searched in the environment
migrate(bool): turn migration on/off for the Scheduler's tables
worker_name(str): force worker_name to identify each process.
Leave it to None to autoassign a name (hostname#pid)
group_names(list): process tasks belonging to this group
defaults to ['main'] if nothing gets passed
heartbeat(int): how many seconds the worker sleeps between one
execution and the following one. Indirectly sets how many seconds
will pass between checks for new tasks
max_empty_runs(int): how many loops are allowed to pass without
processing any tasks before exiting the process. 0 to keep always
the process alive
discard_results(bool): Scheduler stores executions's details into the
scheduler_run table. By default, only if there is a result the
details are kept. Turning this to True means discarding results
even for tasks that return something
utc_time(bool): do all datetime calculations assuming UTC as the
timezone. Remember to pass `start_time` and `stop_time` to tasks
accordingly
"""
def now(self):
"""Shortcut that fetches current time based on UTC preferences"""
return self.utc_time and datetime.datetime.utcnow() or datetime.datetime.now()
    def set_requirements(self, scheduler_task):
        """Called to set defaults for lazy_tables connections"""
        from gluon import current
        # Inside a web2py request, default application_name to the current
        # "app/controller" pair; outside a request there is no default.
        if hasattr(current, 'request'):
            scheduler_task.application_name.default = '%s/%s' % (
                current.request.application, current.request.controller
            )
    def define_tables(self, db, migrate):
        """Defines Scheduler tables structure

        Creates scheduler_task, scheduler_run, scheduler_worker and
        scheduler_task_deps on the given DAL connection.
        """
        from pydal.base import DEFAULT
        logger.debug('defining tables (migrate=%s)', migrate)
        now = self.now
        # NOTE(review): self.__get_migrate (name-mangled helper) and
        # self.tasks/worker_name/group_names come from code outside this view.
        db.define_table(
            'scheduler_task',
            Field('application_name', requires=IS_NOT_EMPTY(),
                  default=None, writable=False),
            Field('task_name', default=None),
            Field('group_name', default='main'),
            Field('status', requires=IS_IN_SET(TASK_STATUS),
                  default=QUEUED, writable=False),
            Field('function_name',
                  requires=IS_IN_SET(sorted(self.tasks.keys()))
                  if self.tasks else DEFAULT),
            Field('uuid', length=255,
                  requires=IS_NOT_IN_DB(db, 'scheduler_task.uuid'),
                  unique=True, default=web2py_uuid),
            Field('args', 'text', default='[]', requires=TYPE(list)),
            Field('vars', 'text', default='{}', requires=TYPE(dict)),
            Field('enabled', 'boolean', default=True),
            Field('start_time', 'datetime', default=now,
                  requires=IS_DATETIME()),
            Field('next_run_time', 'datetime', default=now),
            Field('stop_time', 'datetime'),
            Field('repeats', 'integer', default=1, comment="0=unlimited",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('retry_failed', 'integer', default=0, comment="-1=unlimited",
                  requires=IS_INT_IN_RANGE(-1, None)),
            Field('period', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('prevent_drift', 'boolean', default=False,
                  comment='Cron-like start_times between runs'),
            Field('timeout', 'integer', default=60, comment='seconds',
                  requires=IS_INT_IN_RANGE(1, None)),
            Field('sync_output', 'integer', default=0,
                  comment="update output every n sec: 0=never",
                  requires=IS_INT_IN_RANGE(0, None)),
            Field('times_run', 'integer', default=0, writable=False),
            Field('times_failed', 'integer', default=0, writable=False),
            Field('last_run_time', 'datetime', writable=False, readable=False),
            Field('assigned_worker_name', default='', writable=False),
            on_define=self.set_requirements,
            migrate=self.__get_migrate('scheduler_task', migrate),
            format='%(task_name)s')
        db.define_table(
            'scheduler_run',
            Field('task_id', 'reference scheduler_task'),
            Field('status', requires=IS_IN_SET(RUN_STATUS)),
            Field('start_time', 'datetime'),
            Field('stop_time', 'datetime'),
            Field('run_output', 'text'),
            Field('run_result', 'text'),
            Field('traceback', 'text'),
            Field('worker_name', default=self.worker_name),
            migrate=self.__get_migrate('scheduler_run', migrate)
        )
        db.define_table(
            'scheduler_worker',
            Field('worker_name', length=255, unique=True),
            Field('first_heartbeat', 'datetime'),
            Field('last_heartbeat', 'datetime'),
            Field('status', requires=IS_IN_SET(WORKER_STATUS)),
            Field('is_ticker', 'boolean', default=False, writable=False),
            Field('group_names', 'list:string', default=self.group_names),
            Field('worker_stats', 'json'),
            migrate=self.__get_migrate('scheduler_worker', migrate)
        )
        db.define_table(
            'scheduler_task_deps',
            Field('job_name', default='job_0'),
            Field('task_parent', 'integer',
                  requires=IS_IN_DB(db, 'scheduler_task.id',
                                    '%(task_name)s')
                  ),
            Field('task_child', 'reference scheduler_task'),
            Field('can_visit', 'boolean', default=False),
            migrate=self.__get_migrate('scheduler_task_deps', migrate)
        )
        if migrate is not False:
            db.commit()
    def loop(self, worker_name=None):
        """Main loop
        This works basically as a neverending loop that:
        - checks if the worker is ready to process tasks (is not DISABLED)
        - pops a task from the queue
        - if there is a task:
            - spawns the executor background process
            - waits for the process to be finished
            - sleeps `heartbeat` seconds
        - if there is not a task:
            - checks for max_empty_runs
            - sleeps `heartbeat` seconds
        """
        # Translate SIGTERM into SystemExit so the except clause below runs.
        signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
        try:
            self.start_heartbeats()
            while True and self.have_heartbeat:
                if self.w_stats.status == DISABLED:
                    logger.debug('Someone stopped me, sleeping until better'
                                 ' times come (%s)', self.w_stats.sleep)
                    self.sleep()
                    continue
                logger.debug('looping...')
                task = self.wrapped_pop_task()
                if task:
                    self.w_stats.empty_runs = 0
                    self.w_stats.status = RUNNING
                    self.w_stats.total += 1
                    # NOTE(review): self.async is MetaScheduler.async --
                    # `async` is a keyword in Python 3.7+; Python 2 only.
                    self.wrapped_report_task(task, self.async(task))
                    if not self.w_stats.status == DISABLED:
                        self.w_stats.status = ACTIVE
                else:
                    self.w_stats.empty_runs += 1
                    logger.debug('sleeping...')
                    if self.max_empty_runs != 0:
                        logger.debug('empty runs %s/%s',
                                     self.w_stats.empty_runs, self.max_empty_runs)
                        if self.w_stats.empty_runs >= self.max_empty_runs:
                            logger.info(
                                'empty runs limit reached, killing myself')
                            self.die()
                    self.sleep()
        except (KeyboardInterrupt, SystemExit):
            logger.info('catched')
            self.die()
def wrapped_assign_tasks(self, db):
"""Commodity function to call `assign_tasks` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `assign_task` after 0.5 seconds
"""
logger.debug('Assigning tasks...')
db.commit() # db.commit() only for Mysql
x = 0
while x < 10:
try:
self.assign_tasks(db)
db.commit()
logger.debug('Tasks assigned...')
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error('TICKER: error assigning tasks (%s)', x)
x += 1
time.sleep(0.5)
def wrapped_pop_task(self):
"""Commodity function to call `pop_task` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
db.commit() # another nifty db.commit() only for Mysql
x = 0
while x < 10:
try:
rtn = self.pop_task(db)
return rtn
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error popping tasks')
x += 1
time.sleep(0.5)
    def pop_task(self, db):
        """Grabs a task ready to be executed from the queue"""
        now = self.now()
        st = self.db.scheduler_task
        if self.is_a_ticker and self.do_assign_tasks:
            #I'm a ticker, and 5 loops passed without reassigning tasks,
            #let's do that and loop again
            self.wrapped_assign_tasks(db)
            return None
        #ready to process something
        # Only tasks already ASSIGNED to this worker are candidates.
        grabbed = db(
            (st.assigned_worker_name == self.worker_name) &
            (st.status == ASSIGNED)
        )
        task = grabbed.select(limitby=(0, 1), orderby=st.next_run_time).first()
        if task:
            task.update_record(status=RUNNING, last_run_time=now)
            #noone will touch my task!
            db.commit()
            logger.debug('    work to do %s', task.id)
        else:
            if self.is_a_ticker and self.greedy:
                #there are other tasks ready to be assigned
                logger.info('TICKER: greedy loop')
                self.wrapped_assign_tasks(db)
            else:
                logger.info('nothing to do')
            return None
        times_run = task.times_run + 1
        # Next run is scheduled relative to the last run, unless
        # prevent_drift anchors it to the original start_time (cron-like).
        if not task.prevent_drift:
            next_run_time = task.last_run_time + datetime.timedelta(
                seconds=task.period
            )
        else:
            next_run_time = task.start_time + datetime.timedelta(
                seconds=task.period * times_run
            )
        if times_run < task.repeats or task.repeats == 0:
            #need to run (repeating task)
            run_again = True
        else:
            #no need to run again
            run_again = False
        run_id = 0
        # Record the run unless results are discarded; retry on contention.
        while True and not self.discard_results:
            logger.debug('    new scheduler_run record')
            try:
                run_id = db.scheduler_run.insert(
                    task_id=task.id,
                    status=RUNNING,
                    start_time=now,
                    worker_name=self.worker_name)
                db.commit()
                break
            except:
                time.sleep(0.5)
                db.rollback()
        logger.info('new task %(id)s "%(task_name)s"'
                    ' %(application_name)s.%(function_name)s' % task)
        return Task(
            app=task.application_name,
            function=task.function_name,
            timeout=task.timeout,
            args=task.args, # in json
            vars=task.vars, # in json
            task_id=task.id,
            run_id=run_id,
            run_again=run_again,
            next_run_time=next_run_time,
            times_run=times_run,
            stop_time=task.stop_time,
            retry_failed=task.retry_failed,
            times_failed=task.times_failed,
            sync_output=task.sync_output,
            uuid=task.uuid)
def wrapped_report_task(self, task, task_report):
"""Commodity function to call `report_task` and trap exceptions
If an exception is raised, assume it happened because of database
contention and retries `pop_task` after 0.5 seconds
"""
db = self.db
while True:
try:
self.report_task(task, task_report)
db.commit()
break
except:
self.w_stats.errors += 1
db.rollback()
logger.error(' error storing result')
time.sleep(0.5)
    def report_task(self, task, task_report):
        """Takes care of storing the result according to preferences
        and deals with logic for repeating tasks"""
        db = self.db
        now = self.now()
        st = db.scheduler_task
        sr = db.scheduler_run
        if not self.discard_results:
            if task_report.result != 'null' or task_report.tb:
                #result is 'null' as a string if task completed
                #if it's stopped it's None as NoneType, so we record
                #the STOPPED "run" anyway
                logger.debug('    recording task report in db (%s)',
                             task_report.status)
                db(sr.id == task.run_id).update(
                    status=task_report.status,
                    stop_time=now,
                    run_result=task_report.result,
                    run_output=task_report.output,
                    traceback=task_report.tb)
            else:
                logger.debug('    deleting task report in db because of no result')
                db(sr.id == task.run_id).delete()
        #if there is a stop_time and the following run would exceed it
        is_expired = (task.stop_time
                      and task.next_run_time > task.stop_time
                      and True or False)
        # Repeating tasks go back to QUEUED unless expired; otherwise the
        # task is COMPLETED (and/or short-circuit idiom, Python 2 style).
        status = (task.run_again and is_expired and EXPIRED
                  or task.run_again and not is_expired
                  and QUEUED or COMPLETED)
        if task_report.status == COMPLETED:
            d = dict(status=status,
                     next_run_time=task.next_run_time,
                     times_run=task.times_run,
                     times_failed=0
                     )
            db(st.id == task.task_id).update(**d)
            if status == COMPLETED:
                self.update_dependencies(db, task.task_id)
        else:
            # Map the run outcome onto the task status; STOPPED tasks are
            # requeued.
            st_mapping = {'FAILED': 'FAILED',
                          'TIMEOUT': 'TIMEOUT',
                          'STOPPED': 'QUEUED'}[task_report.status]
            # retry_failed == -1 means unlimited retries.
            status = (task.retry_failed
                      and task.times_failed < task.retry_failed
                      and QUEUED or task.retry_failed == -1
                      and QUEUED or st_mapping)
            db(st.id == task.task_id).update(
                times_failed=db.scheduler_task.times_failed + 1,
                next_run_time=task.next_run_time,
                status=status
            )
        logger.info('task completed (%s)', task_report.status)
def adj_hibernation(self):
"""Used to increase the "sleep" interval for DISABLED workers"""
if self.w_stats.status == DISABLED:
wk_st = self.w_stats.sleep
hibernation = wk_st + HEARTBEAT if wk_st < MAXHIBERNATION else MAXHIBERNATION
self.w_stats.sleep = hibernation
def send_heartbeat(self, counter):
"""This function is vital for proper coordination among available
workers.
It:
- sends the heartbeat
- elects a ticker among available workers (the only process that
effectively dispatch tasks to workers)
- deals with worker's statuses
- does "housecleaning" for dead workers
- triggers tasks assignment to workers
"""
if not self.db_thread:
logger.debug('thread building own DAL object')
self.db_thread = DAL(
self.db._uri, folder=self.db._adapter.folder)
self.define_tables(self.db_thread, migrate=False)
try:
db = self.db_thread
sw, st = db.scheduler_worker, db.scheduler_task
now = self.now()
# record heartbeat
mybackedstatus = db(sw.worker_name == self.worker_name).select().first()
if not mybackedstatus:
sw.insert(status=ACTIVE, worker_name=self.worker_name,
first_heartbeat=now, last_heartbeat=now,
group_names=self.group_names,
worker_stats=self.w_stats)
self.w_stats.status = ACTIVE
self.w_stats.sleep = self.heartbeat
mybackedstatus = ACTIVE
else:
mybackedstatus = mybackedstatus.status
if mybackedstatus == DISABLED:
# keep sleeping
self.w_stats.status = DISABLED
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now,
worker_stats=self.w_stats)
elif mybackedstatus == TERMINATE:
self.w_stats.status = TERMINATE
logger.debug("Waiting to terminate the current task")
self.give_up()
elif mybackedstatus == KILL:
self.w_stats.status = KILL
self.die()
return
else:
if mybackedstatus == STOP_TASK:
logger.info('Asked to kill the current task')
self.terminate_process()
logger.debug('........recording heartbeat (%s)',
self.w_stats.status)
db(sw.worker_name == self.worker_name).update(
last_heartbeat=now, status=ACTIVE,
worker_stats=self.w_stats)
self.w_stats.sleep = self.heartbeat # re-activating the process
if self.w_stats.status != RUNNING:
self.w_stats.status = ACTIVE
self.do_assign_tasks = False
if counter % 5 == 0 or mybackedstatus == PICK:
try:
# delete dead workers
expiration = now - datetime.timedelta(
seconds=self.heartbeat * 3)
departure = now - datetime.timedelta(
seconds=self.heartbeat * 3 * 15)
logger.debug(
' freeing workers that have not sent heartbeat')
dead_workers = db(
((sw.last_heartbeat < expiration) & (sw.status == ACTIVE)) |
((sw.last_heartbeat < departure) & (sw.status != ACTIVE))
)
dead_workers_name = dead_workers._select(sw.worker_name)
db(
(st.assigned_worker_name.belongs(dead_workers_name)) &
(st.status == RUNNING)
).update(assigned_worker_name='', status=QUEUED)
dead_workers.delete()
try:
self.is_a_ticker = self.being_a_ticker()
except:
logger.error('Error coordinating TICKER')
if self.w_stats.status == ACTIVE:
self.do_assign_tasks = True
except:
logger.error('Error cleaning up')
db.commit()
except:
logger.error('Error retrieving status')
db.rollback()
self.adj_hibernation()
self.sleep()
def being_a_ticker(self):
"""Elects a TICKER process that assigns tasks to available workers.
Does its best to elect a worker that is not busy processing other tasks
to allow a proper distribution of tasks among all active workers ASAP
"""
db = self.db_thread
sw = db.scheduler_worker
my_name = self.worker_name
all_active = db(
(sw.worker_name != my_name) & (sw.status == ACTIVE)
).select(sw.is_ticker, sw.worker_name)
ticker = all_active.find(lambda row: row.is_ticker is True).first()
not_busy = self.w_stats.status == ACTIVE
if not ticker:
#if no other tickers are around
if not_busy:
#only if I'm not busy
db(sw.worker_name == my_name).update(is_ticker=True)
db(sw.worker_name != my_name).update(is_ticker=False)
logger.info("TICKER: I'm a ticker")
else:
#I'm busy
if len(all_active) >= 1:
#so I'll "downgrade" myself to a "poor worker"
db(sw.worker_name == my_name).update(is_ticker=False)
else:
not_busy = True
db.commit()
return not_busy
else:
logger.info(
"%s is a ticker, I'm a poor worker" % ticker.worker_name)
return False
def assign_tasks(self, db):
"""Assigns task to workers, that can then pop them from the queue
Deals with group_name(s) logic, in order to assign linearly tasks
to available workers for those groups
"""
sw, st, sd = db.scheduler_worker, db.scheduler_task, db.scheduler_task_deps
now = self.now()
all_workers = db(sw.status == ACTIVE).select()
#build workers as dict of groups
wkgroups = {}
for w in all_workers:
if w.worker_stats['status'] == 'RUNNING':
continue
group_names = w.group_names
for gname in group_names:
if gname not in wkgroups:
wkgroups[gname] = dict(
workers=[{'name': w.worker_name, 'c': 0}])
else:
wkgroups[gname]['workers'].append(
{'name': w.worker_name, 'c': 0})
#set queued tasks that expired between "runs" (i.e., you turned off
#the scheduler): then it wasn't expired, but now it is
db(
(st.status.belongs((QUEUED, ASSIGNED))) &
(st.stop_time < now)
).update(status=EXPIRED)
#calculate dependencies
deps_with_no_deps = db(
(sd.can_visit == False) &
(~sd.task_child.belongs(
db(sd.can_visit == False)._select(sd.task_parent)
)
)
)._select(sd.task_child)
no_deps = db(
(st.status.belongs((QUEUED,ASSIGNED))) &
(
(sd.id == None) | (st.id.belongs(deps_with_no_deps))
)
)._select(st.id, distinct=True, left=sd.on(
(st.id == sd.task_parent) &
(sd.can_visit == False)
)
)
all_available = db(
(st.status.belongs((QUEUED, ASSIGNED))) &
((st.times_run < st.repeats) | (st.repeats == 0)) &
(st.start_time <= now) &
((st.stop_time == None) | (st.stop_time > now)) &
(st.next_run_time <= now) &
(st.enabled == True) &
(st.id.belongs(no_deps))
)
limit = len(all_workers) * (50 / (len(wkgroups) or 1))
#if there are a moltitude of tasks, let's figure out a maximum of
#tasks per worker. This can be further tuned with some added
#intelligence (like esteeming how many tasks will a worker complete
#before the ticker reassign them around, but the gain is quite small
#50 is a sweet spot also for fast tasks, with sane heartbeat values
#NB: ticker reassign tasks every 5 cycles, so if a worker completes its
#50 tasks in less than heartbeat*5 seconds,
#it won't pick new tasks until heartbeat*5 seconds pass.
#If a worker is currently elaborating a long task, its tasks needs to
#be reassigned to other workers
#this shuffles up things a bit, in order to give a task equal chances
#to be executed
#let's freeze it up
db.commit()
x = 0
for group in wkgroups.keys():
tasks = all_available(st.group_name == group).select(
limitby=(0, limit), orderby = st.next_run_time)
#let's break up the queue evenly among workers
for task in tasks:
x += 1
gname = task.group_name
ws = wkgroups.get(gname)
if ws:
counter = 0
myw = 0
for i, w in enumerate(ws['workers']):
if w['c'] < counter:
myw = i
counter = w['c']
assigned_wn = wkgroups[gname]['workers'][myw]['name']
d = dict(
status=ASSIGNED,
assigned_worker_name=assigned_wn
)
if not task.task_name:
d['task_name'] = task.function_name
db(
(st.id == task.id) &
(st.status.belongs((QUEUED, ASSIGNED)))
).update(**d)
wkgroups[gname]['workers'][myw]['c'] += 1
db.commit()
#I didn't report tasks but I'm working nonetheless!!!!
if x > 0:
self.w_stats.empty_runs = 0
self.w_stats.queue = x
self.w_stats.distribution = wkgroups
self.w_stats.workers = len(all_workers)
#I'll be greedy only if tasks assigned are equal to the limit
# (meaning there could be others ready to be assigned)
self.greedy = x >= limit
logger.info('TICKER: workers are %s', len(all_workers))
logger.info('TICKER: tasks are %s', x)
def sleep(self):
"""Calculates the number of seconds to sleep according to worker's
status and `heartbeat` parameter"""
time.sleep(self.w_stats.sleep)
# should only sleep until next available task
def set_worker_status(self, group_names=None, action=ACTIVE,
exclude=None, limit=None, worker_name=None):
"""Internal function to set worker's status"""
ws = self.db.scheduler_worker
if not group_names:
group_names = self.group_names
elif isinstance(group_names, str):
group_names = [group_names]
if worker_name:
self.db(ws.worker_name == worker_name).update(status=action)
return
exclusion = exclude and exclude.append(action) or [action]
if not limit:
for group in group_names:
self.db(
(ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
).update(status=action)
else:
for group in group_names:
workers = self.db(
(ws.group_names.contains(group)) &
(~ws.status.belongs(exclusion))
)._select(ws.id, limitby=(0,limit))
self.db(ws.id.belongs(workers)).update(status=action)
def disable(self, group_names=None, limit=None, worker_name=None):
"""Sets DISABLED on the workers processing `group_names` tasks.
A DISABLED worker will be kept alive but it won't be able to process
any waiting tasks, essentially putting it to sleep.
By default, all group_names of Scheduler's instantation are selected"""
self.set_worker_status(
group_names=group_names,
action=DISABLED,
exclude=[DISABLED, KILL, TERMINATE],
limit=limit)
def resume(self, group_names=None, limit=None, worker_name=None):
"""Wakes a worker up (it will be able to process queued tasks)"""
self.set_worker_status(
group_names=group_names,
action=ACTIVE,
exclude=[KILL, TERMINATE],
limit=limit)
def terminate(self, group_names=None, limit=None, worker_name=None):
"""Sets TERMINATE as worker status. The worker will wait for any
currently running tasks to be executed and then it will exit gracefully
"""
self.set_worker_status(
group_names=group_names,
action=TERMINATE,
exclude=[KILL],
limit=limit)
def kill(self, group_names=None, limit=None, worker_name=None):
"""Sets KILL as worker status. The worker will be killed even if it's
processing a task."""
self.set_worker_status(
group_names=group_names,
action=KILL,
limit=limit)
def queue_task(self, function, pargs=[], pvars={}, **kwargs):
"""
Queue tasks. This takes care of handling the validation of all
parameters
Args:
function: the function (anything callable with a __name__)
pargs: "raw" args to be passed to the function. Automatically
jsonified.
pvars: "raw" kwargs to be passed to the function. Automatically
jsonified
kwargs: all the parameters available (basically, every
`scheduler_task` column). If args and vars are here, they should
be jsonified already, and they will override pargs and pvars
Returns:
a dict just as a normal validate_and_insert(), plus a uuid key
holding the uuid of the queued task. If validation is not passed
( i.e. some parameters are invalid) both id and uuid will be None,
and you'll get an "error" dict holding the errors found.
"""
if hasattr(function, '__name__'):
function = function.__name__
targs = 'args' in kwargs and kwargs.pop('args') or dumps(pargs)
tvars = 'vars' in kwargs and kwargs.pop('vars') or dumps(pvars)
tuuid = 'uuid' in kwargs and kwargs.pop('uuid') or web2py_uuid()
tname = 'task_name' in kwargs and kwargs.pop('task_name') or function
immediate = 'immediate' in kwargs and kwargs.pop('immediate') or None
rtn = self.db.scheduler_task.validate_and_insert(
function_name=function,
task_name=tname,
args=targs,
vars=tvars,
uuid=tuuid,
**kwargs)
if not rtn.errors:
rtn.uuid = tuuid
if immediate:
self.db(
(self.db.scheduler_worker.is_ticker == True)
).update(status=PICK)
else:
rtn.uuid = None
return rtn
def task_status(self, ref, output=False):
"""
Retrieves task status and optionally the result of the task
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
- a `Query` : lookup as you wish, e.g. ::
db.scheduler_task.task_name == 'test1'
output(bool): if `True`, fetch also the scheduler_run record
Returns:
a single Row object, for the last queued task.
If output == True, returns also the last scheduler_run record.
The scheduler_run record is fetched by a left join, so it can
have all fields == None
"""
from pydal.objects import Query
sr, st = self.db.scheduler_run, self.db.scheduler_task
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
elif isinstance(ref, Query):
q = ref
else:
raise SyntaxError(
"You can retrieve results only by id, uuid or Query")
fields = [st.ALL]
left = False
orderby = ~st.id
if output:
fields = st.ALL, sr.ALL
left = sr.on(sr.task_id == st.id)
orderby = ~st.id | ~sr.id
row = self.db(q).select(
*fields,
**dict(orderby=orderby,
left=left,
limitby=(0, 1))
).first()
if row and output:
row.result = row.scheduler_run.run_result and \
loads(row.scheduler_run.run_result,
object_hook=_decode_dict) or None
return row
def stop_task(self, ref):
"""Shortcut for task termination.
If the task is RUNNING it will terminate it, meaning that status
will be set as FAILED.
If the task is QUEUED, its stop_time will be set as to "now",
the enabled flag will be set to False, and the status to STOPPED
Args:
ref: can be
- an integer : lookup will be done by scheduler_task.id
- a string : lookup will be done by scheduler_task.uuid
Returns:
- 1 if task was stopped (meaning an update has been done)
- None if task was not found, or if task was not RUNNING or QUEUED
Note:
Experimental
"""
st, sw = self.db.scheduler_task, self.db.scheduler_worker
if isinstance(ref, (int, long)):
q = st.id == ref
elif isinstance(ref, str):
q = st.uuid == ref
else:
raise SyntaxError(
"You can retrieve results only by id or uuid")
task = self.db(q).select(st.id, st.status, st.assigned_worker_name)
task = task.first()
rtn = None
if not task:
return rtn
if task.status == 'RUNNING':
q = sw.worker_name == task.assigned_worker_name
rtn = self.db(q).update(status=STOP_TASK)
elif task.status == 'QUEUED':
rtn = self.db(q).update(
stop_time=self.now(),
enabled=False,
status=STOPPED)
return rtn
def get_workers(self, only_ticker=False):
""" Returns a dict holding `worker_name : {**columns}`
representing all "registered" workers
only_ticker returns only the workers running as a TICKER,
if there are any
"""
db = self.db
if only_ticker:
workers = db(db.scheduler_worker.is_ticker == True).select()
else:
workers = db(db.scheduler_worker.id > 0).select()
all_workers = {}
for row in workers:
all_workers[row.worker_name] = Storage(
status=row.status,
first_heartbeat=row.first_heartbeat,
last_heartbeat=row.last_heartbeat,
group_names=row.group_names,
is_ticker=row.is_ticker,
worker_stats=row.worker_stats
)
return all_workers
def main():
"""
allows to run worker without python web2py.py .... by simply::
python gluon/scheduler.py
"""
parser = optparse.OptionParser()
parser.add_option(
"-w", "--worker_name", dest="worker_name", default=None,
help="start a worker with name")
parser.add_option(
"-b", "--heartbeat", dest="heartbeat", default=10,
type='int', help="heartbeat time in seconds (default 10)")
parser.add_option(
"-L", "--logger_level", dest="logger_level",
default=30,
type='int',
help="set debug output level (0-100, 0 means all, 100 means none;default is 30)")
parser.add_option("-E", "--empty-runs",
dest="max_empty_runs",
type='int',
default=0,
help="max loops with no grabbed tasks permitted (0 for never check)")
parser.add_option(
"-g", "--group_names", dest="group_names",
default='main',
help="comma separated list of groups to be picked by the worker")
parser.add_option(
"-f", "--db_folder", dest="db_folder",
default='/Users/mdipierro/web2py/applications/scheduler/databases',
help="location of the dal database folder")
parser.add_option(
"-u", "--db_uri", dest="db_uri",
default='sqlite://storage.sqlite',
help="database URI string (web2py DAL syntax)")
parser.add_option(
"-t", "--tasks", dest="tasks", default=None,
help="file containing task files, must define" +
"tasks = {'task_name':(lambda: 'output')} or similar set of tasks")
parser.add_option(
"-U", "--utc-time", dest="utc_time", default=False,
help="work with UTC timestamps"
)
(options, args) = parser.parse_args()
if not options.tasks or not options.db_uri:
print USAGE
if options.tasks:
path, filename = os.path.split(options.tasks)
if filename.endswith('.py'):
filename = filename[:-3]
sys.path.append(path)
print 'importing tasks...'
tasks = __import__(filename, globals(), locals(), [], -1).tasks
print 'tasks found: ' + ', '.join(tasks.keys())
else:
tasks = {}
group_names = [x.strip() for x in options.group_names.split(',')]
logging.getLogger().setLevel(options.logger_level)
print 'groups for this worker: ' + ', '.join(group_names)
print 'connecting to database in folder: ' + options.db_folder or './'
print 'using URI: ' + options.db_uri
db = DAL(options.db_uri, folder=options.db_folder)
print 'instantiating scheduler...'
scheduler = Scheduler(db=db,
worker_name=options.worker_name,
tasks=tasks,
migrate=True,
group_names=group_names,
heartbeat=options.heartbeat,
max_empty_runs=options.max_empty_runs,
utc_time=options.utc_time)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
print 'starting main worker loop...'
scheduler.loop()
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
91,
770,
2393,
318,
636,
286,
262,
3992,
17,
9078,
5313,
25161,
198,
91,
15069,
276,
416,
5674,
25147,
60... | 2.012605 | 26,100 |
from ..Units.Unit import Unit
from Message_Log import MessageLog as LOG
from . import Damage as DMG
from Routines import SidavLOS as LOS, TdlConsoleWrapper as CW, SidavRandom as RAND
from ..Controllers import LevelController as LC
# The next two methods maybe should be moved to some controller!
| [
6738,
11485,
3118,
896,
13,
26453,
1330,
11801,
198,
6738,
16000,
62,
11187,
1330,
16000,
11187,
355,
41605,
198,
6738,
764,
1330,
8995,
355,
14848,
38,
198,
6738,
39602,
1127,
1330,
15686,
615,
45376,
355,
406,
2640,
11,
309,
25404,
47... | 3.7875 | 80 |
from dataclasses import dataclass
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# @dataclass
# movie1 = Movie(name = 'interstellar', genre='sf')
# print(movie1)
data = [{
'name':'interstellar',
'genre':'sf'
},{
'name':'The martian',
'genre' : 'sf'
}
]
engine = create_engine('sqlite:///app.db') # mysql, postgres
Session = sessionmaker(bind = engine)
my_sess = Session()
# Add table to db
Base.metadata.create_all(engine)
movie1 = Movie(name='interstellar', genre='scifi')
movie2 = Movie(name='Martian', genre='scifi')
# prepare
my_sess.add(movie1, movie2)
# commit
my_sess.commit()
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
11748,
44161,
282,
26599,
198,
6738,
44161,
282,
26599,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
6246,
10297,
198,
6738,
44161,
282,
26599,
1330,
29201,
11,
... | 2.866667 | 270 |
#!/usr/bin/env python
# coding=utf8
import numpy as np
import sympy as sp
class F_Routine(ModelRoutine):
"""Compute the right hand side of the dynamical system
:math:`\\frac{\\partial U}{\\partial t} = F(U)`
Parameters
----------
fields : triflow.Fields
triflow fields container generated by a triflow.Model containing the actual state of the dependent variables and helper functions.
pars : dict
dictionnary with the different physical parameters of the model and the 'periodic' key.
Returns
-------
numpy.ndarray
flat array containing the right hand side of the dynamical system.
""" # noqa
class J_Routine(ModelRoutine):
"""Compute the right hand side of the dynamical system
:math:`\\frac{\\partial U}{\\partial t} = F(U)`
Parameters
----------
fields : triflow.Fields
triflow fields container generated by a triflow.Model containing the actual state of the dependent variables and helper functions.
pars : dict
dictionnary with the different physical parameters of the model and the 'periodic' key.
sparse : bool, optional, default True
whether should the matrix returned as dense or sparse form.
Returns
-------
scipy.sparse.CSC or numpy.ndarray: sparse or dense form (depending of the `sparse` argument) of the Jacobian approximation of the dynamical system right hand side.
""" # noqa
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
28,
40477,
23,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10558,
88,
355,
599,
628,
198,
198,
4871,
376,
62,
49,
28399,
7,
17633,
49,
28399,
2599,
198,
220,
220,
... | 2.904297 | 512 |
from binding import *
from ..namespace import llvm
Reloc = llvm.Namespace('Reloc')
Reloc.Enum('Model',
'Default', 'Static', 'PIC_', 'DynamicNoPIC')
CodeModel = llvm.Namespace('CodeModel')
CodeModel.Enum('Model',
'Default', 'JITDefault', 'Small', 'Kernel', 'Medium', 'Large')
TLSModel = llvm.Namespace('TLSModel')
TLSModel.Enum('Model',
'GeneralDynamic', 'LocalDynamic', 'InitialExec', 'LocalExec')
CodeGenOpt = llvm.Namespace('CodeGenOpt')
CodeGenOpt.Enum('Level',
'None', 'Less', 'Default', 'Aggressive')
| [
6738,
12765,
1330,
1635,
198,
6738,
11485,
14933,
10223,
1330,
32660,
14761,
628,
198,
6892,
420,
796,
32660,
14761,
13,
36690,
10223,
10786,
6892,
420,
11537,
198,
6892,
420,
13,
4834,
388,
10786,
17633,
3256,
198,
220,
220,
220,
220,
... | 2.433476 | 233 |
from mazikeen.RmdirBlock import RmdirBlock
from mazikeen.GeneratorException import GeneratorException
from mazikeen.ConsolePrinter import Printer, BufferedPrinter | [
6738,
285,
1031,
522,
268,
13,
49,
9132,
343,
12235,
1330,
371,
9132,
343,
12235,
198,
6738,
285,
1031,
522,
268,
13,
8645,
1352,
16922,
1330,
35986,
16922,
198,
6738,
285,
1031,
522,
268,
13,
47581,
6836,
3849,
1330,
1736,
3849,
11,
... | 3.521739 | 46 |
PYTEST_HEADLOCK_DIR = '.pytest-headlock'
| [
198,
47,
56,
51,
6465,
62,
37682,
36840,
62,
34720,
796,
45302,
9078,
9288,
12,
2256,
5354,
6,
198
] | 2.210526 | 19 |
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Date
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from pynYNAB.schema.Entity import Entity, Base
from pynYNAB.schema.types import AccountTypes, ColorFlagType, AmountType
| [
6738,
44161,
282,
26599,
1330,
41146,
198,
6738,
44161,
282,
26599,
1330,
29201,
198,
6738,
44161,
282,
26599,
1330,
7536,
198,
6738,
44161,
282,
26599,
1330,
2039,
388,
198,
6738,
44161,
282,
26599,
1330,
8708,
9218,
198,
6738,
44161,
28... | 3.596774 | 124 |
# -*- coding: utf-8 -*-
# Copyright (c) 2011 University of Jyväskylä and Contributors.
#
# All Rights Reserved.
#
# Authors:
# Esa-Matti Suuronen <esa-matti@suuronen.org>
# Asko Soukka <asko.soukka@iki.fi>
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
"""
sauna.reload
============
Enable sauna.reload's Zope patches and deferrend z3c.autoinclude includes
by adding ``zope-conf-additional = %import sauna.reload``
into your buildout's part with *plone.recipe.zope2instance*-recipe::
[instance]
recipe = plone.recipe.zope2instance
zope-conf-additional = %import sauna.reload
"""
import sys
import os
from sauna.reload.forkloop import ForkLoop
from sauna.reload.reloadpaths import ReloadPaths
reload_paths = ReloadPaths([os.path.join(os.getcwd(), p)
for p in os.environ.get("RELOAD_PATH", "").split(":") if p])
forkloop = ForkLoop()
forkloop.startBootTimer()
# Hook into PEP 302 laoder
from sauna.reload.monkeypatcher import MonkeyPatchingLoader
__loader__ = MonkeyPatchingLoader(sys.modules[__name__])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2813,
2059,
286,
449,
88,
85,
11033,
15688,
75,
11033,
290,
25767,
669,
13,
198,
2,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
46665,
25,... | 2.882845 | 478 |
from datetime import datetime, timedelta
from commits import generate_commits
from deploy import DeployPolicy, Deployer
from export import print_metrics
from metrics import pipeline_metrics
from simulation import run_simulation, print_runs
from stages import Stage
stages = [
Stage("Build & Unit Test", duration=timedelta(minutes=10), failure_rate=0.01),
Stage("Acceptance Test", duration=timedelta(minutes=20), failure_rate=0.02),
Stage("Manual Test", duration=timedelta(minutes=120), failure_rate = 0.05, manual_stage=True),
]
start_time = datetime(year=2017,month=6,day=19,hour=8)
commits = generate_commits(100, start_time, offset=1000, max_interval=122)
deployer=Deployer(duration=timedelta(minutes=20), deploy_policy=DeployPolicy.EveryPassing, deploy_hour=8)
runs = run_simulation(start_time, stages, commits=commits, deployer=deployer)
print_runs("simulation2", stages, runs)
metrics = pipeline_metrics(runs)
print_metrics("simulation2", metrics)
print(metrics.pretty_print()) | [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
198,
6738,
23463,
1330,
7716,
62,
9503,
896,
198,
6738,
6061,
1330,
34706,
36727,
11,
34706,
263,
198,
6738,
10784,
1330,
3601,
62,
4164,
10466,
198,
6738,
20731,
1330,
11523,
6... | 3.076687 | 326 |
import random
bizz = {
"clothing": {
"Aerie": " Aerie focuses on intimates and swimwear for teens and young women. But Aerie has one big difference that makes it stand out from competitors: It was the first major brand to ban Photoshop in ad campaigns and deviate from the prototypical model in favor of average women proudly displaying their curves and flaws.",
"S'Well": "S'well is a reusable water bottle company headquartered in Manhattan, New York. Sarah Kauss founded the company in 2010. Kauss is the CEO of the company.",
"Tory Burch": "The designer started her eponymous “affordable luxury” brand in 2004 out of her kitchen with borrowed money and built it from the ground up, expanding the business into a $3 billion company with more than 160 stores across the world."
},
"software": {
"Flickr": "Flickr is an image- and video-hosting website and web services suite that was created by Ludicorp in 2004 and acquired by Yahoo on 20 March 2005.",
"BlackLine": "BlackLine is an American enterprise software company that develops cloud-based accounting software that helps businesses manage their quarterly financial reports."
},
"health and wellness": {
"The Honest Company" : "The Honest Company is an American consumer goods company, founded by actress Jessica Alba, that emphasizes household products to supply the marketplace for ethical consumerism",
"Birchbox":"Birchbox is a New York City-based online monthly subscription service that sends its subscribers a box of four to five selected samples of makeup, or other beauty related products.",
"The Body Shop" : "The Body Shop International Limited, trading as The Body Shop, is a British cosmetics, skin care and perfume company that was founded in 1976 by Dame Anita Roddick.",
"ProActiv" : "Proactiv, also known as Proactiv Solution, is a brand of skin-care products developed by two American dermatologists, Katie Rodan and Kathy Fields, and launched in 1995"
}
}
cool_synonyms = ["cool", "interesting", "unique", "innovative"]
| [
11748,
4738,
198,
65,
6457,
796,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
366,
565,
24834,
1298,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
366,
32,
18287,
1298,
366,
15781,
494,
13692,
319,
10683,
689,
... | 3.587065 | 603 |
#### PATTERN | DB ##################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2010 University of Antwerp, Belgium
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from builtins import str, bytes, dict, int, chr
from builtins import map, zip, filter
from builtins import object, range, next
import os
import sys
import inspect
import warnings
import re
import urllib
import base64
import json
if sys.version > "3":
import csv as csvlib
else:
from backports import csv as csvlib
from codecs import BOM_UTF8
from itertools import islice
from datetime import datetime, timedelta
from calendar import monthrange
from time import mktime, strftime
from math import sqrt
from types import GeneratorType
from functools import cmp_to_key
from io import open, StringIO, BytesIO
BOM_UTF8 = BOM_UTF8.decode("utf-8")
from html.entities import name2codepoint
from email.utils import parsedate_tz, mktime_tz
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
from pattern.helpers import encode_string, decode_string
decode_utf8 = decode_string
encode_utf8 = encode_string
MYSQL = "mysql"
SQLITE = "sqlite"
def _import_db(engine=SQLITE):
""" Lazy import called from Database() or Database.new().
Depending on the type of database we either import MySQLdb or SQLite.
Note: 64-bit Python needs 64-bit MySQL, 32-bit the 32-bit version.
"""
global MySQLdb
global sqlite
if engine == MYSQL:
import MySQLdb
warnings.simplefilter("ignore", MySQLdb.Warning)
if engine == SQLITE:
import sqlite3.dbapi2 as sqlite
def pd(*args):
""" Returns the path to the parent directory of the script that calls pd() + given relative path.
For example, in this script: pd("..") => /usr/local/lib/python2.x/site-packages/pattern/db/..
"""
f = inspect.currentframe()
f = inspect.getouterframes(f)[1][1]
f = f != "<stdin>" and f or os.getcwd()
return os.path.join(os.path.dirname(os.path.realpath(f)), *args)
_sum = sum # pattern.db.sum() is also a column aggregate function.
#### DATE FUNCTIONS ################################################################################
NOW, YEAR = "now", datetime.now().year
# Date formats can be found in the Python documentation:
# http://docs.python.org/library/time.html#time.strftime
DEFAULT_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
date_formats = [
DEFAULT_DATE_FORMAT, # 2010-09-21 09:27:01 => SQLite + MySQL
"%Y-%m-%dT%H:%M:%SZ", # 2010-09-20T09:27:01Z => Bing
"%a, %d %b %Y %H:%M:%S +0000", # Fri, 21 Sep 2010 09:27:01 +000 => Twitter
"%a %b %d %H:%M:%S +0000 %Y", # Fri Sep 21 09:21:01 +0000 2010 => Twitter
"%Y-%m-%dT%H:%M:%S+0000", # 2010-09-20T09:27:01+0000 => Facebook
"%Y-%m-%d %H:%M", # 2010-09-21 09:27
"%Y-%m-%d", # 2010-09-21
"%d/%m/%Y", # 21/09/2010
"%d %B %Y", # 21 September 2010
"%d %b %Y", # 21 Sep 2010
"%B %d %Y", # September 21 2010
"%B %d, %Y", # September 21, 2010
]
def _yyyywwd2yyyymmdd(year, week, weekday):
""" Returns (year, month, day) for given (year, week, weekday).
"""
d = datetime(year, month=1, day=4) # 1st week contains January 4th.
d = d - timedelta(d.isoweekday() - 1) + timedelta(days=weekday - 1, weeks=week - 1)
return (d.year, d.month, d.day)
def _strftime1900(d, format):
""" Returns the given date formatted as a string.
"""
if d.year < 1900: # Python's strftime() doesn't handle year < 1900.
return strftime(format, (1900,) + d.timetuple()[1:]).replace("1900", str(d.year), 1)
return datetime.strftime(d, format)
class Date(datetime):
""" A convenience wrapper for datetime.datetime with a default string format.
"""
format = DEFAULT_DATE_FORMAT
# Date.year
# Date.month
# Date.day
# Date.minute
# Date.second
@property
@property
@property
@property
@property
@property
def date(*args, **kwargs):
    """ Returns a Date from the given parameters:
        - date(format=Date.format) => now
        - date(int)
        - date(string)
        - date(string, format=Date.format)
        - date(string, inputformat, format=Date.format)
        - date(year, month, day, format=Date.format)
        - date(year, month, day, hours, minutes, seconds, format=Date.format)
        If a string is given without an explicit input format, all known formats will be tried.
    """
    d = None # The Date to return.
    f = None # An explicit output format popped from kwargs, if any.
    if len(args) == 0 \
    and kwargs.get("year") is not None \
    and kwargs.get("month") \
    and kwargs.get("day"):
        # Year, month, day.
        d = Date(**kwargs)
    elif kwargs.get("week"):
        # Year, week, weekday (ISO 8601 calendar).
        f = kwargs.pop("format", None)
        d = Date(*_yyyywwd2yyyymmdd(
            kwargs.pop("year", args and args[0] or Date.now().year),
            kwargs.pop("week"),
            kwargs.pop("weekday", kwargs.pop("day", 1))), **kwargs)
    elif len(args) == 0 or args[0] == NOW:
        # No parameters or one parameter NOW.
        d = Date.now()
    elif len(args) == 1 \
    and isinstance(args[0], (Date, datetime)):
        # One parameter, a Date or datetime object.
        # fromtimestamp() drops sub-second precision; add microseconds back.
        d = Date.fromtimestamp(int(mktime(args[0].timetuple())))
        d += time(microseconds=args[0].microsecond)
    elif len(args) == 1 \
    and (isinstance(args[0], int) \
     or isinstance(args[0], (str, bytes)) and args[0].isdigit()):
        # One parameter, an int or string timestamp.
        if isinstance(args[0], bytes):
            args = (args[0].decode("utf-8"),)
        d = Date.fromtimestamp(int(args[0]))
    elif len(args) == 1 \
    and isinstance(args[0], (str, bytes)):
        # One parameter, a date string for which we guess the input format (RFC2822 or known formats).
        if isinstance(args[0], bytes):
            args = (args[0].decode("utf-8"),)
        try:
            # First attempt: RFC 2822 (e-mail / Twitter style) timestamps.
            d = Date.fromtimestamp(mktime_tz(parsedate_tz(args[0])))
        except:
            # Otherwise, try the explicit format (if given) and then each known format.
            for format in ("format" in kwargs and [kwargs["format"]] or []) + date_formats:
                try:
                    d = Date.strptime(args[0], format)
                    break
                except:
                    pass
            if d is None:
                raise DateError("unknown date format for %s" % repr(args[0]))
    elif len(args) == 2 \
    and isinstance(args[0], (str, bytes)):
        # Two parameters, a date string and an explicit input format.
        if isinstance(args[0], bytes):
            args = (args[0].decode("utf-8"), args[1].decode("utf-8"))
        d = Date.strptime(args[0], args[1])
    elif len(args) >= 3:
        # 3-6 parameters: year, month, day, hours, minutes, seconds.
        f = kwargs.pop("format", None)
        d = Date(*args[:7], **kwargs)
    else:
        raise DateError("unknown date format")
    # Output format priority: format kwarg > 8th positional argument > popped format > class default.
    d.format = kwargs.get("format") or len(args) > 7 and args[7] or f or Date.format
    return d
def time(days=0, seconds=0, minutes=0, hours=0, **kwargs):
    """ Returns a Time that can be added to a Date object.
        Other parameters: microseconds, milliseconds, weeks, months, years.
    """
    # Note: shadows the standard-library time module inside this module.
    return Time(hours=hours, minutes=minutes, seconds=seconds, days=days, **kwargs)
def string(value, default=""):
    """ Returns the value cast to unicode, or default if it is None/empty.
    """
    # Useful for HTML interfaces.
    # Explicit checks (not truthiness): 0 and False must still be cast to string.
    if value is not None and value != "":
        return decode_utf8(value)
    return default
def encrypt_string(s, key=""):
    """ Returns the given string as an encrypted bytestring.
    """
    key += " " # Guarantee a non-empty key.
    chunks = []
    for i, ch in enumerate(s):
        try:
            # Caesar-style shift of each character by the matching key character.
            shifted = ord(ch) + ord(key[i % len(key)]) % 256
            chunks.append(chr(shifted).encode("latin-1"))
        except:
            # E.g. the shifted code point falls outside latin-1.
            raise EncryptionError()
    return base64.urlsafe_b64encode(b"".join(chunks))
def decrypt_string(s, key=""):
    """ Returns the given string as a decrypted Unicode string.
    """
    key += " " # Mirror of encrypt_string()'s key padding.
    raw = base64.urlsafe_b64decode(s).decode("latin-1")
    chars = []
    for i, ch in enumerate(raw):
        try:
            # Undo the per-character shift applied by encrypt_string().
            chars.append(chr(ord(ch) - ord(key[i % len(key)]) % 256))
        except:
            raise DecryptionError()
    return decode_utf8("".join(chars))
# "&" not followed by "#" (i.e., not already the start of a character reference).
# Raw strings avoid the invalid "\&" / "\#" escape sequences (SyntaxWarning in modern Python).
RE_AMPERSAND = re.compile(r"&(?!#)")
# HTML character references such as &#233; / &#xE9;
RE_UNICODE = re.compile(r'&(#?)(x|X?)(\w+);') # &#201;
def encode_entities(string):
    """ Encodes HTML entities in the given string ("<" => "&lt;").
        For example, to display "<em>hello</em>" in a browser,
        we need to pass "&lt;em&gt;hello&lt;/em&gt;" (otherwise "hello" in italic is displayed).
    """
    # NOTE(review): in this copy of the file the replacement strings had been
    # entity-decoded (e.g. replace("<", "<"), and replace('"', """) was a
    # SyntaxError); the proper entity strings are restored below.
    if isinstance(string, str):
        string = RE_AMPERSAND.sub("&amp;", string) # "&" first, so entities below are not double-escaped.
        string = string.replace("<", "&lt;")
        string = string.replace(">", "&gt;")
        string = string.replace('"', "&quot;")
        string = string.replace("'", "&#39;")
    return string
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
    """
    # http://snippets.dzone.com/posts/show/4569
    if not isinstance(string, str):
        return string
    return RE_UNICODE.subn(replace_entity, string)[0]
class _Binary(object):
    """ A wrapper for BLOB data with engine-specific encoding.
        See also: Database.binary().
    """
    # NOTE(review): no method bodies are present in this copy of the file;
    # _escape() below calls value.escape() on _Binary instances, so an
    # escape() method (and presumably __init__) appears to have been
    # stripped. TODO: restore from the upstream pattern.db source.
def _escape(value, quote=lambda string: "'%s'" % string.replace("'", "\\'")):
""" Returns the quoted, escaped string (e.g., "'a bird\'s feathers'") for database entry.
Anything that is not a string (e.g., an integer) is converted to string.
Booleans are converted to "0" and "1", None is converted to "null".
See also: Database.escape()
"""
# Note: use Database.escape() for MySQL/SQLITE-specific escape.
if value in ("current_timestamp",):
# Don't quote constants such as current_timestamp.
return value
if isinstance(value, str):
# Strings are quoted, single quotes are escaped according to the database engine.
return quote(value)
if isinstance(value, bool):
# Booleans are converted to "0" or "1".
return str(int(value))
if isinstance(value, (int, float)):
# Numbers are converted to string.
return str(value)
if isinstance(value, datetime):
# Dates are formatted as string.
return quote(value.strftime(DEFAULT_DATE_FORMAT))
if isinstance(value, type(None)):
# None is converted to NULL.
return "null"
if isinstance(value, Query):
# A Query is converted to "("+Query.SQL()+")" (=subquery).
return "(%s)" % value.SQL().rstrip(";")
if isinstance(value, _Binary):
# Binary data is escaped with attention to null bytes.
return "'%s'" % value.escape()
return value
def cast(x, f, default=None):
    """ Returns f(x) or default.
    """
    # String-to-string casts are normalized to unicode.
    if f is str and isinstance(x, str):
        return decode_utf8(x)
    # Human-readable booleans; anything else falls through to bool(x).
    if f is bool:
        if x in ("1", "True", "true"):
            return True
        if x in ("0", "False", "false"):
            return False
    if f is int:
        # Round rather than floor, and accept numeric strings like "3.7".
        f = lambda v: int(round(float(v)))
    try:
        return f(x)
    except:
        return default
#### LIST FUNCTIONS ################################################################################
def find(match=lambda item: False, list=[]):
    """ Returns the first item in the list for which match(item) is True.
    """
    # The mutable [] default is safe here: the list is only iterated, never modified.
    # Note the strict "is True" test (a truthy non-True result does not match).
    return next((item for item in list if match(item) is True), None)
def order(list, cmp=None, key=None, reverse=False):
""" Returns a list of indices in the order as when the given list is sorted.
For example: ["c","a","b"] => [1, 2, 0]
This means that in the sorted list, "a" (index 1) comes first and "c" (index 0) last.
"""
if cmp and key:
f = lambda i, j: cmp(key(list[i]), key(list[j]))
elif cmp:
f = lambda i, j: cmp(list[i], list[j])
elif key:
f = lambda i, j: int(key(list[i]) >= key(list[j])) * 2 - 1
else:
f = lambda i, j: int(list[i] >= list[j]) * 2 - 1
return sorted(range(len(list)), key=cmp_to_key(f), reverse=reverse)
_order = order
def avg(list):
    """ Returns the arithmetic mean of the given list of values.
        For example: mean([1,2,3,4]) = 10/4 = 2.5.
    """
    # _sum is the builtin sum(); pattern.db defines its own sum() aggregate.
    n = len(list) or 1 # Guard against division by zero for empty input.
    return float(_sum(list)) / n
def variance(list):
    """ Returns the variance of the given list of values.
        The variance is the average of squared deviations from the mean.
    """
    m = avg(list)
    # Sample variance (divides by n-1); "or 1" guards lists of length <= 1.
    return _sum([(x - m) ** 2 for x in list]) / (len(list) - 1 or 1)
def stdev(list):
    """ Returns the standard deviation of the given list of values,
        i.e. the square root of the (sample) variance.
        Low standard deviation => values are close to the mean.
        High standard deviation => values are spread out over a large range.
    """
    return sqrt(variance(list))
#### SQLITE FUNCTIONS ##############################################################################
# Convenient MySQL functions not in in pysqlite2. These are created at each Database.connect().
# SQLite (and MySQL) date string format:
# yyyy-mm-dd hh:mm:ss
#### DATABASE ######################################################################################
#### FIELD #########################################################################################
# The STRING constant can be called with a length when passed to field(),
# for example field("language", type=STRING(2), default="en", index=True).
# Field type.
# Note: SQLite string fields do not impose a string limit.
# Unicode strings have more characters than actually displayed (e.g. "♥").
# Boolean fields are stored as tinyint(1), int 0 or 1.
# NOTE(review): _String is defined elsewhere in this file (not visible here);
# it is a callable STRING type that yields a varchar of the given length.
STRING, INTEGER, FLOAT, TEXT, BLOB, BOOLEAN, DATE = \
    _String(), "integer", "float", "text", "blob", "boolean", "date"
STR, INT, BOOL = STRING, INTEGER, BOOLEAN
# Field index.
PRIMARY = "primary"
UNIQUE = "unique"
# DATE default.
NOW = "now"
#--- FIELD- ----------------------------------------------------------------------------------------
#def field(name, type=STRING, default=None, index=False, optional=True)
def field(name, type=STRING, **kwargs):
    """ Returns a table field definition that can be passed to Database.create().
        The column can be indexed by setting index to True, PRIMARY or UNIQUE.
        Primary key number columns are always auto-incremented.
    """
    # DATE fields default to the current timestamp.
    default = kwargs.get("default", NOW if type == DATE else None)
    index = kwargs.get("index", False)
    optional = kwargs.get("optional", True)
    # Map abstract field types to concrete SQL column types.
    if type == STRING:
        type = STRING() # Default varchar length.
    elif type == FLOAT:
        type = "real"
    elif type == BOOLEAN:
        type = "tinyint(1)"
    elif type == DATE:
        type = "timestamp"
    # "0" / "1" (and 0 / 1) are accepted for the boolean options.
    if str(index) in "01":
        index = bool(int(index))
    if str(optional) in "01":
        optional = bool(int(optional))
    return (name, type, default, index, optional)
_field = field
def primary_key(name="id"):
    """ Returns an auto-incremented integer primary key field named "id".
    """
    return field(name, type=INTEGER, index=PRIMARY, optional=False)
pk = primary_key
#--- FIELD SCHEMA ----------------------------------------------------------------------------------
#### TABLE #########################################################################################
# Wildcard meaning "all fields", as in SQL's SELECT *.
ALL = "*"
#### QUERY #########################################################################################
#--- QUERY SYNTAX ----------------------------------------------------------------------------------
BETWEEN, LIKE, IN = \
    "between", "like", "in"
# Alternation of recognized SQL function names -- presumably used to detect
# function calls wrapped around field names; verify against the query parser.
sql_functions = \
    "first|last|count|min|max|sum|avg|stdev|group_concat|concatenate|" \
    "year|month|day|hour|minute|second|" \
    "length|lower|upper|substr|substring|replace|trim|round|random|rand|" \
    "strftime|date_format"
def abs(table, field):
    """ For a given <fieldname>, returns the absolute <tablename>.<fieldname>.
        This is useful when constructing queries with relations to other tables.
    """
    # Note: shadows the built-in abs() inside this module (upstream pattern.db API).
    # NOTE(review): the nested _format() helper was missing in this copy of the
    # file (NameError at call time); restored per the docstring contract.
    def _format(f):
        # Names that are already absolute ("<table>.<field>") are left untouched.
        return f if "." in f else "%s.%s" % (table, f)
    if isinstance(field, (list, tuple)):
        return [_format(f) for f in field]
    return _format(field)
def cmp(field, value, comparison="=", escape=lambda v: _escape(v), table=""):
    """ Returns an SQL WHERE comparison string using =, i=, !=, >, <, >=, <= or BETWEEN.
        Strings may contain wildcards (*) at the start or at the end.
        A list or tuple of values can be given when using =, != or BETWEEN.
        Note: shadows the (Python 2) built-in cmp(); abs() and any() below are
        this module's own query helpers, not the builtins.
    """
    # Use absolute field names if table name is given:
    if table:
        field = abs(table, field)
    # cmp("name", "Mar*") => "name like 'Mar%'".
    if isinstance(value, str) and (value.startswith(("*", "%")) or value.endswith(("*", "%"))):
        if comparison in ("=", "i=", "==", LIKE):
            return "%s like %s" % (field, escape(value.replace("*", "%")))
        if comparison in ("!=", "<>"):
            return "%s not like %s" % (field, escape(value.replace("*", "%")))
    # cmp("name", "markov", "i=") => "name like 'markov'" (case-insensitive).
    if isinstance(value, str):
        if comparison == "i=":
            return "%s like %s" % (field, escape(value))
    # cmp("type", ("cat", "dog"), "!=") => "type not in ('cat','dog')".
    # cmp("amount", (10, 100), ":") => "amount between 10 and 100".
    if isinstance(value, (list, tuple)):
        # If any member contains a wildcard, expand to an OR-group of LIKE filters.
        if find(lambda v: isinstance(v, str) and (v.startswith("*") or v.endswith("*")), value):
            return "(%s)" % any(*[(field, v) for v in value]).sql(escape=escape)
        if comparison in ("=", "==", IN):
            return "%s in (%s)" % (field, ",".join(escape(v) for v in value))
        if comparison in ("!=", "<>"):
            return "%s not in (%s)" % (field, ",".join(escape(v) for v in value))
        if comparison in (":", BETWEEN):
            return "%s between %s and %s" % (field, escape(value[0]), escape(value[1]))
    # cmp("type", None, "!=") => "type is not null".
    if isinstance(value, type(None)):
        if comparison in ("=", "=="):
            return "%s is null" % field
        if comparison in ("!=", "<>"):
            return "%s is not null" % field
    # Using a subquery:
    if isinstance(value, Query):
        if comparison in ("=", "==", IN):
            return "%s in %s" % (field, escape(value))
        if comparison in ("!=", "<>"):
            return "%s not in %s" % (field, escape(value))
    # Fallback: plain "<field><op><escaped value>" (e.g. "amount>10").
    return "%s%s%s" % (field, comparison, escape(value))
# Functions for date fields: cmp(year("date"), 1999, ">").
# Aggregate functions.
# NOTE(review): the date-field helpers (year(), month(), ...) and the aggregate
# helpers announced by the two comments above are not present in this copy of
# the file -- their definitions appear to have been stripped.
#--- QUERY FILTER ----------------------------------------------------------------------------------
# Logical operators for combining query filters (see all() / any() below).
AND, OR = "and", "or"
def all(*args, **kwargs):
    """ Returns a group of filters combined with AND.
    """
    # Note: shadows the built-in all() inside this module.
    return FilterChain(*args, **dict(kwargs, operator=AND))
def any(*args, **kwargs):
    """ Returns a group of filters combined with OR.
    """
    # Note: shadows the built-in any() inside this module.
    return FilterChain(*args, **dict(kwargs, operator=OR))
# From a GET-query dict:
# all(*dict.items())
# filter() value can also be a Query with comparison=IN.
#--- QUERY -----------------------------------------------------------------------------------------
# Relations:
INNER = "inner" # The rows for which there is a match in both tables (same as join=None).
LEFT = "left" # All rows from this table, with field values from the related table when possible.
RIGHT = "right" # All rows from the related table, with field values from this table when possible.
FULL = "full" # All rows form both tables.
# NOTE(review): relation() is not defined anywhere in this copy of the file --
# its definition appears to have been stripped, so the alias below raises
# NameError at import time. TODO: restore relation() from upstream pattern.db.
rel = relation
# Sorting:
ASCENDING = "asc"
DESCENDING = "desc"
# Grouping:
FIRST, LAST, COUNT, MAX, MIN, SUM, AVG, STDEV, CONCATENATE = \
    "first", "last", "count", "max", "min", "sum", "avg", "stdev", "group_concat"
def associative(query):
    """ Yields query rows as dictionaries of (field, value)-items.
    """
    # Lazy: rows are converted one at a time as the caller iterates.
    for r in query:
        yield query.record(r)
assoc = associative
#### VIEW ##########################################################################################
# A representation of data based on a table in the database.
# The render() method can be overridden to output data in a certain format (e.g., HTML for a web app).
#### XML PARSER ####################################################################################
# Prolog emitted at the top of every XML document produced by xml() below.
XML_HEADER = "<?xml version=\"1.0\" encoding=\"utf-8\"?>"
def _unpack_fields(table, fields=[]):
    """ Replaces "*" with the actual field names.
        Fields from related tables keep the "<tablename>." prefix.
    """
    unpacked = []
    for f in fields:
        # Split "<table>.<field>"; a bare name belongs to this table.
        if "." in f:
            a, b = f.split(".", 1)
        else:
            a, b = table.name, f
        if b == ALL and a == table.name:
            # <table>.* => every field of this table, unprefixed.
            unpacked.extend(table.db.tables[a].fields)
        elif b == ALL:
            # <related-table>.* => every field, with the table-name prefix.
            unpacked.extend("%s.%s" % (a, f2) for f2 in table.db.tables[a].fields)
        elif a != table.name:
            # <related-table>.<field> keeps its prefix.
            unpacked.append("%s.%s" % (a, b))
        else:
            # Plain <field> of this table.
            unpacked.append(b)
    return unpacked
def xml_format(a):
    """ Returns the given attribute (string, int, float, bool, None) as a quoted unicode string.
    """
    # The isinstance() order matters: bool is a subclass of int,
    # and Date is a subclass of datetime. Unsupported types yield None.
    if isinstance(a, str):
        return '"%s"' % encode_entities(a)
    if isinstance(a, bool):
        return '"%s"' % ("yes" if a else "no")
    if isinstance(a, int):
        return '"%s"' % a
    if isinstance(a, float):
        return '"%s"' % round(a, 5)
    if a is None:
        return '""'
    if isinstance(a, Date):
        return '"%s"' % str(a)
    if isinstance(a, datetime):
        return '"%s"' % str(date(mktime(a.timetuple())))
def xml(rows):
    """ Returns the rows in the given Table or Query as an XML-string, for example:
        <?xml version="1.0" encoding="utf-8"?>
        <table name="pets", fields="id, name, type" count="2">
            <schema>
                <field name="id", type="integer", index="primary", optional="no" />
                <field name="name", type="string", length="50" />
                <field name="type", type="string", length="50" />
            </schema>
            <rows>
                <row id="1", name="Taxi", type="cat" />
                <row id="2", name="Hofstadter", type="dog" />
            </rows>
        </table>
    """
    if isinstance(rows, Table):
        root, table, rows, fields, aliases = "table", rows, rows.rows(), rows.fields, {}
    if isinstance(rows, Query):
        root, table, rows, fields, aliases, = "query", rows.table, rows.rows(), rows.fields, rows.aliases
    # Expand "*" into concrete (possibly table-prefixed) field names.
    fields = _unpack_fields(table, fields)
    # <table name="" fields="" count="">
    # <query table="" fields="" count="">
    xml = []
    xml.append(XML_HEADER)
    xml.append("<%s %s=%s fields=\"%s\" count=\"%s\">" % (
        root,
        root != "table" and "table" or "name",
        xml_format(table.name), # Use Query.aliases as field names.
        ", ".join(encode_entities(aliases.get(f, f)) for f in fields),
        len(rows)))
    # <schema>
    # Field information is retrieved from the (related) table schema.
    # If the XML is imported as a Table, the related fields become part of it.
    xml.append("\t<schema>")
    for f in fields:
        if f not in table.schema:
            # "<related-table>.<field>": look the field up in the related table.
            s = f.split(".")
            s = table.db[s[0]].schema[s[-1]]
        else:
            s = table.schema[f]
        # <field name="" type="" length="" default="" index="" optional="" extra="" />
        # Only non-default attributes are emitted.
        xml.append("\t\t<field name=%s type=%s%s%s%s%s%s />" % (
            xml_format(aliases.get(f, f)),
            xml_format(s.type),
            s.length is not None and " length=%s" % xml_format(s.length) or "",
            s.default is not None and " default=%s" % xml_format(s.default) or "",
            s.index is not False and " index=%s" % xml_format(s.index) or "",
            s.optional is not True and " optional=%s" % xml_format(s.optional) or "",
            s.extra is not None and " extra=%s" % xml_format(s.extra) or ""))
    xml.append("\t</schema>")
    xml.append("\t<rows>")
    # <rows>
    for r in rows:
        # <row field="value" />
        xml.append("\t\t<row %s />" % " ".join("%s=%s" % (aliases.get(k, k), xml_format(v)) for k, v in zip(fields, r)))
    xml.append("\t</rows>")
    xml.append("</%s>" % root)
    xml = "\n".join(xml)
    return xml
def parse_xml(database, xml, table=None, field=lambda s: s.replace(".", "-")):
    """ Creates a new table in the given database from the given XML-string.
        The XML must be in the format generated by Table.xml.
        If the table already exists, raises a TableError.
        The given table parameter can be used to rename the table.
        The given field function can be used to rename field names.
    """
    # parseString() will decode entities, no need for decode_entities().
    # NOTE(review): _attr() is defined elsewhere in this file (not visible here);
    # presumably _attr(node, name, default) reads a DOM attribute -- verify.
    from xml.dom.minidom import parseString
    dom = parseString(encode_utf8(xml))
    # Accept both <table name="..."> and <query table="..."> roots.
    a = dom.getElementsByTagName("table")
    b = dom.getElementsByTagName("query")
    if len(a) > 0:
        table = table or _attr(a[0], "name", "")
    if len(b) > 0:
        table = table or _attr(b[0], "table", "")
    # Parse field information (i.e., field name, field type, etc.)
    fields, schema, rows = [], [], []
    for f in dom.getElementsByTagName("field"):
        fields.append(_attr(f, "name"))
        schema.append(_field(
            name = field(_attr(f, "name")),
            type = _attr(f, "type") == STRING and STRING(int(_attr(f, "length", 255))) or _attr(f, "type"),
            default = _attr(f, "default", None),
            index = _attr(f, "index", False),
            optional = _attr(f, "optional", True) != "no"
        ))
        # Integer primary key is always auto-increment.
        # The id's in the new table will differ from those in the XML.
        if _attr(f, "index") == PRIMARY and _attr(f, "type") == INTEGER:
            fields.pop()
    # Parse row data.
    for r in dom.getElementsByTagName("row"):
        rows.append({})
        for i, f in enumerate(fields):
            v = _attr(r, f, None)
            if schema[i][1] == BOOLEAN:
                # Booleans were serialized as "yes"/"no" by xml_format().
                rows[-1][f] = (0, 1)[v != "no"]
            else:
                rows[-1][f] = v
    # Create table if not exists and insert rows.
    if database.connected is False:
        database.connect()
    if table in database:
        raise TableError("table '%s' already exists" % table)
    database.create(table, fields=schema)
    # Batch the inserts; commit once at the end.
    for r in rows:
        database[table].insert(r, commit=False)
    database.commit()
    return database[table]
#db = Database("test")
#db.create("persons", (pk(), field("data", TEXT)))
#db.persons.append((json.dumps({"name": u"Schrödinger", "type": "cat"}),))
#
#for id, data in db.persons:
# print(id, json.loads(data))
#### DATASHEET #####################################################################################
#--- CSV -------------------------------------------------------------------------------------------
# Raise the default field size limit:
if sys.platform == 'win32':
    # On Windows, cap at 2**31-1 -- presumably because field_size_limit()
    # takes a C long, which is 32-bit there even on 64-bit Python; verify.
    csvlib.field_size_limit(min(sys.maxsize, 2147483647))
else:
    csvlib.field_size_limit(sys.maxsize)
#--- DATASHEET -------------------------------------------------------------------------------------
def flip(datasheet):
    """ Returns a new datasheet, transposed: rows for columns and columns for rows.
    """
    transposed = datasheet.columns
    return Datasheet(rows=transposed)
def csv(*args, **kwargs):
    """ Returns a Datasheet from the given CSV file path.
        With no positional arguments, returns a new (empty) Datasheet.
    """
    # Note: shadows the standard-library csv module inside this module.
    if args:
        return Datasheet.load(*args, **kwargs)
    return Datasheet(**kwargs)
#--- DATASHEET ROWS --------------------------------------------------------------------------------
# Datasheet.rows mimics the operations on Datasheet:
#--- DATASHEET COLUMNS -----------------------------------------------------------------------------
#--- DATASHEET COLUMN ------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------
# Module-level counter -- presumably incremented to hand out unique ids
# (e.g. for datasheet columns); its consumer is not visible in this chunk.
_UID = 0
def truncate(string, length=100):
""" Returns a (head, tail)-tuple, where the head string length is less than the given length.
Preferably the string is split at a space, otherwise a hyphen ("-") is injected.
"""
if len(string) <= length:
return string, ""
n, words = 0, string.split(" ")
for i, w in enumerate(words):
if n + len(w) > length:
break
n += len(w) + 1
if i == 0 and len(w) > length:
return (w[:length - 1] + "-",
(w[length - 1:] + " " + " ".join(words[1:])).strip())
return (" ".join(words[:i]),
" ".join(words[i:]))
_truncate = truncate
def pprint(datasheet, truncate=40, padding=" ", fill="."):
    """ Prints a string where the rows in the datasheet are organized in outlined columns.
        - truncate: maximum width of a column (longer fields wrap onto extra lines).
        - padding : string appended after each justified field.
        - fill    : character used to pad a field out to the column width.
    """
    # Calculate the width of each column, based on the longest field in each column.
    # Long fields can be split across different lines, so we need to check each line.
    w = [0 for column in datasheet.columns]
    R = []
    for i, row in enumerate(datasheet.rows):
        fields = []
        for j, v in enumerate(row):
            # Cast each field in the row to a string.
            # Strings that span beyond the maximum column width are wrapped.
            # Thus, each "field" in the row is a list of lines.
            lines = []
            if not isinstance(v, str):
                v = str(v)
            for v in v.splitlines():
                v = decode_utf8(v.strip())
                while v:
                    # _truncate() yields (head-that-fits, remainder).
                    head, v = _truncate(v, truncate)
                    lines.append(head)
                    w[j] = max(w[j], len(head))
            fields.append(lines)
        R.append(fields)
    for i, fields in enumerate(R):
        # Add empty lines to each field so they are of equal height.
        n = max([len(lines) for lines in fields])
        fields = [lines + [""] * (n - len(lines)) for lines in fields]
        # Print the row line per line, justifying the fields with spaces.
        columns = []
        for k in range(n):
            for j, lines in enumerate(fields):
                s = lines[k]
                # Fill the first line (and non-empty continuation lines); pad the rest with spaces.
                s += ((k == 0 or len(lines[k]) > 0) and fill or " ") * (w[j] - len(lines[k]))
                s += padding
                columns.append(s)
        print(" ".join(columns))
| [
4242,
28748,
31800,
930,
20137,
1303,
29113,
29113,
14468,
2,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
3050,
2059,
286,
3738,
15448,
79,
11,
15664,
198,
2,
6434,
25,
4186,
1024,
311... | 2.503104 | 12,405 |
# Generated by Django 3.1.7 on 2021-10-22 16:48
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
16,
13,
22,
319,
33448,
12,
940,
12,
1828,
1467,
25,
2780,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright 2018 H. Gaspar hagax8@gmail.com
import numpy as np
import keras
import keras.backend as K
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import MaxPooling2D
from keras.models import Model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.utils import to_categorical
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
| [
2,
15069,
2864,
367,
13,
14345,
1845,
289,
363,
897,
23,
31,
14816,
13,
785,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
41927,
292,
198,
11748,
41927,
292,
13,
1891,
437,
355,
509,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
234... | 3.256098 | 164 |