code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import random
import numpy as np
from scipy import signal
### Transformations
class Fork:
def __init__(self, transform_dict):
self.transform_dict = transform_dict
def __call__(self, data):
result = {}
for fork_name, transformations in self.transform_dict.items():
fork_data = data
for trans in transformations:
fork_data = trans(fork_data)
result[fork_name] = fork_data
return result
class Crop:
def __init__(self, crop_len):
self.crop_len = crop_len
def __call__(self, data):
crop_len = self.crop_len
if len(data[0]) > crop_len:
start_idx = np.random.randint(len(data[0]) - crop_len)
data = data[:, start_idx: start_idx + crop_len]
return data
class Threshold:
def __init__(self, threshold=None, sigma=None):
assert bool(threshold is None) != bool(sigma is None),\
(bool(threshold is None), bool(sigma is None))
self.thr = threshold
self.sigma = sigma
def __call__(self, data):
if self.sigma is None:
data[np.abs(data) > self.thr] = self.thr
else:
data[np.abs(data) > data.std()*self.sigma] = data.std()*self.sigma
return data
class RandomMultiplier:
def __init__(self, multiplier=-1.):
self.multiplier = multiplier
def __call__(self, data):
multiplier = self.multiplier if random.random() < .5 else 1.
return data * multiplier
class Logarithm:
def __call__(self, data):
return np.log(np.abs(data)+1e-8)
class Spectrogram:
def __init__(self, NFFT=None, overlap=None):
self.NFFT = NFFT
self.overlap = overlap
if overlap is None:
self.overlap = NFFT - 1
def __call__(self, data):
data = data.squeeze()
assert len(data.shape) == 1
length = len(data)
Sx = signal.spectrogram(
x=data,
nperseg=self.NFFT,
noverlap=self.overlap)[-1]
Sx = signal.resample(Sx, length, axis=1)
return Sx
### Transformations | transformations.py | import random
import numpy as np
from scipy import signal
### Transformations
class Fork:
def __init__(self, transform_dict):
self.transform_dict = transform_dict
def __call__(self, data):
result = {}
for fork_name, transformations in self.transform_dict.items():
fork_data = data
for trans in transformations:
fork_data = trans(fork_data)
result[fork_name] = fork_data
return result
class Crop:
def __init__(self, crop_len):
self.crop_len = crop_len
def __call__(self, data):
crop_len = self.crop_len
if len(data[0]) > crop_len:
start_idx = np.random.randint(len(data[0]) - crop_len)
data = data[:, start_idx: start_idx + crop_len]
return data
class Threshold:
def __init__(self, threshold=None, sigma=None):
assert bool(threshold is None) != bool(sigma is None),\
(bool(threshold is None), bool(sigma is None))
self.thr = threshold
self.sigma = sigma
def __call__(self, data):
if self.sigma is None:
data[np.abs(data) > self.thr] = self.thr
else:
data[np.abs(data) > data.std()*self.sigma] = data.std()*self.sigma
return data
class RandomMultiplier:
def __init__(self, multiplier=-1.):
self.multiplier = multiplier
def __call__(self, data):
multiplier = self.multiplier if random.random() < .5 else 1.
return data * multiplier
class Logarithm:
def __call__(self, data):
return np.log(np.abs(data)+1e-8)
class Spectrogram:
def __init__(self, NFFT=None, overlap=None):
self.NFFT = NFFT
self.overlap = overlap
if overlap is None:
self.overlap = NFFT - 1
def __call__(self, data):
data = data.squeeze()
assert len(data.shape) == 1
length = len(data)
Sx = signal.spectrogram(
x=data,
nperseg=self.NFFT,
noverlap=self.overlap)[-1]
Sx = signal.resample(Sx, length, axis=1)
return Sx
### Transformations | 0.414188 | 0.374876 |
"""Tests for `awesim` module."""
from copy import copy
import unittest
from pkg_resources import resource_filename
import numpy as np
import astropy.units as q
import astropy.constants as ac
import batman
from awesimsoss import TSO, BlackbodyTSO, TestTSO, STAR_DATA, PLANET_DATA
class test_BlackbodyTSO(unittest.TestCase):
"""A test of the BlackbodyTSO class"""
def setUp(self):
pass
def test_run_no_planet(self):
"""A test of the BlackbodyTSO class with no planet"""
tso = BlackbodyTSO()
def test_run_with_planet(self):
"""A test of the BlackbodyTSO class with a planet"""
tso = BlackbodyTSO(add_planet=True)
class test_TestTSO(unittest.TestCase):
"""A test of the TestTSO class"""
def setUp(self):
pass
def test_run_no_planet(self):
"""A test of the TestTSO class with no planet"""
tso = TestTSO()
def test_run_with_planet(self):
"""A test of the TestTSO class with a planet"""
tso = TestTSO(add_planet=True)
class test_TSO(unittest.TestCase):
"""Tests for the TSO class"""
def setUp(self):
"""Setup for the tests"""
# Get data
self.star = STAR_DATA
self.planet = PLANET_DATA
def test_export(self):
"""Test the export method"""
# Make the TSO object and save
test_tso = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256')
test_tso.simulate()
try:
test_tso.export('outfile.fits')
except NameError:
pass
def test_init(self):
"""Test that the TSO class is generated properly"""
# Initialize the FULL frame with two groups and two integrations
# and the CLEAR filter
tso2048clear = TSO(ngrps=2, nints=2, star=self.star, subarray='FULL')
self.assertEqual(tso2048clear.ngrps, 2)
self.assertEqual(tso2048clear.nints, 2)
self.assertEqual(tso2048clear.nframes, 4)
self.assertEqual(tso2048clear.dims, (2, 2, 2048, 2048))
self.assertEqual(tso2048clear.subarray, 'FULL')
self.assertEqual(tso2048clear.filter, 'CLEAR')
# Initialize the 256 subarray with two groups and two integrations
# and the CLEAR filter
tso256clear = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256')
self.assertEqual(tso256clear.ngrps, 2)
self.assertEqual(tso256clear.nints, 2)
self.assertEqual(tso256clear.nframes, 4)
self.assertEqual(tso256clear.dims, (2, 2, 256, 2048))
self.assertEqual(tso256clear.subarray, 'SUBSTRIP256')
self.assertEqual(tso256clear.filter, 'CLEAR')
# Initialize the 96 subarray with two groups and two integrations
# and the CLEAR filter
tso96clear = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP96')
self.assertEqual(tso96clear.ngrps, 2)
self.assertEqual(tso96clear.nints, 2)
self.assertEqual(tso96clear.nframes, 4)
self.assertEqual(tso96clear.dims, (2, 2, 96, 2048))
self.assertEqual(tso96clear.subarray, 'SUBSTRIP96')
self.assertEqual(tso96clear.filter, 'CLEAR')
# Initialize the FULL frame with two groups and two integrations
# and the F277W filter
tso2048f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='FULL', filter='F277W')
self.assertEqual(tso2048f277w.ngrps, 2)
self.assertEqual(tso2048f277w.nints, 2)
self.assertEqual(tso2048f277w.nframes, 4)
self.assertEqual(tso2048f277w.dims, (2, 2, 2048, 2048))
self.assertEqual(tso2048f277w.subarray, 'FULL')
self.assertEqual(tso2048f277w.filter, 'F277W')
# Initialize the 256 subarray with two groups and two integrations
# and the F277W filter
tso256f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256', filter='F277W')
self.assertEqual(tso256f277w.ngrps, 2)
self.assertEqual(tso256f277w.nints, 2)
self.assertEqual(tso256f277w.nframes, 4)
self.assertEqual(tso256f277w.dims, (2, 2, 256, 2048))
self.assertEqual(tso256f277w.subarray, 'SUBSTRIP256')
self.assertEqual(tso256f277w.filter, 'F277W')
# Initialize the 96 subarray with two groups and two integrations
# and the F277W filter
tso96f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP96', filter='F277W')
self.assertEqual(tso96f277w.ngrps, 2)
self.assertEqual(tso96f277w.nints, 2)
self.assertEqual(tso96f277w.nframes, 4)
self.assertEqual(tso96f277w.dims, (2, 2, 96, 2048))
self.assertEqual(tso96f277w.subarray, 'SUBSTRIP96')
self.assertEqual(tso96f277w.filter, 'F277W')
def test_run_no_planet(self):
"""A test of simulate() with no planet"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
tso.subarray = 'SUBSTRIP96'
tso.simulate()
tso.subarray = 'FULL'
tso.simulate()
def test_run_with_planet(self):
"""A test of simulate() with a planet"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Make orbital params
params = batman.TransitParams()
params.t0 = 0.
params.per = 5.7214742
params.a = 0.0558*q.AU.to(ac.R_sun)*0.66
params.inc = 89.8
params.ecc = 0.
params.w = 90.
params.limb_dark = 'quadratic'
params.u = [0.1, 0.1]
params.rp = 0.
tmodel = batman.TransitModel(params, tso.time)
tmodel.teff = 3500
tmodel.logg = 5
tmodel.feh = 0
# Run the simulation
tso.simulate(planet=self.planet, tmodel=tmodel)
tso.subarray = 'SUBSTRIP96'
tso.simulate(planet=self.planet, tmodel=tmodel)
tso.subarray = 'FULL'
tso.simulate(planet=self.planet, tmodel=tmodel)
def test_lookup(self):
"""Test that coordinates are looked up if given a name"""
# Make the TSO object
targ = TSO(ngrps=2, nints=2, star=self.star, target='trappist-1')
no_targ = TSO(ngrps=2, nints=2, star=self.star)
# Check target name
self.assertNotEqual(targ.target, no_targ.target)
# Check coordinates
self.assertNotEqual(targ.ra, no_targ.ra)
self.assertNotEqual(targ.dec, no_targ.dec)
def test_star(self):
"""Test that errors are thrown for bas star input"""
# Test that non wavelength units fail
bad_wave_star = copy(self.star)
bad_wave_star[0] *= q.Jy
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_wave_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that non flux density units fail
bad_flux_star = copy(self.star)
bad_flux_star[1] *= q.K
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_flux_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that no units fail
bad_unit_star = copy(self.star)
bad_unit_star[0] = bad_unit_star[0].value
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_unit_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that spectrum shape
bad_size_star = [self.star[0]]
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_size_star}
self.assertRaises(ValueError, TSO, **kwargs)
def test_bad_attrs(self):
"""Test that invalid attributes throw an error"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Bad fiilter
self.assertRaises(ValueError, setattr, tso, 'filter', 'foo')
# Bad ncols
self.assertRaises(TypeError, setattr, tso, 'ncols', 3)
# Bad nrows
self.assertRaises(TypeError, setattr, tso, 'nrows', 3)
# Bad nints
self.assertRaises(TypeError, setattr, tso, 'nints', 'three')
# Bad ngrps
self.assertRaises(TypeError, setattr, tso, 'ngrps', 'three')
# Bad nresets
self.assertRaises(TypeError, setattr, tso, 'nresets', 'three')
# Bad orders
tso.orders = 1
self.assertRaises(ValueError, setattr, tso, 'orders', 'three')
# Bad subarray
self.assertRaises(ValueError, setattr, tso, 'subarray', 'three')
# Bad t0
self.assertRaises(ValueError, setattr, tso, 't0', 'three')
# Bad target
self.assertRaises(TypeError, setattr, tso, 'target', 3)
def test_ldcs(self):
"""Test the limb darkening coefficients"""
# Create instance
tso = TSO(ngrps=2, nints=2, star=self.star)
# Set manually
ldcs = tso.ld_coeffs
tso.ld_coeffs = np.ones((3, 2048, 2))
# Bad LDCs (Removed TypeError in favor of print statement)
# self.assertRaises(TypeError, setattr, tso, 'ld_coeffs', 'foo')
def test_plot(self):
"""Test plot method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Test plot with no data
plt = tso.plot(draw=False)
# Run simulation
tso.simulate()
# Test bad ptype
kwargs = {'ptype': 'foo', 'draw': False}
self.assertRaises(ValueError, tso.plot, **kwargs)
# Standard plot with traces
plt = tso.plot(traces=True)
# Standard plot with one order
plt = tso.plot(order=1, draw=False)
# No noise plot
plt = tso.plot(noise=False, draw=False)
# Log plot
plt = tso.plot(scale='log', draw=False)
def test_plot_slice(self):
"""Test plot_slice method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot with traces
plt = tso.plot_slice(500, traces=True)
# Standard plot with one order
plt = tso.plot_slice(500, order=1, draw=False)
# Plot with noise
plt = tso.plot_slice(500, noise=True, draw=False)
# Log plot
plt = tso.plot_slice(500, scale='log', draw=False)
# List of slices
plt = tso.plot_slice([500, 1000], draw=False)
def test_plot_ramp(self):
"""Test plot_ramp method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot
plt = tso.plot_ramp(draw=False)
tso.plot_ramp()
def test_plot_lightcurve(self):
"""Test plot_lightcurve method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Test bad units
kwargs = {'column': 500, 'time_unit': 'foo', 'draw': False}
self.assertRaises(ValueError, tso.plot_lightcurve, **kwargs)
# Standard plot
plt = tso.plot_lightcurve(500)
# Wavelength
plt = tso.plot_lightcurve(1.6, draw=False)
# Neither
plt = tso.plot_lightcurve('foo', draw=False)
# List of lightcurves
plt = tso.plot_lightcurve([500, 1000], draw=False)
def test_plot_spectrum(self):
"""Test plot_spectrum method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot
plt = tso.plot_spectrum()
# Standard plot with one order
plt = tso.plot_spectrum(order=1, draw=False)
# Log plot
plt = tso.plot_spectrum(scale='log', draw=False)
# No noise plot
plt = tso.plot_spectrum(noise=True, draw=False)
# Specific order
plt = tso.plot_spectrum(order=1, draw=False) | tests/test_awesim.py | """Tests for `awesim` module."""
from copy import copy
import unittest
from pkg_resources import resource_filename
import numpy as np
import astropy.units as q
import astropy.constants as ac
import batman
from awesimsoss import TSO, BlackbodyTSO, TestTSO, STAR_DATA, PLANET_DATA
class test_BlackbodyTSO(unittest.TestCase):
"""A test of the BlackbodyTSO class"""
def setUp(self):
pass
def test_run_no_planet(self):
"""A test of the BlackbodyTSO class with no planet"""
tso = BlackbodyTSO()
def test_run_with_planet(self):
"""A test of the BlackbodyTSO class with a planet"""
tso = BlackbodyTSO(add_planet=True)
class test_TestTSO(unittest.TestCase):
"""A test of the TestTSO class"""
def setUp(self):
pass
def test_run_no_planet(self):
"""A test of the TestTSO class with no planet"""
tso = TestTSO()
def test_run_with_planet(self):
"""A test of the TestTSO class with a planet"""
tso = TestTSO(add_planet=True)
class test_TSO(unittest.TestCase):
"""Tests for the TSO class"""
def setUp(self):
"""Setup for the tests"""
# Get data
self.star = STAR_DATA
self.planet = PLANET_DATA
def test_export(self):
"""Test the export method"""
# Make the TSO object and save
test_tso = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256')
test_tso.simulate()
try:
test_tso.export('outfile.fits')
except NameError:
pass
def test_init(self):
"""Test that the TSO class is generated properly"""
# Initialize the FULL frame with two groups and two integrations
# and the CLEAR filter
tso2048clear = TSO(ngrps=2, nints=2, star=self.star, subarray='FULL')
self.assertEqual(tso2048clear.ngrps, 2)
self.assertEqual(tso2048clear.nints, 2)
self.assertEqual(tso2048clear.nframes, 4)
self.assertEqual(tso2048clear.dims, (2, 2, 2048, 2048))
self.assertEqual(tso2048clear.subarray, 'FULL')
self.assertEqual(tso2048clear.filter, 'CLEAR')
# Initialize the 256 subarray with two groups and two integrations
# and the CLEAR filter
tso256clear = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256')
self.assertEqual(tso256clear.ngrps, 2)
self.assertEqual(tso256clear.nints, 2)
self.assertEqual(tso256clear.nframes, 4)
self.assertEqual(tso256clear.dims, (2, 2, 256, 2048))
self.assertEqual(tso256clear.subarray, 'SUBSTRIP256')
self.assertEqual(tso256clear.filter, 'CLEAR')
# Initialize the 96 subarray with two groups and two integrations
# and the CLEAR filter
tso96clear = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP96')
self.assertEqual(tso96clear.ngrps, 2)
self.assertEqual(tso96clear.nints, 2)
self.assertEqual(tso96clear.nframes, 4)
self.assertEqual(tso96clear.dims, (2, 2, 96, 2048))
self.assertEqual(tso96clear.subarray, 'SUBSTRIP96')
self.assertEqual(tso96clear.filter, 'CLEAR')
# Initialize the FULL frame with two groups and two integrations
# and the F277W filter
tso2048f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='FULL', filter='F277W')
self.assertEqual(tso2048f277w.ngrps, 2)
self.assertEqual(tso2048f277w.nints, 2)
self.assertEqual(tso2048f277w.nframes, 4)
self.assertEqual(tso2048f277w.dims, (2, 2, 2048, 2048))
self.assertEqual(tso2048f277w.subarray, 'FULL')
self.assertEqual(tso2048f277w.filter, 'F277W')
# Initialize the 256 subarray with two groups and two integrations
# and the F277W filter
tso256f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP256', filter='F277W')
self.assertEqual(tso256f277w.ngrps, 2)
self.assertEqual(tso256f277w.nints, 2)
self.assertEqual(tso256f277w.nframes, 4)
self.assertEqual(tso256f277w.dims, (2, 2, 256, 2048))
self.assertEqual(tso256f277w.subarray, 'SUBSTRIP256')
self.assertEqual(tso256f277w.filter, 'F277W')
# Initialize the 96 subarray with two groups and two integrations
# and the F277W filter
tso96f277w = TSO(ngrps=2, nints=2, star=self.star, subarray='SUBSTRIP96', filter='F277W')
self.assertEqual(tso96f277w.ngrps, 2)
self.assertEqual(tso96f277w.nints, 2)
self.assertEqual(tso96f277w.nframes, 4)
self.assertEqual(tso96f277w.dims, (2, 2, 96, 2048))
self.assertEqual(tso96f277w.subarray, 'SUBSTRIP96')
self.assertEqual(tso96f277w.filter, 'F277W')
def test_run_no_planet(self):
"""A test of simulate() with no planet"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
tso.subarray = 'SUBSTRIP96'
tso.simulate()
tso.subarray = 'FULL'
tso.simulate()
def test_run_with_planet(self):
"""A test of simulate() with a planet"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Make orbital params
params = batman.TransitParams()
params.t0 = 0.
params.per = 5.7214742
params.a = 0.0558*q.AU.to(ac.R_sun)*0.66
params.inc = 89.8
params.ecc = 0.
params.w = 90.
params.limb_dark = 'quadratic'
params.u = [0.1, 0.1]
params.rp = 0.
tmodel = batman.TransitModel(params, tso.time)
tmodel.teff = 3500
tmodel.logg = 5
tmodel.feh = 0
# Run the simulation
tso.simulate(planet=self.planet, tmodel=tmodel)
tso.subarray = 'SUBSTRIP96'
tso.simulate(planet=self.planet, tmodel=tmodel)
tso.subarray = 'FULL'
tso.simulate(planet=self.planet, tmodel=tmodel)
def test_lookup(self):
"""Test that coordinates are looked up if given a name"""
# Make the TSO object
targ = TSO(ngrps=2, nints=2, star=self.star, target='trappist-1')
no_targ = TSO(ngrps=2, nints=2, star=self.star)
# Check target name
self.assertNotEqual(targ.target, no_targ.target)
# Check coordinates
self.assertNotEqual(targ.ra, no_targ.ra)
self.assertNotEqual(targ.dec, no_targ.dec)
def test_star(self):
"""Test that errors are thrown for bas star input"""
# Test that non wavelength units fail
bad_wave_star = copy(self.star)
bad_wave_star[0] *= q.Jy
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_wave_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that non flux density units fail
bad_flux_star = copy(self.star)
bad_flux_star[1] *= q.K
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_flux_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that no units fail
bad_unit_star = copy(self.star)
bad_unit_star[0] = bad_unit_star[0].value
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_unit_star}
self.assertRaises(ValueError, TSO, **kwargs)
# Test that spectrum shape
bad_size_star = [self.star[0]]
kwargs = {'nints': 2, 'ngrps': 2, 'star': bad_size_star}
self.assertRaises(ValueError, TSO, **kwargs)
def test_bad_attrs(self):
"""Test that invalid attributes throw an error"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Bad fiilter
self.assertRaises(ValueError, setattr, tso, 'filter', 'foo')
# Bad ncols
self.assertRaises(TypeError, setattr, tso, 'ncols', 3)
# Bad nrows
self.assertRaises(TypeError, setattr, tso, 'nrows', 3)
# Bad nints
self.assertRaises(TypeError, setattr, tso, 'nints', 'three')
# Bad ngrps
self.assertRaises(TypeError, setattr, tso, 'ngrps', 'three')
# Bad nresets
self.assertRaises(TypeError, setattr, tso, 'nresets', 'three')
# Bad orders
tso.orders = 1
self.assertRaises(ValueError, setattr, tso, 'orders', 'three')
# Bad subarray
self.assertRaises(ValueError, setattr, tso, 'subarray', 'three')
# Bad t0
self.assertRaises(ValueError, setattr, tso, 't0', 'three')
# Bad target
self.assertRaises(TypeError, setattr, tso, 'target', 3)
def test_ldcs(self):
"""Test the limb darkening coefficients"""
# Create instance
tso = TSO(ngrps=2, nints=2, star=self.star)
# Set manually
ldcs = tso.ld_coeffs
tso.ld_coeffs = np.ones((3, 2048, 2))
# Bad LDCs (Removed TypeError in favor of print statement)
# self.assertRaises(TypeError, setattr, tso, 'ld_coeffs', 'foo')
def test_plot(self):
"""Test plot method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
# Test plot with no data
plt = tso.plot(draw=False)
# Run simulation
tso.simulate()
# Test bad ptype
kwargs = {'ptype': 'foo', 'draw': False}
self.assertRaises(ValueError, tso.plot, **kwargs)
# Standard plot with traces
plt = tso.plot(traces=True)
# Standard plot with one order
plt = tso.plot(order=1, draw=False)
# No noise plot
plt = tso.plot(noise=False, draw=False)
# Log plot
plt = tso.plot(scale='log', draw=False)
def test_plot_slice(self):
"""Test plot_slice method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot with traces
plt = tso.plot_slice(500, traces=True)
# Standard plot with one order
plt = tso.plot_slice(500, order=1, draw=False)
# Plot with noise
plt = tso.plot_slice(500, noise=True, draw=False)
# Log plot
plt = tso.plot_slice(500, scale='log', draw=False)
# List of slices
plt = tso.plot_slice([500, 1000], draw=False)
def test_plot_ramp(self):
"""Test plot_ramp method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot
plt = tso.plot_ramp(draw=False)
tso.plot_ramp()
def test_plot_lightcurve(self):
"""Test plot_lightcurve method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Test bad units
kwargs = {'column': 500, 'time_unit': 'foo', 'draw': False}
self.assertRaises(ValueError, tso.plot_lightcurve, **kwargs)
# Standard plot
plt = tso.plot_lightcurve(500)
# Wavelength
plt = tso.plot_lightcurve(1.6, draw=False)
# Neither
plt = tso.plot_lightcurve('foo', draw=False)
# List of lightcurves
plt = tso.plot_lightcurve([500, 1000], draw=False)
def test_plot_spectrum(self):
"""Test plot_spectrum method"""
# Make the TSO object
tso = TSO(ngrps=2, nints=2, star=self.star)
tso.simulate()
# Standard plot
plt = tso.plot_spectrum()
# Standard plot with one order
plt = tso.plot_spectrum(order=1, draw=False)
# Log plot
plt = tso.plot_spectrum(scale='log', draw=False)
# No noise plot
plt = tso.plot_spectrum(noise=True, draw=False)
# Specific order
plt = tso.plot_spectrum(order=1, draw=False) | 0.819496 | 0.715714 |
import grpc
from google.bigtable.admin.v2 import bigtable_table_admin_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2
from google.bigtable.admin.v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigtableTableAdminStub(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.CreateTableRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.CreateTableFromSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.CreateTableFromSnapshotRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.ListTables = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListTables',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.ListTablesRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.ListTablesResponse.FromString,
)
self.GetTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetTable',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.GetTableRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.DeleteTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.DeleteTableRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ModifyColumnFamilies = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.ModifyColumnFamiliesRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Table.FromString,
)
self.DropRowRange = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.DropRowRangeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GenerateConsistencyToken = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.GenerateConsistencyTokenResponse.FromString,
)
self.CheckConsistency = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.CheckConsistencyRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.CheckConsistencyResponse.FromString,
)
self.SnapshotTable = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.SnapshotTableRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.GetSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.GetSnapshotRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2.Snapshot.FromString,
)
self.ListSnapshots = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.ListSnapshotsRequest.SerializeToString,
response_deserializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.ListSnapshotsResponse.FromString,
)
self.DeleteSnapshot = channel.unary_unary(
'/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot',
request_serializer=google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2.DeleteSnapshotRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class BigtableTableAdminServicer(object):
"""Service for creating, configuring, and deleting Cloud Bigtable tables.
Provides access to the table schemas only, not the data stored within
the tables.
"""
def CreateTable(self, request, context):
"""Creates a new table in the specified instance.
The table can be created with a full set of initial column families,
specified in the request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateTableFromSnapshot(self, request, context):
"""Creates a new table from the specified snapshot. The target table must
not exist. The snapshot and the table must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListTables(self, request, context):
"""Lists all tables served from a specified instance.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTable(self, request, context):
"""Gets metadata information about the specified table.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteTable(self, request, context):
"""Permanently deletes a specified table and all of its data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModifyColumnFamilies(self, request, context):
"""Performs a series of column family modifications on the specified table.
Either all or none of the modifications will occur before this method
returns, but data requests received prior to that point may see a table
where only some modifications have taken effect.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DropRowRange(self, request, context):
"""Permanently drop/delete a row range from a specified table. The request can
specify whether to delete all rows in a table, or only those that match a
particular prefix.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GenerateConsistencyToken(self, request, context):
"""Generates a consistency token for a Table, which can be used in
CheckConsistency to check whether mutations to the table that finished
before this call started have been replicated. The tokens will be available
for 90 days.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CheckConsistency(self, request, context):
"""Checks replication consistency based on a consistency token, that is, if
replication has caught up based on the conditions specified in the token
and the check request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SnapshotTable(self, request, context):
"""Creates a new snapshot in the specified cluster from the specified
source table. The cluster and the table must be in the same instance.
Note: This is a private alpha release of Cloud Bigtable snapshots. This
feature is not currently available to most Cloud Bigtable customers. This
feature might be changed in backward-incompatible ways and is not
recommended for production use. It is not subject to any SLA or deprecation
policy.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSnapshot(self, request, context):
  """Gets metadata information about the specified snapshot.
  Note: This is a private alpha release of Cloud Bigtable snapshots. This
  feature is not currently available to most Cloud Bigtable customers. This
  feature might be changed in backward-incompatible ways and is not
  recommended for production use. It is not subject to any SLA or deprecation
  policy.
  """
  # Generated default: report UNIMPLEMENTED; concrete servicers override this.
  context.set_code(grpc.StatusCode.UNIMPLEMENTED)
  context.set_details('Method not implemented!')
  raise NotImplementedError('Method not implemented!')
def ListSnapshots(self, request, context):
  """Lists all snapshots associated with the specified cluster.
  Note: This is a private alpha release of Cloud Bigtable snapshots. This
  feature is not currently available to most Cloud Bigtable customers. This
  feature might be changed in backward-incompatible ways and is not
  recommended for production use. It is not subject to any SLA or deprecation
  policy.
  """
  # Generated default: report UNIMPLEMENTED; concrete servicers override this.
  context.set_code(grpc.StatusCode.UNIMPLEMENTED)
  context.set_details('Method not implemented!')
  raise NotImplementedError('Method not implemented!')
def DeleteSnapshot(self, request, context):
  """Permanently deletes the specified snapshot.
  Note: This is a private alpha release of Cloud Bigtable snapshots. This
  feature is not currently available to most Cloud Bigtable customers. This
  feature might be changed in backward-incompatible ways and is not
  recommended for production use. It is not subject to any SLA or deprecation
  policy.
  """
  # Generated default: report UNIMPLEMENTED; concrete servicers override this.
  context.set_code(grpc.StatusCode.UNIMPLEMENTED)
  context.set_details('Method not implemented!')
  raise NotImplementedError('Method not implemented!')
def add_BigtableTableAdminServicer_to_server(servicer, server):
  """Registers all BigtableTableAdmin RPC handlers of *servicer* on *server*.

  Args:
    servicer: A BigtableTableAdminServicer implementation.
    server: A grpc.Server to attach the generic handler to.
  """
  # Short aliases for the generated protobuf modules (imported at file top).
  _admin = google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2
  _table = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
  _operations = google_dot_longrunning_dot_operations__pb2
  _empty = google_dot_protobuf_dot_empty__pb2
  # (RPC name, request deserializer, response serializer) for every
  # unary-unary method of the BigtableTableAdmin service.
  _methods = [
      ('CreateTable', _admin.CreateTableRequest.FromString, _table.Table.SerializeToString),
      ('CreateTableFromSnapshot', _admin.CreateTableFromSnapshotRequest.FromString, _operations.Operation.SerializeToString),
      ('ListTables', _admin.ListTablesRequest.FromString, _admin.ListTablesResponse.SerializeToString),
      ('GetTable', _admin.GetTableRequest.FromString, _table.Table.SerializeToString),
      ('DeleteTable', _admin.DeleteTableRequest.FromString, _empty.Empty.SerializeToString),
      ('ModifyColumnFamilies', _admin.ModifyColumnFamiliesRequest.FromString, _table.Table.SerializeToString),
      ('DropRowRange', _admin.DropRowRangeRequest.FromString, _empty.Empty.SerializeToString),
      ('GenerateConsistencyToken', _admin.GenerateConsistencyTokenRequest.FromString, _admin.GenerateConsistencyTokenResponse.SerializeToString),
      ('CheckConsistency', _admin.CheckConsistencyRequest.FromString, _admin.CheckConsistencyResponse.SerializeToString),
      ('SnapshotTable', _admin.SnapshotTableRequest.FromString, _operations.Operation.SerializeToString),
      ('GetSnapshot', _admin.GetSnapshotRequest.FromString, _table.Snapshot.SerializeToString),
      ('ListSnapshots', _admin.ListSnapshotsRequest.FromString, _admin.ListSnapshotsResponse.SerializeToString),
      ('DeleteSnapshot', _admin.DeleteSnapshotRequest.FromString, _empty.Empty.SerializeToString),
  ]
  rpc_method_handlers = {
      name: grpc.unary_unary_rpc_method_handler(
          getattr(servicer, name),
          request_deserializer=request_deserializer,
          response_serializer=response_serializer,
      )
      for name, request_deserializer, response_serializer in _methods
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers)
  # NOTE: the final server.add_generic_rpc_handlers((generic_handler,)) call
  # is on the immediately following line of the file.
server.add_generic_rpc_handlers((generic_handler,)) | lib/third_party/google/bigtable/admin/v2/bigtable_table_admin_pb2_grpc.py | import grpc
from google.bigtable.admin.v2 import bigtable_table_admin_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2
from google.bigtable.admin.v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class BigtableTableAdminStub(object):
  """Service for creating, configuring, and deleting Cloud Bigtable tables.

  Provides access to the table schemas only, not the data stored within
  the tables.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Short aliases for the generated protobuf modules (imported at file top).
    _admin = google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2
    _table = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
    _operations = google_dot_longrunning_dot_operations__pb2
    _empty = google_dot_protobuf_dot_empty__pb2
    _prefix = '/google.bigtable.admin.v2.BigtableTableAdmin/'
    # (RPC name, request serializer, response deserializer); one callable
    # attribute (e.g. self.CreateTable) is created per entry.
    _methods = [
        ('CreateTable', _admin.CreateTableRequest.SerializeToString, _table.Table.FromString),
        ('CreateTableFromSnapshot', _admin.CreateTableFromSnapshotRequest.SerializeToString, _operations.Operation.FromString),
        ('ListTables', _admin.ListTablesRequest.SerializeToString, _admin.ListTablesResponse.FromString),
        ('GetTable', _admin.GetTableRequest.SerializeToString, _table.Table.FromString),
        ('DeleteTable', _admin.DeleteTableRequest.SerializeToString, _empty.Empty.FromString),
        ('ModifyColumnFamilies', _admin.ModifyColumnFamiliesRequest.SerializeToString, _table.Table.FromString),
        ('DropRowRange', _admin.DropRowRangeRequest.SerializeToString, _empty.Empty.FromString),
        ('GenerateConsistencyToken', _admin.GenerateConsistencyTokenRequest.SerializeToString, _admin.GenerateConsistencyTokenResponse.FromString),
        ('CheckConsistency', _admin.CheckConsistencyRequest.SerializeToString, _admin.CheckConsistencyResponse.FromString),
        ('SnapshotTable', _admin.SnapshotTableRequest.SerializeToString, _operations.Operation.FromString),
        ('GetSnapshot', _admin.GetSnapshotRequest.SerializeToString, _table.Snapshot.FromString),
        ('ListSnapshots', _admin.ListSnapshotsRequest.SerializeToString, _admin.ListSnapshotsResponse.FromString),
        ('DeleteSnapshot', _admin.DeleteSnapshotRequest.SerializeToString, _empty.Empty.FromString),
    ]
    for name, request_serializer, response_deserializer in _methods:
      setattr(self, name, channel.unary_unary(
          _prefix + name,
          request_serializer=request_serializer,
          response_deserializer=response_deserializer,
      ))
class BigtableTableAdminServicer(object):
  """Service for creating, configuring, and deleting Cloud Bigtable tables.

  Provides access to the table schemas only, not the data stored within
  the tables. Every method below is an unimplemented default; concrete
  servicers override the RPCs they support.
  """

  def _unimplemented(self, context):
    # Shared default behavior for every un-overridden handler method.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def CreateTable(self, request, context):
    """Creates a new table in the specified instance. The table can be created
    with a full set of initial column families, specified in the request."""
    self._unimplemented(context)

  def CreateTableFromSnapshot(self, request, context):
    """Creates a new table from the specified snapshot. The target table must
    not exist; snapshot and table must share an instance. (Private alpha
    snapshot feature; not subject to any SLA or deprecation policy.)"""
    self._unimplemented(context)

  def ListTables(self, request, context):
    """Lists all tables served from a specified instance."""
    self._unimplemented(context)

  def GetTable(self, request, context):
    """Gets metadata information about the specified table."""
    self._unimplemented(context)

  def DeleteTable(self, request, context):
    """Permanently deletes a specified table and all of its data."""
    self._unimplemented(context)

  def ModifyColumnFamilies(self, request, context):
    """Performs a series of column family modifications on the specified table.
    Either all or none of the modifications will occur before this method
    returns, but data requests received prior to that point may see a table
    where only some modifications have taken effect."""
    self._unimplemented(context)

  def DropRowRange(self, request, context):
    """Permanently drop/delete a row range from a specified table: either all
    rows, or only those matching a particular prefix."""
    self._unimplemented(context)

  def GenerateConsistencyToken(self, request, context):
    """Generates a consistency token for a Table, usable in CheckConsistency
    to verify prior mutations have replicated. Tokens are available for
    90 days."""
    self._unimplemented(context)

  def CheckConsistency(self, request, context):
    """Checks replication consistency based on a consistency token, that is,
    if replication has caught up based on the conditions specified in the
    token and the check request."""
    self._unimplemented(context)

  def SnapshotTable(self, request, context):
    """Creates a new snapshot in the specified cluster from the specified
    source table; cluster and table must share an instance. (Private alpha
    snapshot feature; not subject to any SLA or deprecation policy.)"""
    self._unimplemented(context)

  def GetSnapshot(self, request, context):
    """Gets metadata information about the specified snapshot. (Private alpha
    snapshot feature; not subject to any SLA or deprecation policy.)"""
    self._unimplemented(context)

  def ListSnapshots(self, request, context):
    """Lists all snapshots associated with the specified cluster. (Private
    alpha snapshot feature; not subject to any SLA or deprecation policy.)"""
    self._unimplemented(context)

  def DeleteSnapshot(self, request, context):
    """Permanently deletes the specified snapshot. (Private alpha snapshot
    feature; not subject to any SLA or deprecation policy.)"""
    self._unimplemented(context)
def add_BigtableTableAdminServicer_to_server(servicer, server):
  """Registers all BigtableTableAdmin RPC handlers of *servicer* on *server*.

  Args:
    servicer: A BigtableTableAdminServicer implementation.
    server: A grpc.Server to attach the generic handler to.
  """
  # Short aliases for the generated protobuf modules (imported at file top).
  _admin = google_dot_bigtable_dot_admin_dot_v2_dot_bigtable__table__admin__pb2
  _table = google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2
  _operations = google_dot_longrunning_dot_operations__pb2
  _empty = google_dot_protobuf_dot_empty__pb2
  # (RPC name, request deserializer, response serializer) for every
  # unary-unary method of the BigtableTableAdmin service.
  _methods = [
      ('CreateTable', _admin.CreateTableRequest.FromString, _table.Table.SerializeToString),
      ('CreateTableFromSnapshot', _admin.CreateTableFromSnapshotRequest.FromString, _operations.Operation.SerializeToString),
      ('ListTables', _admin.ListTablesRequest.FromString, _admin.ListTablesResponse.SerializeToString),
      ('GetTable', _admin.GetTableRequest.FromString, _table.Table.SerializeToString),
      ('DeleteTable', _admin.DeleteTableRequest.FromString, _empty.Empty.SerializeToString),
      ('ModifyColumnFamilies', _admin.ModifyColumnFamiliesRequest.FromString, _table.Table.SerializeToString),
      ('DropRowRange', _admin.DropRowRangeRequest.FromString, _empty.Empty.SerializeToString),
      ('GenerateConsistencyToken', _admin.GenerateConsistencyTokenRequest.FromString, _admin.GenerateConsistencyTokenResponse.SerializeToString),
      ('CheckConsistency', _admin.CheckConsistencyRequest.FromString, _admin.CheckConsistencyResponse.SerializeToString),
      ('SnapshotTable', _admin.SnapshotTableRequest.FromString, _operations.Operation.SerializeToString),
      ('GetSnapshot', _admin.GetSnapshotRequest.FromString, _table.Snapshot.SerializeToString),
      ('ListSnapshots', _admin.ListSnapshotsRequest.FromString, _admin.ListSnapshotsResponse.SerializeToString),
      ('DeleteSnapshot', _admin.DeleteSnapshotRequest.FromString, _empty.Empty.SerializeToString),
  ]
  rpc_method_handlers = {
      name: grpc.unary_unary_rpc_method_handler(
          getattr(servicer, name),
          request_deserializer=request_deserializer,
          response_serializer=response_serializer,
      )
      for name, request_deserializer, response_serializer in _methods
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.bigtable.admin.v2.BigtableTableAdmin', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
from __future__ import print_function
print(__doc__)
from fenics import *
# Silence FEniCS logging below WARNING level.
set_log_level(30)
T = 10.0 # final time
num_steps = 50 # number of time steps
dt = T / num_steps/ 100 # time step size
# NOTE(review): the extra "/ 100" means the 50 steps only advance t to
# T/100 = 0.1 s, not to the "final time" T -- confirm this is intentional.
eps = 0.01 # diffusion coefficient
K = 10.0 # reaction rate
# Read mesh from file
mesh = Mesh('navier_stokes_cylinder/cylinder.xml.gz')
# Define function space for velocity
W = VectorFunctionSpace(mesh, 'P', 2)
# Define function space for system of concentrations
# (three identical P1 scalar fields solved as one mixed system)
P1 = FiniteElement('P', triangle, 1)
element = MixedElement([P1, P1, P1])
V = FunctionSpace(mesh, element)
# Define test functions
v_1, v_2, v_3 = TestFunctions(V)
# Define functions for velocity and concentrations
w = Function(W)
u = Function(V)
u_n = Function(V)
# Split system functions to access components
u_1, u_2, u_3 = split(u)
u_n1, u_n2, u_n3 = split(u_n)
# Define source terms: two small circular regions feed species 1 and 2.
f_1 = Expression('pow(x[0]-0.1,2)+pow(x[1]-0.1,2)<0.05*0.05 ? 0.1 : 0', degree=1)
f_2 = Expression('pow(x[0]-0.1,2)+pow(x[1]-0.3,2)<0.05*0.05 ? 0.1 : 0', degree=1)
f_3 = Constant(0)
# Define expressions used in variational forms
# (rebinds the Python floats dt, K, eps as FEniCS Constants)
k = Constant(dt)
K = Constant(K)
eps = Constant(eps)
# Define variational problem: backward-Euler advection-diffusion with
# reaction coupling (K*u_1*u_2 sinks on u_1/u_2 and source on u_3,
# plus a K*u_3 sink on u_3).
F = ((u_1 - u_n1) / k)*v_1*dx + dot(w, grad(u_1))*v_1*dx \
+ eps*dot(grad(u_1), grad(v_1))*dx + K*u_1*u_2*v_1*dx \
+ ((u_2 - u_n2) / k)*v_2*dx + dot(w, grad(u_2))*v_2*dx \
+ eps*dot(grad(u_2), grad(v_2))*dx + K*u_1*u_2*v_2*dx \
+ ((u_3 - u_n3) / k)*v_3*dx + dot(w, grad(u_3))*v_3*dx \
+ eps*dot(grad(u_3), grad(v_3))*dx - K*u_1*u_2*v_3*dx + K*u_3*v_3*dx \
- f_1*v_1*dx - f_2*v_2*dx - f_3*v_3*dx
# Create time series for reading velocity data
# (precomputed Navier-Stokes solution stored on disk)
timeseries_w = TimeSeries('navier_stokes_cylinder/velocity_series')
# Time-stepping
from vedo.dolfin import plot, ProgressBar
pb = ProgressBar(0, num_steps, c='red')
t = 0
for n in pb.range():
    # Update current time
    t += dt
    # Read velocity from file
    timeseries_w.retrieve(w.vector(), t)
    # Solve variational problem for time step
    solve(F == 0, u)
    _u_1, _u_2, _u_3 = u.split()
    # Update previous solution
    u_n.assign(u)
    # Plot solution
    plot(_u_3, at=0, # draw on renderer nr.0
    shape=(2,1), # two rows, one column
    size='fullscreen',
    cmap='bone',
    scalarbar=False,
    axes=0,
    zoom=2,
    interactive=False)
    plot(_u_2, at=1,
    cmap='bone',
    zoom=2,
    scalarbar=False,
    interactive=False)
    pb.print(t)
plot() | examples/other/dolfin/ft09_reaction_system.py | from __future__ import print_function
print(__doc__)
from fenics import *
# Silence FEniCS logging below WARNING level.
set_log_level(30)
T = 10.0 # final time
num_steps = 50 # number of time steps
dt = T / num_steps/ 100 # time step size
# NOTE(review): the extra "/ 100" means the 50 steps only advance t to
# T/100 = 0.1 s, not to the "final time" T -- confirm this is intentional.
eps = 0.01 # diffusion coefficient
K = 10.0 # reaction rate
# Read mesh from file
mesh = Mesh('navier_stokes_cylinder/cylinder.xml.gz')
# Define function space for velocity
W = VectorFunctionSpace(mesh, 'P', 2)
# Define function space for system of concentrations
# (three identical P1 scalar fields solved as one mixed system)
P1 = FiniteElement('P', triangle, 1)
element = MixedElement([P1, P1, P1])
V = FunctionSpace(mesh, element)
# Define test functions
v_1, v_2, v_3 = TestFunctions(V)
# Define functions for velocity and concentrations
w = Function(W)
u = Function(V)
u_n = Function(V)
# Split system functions to access components
u_1, u_2, u_3 = split(u)
u_n1, u_n2, u_n3 = split(u_n)
# Define source terms: two small circular regions feed species 1 and 2.
f_1 = Expression('pow(x[0]-0.1,2)+pow(x[1]-0.1,2)<0.05*0.05 ? 0.1 : 0', degree=1)
f_2 = Expression('pow(x[0]-0.1,2)+pow(x[1]-0.3,2)<0.05*0.05 ? 0.1 : 0', degree=1)
f_3 = Constant(0)
# Define expressions used in variational forms
# (rebinds the Python floats dt, K, eps as FEniCS Constants)
k = Constant(dt)
K = Constant(K)
eps = Constant(eps)
# Define variational problem: backward-Euler advection-diffusion with
# reaction coupling (K*u_1*u_2 sinks on u_1/u_2 and source on u_3,
# plus a K*u_3 sink on u_3).
F = ((u_1 - u_n1) / k)*v_1*dx + dot(w, grad(u_1))*v_1*dx \
+ eps*dot(grad(u_1), grad(v_1))*dx + K*u_1*u_2*v_1*dx \
+ ((u_2 - u_n2) / k)*v_2*dx + dot(w, grad(u_2))*v_2*dx \
+ eps*dot(grad(u_2), grad(v_2))*dx + K*u_1*u_2*v_2*dx \
+ ((u_3 - u_n3) / k)*v_3*dx + dot(w, grad(u_3))*v_3*dx \
+ eps*dot(grad(u_3), grad(v_3))*dx - K*u_1*u_2*v_3*dx + K*u_3*v_3*dx \
- f_1*v_1*dx - f_2*v_2*dx - f_3*v_3*dx
# Create time series for reading velocity data
# (precomputed Navier-Stokes solution stored on disk)
timeseries_w = TimeSeries('navier_stokes_cylinder/velocity_series')
# Time-stepping
from vedo.dolfin import plot, ProgressBar
pb = ProgressBar(0, num_steps, c='red')
t = 0
for n in pb.range():
    # Update current time
    t += dt
    # Read velocity from file
    timeseries_w.retrieve(w.vector(), t)
    # Solve variational problem for time step
    solve(F == 0, u)
    _u_1, _u_2, _u_3 = u.split()
    # Update previous solution
    u_n.assign(u)
    # Plot solution
    plot(_u_3, at=0, # draw on renderer nr.0
    shape=(2,1), # two rows, one column
    size='fullscreen',
    cmap='bone',
    scalarbar=False,
    axes=0,
    zoom=2,
    interactive=False)
    plot(_u_2, at=1,
    cmap='bone',
    zoom=2,
    scalarbar=False,
    interactive=False)
    pb.print(t)
# Final blocking call keeps the last rendered frame open for interaction.
plot()
from .response import Response
from datetime import datetime
class Statement(object):
    """
    A statement represents a single spoken entity, sentence or
    phrase that someone can say.
    """

    def __init__(self, text, **kwargs):
        # Try not to allow non-string types to be passed to statements
        try:
            text = str(text)
        except UnicodeEncodeError:
            pass

        self.text = text
        # Response objects this statement has been seen to answer.
        self.in_response_to = kwargs.pop('in_response_to', [])

        # The date and time that this statement was created at
        self.created_at = kwargs.pop('created_at', datetime.now())

        # Arbitrary per-statement metadata (e.g. POS tags).
        self.extra_data = kwargs.pop('extra_data', {})

        # This is the confidence with which the chat bot believes
        # this is an accurate response. This value is set when the
        # statement is returned by the chat bot.
        self.confidence = 0

        # Storage adapter assigned by the chat bot; required before save().
        self.storage = None

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<Statement text:%s>' % (self.text)

    def __hash__(self):
        # Hash on text, consistent with __eq__ below.
        return hash(self.text)

    def __eq__(self, other):
        if not other:
            return False
        if isinstance(other, Statement):
            return self.text == other.text
        # Allow direct comparison against a plain string.
        return self.text == other

    def save(self):
        """
        Save the statement in the database.
        """
        self.storage.update(self)

    def add_extra_data(self, key, value):
        """
        This method allows additional data to be stored on the statement object.

        Typically this data is something that pertains just to this statement.
        For example, a value stored here might be the tagged parts of speech for
        each word in the statement text.

        - key = 'pos_tags'
        - value = [('Now', 'RB'), ('for', 'IN'), ('something', 'NN'), ('different', 'JJ')]

        :param key: The key to use in the dictionary of extra data.
        :type key: str

        :param value: The value to set for the specified key.
        """
        self.extra_data[key] = value

    def add_response(self, response):
        """
        Add the response to the list of statements that this statement is in response to.
        If the response is already in the list, increment the occurrence count of that response.

        :param response: The response to add.
        :type response: `Response`
        """
        if not isinstance(response, Response):
            # Typo fix: "recieved" -> "received" in the error message.
            raise Statement.InvalidTypeException(
                'A {} was received when a {} instance was expected'.format(
                    type(response),
                    type(Response(''))
                )
            )

        updated = False
        # Iterate directly instead of indexing via range(len(...)).
        for existing_response in self.in_response_to:
            if response.text == existing_response.text:
                existing_response.occurrence += 1
                updated = True

        if not updated:
            self.in_response_to.append(response)

    def remove_response(self, response_text):
        """
        Removes a response from the statement's response list based
        on the value of the response text.

        :param response_text: The text of the response to be removed.
        :type response_text: str
        :returns: True when a matching response was removed, False otherwise.
        """
        for response in self.in_response_to:
            if response_text == response.text:
                self.in_response_to.remove(response)
                return True
        return False

    def get_response_count(self, statement):
        """
        Find the number of times that the statement has been used
        as a response to the current statement.

        :param statement: The statement object to get the count for.
        :type statement: `Statement`

        :returns: Return the number of times the statement has been used as a response.
        :rtype: int
        """
        for response in self.in_response_to:
            if statement.text == response.text:
                return response.occurrence
        return 0

    def serialize(self):
        """
        :returns: A dictionary representation of the statement object.
        :rtype: dict
        """
        return {
            'text': self.text,
            'in_response_to': [
                response.serialize() for response in self.in_response_to
            ],
            'created_at': self.created_at,
            'extra_data': self.extra_data,
        }

    @property
    def response_statement_cache(self):
        """
        This property is to allow ChatterBot Statement objects to
        be swappable with Django Statement models.
        """
        return self.in_response_to

    class InvalidTypeException(Exception):

        # Typo fix: "Recieved" -> "Received" in the default message.
        def __init__(self, value='Received an unexpected value type.'):
            self.value = value

        def __str__(self):
            return repr(self.value)
from datetime import datetime
class Statement(object):
    """
    A single utterance -- one spoken entity, sentence or phrase
    that a speaker can say.
    """

    def __init__(self, text, **kwargs):
        # Coerce the input to a native string where possible; tolerate
        # encoding failures and keep the original object in that case.
        try:
            text = str(text)
        except UnicodeEncodeError:
            pass
        self.text = text
        # Responses this statement has been observed to answer.
        self.in_response_to = kwargs.pop('in_response_to', [])
        # Creation timestamp for this statement.
        self.created_at = kwargs.pop('created_at', datetime.now())
        # Arbitrary per-statement metadata.
        self.extra_data = kwargs.pop('extra_data', {})
        # Confidence assigned when the bot selects this statement as a reply.
        self.confidence = 0
        # Storage adapter, assigned externally before save() is usable.
        self.storage = None

    def __str__(self):
        return self.text

    def __repr__(self):
        return '<Statement text:%s>' % (self.text)

    def __hash__(self):
        return hash(self.text)

    def __eq__(self, other):
        if not other:
            return False
        # Compare text-to-text for Statements, text-to-value otherwise.
        other_text = other.text if isinstance(other, Statement) else other
        return self.text == other_text

    def save(self):
        """
        Persist this statement through the configured storage adapter.
        """
        self.storage.update(self)

    def add_extra_data(self, key, value):
        """
        Attach a piece of metadata to this statement (for example the
        tagged parts of speech of each word in the statement text).

        :param key: The key to use in the dictionary of extra data.
        :type key: str

        :param value: The value to set for the specified key.
        """
        self.extra_data[key] = value

    def add_response(self, response):
        """
        Record *response* as a reply to this statement. A response that is
        already known has its occurrence count incremented instead.

        :param response: The response to add.
        :type response: `Response`
        """
        if not isinstance(response, Response):
            raise Statement.InvalidTypeException(
                'A {} was recieved when a {} instance was expected'.format(
                    type(response),
                    type(Response(''))
                )
            )
        matched = False
        for known in self.in_response_to:
            if known.text == response.text:
                known.occurrence += 1
                matched = True
        if not matched:
            self.in_response_to.append(response)

    def remove_response(self, response_text):
        """
        Delete the first stored response whose text matches *response_text*.

        :param response_text: The text of the response to be removed.
        :type response_text: str
        :returns: True when a response was removed, False otherwise.
        """
        for candidate in self.in_response_to:
            if candidate.text == response_text:
                self.in_response_to.remove(candidate)
                return True
        return False

    def get_response_count(self, statement):
        """
        Count how many times *statement* has been recorded as a reply
        to this statement.

        :param statement: The statement object to get the count for.
        :type statement: `Statement`
        :returns: The number of recorded occurrences.
        :rtype: int
        """
        for candidate in self.in_response_to:
            if candidate.text == statement.text:
                return candidate.occurrence
        return 0

    def serialize(self):
        """
        Build and return a plain-dict representation of this statement.

        :rtype: dict
        """
        return {
            'text': self.text,
            'in_response_to': [
                response.serialize() for response in self.in_response_to
            ],
            'created_at': self.created_at,
            'extra_data': self.extra_data,
        }

    @property
    def response_statement_cache(self):
        """
        Alias kept so ChatterBot Statement objects remain swappable
        with Django Statement models.
        """
        return self.in_response_to

    class InvalidTypeException(Exception):

        def __init__(self, value='Recieved an unexpected value type.'):
            self.value = value

        def __str__(self):
            return repr(self.value)
import argparse
import json
import logging
import os
import secrets
import socket
from typing import Dict
from numpy.random import randint
from lpot.utils.utility import singleton
from lpot.ux.utils.exceptions import NotFoundException
@singleton
class Configuration:
"""Configuration object for UX server."""
PORT_DEFAULT = 5000
MAX_PORTS_TRIED = 10
def __init__(self) -> None:
"""Set the variables."""
self.server_address = ""
self.server_port = 0
self.gui_port = 0
self.log_level = 0
self.token = ""
self.scheme = ""
self.workdir = ""
self.set_up()
def set_up(self) -> None:
"""Reset variables."""
self.determine_values_from_environment()
self.determine_values_from_existing_config()
def determine_values_from_environment(self) -> None:
"""Set variables based on environment values."""
self.server_address = "localhost"
args = self.get_command_line_args()
self.server_port = self.determine_server_port(args)
self.gui_port = self.determine_gui_port(args)
self.log_level = self.determine_log_level(args)
self.token = secrets.token_hex(16)
self.scheme = "http"
self.workdir = os.path.join(os.environ.get("HOME", ""), "workdir")
def determine_values_from_existing_config(self) -> None:
"""Set variables based on existing files."""
workloads_list_filepath = os.path.join(
os.environ.get("HOME", ""),
".lpot",
"workloads_list.json",
)
if os.path.isfile(workloads_list_filepath):
with open(workloads_list_filepath, encoding="utf-8") as workloads_list:
workloads_data = json.load(workloads_list)
self.workdir = workloads_data.get("active_workspace_path", self.workdir)
def get_command_line_args(self) -> Dict:
"""Return arguments passed in command line."""
parser = argparse.ArgumentParser(description="Run UX server.")
parser.add_argument(
"-p",
"--port",
type=int,
help="server port number to listen on",
)
parser.add_argument(
"-P",
"--gui_port",
type=int,
help="port number for GUI",
)
parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="verbosity of logging output, use -vv and -vvv for even more logs",
)
return vars(parser.parse_args())
def determine_server_port(self, args: Dict) -> int:
"""
Return port to be used by the server.
Will raise a NotFoundException if port is already in use.
When port given in command line, only that port will be tried.
When no port specified will try self.MAX_PORTS_TRIED times,
starting with self.PORT_DEFAULT.
"""
command_line_port = args.get("port")
if command_line_port is not None:
self._ensure_valid_port(command_line_port)
if self.is_port_taken(command_line_port):
raise NotFoundException(
f"Port {command_line_port} already in use, exiting.",
)
else:
return command_line_port
ports = [self.PORT_DEFAULT] + randint(
1025,
65536,
self.MAX_PORTS_TRIED - 1,
).tolist()
for port in ports:
if not self.is_port_taken(port):
return port
raise NotFoundException(
f"Unable to find a free port in {len(ports)} attempts, exiting.",
)
def determine_gui_port(self, args: Dict) -> int:
"""
Return port to be used by the GUI client.
Will return self.server_port unless specified in configuration.
"""
command_line_port = args.get("gui_port")
if command_line_port is not None:
self._ensure_valid_port(command_line_port)
return command_line_port
return self.server_port
def is_port_taken(self, port: int) -> bool:
"""Return if given port is already in use."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((self.server_address, port))
except socket.error:
return True
finally:
s.close()
return False
def determine_log_level(self, args: Dict) -> int:
"""Determine log level based on parameters given."""
verbosity_mapping = [
logging.CRITICAL,
logging.WARNING,
logging.INFO,
logging.DEBUG,
]
verbosity: int = args.get("verbose") # type:ignore
try:
return verbosity_mapping[verbosity]
except IndexError:
return logging.DEBUG
def get_url(self) -> str:
"""Return URL to access application."""
return f"{self.scheme}://{self.server_address}:{self.gui_port}/?token={self.token}"
def _ensure_valid_port(self, port: int) -> None:
"""Validate if proposed port number is allowed by TCP/IP."""
if port < 1:
raise ValueError(f"Lowest allowed port number is 1, attempted to use: {port}")
if port > 65535:
raise ValueError(f"Highest allowed port number is 65535, attempted to use: {port}") | lpot/ux/web/configuration.py | import argparse
import json
import logging
import os
import secrets
import socket
from typing import Dict
from numpy.random import randint
from lpot.utils.utility import singleton
from lpot.ux.utils.exceptions import NotFoundException
@singleton
class Configuration:
"""Configuration object for UX server."""
PORT_DEFAULT = 5000
MAX_PORTS_TRIED = 10
def __init__(self) -> None:
"""Set the variables."""
self.server_address = ""
self.server_port = 0
self.gui_port = 0
self.log_level = 0
self.token = ""
self.scheme = ""
self.workdir = ""
self.set_up()
def set_up(self) -> None:
"""Reset variables."""
self.determine_values_from_environment()
self.determine_values_from_existing_config()
def determine_values_from_environment(self) -> None:
"""Set variables based on environment values."""
self.server_address = "localhost"
args = self.get_command_line_args()
self.server_port = self.determine_server_port(args)
self.gui_port = self.determine_gui_port(args)
self.log_level = self.determine_log_level(args)
self.token = secrets.token_hex(16)
self.scheme = "http"
self.workdir = os.path.join(os.environ.get("HOME", ""), "workdir")
def determine_values_from_existing_config(self) -> None:
"""Set variables based on existing files."""
workloads_list_filepath = os.path.join(
os.environ.get("HOME", ""),
".lpot",
"workloads_list.json",
)
if os.path.isfile(workloads_list_filepath):
with open(workloads_list_filepath, encoding="utf-8") as workloads_list:
workloads_data = json.load(workloads_list)
self.workdir = workloads_data.get("active_workspace_path", self.workdir)
def get_command_line_args(self) -> Dict:
"""Return arguments passed in command line."""
parser = argparse.ArgumentParser(description="Run UX server.")
parser.add_argument(
"-p",
"--port",
type=int,
help="server port number to listen on",
)
parser.add_argument(
"-P",
"--gui_port",
type=int,
help="port number for GUI",
)
parser.add_argument(
"--verbose",
"-v",
action="count",
default=0,
help="verbosity of logging output, use -vv and -vvv for even more logs",
)
return vars(parser.parse_args())
def determine_server_port(self, args: Dict) -> int:
"""
Return port to be used by the server.
Will raise a NotFoundException if port is already in use.
When port given in command line, only that port will be tried.
When no port specified will try self.MAX_PORTS_TRIED times,
starting with self.PORT_DEFAULT.
"""
command_line_port = args.get("port")
if command_line_port is not None:
self._ensure_valid_port(command_line_port)
if self.is_port_taken(command_line_port):
raise NotFoundException(
f"Port {command_line_port} already in use, exiting.",
)
else:
return command_line_port
ports = [self.PORT_DEFAULT] + randint(
1025,
65536,
self.MAX_PORTS_TRIED - 1,
).tolist()
for port in ports:
if not self.is_port_taken(port):
return port
raise NotFoundException(
f"Unable to find a free port in {len(ports)} attempts, exiting.",
)
def determine_gui_port(self, args: Dict) -> int:
"""
Return port to be used by the GUI client.
Will return self.server_port unless specified in configuration.
"""
command_line_port = args.get("gui_port")
if command_line_port is not None:
self._ensure_valid_port(command_line_port)
return command_line_port
return self.server_port
def is_port_taken(self, port: int) -> bool:
"""Return if given port is already in use."""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind((self.server_address, port))
except socket.error:
return True
finally:
s.close()
return False
def determine_log_level(self, args: Dict) -> int:
"""Determine log level based on parameters given."""
verbosity_mapping = [
logging.CRITICAL,
logging.WARNING,
logging.INFO,
logging.DEBUG,
]
verbosity: int = args.get("verbose") # type:ignore
try:
return verbosity_mapping[verbosity]
except IndexError:
return logging.DEBUG
def get_url(self) -> str:
"""Return URL to access application."""
return f"{self.scheme}://{self.server_address}:{self.gui_port}/?token={self.token}"
def _ensure_valid_port(self, port: int) -> None:
"""Validate if proposed port number is allowed by TCP/IP."""
if port < 1:
raise ValueError(f"Lowest allowed port number is 1, attempted to use: {port}")
if port > 65535:
raise ValueError(f"Highest allowed port number is 65535, attempted to use: {port}") | 0.779867 | 0.102305 |
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran 2008 rule R513
upper-cobound is specification-expr
'''
import pytest
from fparser.two.Fortran2008 import Upper_Cobound
from fparser.two import Fortran2003
@pytest.mark.usefixtures("f2008_create")
@pytest.mark.parametrize('attr, _type', [
('aaa', Fortran2003.Name),
('aAa', Fortran2003.Name),
('1', Fortran2003.Int_Literal_Constant),
('5 + 7', Fortran2003.Level_2_Expr),
('3-9', Fortran2003.Level_2_Expr)
])
def test_upper_cobound(attr, _type):
'''Test that upper_cobound is parsed correctly.'''
obj = Upper_Cobound(attr)
assert isinstance(obj, _type), repr(obj)
ref = attr.replace(' ', '').replace('+', ' + ').replace('-', ' - ')
assert str(obj) == ref
@pytest.mark.usefixtures("f2008_create")
@pytest.mark.parametrize('attr', ['', '*'])
def test_invalid_upper_cobound(attr):
'''Test that invalid upper_cobound raise exception.'''
with pytest.raises(Fortran2003.NoMatchError):
_ = Upper_Cobound(attr) | src/fparser/two/tests/fortran2008/test_upper_cobound_r513.py |
# All rights reserved.
# Modifications made as part of the fparser project are distributed
# under the following license:
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Test Fortran 2008 rule R513
upper-cobound is specification-expr
'''
import pytest
from fparser.two.Fortran2008 import Upper_Cobound
from fparser.two import Fortran2003
@pytest.mark.usefixtures("f2008_create")
@pytest.mark.parametrize('attr, _type', [
('aaa', Fortran2003.Name),
('aAa', Fortran2003.Name),
('1', Fortran2003.Int_Literal_Constant),
('5 + 7', Fortran2003.Level_2_Expr),
('3-9', Fortran2003.Level_2_Expr)
])
def test_upper_cobound(attr, _type):
'''Test that upper_cobound is parsed correctly.'''
obj = Upper_Cobound(attr)
assert isinstance(obj, _type), repr(obj)
ref = attr.replace(' ', '').replace('+', ' + ').replace('-', ' - ')
assert str(obj) == ref
@pytest.mark.usefixtures("f2008_create")
@pytest.mark.parametrize('attr', ['', '*'])
def test_invalid_upper_cobound(attr):
'''Test that invalid upper_cobound raise exception.'''
with pytest.raises(Fortran2003.NoMatchError):
_ = Upper_Cobound(attr) | 0.645679 | 0.089893 |
import copy
import jsonpatch
import os
import subprocess
import sys
import yaml
migrationName = sys.argv[1]
migrationTemplateFile = sys.argv[2]
migrationAppType = sys.argv[3]
class literal(str):
pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
yaml.add_representer(literal, literal_presenter)
def dict_deep_merge(target, customization):
"""Merges customizations into a dictionary in place"""
for key, value in customization.items():
if isinstance(value, list):
if key not in target:
target[key] = copy.deepcopy(value)
else:
target[key].extend(value)
elif isinstance(value, dict):
if key not in target:
target[key] = copy.deepcopy(value)
else:
dict_deep_merge(target[key], value)
elif isinstance(value, set):
if key not in target:
target[key] = value.copy()
else:
target[key].update(value.copy())
else:
target[key] = copy.copy(value)
def execute_command(command):
"""Executes a command, capturing the output"""
output = subprocess.run(command, capture_output=True, encoding='utf-8')
if len(output.stderr) > 0:
print(output.stderr)
output.check_returncode()
return output
# Get generated plan
if migrationAppType == "system":
plan_name_cmd = ['kubectl', 'get', 'migrations.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
migrationName, '-o', 'jsonpath={.status.resources.generateArtifacts.name}']
plan_name = execute_command(plan_name_cmd).stdout
print(f"Plan Name: {plan_name}")
plan_get_cmd = ['kubectl', 'get', 'generateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
plan_name, '-o', 'yaml']
else: # must be appx type
plan_get_cmd = ['kubectl', 'get', 'appxgenerateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
f'appx-generateartifactsflow-{migrationName}', '-o', 'jsonpath={.spec.appXGenerateArtifactsConfig}']
plan_output = execute_command(plan_get_cmd)
full_plan_yaml = yaml.load(plan_output.stdout, Loader=yaml.SafeLoader)
if migrationAppType == "system":
plan_raw = full_plan_yaml["metadata"]["annotations"].pop("anthos-migrate.cloud.google.com/raw-content")
plan_yaml = yaml.load(plan_raw, Loader=yaml.SafeLoader)
else:
plan_yaml = full_plan_yaml
print(f"Plan yaml: {yaml.dump(plan_yaml)}")
# Customize Plan
if migrationTemplateFile.endswith(".yaml") or \
migrationTemplateFile.endswith(".YAML") or \
migrationTemplateFile.endswith(".yml") or \
migrationTemplateFile.endswith(".YML"):
with open(migrationTemplateFile) as m:
customization_yaml = yaml.load(m, Loader=yaml.SafeLoader)
dict_deep_merge(plan_yaml, customization_yaml)
elif migrationTemplateFile.endswith(".json") or \
migrationTemplateFile.endswith(".JSON"):
with open(migrationTemplateFile) as m:
patch = jsonpatch.json.dumps(jsonpatch.json.load(m))
jsonpatch.apply_patch(plan_yaml, patch, in_place=True)
else:
print("Using Default Plan")
# Change Names to match
if migrationAppType == "system":
name_patch = jsonpatch.JsonPatch([
{'op': 'replace', 'path': '/spec/image/base', 'value': f'{migrationName}-non-runnable-base'},
{'op': 'replace', 'path': '/spec/image/name', 'value': migrationName},
{'op': 'replace', 'path': '/spec/deployment/appName', 'value': migrationName},
])
name_patch.apply(plan_yaml, in_place=True)
elif migrationAppType == "tomcat":
name_patch = jsonpatch.JsonPatch([
#{'op': 'replace', 'path': '/tomcatServers/0/imageName', 'value': f'{migrationName}-tomcat'},
{'op': 'replace', 'path': '/tomcatServers/0/name', 'value': migrationName},
])
name_patch.apply(plan_yaml, in_place=True)
# Apply customized plan
if migrationAppType != "system":
# handle appx update
appx_generateartifactsflow_get_cmd = ['kubectl', 'get', 'appxgenerateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
f'appx-generateartifactsflow-{migrationName}', '-o', 'yaml']
appx_plan_output = execute_command(appx_generateartifactsflow_get_cmd)
full_appx_plan_yaml = yaml.load(appx_plan_output.stdout, Loader=yaml.SafeLoader)
full_appx_plan_yaml["spec"]["appXGenerateArtifactsConfig"] = literal(yaml.dump(plan_yaml))
plan_yaml = full_appx_plan_yaml
print(f'{yaml.dump(plan_yaml)}')
plan_yaml_path = "/plan.yaml"
with open(plan_yaml_path, "w") as m:
yaml.dump(plan_yaml, m)
plan_apply_cmd = ['kubectl', 'apply', '-f', plan_yaml_path]
os.system("cat " + plan_yaml_path)
execute_command(plan_apply_cmd) | examples/pipeline/tekton/scripts/customize-migration-plan.py |
import copy
import jsonpatch
import os
import subprocess
import sys
import yaml
migrationName = sys.argv[1]
migrationTemplateFile = sys.argv[2]
migrationAppType = sys.argv[3]
class literal(str):
pass
def literal_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
yaml.add_representer(literal, literal_presenter)
def dict_deep_merge(target, customization):
"""Merges customizations into a dictionary in place"""
for key, value in customization.items():
if isinstance(value, list):
if key not in target:
target[key] = copy.deepcopy(value)
else:
target[key].extend(value)
elif isinstance(value, dict):
if key not in target:
target[key] = copy.deepcopy(value)
else:
dict_deep_merge(target[key], value)
elif isinstance(value, set):
if key not in target:
target[key] = value.copy()
else:
target[key].update(value.copy())
else:
target[key] = copy.copy(value)
def execute_command(command):
"""Executes a command, capturing the output"""
output = subprocess.run(command, capture_output=True, encoding='utf-8')
if len(output.stderr) > 0:
print(output.stderr)
output.check_returncode()
return output
# Get generated plan
if migrationAppType == "system":
plan_name_cmd = ['kubectl', 'get', 'migrations.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
migrationName, '-o', 'jsonpath={.status.resources.generateArtifacts.name}']
plan_name = execute_command(plan_name_cmd).stdout
print(f"Plan Name: {plan_name}")
plan_get_cmd = ['kubectl', 'get', 'generateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
plan_name, '-o', 'yaml']
else: # must be appx type
plan_get_cmd = ['kubectl', 'get', 'appxgenerateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
f'appx-generateartifactsflow-{migrationName}', '-o', 'jsonpath={.spec.appXGenerateArtifactsConfig}']
plan_output = execute_command(plan_get_cmd)
full_plan_yaml = yaml.load(plan_output.stdout, Loader=yaml.SafeLoader)
if migrationAppType == "system":
plan_raw = full_plan_yaml["metadata"]["annotations"].pop("anthos-migrate.cloud.google.com/raw-content")
plan_yaml = yaml.load(plan_raw, Loader=yaml.SafeLoader)
else:
plan_yaml = full_plan_yaml
print(f"Plan yaml: {yaml.dump(plan_yaml)}")
# Customize Plan
if migrationTemplateFile.endswith(".yaml") or \
migrationTemplateFile.endswith(".YAML") or \
migrationTemplateFile.endswith(".yml") or \
migrationTemplateFile.endswith(".YML"):
with open(migrationTemplateFile) as m:
customization_yaml = yaml.load(m, Loader=yaml.SafeLoader)
dict_deep_merge(plan_yaml, customization_yaml)
elif migrationTemplateFile.endswith(".json") or \
migrationTemplateFile.endswith(".JSON"):
with open(migrationTemplateFile) as m:
patch = jsonpatch.json.dumps(jsonpatch.json.load(m))
jsonpatch.apply_patch(plan_yaml, patch, in_place=True)
else:
print("Using Default Plan")
# Change Names to match
if migrationAppType == "system":
name_patch = jsonpatch.JsonPatch([
{'op': 'replace', 'path': '/spec/image/base', 'value': f'{migrationName}-non-runnable-base'},
{'op': 'replace', 'path': '/spec/image/name', 'value': migrationName},
{'op': 'replace', 'path': '/spec/deployment/appName', 'value': migrationName},
])
name_patch.apply(plan_yaml, in_place=True)
elif migrationAppType == "tomcat":
name_patch = jsonpatch.JsonPatch([
#{'op': 'replace', 'path': '/tomcatServers/0/imageName', 'value': f'{migrationName}-tomcat'},
{'op': 'replace', 'path': '/tomcatServers/0/name', 'value': migrationName},
])
name_patch.apply(plan_yaml, in_place=True)
# Apply customized plan
if migrationAppType != "system":
# handle appx update
appx_generateartifactsflow_get_cmd = ['kubectl', 'get', 'appxgenerateartifactsflows.anthos-migrate.cloud.google.com', '-n', 'v2k-system',
f'appx-generateartifactsflow-{migrationName}', '-o', 'yaml']
appx_plan_output = execute_command(appx_generateartifactsflow_get_cmd)
full_appx_plan_yaml = yaml.load(appx_plan_output.stdout, Loader=yaml.SafeLoader)
full_appx_plan_yaml["spec"]["appXGenerateArtifactsConfig"] = literal(yaml.dump(plan_yaml))
plan_yaml = full_appx_plan_yaml
print(f'{yaml.dump(plan_yaml)}')
plan_yaml_path = "/plan.yaml"
with open(plan_yaml_path, "w") as m:
yaml.dump(plan_yaml, m)
plan_apply_cmd = ['kubectl', 'apply', '-f', plan_yaml_path]
os.system("cat " + plan_yaml_path)
execute_command(plan_apply_cmd) | 0.266071 | 0.145874 |
import os
import socket
import time
from datetime import datetime
from enum import Enum
import dbus
from render.font import Font
class SpotifyClient(object):
def __init__(self, props_interface):
self.player_props = props_interface.GetAll("org.mpris.MediaPlayer2.Player")
self.metadata = Metadata(self.player_props["Metadata"])
self.playback_status = self.player_props["PlaybackStatus"]
@classmethod
def init(cls):
bus_name = 'org.mpris.MediaPlayer2.spotify'
object_path = '/org/mpris/MediaPlayer2'
while True:
try:
proxy = dbus.SessionBus().get_object(bus_name, object_path)
props_interface = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties')
break
except dbus.DBusException:
time.sleep(0.5)
return SpotifyClient(props_interface)
def __repr__(self):
return "Artist: %s\nAlbum: %s\nTitle: %s" % \
(self.metadata.get_artist(), self.metadata.get_album(),
self.metadata.get_title())
class Metadata(object):
def __init__(self, metadata: dbus.Dictionary):
self.metadata = metadata
def get_artist(self):
return self.metadata["xesam:artist"][0] if len(self.metadata["xesam:artist"]) else ""
def get_title(self):
return self.metadata["xesam:title"]
def get_album(self):
return self.metadata["xesam:album"]
def get_length(self):
return self.metadata["mrpis:length"]
class ScreenType(Enum):
""" Init strings to be sent to the g15daemon to set display mode. """
Pixel = b"GBUF"
Text = b"TBUF"
Bitmap = b"WBUF"
class Display(object):
height = 43
width = 160
buffer_size = height * width
class G15(object):
def __init__(self, host='127.0.0.1', port=15550):
self.screen = bytearray(Display.buffer_size)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Attempt initial connection
try:
self.socket.connect((host, port))
except Exception as e:
print("Could not connect to G15 daemon at %s:%d" % (host, port))
print("Cause: %s" % e)
# Check response
response = self.socket.recv(16)
if response == b"G15 daemon HELLO":
print("Received expected response from daemon.")
else:
raise Exception("Incorrect response received: %s", response)
self.socket.send(ScreenType.Pixel.value)
# Initialize empty character buffer for screen
self.clear()
font_path = "/usr/share/fonts/nerd-fonts-complete/TTF/Bitstream Vera Sans Mono Nerd Font Complete Mono.ttf"
if not os.path.isfile(font_path):
raise Exception("%s is not a valid font path" % font_path)
self.font = FontWrapper(font_path, 10)
def clear(self):
self.screen = bytearray(Display.buffer_size)
self.display()
def display(self):
self.socket.send(self.screen)
def write(self, strings):
self.clear()
if type(strings) is str:
strings = [strings]
for i, string in enumerate(strings):
bitmap = self.font.from_string(string)
y_offset = (self.font.vertical_padding * i) + (i * self.font.char_height)
for y in range(bitmap.height):
for x in range(bitmap.width):
index = G15.translate_coordinates(x, y_offset + y)
text_coords = x + (y * bitmap.width)
self.screen[index] = bitmap.pixels[text_coords]
self.display()
@staticmethod
def translate_coordinates(x, y):
"""
Translates a 2d coordinate pair (x, y) to an index for an array
"""
if x < 0 or y < 0 or x >= Display.width \
or y >= Display.height or x * y > Display.buffer_size:
raise Exception("Invalid coordinates: %d, %d" % (x, y))
return (y * Display.width) + x
class FontWrapper(object):
def __init__(self, font_filepath, font_size):
self.padding = "..."
self.vertical_padding = 2
self.font = Font(font_filepath, font_size)
# Render an arbitrary character to calculate how many can fit in a screen
char = self.font.render_text(".")
self.char_width, self.char_height = char.width, font_size
self.max_characters = Display.width // self.char_width
def _truncate_string(self, string):
return string[:self.max_characters - len(self.padding)] + self.padding
def from_string(self, string):
width, _, _ = self.font.text_dimensions(string)
text = self._truncate_string(string) if width > Display.width else string
return self.font.render_text(text)
def main():
g15 = G15()
info = (get_time_string(), "Waiting for Spotify")
g15.write(info)
last_hash = hash(info)
while True:
spotify_client = SpotifyClient.init()
# Write to display only if content has changed
info = (get_time_string(), spotify_client.metadata.get_artist(), spotify_client.metadata.get_title())
if last_hash != hash(info):
g15.write(info)
last_hash = hash(info)
time.sleep(1)
def get_time_string():
return datetime.now().strftime("%Y-%m-%d | %H:%M | %a")
if __name__ == "__main__":
main() | g15client.py | import os
import socket
import time
from datetime import datetime
from enum import Enum
import dbus
from render.font import Font
class SpotifyClient(object):
def __init__(self, props_interface):
self.player_props = props_interface.GetAll("org.mpris.MediaPlayer2.Player")
self.metadata = Metadata(self.player_props["Metadata"])
self.playback_status = self.player_props["PlaybackStatus"]
@classmethod
def init(cls):
bus_name = 'org.mpris.MediaPlayer2.spotify'
object_path = '/org/mpris/MediaPlayer2'
while True:
try:
proxy = dbus.SessionBus().get_object(bus_name, object_path)
props_interface = dbus.Interface(proxy, dbus_interface='org.freedesktop.DBus.Properties')
break
except dbus.DBusException:
time.sleep(0.5)
return SpotifyClient(props_interface)
def __repr__(self):
return "Artist: %s\nAlbum: %s\nTitle: %s" % \
(self.metadata.get_artist(), self.metadata.get_album(),
self.metadata.get_title())
class Metadata(object):
def __init__(self, metadata: dbus.Dictionary):
self.metadata = metadata
def get_artist(self):
return self.metadata["xesam:artist"][0] if len(self.metadata["xesam:artist"]) else ""
def get_title(self):
return self.metadata["xesam:title"]
def get_album(self):
return self.metadata["xesam:album"]
def get_length(self):
return self.metadata["mrpis:length"]
class ScreenType(Enum):
""" Init strings to be sent to the g15daemon to set display mode. """
Pixel = b"GBUF"
Text = b"TBUF"
Bitmap = b"WBUF"
class Display(object):
height = 43
width = 160
buffer_size = height * width
class G15(object):
def __init__(self, host='127.0.0.1', port=15550):
self.screen = bytearray(Display.buffer_size)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Attempt initial connection
try:
self.socket.connect((host, port))
except Exception as e:
print("Could not connect to G15 daemon at %s:%d" % (host, port))
print("Cause: %s" % e)
# Check response
response = self.socket.recv(16)
if response == b"G15 daemon HELLO":
print("Received expected response from daemon.")
else:
raise Exception("Incorrect response received: %s", response)
self.socket.send(ScreenType.Pixel.value)
# Initialize empty character buffer for screen
self.clear()
font_path = "/usr/share/fonts/nerd-fonts-complete/TTF/Bitstream Vera Sans Mono Nerd Font Complete Mono.ttf"
if not os.path.isfile(font_path):
raise Exception("%s is not a valid font path" % font_path)
self.font = FontWrapper(font_path, 10)
def clear(self):
self.screen = bytearray(Display.buffer_size)
self.display()
def display(self):
self.socket.send(self.screen)
def write(self, strings):
self.clear()
if type(strings) is str:
strings = [strings]
for i, string in enumerate(strings):
bitmap = self.font.from_string(string)
y_offset = (self.font.vertical_padding * i) + (i * self.font.char_height)
for y in range(bitmap.height):
for x in range(bitmap.width):
index = G15.translate_coordinates(x, y_offset + y)
text_coords = x + (y * bitmap.width)
self.screen[index] = bitmap.pixels[text_coords]
self.display()
@staticmethod
def translate_coordinates(x, y):
"""
Translates a 2d coordinate pair (x, y) to an index for an array
"""
if x < 0 or y < 0 or x >= Display.width \
or y >= Display.height or x * y > Display.buffer_size:
raise Exception("Invalid coordinates: %d, %d" % (x, y))
return (y * Display.width) + x
class FontWrapper(object):
def __init__(self, font_filepath, font_size):
self.padding = "..."
self.vertical_padding = 2
self.font = Font(font_filepath, font_size)
# Render an arbitrary character to calculate how many can fit in a screen
char = self.font.render_text(".")
self.char_width, self.char_height = char.width, font_size
self.max_characters = Display.width // self.char_width
def _truncate_string(self, string):
return string[:self.max_characters - len(self.padding)] + self.padding
def from_string(self, string):
width, _, _ = self.font.text_dimensions(string)
text = self._truncate_string(string) if width > Display.width else string
return self.font.render_text(text)
def main():
g15 = G15()
info = (get_time_string(), "Waiting for Spotify")
g15.write(info)
last_hash = hash(info)
while True:
spotify_client = SpotifyClient.init()
# Write to display only if content has changed
info = (get_time_string(), spotify_client.metadata.get_artist(), spotify_client.metadata.get_title())
if last_hash != hash(info):
g15.write(info)
last_hash = hash(info)
time.sleep(1)
def get_time_string():
return datetime.now().strftime("%Y-%m-%d | %H:%M | %a")
if __name__ == "__main__":
main() | 0.543348 | 0.155367 |
import os
import sys
import base64
import httplib
import urllib
import json
import time
import re
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
# 新的抓取地址:http://a.app.qq.com/o/ajax/micro/AppDetail?pkgname=com.blsm.sft.fresh(直接返回json数据)
# 应用宝抓取数据的url
myapp_api_url = {'host': 'android.myapp.com', 'app_detail': '/myapp/detail.htm'}
# 打印对象
def prn_obj(obj):
print '\n '.join(['%s:%s' % item for item in obj.__dict__.items()])
class MyAppDetail(object):
appPackage = None
apkCode = 0
apkVName = None
appId = 0
appName = None
appCate = None
iconUrl = None
downUrl = None
# 构造方法
def __init__(self, package_name=''):
if package_name:
self.app_detail(package_name)
"""
获取应用的详情
"""
def app_detail(self, package_name):
url = myapp_api_url['app_detail'] + "?" + urllib.urlencode({'apkName': package_name})
headers = {}
conn = httplib.HTTPConnection(myapp_api_url['host'])
conn.request("GET", url)
res = conn.getresponse()
# print "\app_detail\t","GET\t",myapp_api_url['host']+url,"\n",res.status, res.reason
# 如果数据返回出错,那么return 空
if res.status != 200:
print "None"
conn.close()
return None
data = res.read()
conn.close()
self.analysis_data(data)
"""
解析返回的数据
"""
def analysis_data(self, data):
try:
match = re.search(r"appDetailData = (.*?(\n.*){11})", data)
if match:
match_data = match.groups()[0]
match_data = match_data.replace("orgame", "\"orgame\"").replace("apkName", "\"apkName\"").replace(
"apkCode", "\"apkCode\"").replace("appId", "\"appId\"").replace("appName", "\"appName\"").replace(
"iconUrl", "\"iconUrl\"").replace("appScore", "\"appScore\"").replace("downTimes",
"\"downTimes\"").replace(
"downUrl", "\"downUrl\"").replace("tipsUpDown", "\"tipsUpDown\"")
json_data = json.loads(match_data)
self.appPackage = json_data['apkName']
self.apkCode = json_data['apkCode']
self.appId = json_data['appId']
self.appName = json_data['appName']
self.iconUrl = json_data['iconUrl']
self.downUrl = json_data['downUrl']
match_vname = re.search(r"<div class=\"det-othinfo-data\">V(.[^<>]*)</div>", data)
if match_vname:
self.apkVName = match_vname.groups()[0]
match_cate = re.search(r"id=\"J_DetCate\">(.[^<>]*)</a>", data)
if match_cate:
self.appCate = match_cate.groups()[0]
except Exception, e:
print "analysis_data Exception", e
if len(sys.argv) > 1:
if len(sys.argv) > 2 and sys.argv[1] == 'download':
app_detail = MyAppDetail(sys.argv[2])
print app_detail.downUrl
else:
app_detail = MyAppDetail(sys.argv[1])
prn_obj(app_detail) | src/myapp.py | import os
import sys
import base64
import httplib
import urllib
import json
import time
import re
default_encoding = 'utf-8'
if sys.getdefaultencoding() != default_encoding:
reload(sys)
sys.setdefaultencoding(default_encoding)
# 新的抓取地址:http://a.app.qq.com/o/ajax/micro/AppDetail?pkgname=com.blsm.sft.fresh(直接返回json数据)
# 应用宝抓取数据的url
myapp_api_url = {'host': 'android.myapp.com', 'app_detail': '/myapp/detail.htm'}
# 打印对象
def prn_obj(obj):
print '\n '.join(['%s:%s' % item for item in obj.__dict__.items()])
class MyAppDetail(object):
appPackage = None
apkCode = 0
apkVName = None
appId = 0
appName = None
appCate = None
iconUrl = None
downUrl = None
# 构造方法
def __init__(self, package_name=''):
if package_name:
self.app_detail(package_name)
"""
获取应用的详情
"""
def app_detail(self, package_name):
url = myapp_api_url['app_detail'] + "?" + urllib.urlencode({'apkName': package_name})
headers = {}
conn = httplib.HTTPConnection(myapp_api_url['host'])
conn.request("GET", url)
res = conn.getresponse()
# print "\app_detail\t","GET\t",myapp_api_url['host']+url,"\n",res.status, res.reason
# 如果数据返回出错,那么return 空
if res.status != 200:
print "None"
conn.close()
return None
data = res.read()
conn.close()
self.analysis_data(data)
"""
解析返回的数据
"""
def analysis_data(self, data):
try:
match = re.search(r"appDetailData = (.*?(\n.*){11})", data)
if match:
match_data = match.groups()[0]
match_data = match_data.replace("orgame", "\"orgame\"").replace("apkName", "\"apkName\"").replace(
"apkCode", "\"apkCode\"").replace("appId", "\"appId\"").replace("appName", "\"appName\"").replace(
"iconUrl", "\"iconUrl\"").replace("appScore", "\"appScore\"").replace("downTimes",
"\"downTimes\"").replace(
"downUrl", "\"downUrl\"").replace("tipsUpDown", "\"tipsUpDown\"")
json_data = json.loads(match_data)
self.appPackage = json_data['apkName']
self.apkCode = json_data['apkCode']
self.appId = json_data['appId']
self.appName = json_data['appName']
self.iconUrl = json_data['iconUrl']
self.downUrl = json_data['downUrl']
match_vname = re.search(r"<div class=\"det-othinfo-data\">V(.[^<>]*)</div>", data)
if match_vname:
self.apkVName = match_vname.groups()[0]
match_cate = re.search(r"id=\"J_DetCate\">(.[^<>]*)</a>", data)
if match_cate:
self.appCate = match_cate.groups()[0]
except Exception, e:
print "analysis_data Exception", e
if len(sys.argv) > 1:
if len(sys.argv) > 2 and sys.argv[1] == 'download':
app_detail = MyAppDetail(sys.argv[2])
print app_detail.downUrl
else:
app_detail = MyAppDetail(sys.argv[1])
prn_obj(app_detail) | 0.089524 | 0.056835 |
import os
from typing import List
import re
import numpy as np
from mtc.core.experiment import Measuring
from mtc.settings import NLP_EXPERIMENT_PATH, NLP_RAW_DATA
from mtc.helpers.file_management import len_sts_data
input_folder = os.environ.get('FEATURE_PATH', os.path.join(NLP_EXPERIMENT_PATH, 'pickles_for_bert'))
use_features = os.environ.get('USE_FEATURES', 'yes')
def load_features(pickle_folder, sts_data_path):
measuring = Measuring([], pickle_folder)
measuring.set_sts_data_dict(sts_data_path)
measuring.load_feature_matrix()
return measuring
def get_features_for_bert_size():
if use_features == 'yes':
pickle_folder = os.path.join(input_folder, 'train')
measuring = load_features(pickle_folder, os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.train.txt'))
X, y, raw_sentences_a, raw_sentences_b = measuring()
return X.shape[1]
else:
return 0
def add_features_to_bert(examples) -> List:
if use_features == 'yes':
# Applying similarity measures and saving the sentences object
guids = np.array([example.guid for example in examples])
match = re.search(r'(\w+)-', guids[0])
assert match, f'Could not read guid {guids[0]}'
mode = match.group(1)
n_train = len_sts_data('clinicalSTS2019.train.txt')
id_list = []
id_shift = 0
if mode == 'train' or mode == 'dev':
train_or_test_folder = 'train'
sts_data_path = os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.train.txt')
elif mode == 'test':
train_or_test_folder = 'test'
sts_data_path = os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.test.txt')
# The ids in bert continue after the training set but here we need the indices to use as array index
id_shift = n_train
else:
assert False
for guid in guids:
match = re.search(r'-(\d+)', guid)
assert match, f'Could not extract id from {guid}'
id = int(match.group(1)) - id_shift
id_list.append(id)
pickle_folder = os.path.join(input_folder, train_or_test_folder)
measuring = load_features(pickle_folder, sts_data_path)
X, y, raw_sentences_a, raw_sentences_b = measuring()
examples_labels = np.array([float(example.label) for example in examples])
assert all(examples_labels == y[id_list]), 'The labels between bert and the features do not match up'
return X[id_list]
else:
return [[] for _ in range(len(examples))]
if __name__ == '__main__':
get_features_for_bert_size() | mtc/copied_from_bert/features_for_bert.py | import os
from typing import List
import re
import numpy as np
from mtc.core.experiment import Measuring
from mtc.settings import NLP_EXPERIMENT_PATH, NLP_RAW_DATA
from mtc.helpers.file_management import len_sts_data
input_folder = os.environ.get('FEATURE_PATH', os.path.join(NLP_EXPERIMENT_PATH, 'pickles_for_bert'))
use_features = os.environ.get('USE_FEATURES', 'yes')
def load_features(pickle_folder, sts_data_path):
measuring = Measuring([], pickle_folder)
measuring.set_sts_data_dict(sts_data_path)
measuring.load_feature_matrix()
return measuring
def get_features_for_bert_size():
if use_features == 'yes':
pickle_folder = os.path.join(input_folder, 'train')
measuring = load_features(pickle_folder, os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.train.txt'))
X, y, raw_sentences_a, raw_sentences_b = measuring()
return X.shape[1]
else:
return 0
def add_features_to_bert(examples) -> List:
if use_features == 'yes':
# Applying similarity measures and saving the sentences object
guids = np.array([example.guid for example in examples])
match = re.search(r'(\w+)-', guids[0])
assert match, f'Could not read guid {guids[0]}'
mode = match.group(1)
n_train = len_sts_data('clinicalSTS2019.train.txt')
id_list = []
id_shift = 0
if mode == 'train' or mode == 'dev':
train_or_test_folder = 'train'
sts_data_path = os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.train.txt')
elif mode == 'test':
train_or_test_folder = 'test'
sts_data_path = os.path.join(NLP_RAW_DATA, 'n2c2', 'clinicalSTS2019.test.txt')
# The ids in bert continue after the training set but here we need the indices to use as array index
id_shift = n_train
else:
assert False
for guid in guids:
match = re.search(r'-(\d+)', guid)
assert match, f'Could not extract id from {guid}'
id = int(match.group(1)) - id_shift
id_list.append(id)
pickle_folder = os.path.join(input_folder, train_or_test_folder)
measuring = load_features(pickle_folder, sts_data_path)
X, y, raw_sentences_a, raw_sentences_b = measuring()
examples_labels = np.array([float(example.label) for example in examples])
assert all(examples_labels == y[id_list]), 'The labels between bert and the features do not match up'
return X[id_list]
else:
return [[] for _ in range(len(examples))]
if __name__ == '__main__':
get_features_for_bert_size() | 0.504394 | 0.356867 |
from __future__ import unicode_literals
from django.db import migrations
def move_banners_to_index_page(apps, schema_editor):
from molo.core.models import (
LanguagePage, BannerPage, BannerIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing banners
index_page = BannerIndexPage.objects.live().first()
for page in BannerPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_footers_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, FooterPage,
FooterIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing footers
index_page = FooterIndexPage.objects.live().first()
for page in FooterPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_sections_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, SectionPage,
SectionIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing sections
index_page = SectionIndexPage.objects.live().first()
for page in SectionPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_yourwords_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, Main)
from molo.yourwords.models import (
YourWordsCompetition, YourWordsCompetitionIndexPage)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing your words competition
index_page = YourWordsCompetitionIndexPage.objects.live().first()
for p in YourWordsCompetition.objects.all().child_of(current_language):
p.move(index_page, pos='last-child')
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0002_add_language_relation'),
]
operations = [
migrations.RunPython(move_banners_to_index_page),
migrations.RunPython(move_footers_to_index_page),
migrations.RunPython(move_sections_to_index_page),
migrations.RunPython(move_yourwords_to_index_page),
] | tuneme/migrations/0003_move_pages_to_index_pages.py | from __future__ import unicode_literals
from django.db import migrations
def move_banners_to_index_page(apps, schema_editor):
from molo.core.models import (
LanguagePage, BannerPage, BannerIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing banners
index_page = BannerIndexPage.objects.live().first()
for page in BannerPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_footers_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, FooterPage,
FooterIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing footers
index_page = FooterIndexPage.objects.live().first()
for page in FooterPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_sections_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, SectionPage,
SectionIndexPage, Main)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing sections
index_page = SectionIndexPage.objects.live().first()
for page in SectionPage.objects.all().child_of(current_language):
page.move(index_page, pos='last-child')
def move_yourwords_to_index_page(apps, schema_editor):
from molo.core.models import (LanguagePage, Main)
from molo.yourwords.models import (
YourWordsCompetition, YourWordsCompetitionIndexPage)
main = Main.objects.all().first()
current_language = LanguagePage.objects.live().first()
if main and current_language:
# Move existing your words competition
index_page = YourWordsCompetitionIndexPage.objects.live().first()
for p in YourWordsCompetition.objects.all().child_of(current_language):
p.move(index_page, pos='last-child')
class Migration(migrations.Migration):
dependencies = [
('tuneme', '0002_add_language_relation'),
]
operations = [
migrations.RunPython(move_banners_to_index_page),
migrations.RunPython(move_footers_to_index_page),
migrations.RunPython(move_sections_to_index_page),
migrations.RunPython(move_yourwords_to_index_page),
] | 0.507324 | 0.189934 |
import torch
import torch.nn as nn
from src.model import layers
from src.training_utils import training_utils
class GenBigGAN(nn.Module):
def __init__(self, mult_chs, ks, num_cls, latent_dim, embedding_dim, sn, w_init):
super().__init__()
self.ch = mult_chs["pre"][0]
self.conditional = num_cls > 0
m_pre_chs, m_post_chs, out_ch = mult_chs["pre"], mult_chs["post"], mult_chs["colors"]
self.splits = (len(m_pre_chs) + len(m_post_chs) + 1)
split_latent_dim = latent_dim // self.splits
assert latent_dim % self.splits == 0, "latent has to be divisible by number of CondResnetBlocks layers"
m_post_chs = training_utils.get_channel_inputs(m_post_chs, input_dim=m_pre_chs[-1])
m_pre_chs = training_utils.get_channel_inputs(m_pre_chs, input_dim=m_pre_chs[0])
top_block = [True] + [False] * (len(m_pre_chs) - 1)
cond_dim = split_latent_dim + embedding_dim if self.conditional else split_latent_dim
if self.conditional:
self.class_embedding = nn.Embedding(num_embeddings=num_cls, embedding_dim=embedding_dim)
self.linear = layers.LinearSN(in_features=split_latent_dim, out_features=4 * 4 * self.ch, sn=sn, w_init=w_init)
# tf 4 * 4 * 256 # here 4 * 4 * 256
self.pre_up_blocks = nn.Sequential(*[
layers.UpResnetBlock(in_m, out_m, ks, cond_dim, sn, bias=False, w_init=w_init, first=f)
for (in_m, out_m), f in zip(m_pre_chs, top_block)
]) # tf 256 -> 128 # here 256, 128
self.non_loc = layers.SelfAttn(mult_chs["pre"][-1], sn=sn) # tf 128 -> # here 128
# should be 2 times bigger same as output of prev block i.e. 256 // 2
# but this implementation keeps the same dim so ch // 2 -> attn -> ch // 4
self.post_up_blocks = nn.Sequential(*[
layers.UpResnetBlock(in_m, out_m, ks, cond_dim, sn, bias=False, w_init=w_init)
for in_m, out_m in m_post_chs
]) # tf -> 64 # 64
self.bn = nn.BatchNorm2d(mult_chs["post"][-1])
self.relu = nn.ReLU()
self.conv = layers.ConvTranspose2dSN(
in_channels=mult_chs["post"][-1], out_channels=out_ch,
kernel_size=ks, padding=1, sn=sn, bias=False, w_init=w_init)
self.sigmoid = nn.Sigmoid()
def forward(self, z, cls):
z = z.float()
all_z = z.chunk(self.splits, dim=-1)
z, conds = all_z[0], all_z[1:]
if self.conditional:
cls_embed = self.class_embedding(cls)
conds = [torch.cat([conds[d], cls_embed], dim=-1) for d in range(len(conds))]
z = self.linear(z)
z = z.reshape(-1, self.ch, 4, 4)
for i, layer in enumerate(self.pre_up_blocks):
z = layer(z, cond=conds[i])
z = self.non_loc(z)
for i, layer in enumerate(self.post_up_blocks, start=len(self.pre_up_blocks)):
z = layer(z, cond=conds[i])
z = self.bn(z)
z = self.relu(z)
z = self.conv(z)
x = self.sigmoid(z)
return x
@classmethod
def from_config(cls, config):
return cls(
mult_chs=config.gen_mult_chs,
ks=config.ks,
num_cls=config.num_cls,
latent_dim=config.latent_dim,
embedding_dim=config.embedding_dim,
w_init=config.w_init,
sn=config.spectral_norm,
) | src/model/generators.py | import torch
import torch.nn as nn
from src.model import layers
from src.training_utils import training_utils
class GenBigGAN(nn.Module):
def __init__(self, mult_chs, ks, num_cls, latent_dim, embedding_dim, sn, w_init):
super().__init__()
self.ch = mult_chs["pre"][0]
self.conditional = num_cls > 0
m_pre_chs, m_post_chs, out_ch = mult_chs["pre"], mult_chs["post"], mult_chs["colors"]
self.splits = (len(m_pre_chs) + len(m_post_chs) + 1)
split_latent_dim = latent_dim // self.splits
assert latent_dim % self.splits == 0, "latent has to be divisible by number of CondResnetBlocks layers"
m_post_chs = training_utils.get_channel_inputs(m_post_chs, input_dim=m_pre_chs[-1])
m_pre_chs = training_utils.get_channel_inputs(m_pre_chs, input_dim=m_pre_chs[0])
top_block = [True] + [False] * (len(m_pre_chs) - 1)
cond_dim = split_latent_dim + embedding_dim if self.conditional else split_latent_dim
if self.conditional:
self.class_embedding = nn.Embedding(num_embeddings=num_cls, embedding_dim=embedding_dim)
self.linear = layers.LinearSN(in_features=split_latent_dim, out_features=4 * 4 * self.ch, sn=sn, w_init=w_init)
# tf 4 * 4 * 256 # here 4 * 4 * 256
self.pre_up_blocks = nn.Sequential(*[
layers.UpResnetBlock(in_m, out_m, ks, cond_dim, sn, bias=False, w_init=w_init, first=f)
for (in_m, out_m), f in zip(m_pre_chs, top_block)
]) # tf 256 -> 128 # here 256, 128
self.non_loc = layers.SelfAttn(mult_chs["pre"][-1], sn=sn) # tf 128 -> # here 128
# should be 2 times bigger same as output of prev block i.e. 256 // 2
# but this implementation keeps the same dim so ch // 2 -> attn -> ch // 4
self.post_up_blocks = nn.Sequential(*[
layers.UpResnetBlock(in_m, out_m, ks, cond_dim, sn, bias=False, w_init=w_init)
for in_m, out_m in m_post_chs
]) # tf -> 64 # 64
self.bn = nn.BatchNorm2d(mult_chs["post"][-1])
self.relu = nn.ReLU()
self.conv = layers.ConvTranspose2dSN(
in_channels=mult_chs["post"][-1], out_channels=out_ch,
kernel_size=ks, padding=1, sn=sn, bias=False, w_init=w_init)
self.sigmoid = nn.Sigmoid()
def forward(self, z, cls):
z = z.float()
all_z = z.chunk(self.splits, dim=-1)
z, conds = all_z[0], all_z[1:]
if self.conditional:
cls_embed = self.class_embedding(cls)
conds = [torch.cat([conds[d], cls_embed], dim=-1) for d in range(len(conds))]
z = self.linear(z)
z = z.reshape(-1, self.ch, 4, 4)
for i, layer in enumerate(self.pre_up_blocks):
z = layer(z, cond=conds[i])
z = self.non_loc(z)
for i, layer in enumerate(self.post_up_blocks, start=len(self.pre_up_blocks)):
z = layer(z, cond=conds[i])
z = self.bn(z)
z = self.relu(z)
z = self.conv(z)
x = self.sigmoid(z)
return x
@classmethod
def from_config(cls, config):
return cls(
mult_chs=config.gen_mult_chs,
ks=config.ks,
num_cls=config.num_cls,
latent_dim=config.latent_dim,
embedding_dim=config.embedding_dim,
w_init=config.w_init,
sn=config.spectral_norm,
) | 0.915955 | 0.508788 |
import logging
import re
import signal
import socket
import socketserver
import sys
import threading
import time
from contextlib import contextmanager
from functools import partial
import bots
import server
class RequestHandler(socketserver.StreamRequestHandler):
def _write_out(self, m):
out = b"%s\n" % m
logging.debug(b"< " + out)
self.request.sendall(out)
def _read_in(self):
x = self.rfile.readline().strip()
assert re.match(r'[a-zA-Z0-9\-\s]+', x.decode('utf-8')), x
logging.debug(b"> %s\n" % x)
return x
def server_loop(self, end_time):
logging.info('recieved request from %s' % str(self.client_address))
pubkey = self._read_in()
# set up server
serv = server.Server()
client = serv.register(pubkey)
serv._set_client(client)
self._write_out(client)
all_bots = [bots.ReportBot(serv, self), bots.AbuseBot(serv, self)]
COMMANDS = {
b"list": serv.list_users,
b"put": serv.put_msg,
b"get": serv.get_msg,
b"key": serv.get_user,
b"send": partial(serv.send, client),
b"recv": partial(serv.recv, client),
b"report": partial(serv.report, client),
}
while (time.time() < end_time):
cmd = self._read_in().split(b' ')
if not cmd[0] in COMMANDS:
logging.info("INVALID COMMAND: %s" % cmd[0])
sys.exit(1)
# mypy doesn't like partial :(
resp = COMMANDS[cmd[0]](*cmd[1:]) #type: ignore
if cmd[0] == b"send":
for bot in all_bots:
bot.react()
if resp is not None:
if cmd[0] in [b"recv", b"list"]:
for x in resp:
self._write_out(x)
self._write_out(b"done")
else:
self._write_out(resp)
if serv.should_exit():
sys.exit(0)
if cmd[0] == b"send":
for bot in all_bots:
bot.react()
def handle(self):
try:
self.request.settimeout(30)
end_time = time.time() + 30
self.server_loop(end_time)
except socket.timeout:
logging.info('timeout')
except ConnectionResetError:
logging.info('reset')
except Exception as e:
logging.info('other error: %s' % str(e))
finally:
logging.info('closing request from %s' % str(self.client_address))
class TCPThreadServer(socketserver.ThreadingMixIn, socketserver.TCPServer): pass
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
logging.basicConfig(
format="%(levelname)s:%(threadName)s:%(message)s",
level=logging.DEBUG,
handlers=[
logging.FileHandler("frank.log"),
logging.StreamHandler()
],
)
with TCPThreadServer(("0.0.0.0", 4567), RequestHandler) as s:
s.allow_reuse_address = True
s.serve_forever() | crypto/frank/src/handler.py |
import logging
import re
import signal
import socket
import socketserver
import sys
import threading
import time
from contextlib import contextmanager
from functools import partial
import bots
import server
class RequestHandler(socketserver.StreamRequestHandler):
def _write_out(self, m):
out = b"%s\n" % m
logging.debug(b"< " + out)
self.request.sendall(out)
def _read_in(self):
x = self.rfile.readline().strip()
assert re.match(r'[a-zA-Z0-9\-\s]+', x.decode('utf-8')), x
logging.debug(b"> %s\n" % x)
return x
def server_loop(self, end_time):
logging.info('recieved request from %s' % str(self.client_address))
pubkey = self._read_in()
# set up server
serv = server.Server()
client = serv.register(pubkey)
serv._set_client(client)
self._write_out(client)
all_bots = [bots.ReportBot(serv, self), bots.AbuseBot(serv, self)]
COMMANDS = {
b"list": serv.list_users,
b"put": serv.put_msg,
b"get": serv.get_msg,
b"key": serv.get_user,
b"send": partial(serv.send, client),
b"recv": partial(serv.recv, client),
b"report": partial(serv.report, client),
}
while (time.time() < end_time):
cmd = self._read_in().split(b' ')
if not cmd[0] in COMMANDS:
logging.info("INVALID COMMAND: %s" % cmd[0])
sys.exit(1)
# mypy doesn't like partial :(
resp = COMMANDS[cmd[0]](*cmd[1:]) #type: ignore
if cmd[0] == b"send":
for bot in all_bots:
bot.react()
if resp is not None:
if cmd[0] in [b"recv", b"list"]:
for x in resp:
self._write_out(x)
self._write_out(b"done")
else:
self._write_out(resp)
if serv.should_exit():
sys.exit(0)
if cmd[0] == b"send":
for bot in all_bots:
bot.react()
def handle(self):
try:
self.request.settimeout(30)
end_time = time.time() + 30
self.server_loop(end_time)
except socket.timeout:
logging.info('timeout')
except ConnectionResetError:
logging.info('reset')
except Exception as e:
logging.info('other error: %s' % str(e))
finally:
logging.info('closing request from %s' % str(self.client_address))
class TCPThreadServer(socketserver.ThreadingMixIn, socketserver.TCPServer): pass
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
logging.basicConfig(
format="%(levelname)s:%(threadName)s:%(message)s",
level=logging.DEBUG,
handlers=[
logging.FileHandler("frank.log"),
logging.StreamHandler()
],
)
with TCPThreadServer(("0.0.0.0", 4567), RequestHandler) as s:
s.allow_reuse_address = True
s.serve_forever() | 0.205456 | 0.116437 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
Sets the color of the `line` around each `link`.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the `line` around each `link`.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# widthsrc
# --------
@property
def widthsrc(self):
"""
Sets the source reference on plot.ly for width .
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "sankey.link"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the color of the `line` around each `link`.
colorsrc
Sets the source reference on plot.ly for color .
width
Sets the width (in px) of the `line` around each
`link`.
widthsrc
Sets the source reference on plot.ly for width .
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.sankey.link.Line
color
Sets the color of the `line` around each `link`.
colorsrc
Sets the source reference on plot.ly for color .
width
Sets the width (in px) of the `line` around each
`link`.
widthsrc
Sets the source reference on plot.ly for width .
Returns
-------
Line
"""
super(Line, self).__init__("line")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.sankey.link.Line
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Line"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.sankey.link import line as v_line
# Initialize validators
# ---------------------
self._validators["color"] = v_line.ColorValidator()
self._validators["colorsrc"] = v_line.ColorsrcValidator()
self._validators["width"] = v_line.WidthValidator()
self._validators["widthsrc"] = v_line.WidthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("width", None)
self["width"] = width if width is not None else _v
_v = arg.pop("widthsrc", None)
self["widthsrc"] = widthsrc if widthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Styling of the hover labels shown for a sankey trace's links."""

    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within the
        hover label box. Has an effect only if the hover label text
        spans two or more lines.

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        self["align"] = val

    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on plot.ly for align.

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        self["alignsrc"] = val

    # bgcolor
    # -------
    @property
    def bgcolor(self):
        """
        Sets the background color of the hover labels for this trace.

        The 'bgcolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'steelblue')
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bgcolor"]

    @bgcolor.setter
    def bgcolor(self, val):
        self["bgcolor"] = val

    # bgcolorsrc
    # ----------
    @property
    def bgcolorsrc(self):
        """
        Sets the source reference on plot.ly for bgcolor.

        The 'bgcolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bgcolorsrc"]

    @bgcolorsrc.setter
    def bgcolorsrc(self, val):
        self["bgcolorsrc"] = val

    # bordercolor
    # -----------
    @property
    def bordercolor(self):
        """
        Sets the border color of the hover labels for this trace.

        The 'bordercolor' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'steelblue')
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["bordercolor"]

    @bordercolor.setter
    def bordercolor(self, val):
        self["bordercolor"] = val

    # bordercolorsrc
    # --------------
    @property
    def bordercolorsrc(self):
        """
        Sets the source reference on plot.ly for bordercolor.

        The 'bordercolorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["bordercolorsrc"]

    @bordercolorsrc.setter
    def bordercolorsrc(self, val):
        self["bordercolorsrc"] = val

    # font
    # ----
    @property
    def font(self):
        """
        Sets the font used in hover labels.

        The 'font' property is an instance of Font that may be
        specified as:
          - An instance of plotly.graph_objs.sankey.link.hoverlabel.Font
          - A dict of string/value properties that will be passed to
            the Font constructor (supported keys: color, colorsrc,
            family, familysrc, size, sizesrc)

        Returns
        -------
        plotly.graph_objs.sankey.link.hoverlabel.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # namelength
    # ----------
    @property
    def namelength(self):
        """
        Sets the default length (in number of characters) of the trace
        name in the hover labels for all traces. -1 shows the whole
        name regardless of length. 0-3 shows the first 0-3 characters,
        and an integer >3 will show the whole name if it is less than
        that many characters, but if it is longer, will truncate to
        `namelength - 3` characters and add an ellipsis.

        The 'namelength' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [-1, 9223372036854775807]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|numpy.ndarray
        """
        return self["namelength"]

    @namelength.setter
    def namelength(self, val):
        self["namelength"] = val

    # namelengthsrc
    # -------------
    @property
    def namelengthsrc(self):
        """
        Sets the source reference on plot.ly for namelength.

        The 'namelengthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["namelengthsrc"]

    @namelengthsrc.setter
    def namelengthsrc(self, val):
        self["namelengthsrc"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "sankey.link"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .
        """

    def __init__(
        self,
        arg=None,
        align=None,
        alignsrc=None,
        bgcolor=None,
        bgcolorsrc=None,
        bordercolor=None,
        bordercolorsrc=None,
        font=None,
        namelength=None,
        namelengthsrc=None,
        **kwargs
    ):
        """
        Construct a new Hoverlabel object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.sankey.link.Hoverlabel
        align
            Horizontal alignment of the hover-label text (only
            relevant when the text spans two or more lines).
        alignsrc
            Sets the source reference on plot.ly for align.
        bgcolor
            Background color of the hover labels for this trace.
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor.
        bordercolor
            Border color of the hover labels for this trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor.
        font
            Font used in hover labels.
        namelength
            Default length (in characters) of the trace name shown
            in hover labels; -1 shows the whole name.
        namelengthsrc
            Sets the source reference on plot.ly for namelength.

        Returns
        -------
        Hoverlabel
        """
        super(Hoverlabel, self).__init__("hoverlabel")

        # Normalize `arg` into a private dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sankey.link.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Hoverlabel"""
            )

        # Honour skip_invalid for the duration of construction only.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Attach one validator per property.
        from plotly.validators.sankey.link import hoverlabel as v_hoverlabel

        self._validators["align"] = v_hoverlabel.AlignValidator()
        self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
        self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
        self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
        self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
        self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
        self._validators["font"] = v_hoverlabel.FontValidator()
        self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
        self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()

        # Explicit keyword arguments win over entries in `arg`; either
        # way the key is popped from `arg` so it is not re-processed by
        # _process_kwargs below.
        for prop, explicit in (
            ("align", align),
            ("alignsrc", alignsrc),
            ("bgcolor", bgcolor),
            ("bgcolorsrc", bgcolorsrc),
            ("bordercolor", bordercolor),
            ("bordercolorsrc", bordercolorsrc),
            ("font", font),
            ("namelength", namelength),
            ("namelengthsrc", namelengthsrc),
        ):
            popped = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else popped

        # Anything left over (unknown keys) is handled generically.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation after construction.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Colorscale(_BaseTraceHierarchyType):
    """Concentration-based coloring of a sankey trace's links."""

    # cmax
    # ----
    @property
    def cmax(self):
        """
        Sets the upper bound of the color domain.

        The 'cmax' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["cmax"]

    @cmax.setter
    def cmax(self, val):
        self["cmax"] = val

    # cmin
    # ----
    @property
    def cmin(self):
        """
        Sets the lower bound of the color domain.

        The 'cmin' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["cmin"]

    @cmin.setter
    def cmin(self, val):
        self["cmin"] = val

    # colorscale
    # ----------
    @property
    def colorscale(self):
        """
        Sets the colorscale. The colorscale must be an array containing
        arrays mapping a normalized value to an rgb, rgba, hex, hsl,
        hsv, or named color string. At minimum, a mapping for the
        lowest (0) and highest (1) values are required. For example,
        `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
        bounds of the colorscale in color space, use `cmin` and `cmax`.
        Alternatively, `colorscale` may be a palette name string, e.g.
        Greys, YlGnBu, Greens, YlOrRd, Bluered, RdBu, Reds, Blues,
        Picnic, Rainbow, Portland, Jet, Hot, Blackbody, Earth,
        Electric, Viridis, Cividis.

        The 'colorscale' property is a colorscale and may be
        specified as:
          - A list of colors that will be spaced evenly to create
            the colorscale
          - A list of 2-element lists [level, color] where level is a
            normalized value starting at 0 and ending at 1
            (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
          - The name of a predefined colorscale (see the sequential,
            diverging, and cyclical modules in the plotly.colors
            package for the full list)

        Returns
        -------
        str
        """
        return self["colorscale"]

    @colorscale.setter
    def colorscale(self, val):
        self["colorscale"] = val

    # label
    # -----
    @property
    def label(self):
        """
        The label of the links to color based on their concentration
        within a flow.

        The 'label' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["label"]

    @label.setter
    def label(self, val):
        self["label"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "sankey.link"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        cmax
            Sets the upper bound of the color domain.
        cmin
            Sets the lower bound of the color domain.
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use`cmin` and `cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Greys,YlGnBu,Greens,YlOrR
            d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
            ot,Blackbody,Earth,Electric,Viridis,Cividis.
        label
            The label of the links to color based on their
            concentration within a flow.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        """

    def __init__(
        self,
        arg=None,
        cmax=None,
        cmin=None,
        colorscale=None,
        label=None,
        name=None,
        templateitemname=None,
        **kwargs
    ):
        """
        Construct a new Colorscale object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.sankey.link.Colorscale
        cmax
            Upper bound of the color domain.
        cmin
            Lower bound of the color domain.
        colorscale
            The colorscale: a list of [level, color] pairs, a plain
            list of colors, or a palette name string.
        label
            The label of the links to color based on their
            concentration within a flow.
        name
            Template item name (see the `name` property docstring).
        templateitemname
            Reference to a named template item (see the
            `templateitemname` property docstring).

        Returns
        -------
        Colorscale
        """
        # NOTE(review): the parent attribute name is the plural
        # "colorscales" — presumably because this item lives in the
        # `link.colorscales` array; confirm against the schema before
        # changing it.
        super(Colorscale, self).__init__("colorscales")

        # Normalize `arg` into a private dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sankey.link.Colorscale
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Colorscale"""
            )

        # Honour skip_invalid for the duration of construction only.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Attach one validator per property.
        from plotly.validators.sankey.link import colorscale as v_colorscale

        self._validators["cmax"] = v_colorscale.CmaxValidator()
        self._validators["cmin"] = v_colorscale.CminValidator()
        self._validators["colorscale"] = v_colorscale.ColorscaleValidator()
        self._validators["label"] = v_colorscale.LabelValidator()
        self._validators["name"] = v_colorscale.NameValidator()
        self._validators["templateitemname"] = v_colorscale.TemplateitemnameValidator()

        # Explicit keyword arguments win over entries in `arg`; either
        # way the key is popped from `arg` so it is not re-processed by
        # _process_kwargs below.
        for prop, explicit in (
            ("cmax", cmax),
            ("cmin", cmin),
            ("colorscale", colorscale),
            ("label", label),
            ("name", name),
            ("templateitemname", templateitemname),
        ):
            popped = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else popped

        # Anything left over (unknown keys) is handled generically.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation after construction.
        self._skip_invalid = False
# Public API of this module. "Colorscale" was previously listed twice;
# each name must appear exactly once.
__all__ = ["Colorscale", "Hoverlabel", "Line", "hoverlabel"]
from plotly.graph_objs.sankey.link import hoverlabel

from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Styling of the outline drawn around each link of a sankey trace."""

    # color
    # -----
    @property
    def color(self):
        """
        Sets the color of the `line` around each `link`.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (e.g. 'steelblue')
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on plot.ly for color.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the width (in px) of the `line` around each `link`.

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # widthsrc
    # --------
    @property
    def widthsrc(self):
        """
        Sets the source reference on plot.ly for width.

        The 'widthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["widthsrc"]

    @widthsrc.setter
    def widthsrc(self, val):
        self["widthsrc"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        return "sankey.link"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the color of the `line` around each `link`.
        colorsrc
            Sets the source reference on plot.ly for  color .
        width
            Sets the width (in px) of the `line` around each
            `link`.
        widthsrc
            Sets the source reference on plot.ly for  width .
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
    ):
        """
        Construct a new Line object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.sankey.link.Line
        color
            Color of the `line` around each `link`.
        colorsrc
            Sets the source reference on plot.ly for color.
        width
            Width (in px) of the `line` around each `link`.
        widthsrc
            Sets the source reference on plot.ly for width.

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Normalize `arg` into a private dict we can safely pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sankey.link.Line
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Line"""
            )

        # Honour skip_invalid for the duration of construction only.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Attach one validator per property.
        from plotly.validators.sankey.link import line as v_line

        self._validators["color"] = v_line.ColorValidator()
        self._validators["colorsrc"] = v_line.ColorsrcValidator()
        self._validators["width"] = v_line.WidthValidator()
        self._validators["widthsrc"] = v_line.WidthsrcValidator()

        # Explicit keyword arguments win over entries in `arg`; either
        # way the key is popped from `arg` so it is not re-processed by
        # _process_kwargs below.
        for prop, explicit in (
            ("color", color),
            ("colorsrc", colorsrc),
            ("width", width),
            ("widthsrc", widthsrc),
        ):
            popped = arg.pop(prop, None)
            self[prop] = explicit if explicit is not None else popped

        # Anything left over (unknown keys) is handled generically.
        self._process_kwargs(**dict(arg, **kwargs))

        # Re-enable strict validation after construction.
        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
    """
    Sets the border color of the hover labels for this trace.

    The 'bordercolor' property is a color and may be specified as:
      - A hex string (e.g. '#ff0000')
      - An rgb/rgba string (e.g. 'rgb(255,0,0)')
      - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
      - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
      - A named CSS color (any standard CSS color name, e.g.
        'aliceblue', 'crimson', 'rebeccapurple', ...)
      - A list or array of any of the above

    Returns
    -------
    str|numpy.ndarray
    """
    return self["bordercolor"]

@bordercolor.setter
def bordercolor(self, val):
    # Validation happens in BasePlotlyType.__setitem__.
    self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
    """
    Sets the source reference on plot.ly for bordercolor.

    The 'bordercolorsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["bordercolorsrc"]

@bordercolorsrc.setter
def bordercolorsrc(self, val):
    # Validation happens in BasePlotlyType.__setitem__.
    self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
    """
    Sets the font used in hover labels.

    The 'font' property is an instance of Font that may be specified as:
      - An instance of plotly.graph_objs.sankey.link.hoverlabel.Font
      - A dict of string/value properties that will be passed to the
        Font constructor.

        Supported dict properties:

        color
        colorsrc
            Sets the source reference on plot.ly for color.
        family
            HTML font family - the typeface applied by the web
            browser. Multiple comma-separated families give the
            fallback preference order; the plotly image server
            supports a fixed set including "Arial", "Courier New",
            "Open Sans", "Times New Roman", etc.
        familysrc
            Sets the source reference on plot.ly for family.
        size
        sizesrc
            Sets the source reference on plot.ly for size.

    Returns
    -------
    plotly.graph_objs.sankey.link.hoverlabel.Font
    """
    return self["font"]

@font.setter
def font(self, val):
    # Validation/coercion to a Font instance happens in __setitem__.
    self["font"] = val
# namelength
# ----------
@property
def namelength(self):
    """
    Sets the default length (in number of characters) of the trace
    name in the hover labels for all traces. -1 shows the whole
    name regardless of length. 0-3 shows the first 0-3 characters,
    and an integer >3 will show the whole name if it is less than
    that many characters, but if it is longer, will truncate to
    `namelength - 3` characters and add an ellipsis.

    The 'namelength' property is an integer and may be specified as:
      - An int (or float that will be cast to an int)
        in the interval [-1, 9223372036854775807]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|numpy.ndarray
    """
    return self["namelength"]

@namelength.setter
def namelength(self, val):
    # Validation happens in BasePlotlyType.__setitem__.
    self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
    """
    Sets the source reference on plot.ly for namelength.

    The 'namelengthsrc' property must be specified as a string or
    as a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["namelengthsrc"]

@namelengthsrc.setter
def namelengthsrc(self, val):
    # Validation happens in BasePlotlyType.__setitem__.
    self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
    # Dotted path of the parent object in the figure hierarchy;
    # used by the plotly machinery to locate this object's slot.
    return "sankey.link"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # This literal is interpolated verbatim into generated constructor
    # docstrings; treat the text (including its quirks) as data.
    return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on plot.ly for align .
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on plot.ly for bgcolor .
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on plot.ly for bordercolor .
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on plot.ly for namelength .
        """
def __init__(
    self,
    arg=None,
    align=None,
    alignsrc=None,
    bgcolor=None,
    bgcolorsrc=None,
    bordercolor=None,
    bordercolorsrc=None,
    font=None,
    namelength=None,
    namelengthsrc=None,
    **kwargs
):
    """
    Construct a new Hoverlabel object.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of plotly.graph_objs.sankey.link.Hoverlabel
    align
        Horizontal alignment of the text content within the hover
        label box (only relevant for multi-line labels).
    alignsrc
        Source reference on plot.ly for align.
    bgcolor
        Background color of the hover labels for this trace.
    bgcolorsrc
        Source reference on plot.ly for bgcolor.
    bordercolor
        Border color of the hover labels for this trace.
    bordercolorsrc
        Source reference on plot.ly for bordercolor.
    font
        Font used in hover labels.
    namelength
        Default length (in characters) of the trace name shown in
        hover labels; -1 shows the whole name.
    namelengthsrc
        Source reference on plot.ly for namelength.

    Returns
    -------
    Hoverlabel
    """
    super(Hoverlabel, self).__init__("hoverlabel")

    # Validate arg
    # ------------
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow-copy so the pops below don't mutate the caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.sankey.link.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Hoverlabel"""
        )

    # Handle skip_invalid
    # -------------------
    # When True, invalid properties are dropped instead of raising.
    self._skip_invalid = kwargs.pop("skip_invalid", False)

    # Import validators
    # -----------------
    from plotly.validators.sankey.link import hoverlabel as v_hoverlabel

    # Initialize validators
    # ---------------------
    self._validators["align"] = v_hoverlabel.AlignValidator()
    self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
    self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
    self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
    self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
    self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
    self._validators["font"] = v_hoverlabel.FontValidator()
    self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
    self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()

    # Populate data dict with properties
    # ----------------------------------
    # Explicit keyword arguments take precedence over values in `arg`.
    _v = arg.pop("align", None)
    self["align"] = align if align is not None else _v
    _v = arg.pop("alignsrc", None)
    self["alignsrc"] = alignsrc if alignsrc is not None else _v
    _v = arg.pop("bgcolor", None)
    self["bgcolor"] = bgcolor if bgcolor is not None else _v
    _v = arg.pop("bgcolorsrc", None)
    self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
    _v = arg.pop("bordercolor", None)
    self["bordercolor"] = bordercolor if bordercolor is not None else _v
    _v = arg.pop("bordercolorsrc", None)
    self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
    _v = arg.pop("font", None)
    self["font"] = font if font is not None else _v
    _v = arg.pop("namelength", None)
    self["namelength"] = namelength if namelength is not None else _v
    _v = arg.pop("namelengthsrc", None)
    self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v

    # Process unknown kwargs
    # ----------------------
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset skip_invalid
    # ------------------
    self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Colorscale(_BaseTraceHierarchyType):
    """Per-link colorscale settings for sankey diagrams.

    Each instance colors the links whose `label` matches, according to
    their concentration within a flow (items of sankey.link.colorscales).
    """

    # cmax
    # ----
    @property
    def cmax(self):
        """
        Sets the upper bound of the color domain.

        The 'cmax' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["cmax"]

    @cmax.setter
    def cmax(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["cmax"] = val

    # cmin
    # ----
    @property
    def cmin(self):
        """
        Sets the lower bound of the color domain.

        The 'cmin' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["cmin"]

    @cmin.setter
    def cmin(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["cmin"] = val

    # colorscale
    # ----------
    @property
    def colorscale(self):
        """
        Sets the colorscale. The colorscale must be an array containing
        arrays mapping a normalized value to an rgb, rgba, hex, hsl,
        hsv, or named color string. At minimum, a mapping for the
        lowest (0) and highest (1) values are required. For example,
        `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
        bounds of the colorscale in color space, use `cmin` and `cmax`.
        Alternatively, `colorscale` may be a palette name string such as
        'Greys', 'YlGnBu', 'RdBu', 'Jet', 'Viridis', 'Cividis', ...

        The 'colorscale' property is a colorscale and may be specified as:
          - A list of colors, spaced evenly to create the colorscale
            (predefined lists live in the plotly.colors package)
          - A list of [normalized level, color string] 2-element lists
            (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
          - One of the named colorscales accepted by the
            ColorscaleValidator (e.g. 'viridis', 'plasma', 'rdbu', ...)

        Returns
        -------
        str
        """
        return self["colorscale"]

    @colorscale.setter
    def colorscale(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["colorscale"] = val

    # label
    # -----
    @property
    def label(self):
        """
        The label of the links to color based on their concentration
        within a flow.

        The 'label' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["label"]

    @label.setter
    def label(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["label"] = val

    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.

        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["name"] = val

    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.

        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["templateitemname"]

    @templateitemname.setter
    def templateitemname(self, val):
        # Validation happens in BasePlotlyType.__setitem__.
        self["templateitemname"] = val

    # property parent name
    # --------------------
    @property
    def _parent_path_str(self):
        # Dotted path of the parent object in the figure hierarchy.
        return "sankey.link"

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Interpolated verbatim into generated docstrings; treat as data.
        return """\
        cmax
            Sets the upper bound of the color domain.
        cmin
            Sets the lower bound of the color domain.
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use`cmin` and `cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Greys,YlGnBu,Greens,YlOrR
            d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
            ot,Blackbody,Earth,Electric,Viridis,Cividis.
        label
            The label of the links to color based on their
            concentration within a flow.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        """

    def __init__(
        self,
        arg=None,
        cmax=None,
        cmin=None,
        colorscale=None,
        label=None,
        name=None,
        templateitemname=None,
        **kwargs
    ):
        """
        Construct a new Colorscale object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of plotly.graph_objs.sankey.link.Colorscale
        cmax
            Sets the upper bound of the color domain.
        cmin
            Sets the lower bound of the color domain.
        colorscale
            Colorscale array of [level, color] pairs or a palette
            name string (see the `colorscale` property).
        label
            The label of the links to color based on their
            concentration within a flow.
        name
            Template item name (see the `name` property).
        templateitemname
            Reference to a named template item (see the
            `templateitemname` property).

        Returns
        -------
        Colorscale
        """
        # NOTE(review): the plot name is the plural "colorscales" --
        # presumably because these objects populate items of the
        # sankey.link.colorscales array; confirm against the generator.
        super(Colorscale, self).__init__("colorscales")

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so the pops below don't mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.sankey.link.Colorscale
constructor must be a dict or
an instance of plotly.graph_objs.sankey.link.Colorscale"""
            )

        # Handle skip_invalid
        # -------------------
        # When True, invalid properties are dropped instead of raising.
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Import validators
        # -----------------
        from plotly.validators.sankey.link import colorscale as v_colorscale

        # Initialize validators
        # ---------------------
        self._validators["cmax"] = v_colorscale.CmaxValidator()
        self._validators["cmin"] = v_colorscale.CminValidator()
        self._validators["colorscale"] = v_colorscale.ColorscaleValidator()
        self._validators["label"] = v_colorscale.LabelValidator()
        self._validators["name"] = v_colorscale.NameValidator()
        self._validators["templateitemname"] = v_colorscale.TemplateitemnameValidator()

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("cmax", None)
        self["cmax"] = cmax if cmax is not None else _v
        _v = arg.pop("cmin", None)
        self["cmin"] = cmin if cmin is not None else _v
        _v = arg.pop("colorscale", None)
        self["colorscale"] = colorscale if colorscale is not None else _v
        _v = arg.pop("label", None)
        self["label"] = label if label is not None else _v
        _v = arg.pop("name", None)
        self["name"] = name if name is not None else _v
        _v = arg.pop("templateitemname", None)
        self["templateitemname"] = (
            templateitemname if templateitemname is not None else _v
        )

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
# Public API of this module ("Colorscale" was listed twice; the
# duplicate entry has been removed -- duplicates in __all__ are
# redundant and confuse `from module import *` readers).
__all__ = ["Colorscale", "Hoverlabel", "Line", "hoverlabel"]
from plotly.graph_objs.sankey.link import hoverlabel | 0.923906 | 0.273571 |
r"""Computes the reward prediction confusion matrix given checkpoints and data.
Usage:
reward_confusion \
--problem="gym_pong_deterministic-v4_random" \
--model="next_frame_sv2p" \
--hparams_set="next_frame_sv2p" \
--output_dir=$CHECKPOINT_DIRECTORY \
--data_dir=$DATA_DIRECTORY \
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.bin.t2t_decoder import create_hparams
from tensor2tensor.data_generators import problem # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
def print_confusion_matrix(title, cm):
    """Pretty-print a confusion matrix under a '=' banner.

    Parameters
    ----------
    title : str
        Heading printed between the first two banner lines.
    cm
        The confusion matrix; printed via its str() form
        (typically a numpy array here).
    """
    banner = "=" * 30
    # Emit banner / title / banner / matrix / banner / blank line.
    for part in (banner, title, banner, cm, banner, ""):
        print(part)
def main(_):
    """Run reward prediction over the PREDICT split and print the
    per-frame and next-frame reward confusion matrices."""
    tf.logging.set_verbosity(tf.logging.INFO)
    trainer_lib.set_random_seed(FLAGS.random_seed)
    usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)

    # Create hparams
    hparams = create_hparams()
    # Force full prediction so target rewards are genuine model outputs.
    hparams.force_full_predict = True
    batch_size = hparams.batch_size

    # Iterating over dev/test partition of the data.
    # Change the data partition if necessary.
    dataset = registry.problem(FLAGS.problem).dataset(
        tf.estimator.ModeKeys.PREDICT,
        shuffle_files=False,
        hparams=hparams)
    # drop_remainder keeps a static batch dimension for the loop below.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    data = dataset.make_one_shot_iterator().get_next()
    # Feed only the "input*" features to infer(); the targets remain in
    # `data` for comparison against the predictions.
    input_data = dict((k, data[k]) for k in data.keys() if k.startswith("input"))

    # Create model
    model_cls = registry.model(FLAGS.model)
    model = model_cls(hparams, tf.estimator.ModeKeys.PREDICT)
    prediction_ops = model.infer(input_data)

    # Confusion matrices: rows = ground-truth reward, cols = predicted.
    nr = hparams.problem.num_rewards
    cm_per_frame = np.zeros((nr, nr), dtype=np.uint64)
    cm_next_frame = np.zeros((nr, nr), dtype=np.uint64)

    saver = tf.train.Saver()
    with tf.train.SingularMonitoredSession() as sess:
        # Load latest checkpoint
        ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path
        saver.restore(sess.raw_session(), ckpt)

        counter = 0
        while not sess.should_stop():
            counter += 1
            # NOTE(review): `counter % 1 == 0` is always true, so this
            # prints every batch -- presumably a leftover from a larger
            # reporting interval; confirm the intended stride.
            if counter % 1 == 0:
                print(counter)

            # Predict next frames
            rew_pd, rew_gt = sess.run(
                [prediction_ops["target_reward"], data["target_reward"]])

            for i in range(batch_size):
                # Assumes reward tensors are shaped [batch, time, 1];
                # [i, 0, 0] is the first predicted step -- TODO confirm.
                cm_next_frame[rew_gt[i, 0, 0], rew_pd[i, 0, 0]] += 1
                for gt, pd in zip(rew_gt[i], rew_pd[i]):
                    cm_per_frame[gt, pd] += 1

    print_confusion_matrix("Per-frame Confusion Matrix", cm_per_frame)
    print_confusion_matrix("Next-frame Confusion Matrix", cm_next_frame)
if __name__ == "__main__":
    # tf.app.run parses the command-line flags and then calls main().
    tf.app.run()
r"""Computes the reward prediction confusion matrix given checkpoints and data.
Usage:
reward_confusion \
--problem="gym_pong_deterministic-v4_random" \
--model="next_frame_sv2p" \
--hparams_set="next_frame_sv2p" \
--output_dir=$CHECKPOINT_DIRECTORY \
--data_dir=$DATA_DIRECTORY \
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.bin.t2t_decoder import create_hparams
from tensor2tensor.data_generators import problem # pylint: disable=unused-import
from tensor2tensor.utils import registry
from tensor2tensor.utils import trainer_lib
from tensor2tensor.utils import usr_dir
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
def print_confusion_matrix(title, cm):
    """Print `cm` framed by '=' banner lines, with `title` as heading."""
    print("=" * 30)
    print(title)
    print("=" * 30)
    print(cm)
    print("=" * 30)
    print()
def main(_):
    """Run reward prediction over the PREDICT split and print the
    per-frame and next-frame reward confusion matrices."""
    tf.logging.set_verbosity(tf.logging.INFO)
    trainer_lib.set_random_seed(FLAGS.random_seed)
    usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)

    # Create hparams
    hparams = create_hparams()
    # Force full prediction so target rewards are genuine model outputs.
    hparams.force_full_predict = True
    batch_size = hparams.batch_size

    # Iterating over dev/test partition of the data.
    # Change the data partition if necessary.
    dataset = registry.problem(FLAGS.problem).dataset(
        tf.estimator.ModeKeys.PREDICT,
        shuffle_files=False,
        hparams=hparams)
    # drop_remainder keeps a static batch dimension for the loop below.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    data = dataset.make_one_shot_iterator().get_next()
    # Feed only the "input*" features to infer(); the targets remain in
    # `data` for comparison against the predictions.
    input_data = dict((k, data[k]) for k in data.keys() if k.startswith("input"))

    # Create model
    model_cls = registry.model(FLAGS.model)
    model = model_cls(hparams, tf.estimator.ModeKeys.PREDICT)
    prediction_ops = model.infer(input_data)

    # Confusion matrices: rows = ground-truth reward, cols = predicted.
    nr = hparams.problem.num_rewards
    cm_per_frame = np.zeros((nr, nr), dtype=np.uint64)
    cm_next_frame = np.zeros((nr, nr), dtype=np.uint64)

    saver = tf.train.Saver()
    with tf.train.SingularMonitoredSession() as sess:
        # Load latest checkpoint
        ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path
        saver.restore(sess.raw_session(), ckpt)

        counter = 0
        while not sess.should_stop():
            counter += 1
            # NOTE(review): `counter % 1 == 0` is always true, so this
            # prints every batch -- presumably a leftover from a larger
            # reporting interval; confirm the intended stride.
            if counter % 1 == 0:
                print(counter)

            # Predict next frames
            rew_pd, rew_gt = sess.run(
                [prediction_ops["target_reward"], data["target_reward"]])

            for i in range(batch_size):
                # Assumes reward tensors are shaped [batch, time, 1];
                # [i, 0, 0] is the first predicted step -- TODO confirm.
                cm_next_frame[rew_gt[i, 0, 0], rew_pd[i, 0, 0]] += 1
                for gt, pd in zip(rew_gt[i], rew_pd[i]):
                    cm_per_frame[gt, pd] += 1

    print_confusion_matrix("Per-frame Confusion Matrix", cm_per_frame)
    print_confusion_matrix("Next-frame Confusion Matrix", cm_next_frame)
if __name__ == "__main__":
    # tf.app.run parses the command-line flags and then calls main().
    tf.app.run()
import os
import os.path
import platform
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
class ShellTestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = os.path.realpath(tempfile.mkdtemp())
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def test_quote_command(self):
self.assertEqual(shell.quote_command(["a b", "", "c"]), "'a b' '' c")
def test_call(self):
shell.dry_run = False
foo_file = os.path.join(self.tmpdir, 'foo.txt')
bar_file = os.path.join(self.tmpdir, 'bar.txt')
with open(foo_file, 'w') as f:
f.write("Hello Swift")
shell.call(['cp', foo_file, bar_file])
with open(bar_file, 'r') as f:
self.assertEqual(f.read(), "Hello Swift")
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ cp {foo_file} {bar_file}
'''.format(foo_file=self._platform_quote(foo_file),
bar_file=self._platform_quote(bar_file)))
def test_capture(self):
self.assertEqual(shell.capture(["echo", "hi"]), "hi\n")
with self.assertRaises(SystemExit):
shell.capture(["false"])
self.assertIsNone(shell.capture(["false"], optional=True))
self.assertEqual(
shell.capture(["sh", "-c", "echo foo && false"],
allow_non_zero_exit=True), "foo\n")
with self.assertRaises(SystemExit):
shell.capture(["**not-a-command**"], optional=False)
self.assertIsNone(shell.capture(["**not-a-command**"], optional=True))
def test_rmtree(self):
shell.dry_run = False
path = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(path)
self.assertTrue(os.path.isdir(path))
shell.rmtree(os.path.join(path))
self.assertFalse(
os.path.exists(os.path.join(path)))
self.assertTrue(
os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ mkdir -p {path}
+ rm -rf {path}
'''.format(path=self._platform_quote(path)))
def test_pushd(self):
shell.dry_run = False
basedir = os.getcwd()
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# pushd inside pushd
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
shell.makedirs('foo')
with shell.pushd('foo'):
self.assertEqual(os.getcwd(),
os.path.join(self.tmpdir, 'foo'))
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# cd inside pushd
with shell.pushd(self.tmpdir):
os.chdir('foo')
self.assertEqual(os.getcwd(), os.path.join(self.tmpdir, 'foo'))
os.chdir('..')
self.assertEqual(os.getcwd(), self.tmpdir)
shell.rmtree('foo')
self.assertEqual(os.getcwd(), basedir)
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ pushd {tmpdir}
+ popd
+ pushd {tmpdir}
+ mkdir -p foo
+ pushd foo
+ popd
+ popd
+ pushd {tmpdir}
+ rm -rf foo
+ popd
'''.format(tmpdir=self._platform_quote(self.tmpdir)))
def test_dry_run(self):
shell.dry_run = True
basedir = os.getcwd()
foobar_dir = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(foobar_dir)
self.assertFalse(os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertFalse(os.path.exists(foobar_dir))
with shell.pushd(foobar_dir):
self.assertEqual(os.getcwd(), basedir)
shell.call(['touch', 'testfile'])
self.assertFalse(os.path.exists(
os.path.join(foobar_dir, 'testfile')))
self.assertEqual(os.getcwd(), basedir)
shell.rmtree(self.tmpdir)
self.assertTrue(os.path.exists(self.tmpdir))
self.assertEqual(self.stdout.getvalue(), '''\
+ mkdir -p {foobar_dir}
+ pushd {foobar_dir}
+ touch testfile
+ popd
+ rm -rf {tmpdir}
'''.format(foobar_dir=self._platform_quote(foobar_dir),
tmpdir=self._platform_quote(self.tmpdir)))
self.assertEqual(self.stderr.getvalue(), "")
self.dry_run = False
def _platform_quote(self, path):
if platform.system() == 'Windows':
return "'{}'".format(path)
else:
return path | utils/swift_build_support/tests/test_shell.py |
import os
import os.path
import platform
import shutil
import sys
import tempfile
import unittest
try:
# py2
from StringIO import StringIO
except ImportError:
# py3
from io import StringIO
from swift_build_support import shell
class ShellTestCase(unittest.TestCase):
def setUp(self):
self.tmpdir = os.path.realpath(tempfile.mkdtemp())
self._orig_stdout = sys.stdout
self._orig_stderr = sys.stderr
self.stdout = StringIO()
self.stderr = StringIO()
sys.stdout = self.stdout
sys.stderr = self.stderr
def tearDown(self):
sys.stdout = self._orig_stdout
sys.stderr = self._orig_stderr
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def test_quote_command(self):
self.assertEqual(shell.quote_command(["a b", "", "c"]), "'a b' '' c")
def test_call(self):
shell.dry_run = False
foo_file = os.path.join(self.tmpdir, 'foo.txt')
bar_file = os.path.join(self.tmpdir, 'bar.txt')
with open(foo_file, 'w') as f:
f.write("Hello Swift")
shell.call(['cp', foo_file, bar_file])
with open(bar_file, 'r') as f:
self.assertEqual(f.read(), "Hello Swift")
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ cp {foo_file} {bar_file}
'''.format(foo_file=self._platform_quote(foo_file),
bar_file=self._platform_quote(bar_file)))
def test_capture(self):
self.assertEqual(shell.capture(["echo", "hi"]), "hi\n")
with self.assertRaises(SystemExit):
shell.capture(["false"])
self.assertIsNone(shell.capture(["false"], optional=True))
self.assertEqual(
shell.capture(["sh", "-c", "echo foo && false"],
allow_non_zero_exit=True), "foo\n")
with self.assertRaises(SystemExit):
shell.capture(["**not-a-command**"], optional=False)
self.assertIsNone(shell.capture(["**not-a-command**"], optional=True))
def test_rmtree(self):
shell.dry_run = False
path = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(path)
self.assertTrue(os.path.isdir(path))
shell.rmtree(os.path.join(path))
self.assertFalse(
os.path.exists(os.path.join(path)))
self.assertTrue(
os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ mkdir -p {path}
+ rm -rf {path}
'''.format(path=self._platform_quote(path)))
def test_pushd(self):
shell.dry_run = False
basedir = os.getcwd()
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# pushd inside pushd
with shell.pushd(self.tmpdir):
self.assertEqual(os.getcwd(), self.tmpdir)
shell.makedirs('foo')
with shell.pushd('foo'):
self.assertEqual(os.getcwd(),
os.path.join(self.tmpdir, 'foo'))
self.assertEqual(os.getcwd(), self.tmpdir)
self.assertEqual(os.getcwd(), basedir)
# cd inside pushd
with shell.pushd(self.tmpdir):
os.chdir('foo')
self.assertEqual(os.getcwd(), os.path.join(self.tmpdir, 'foo'))
os.chdir('..')
self.assertEqual(os.getcwd(), self.tmpdir)
shell.rmtree('foo')
self.assertEqual(os.getcwd(), basedir)
self.assertEqual(self.stdout.getvalue(), "")
self.assertEqual(self.stderr.getvalue(), '''\
+ pushd {tmpdir}
+ popd
+ pushd {tmpdir}
+ mkdir -p foo
+ pushd foo
+ popd
+ popd
+ pushd {tmpdir}
+ rm -rf foo
+ popd
'''.format(tmpdir=self._platform_quote(self.tmpdir)))
def test_dry_run(self):
shell.dry_run = True
basedir = os.getcwd()
foobar_dir = os.path.join(self.tmpdir, 'foo', 'bar')
shell.makedirs(foobar_dir)
self.assertFalse(os.path.exists(os.path.join(self.tmpdir, 'foo')))
self.assertFalse(os.path.exists(foobar_dir))
with shell.pushd(foobar_dir):
self.assertEqual(os.getcwd(), basedir)
shell.call(['touch', 'testfile'])
self.assertFalse(os.path.exists(
os.path.join(foobar_dir, 'testfile')))
self.assertEqual(os.getcwd(), basedir)
shell.rmtree(self.tmpdir)
self.assertTrue(os.path.exists(self.tmpdir))
self.assertEqual(self.stdout.getvalue(), '''\
+ mkdir -p {foobar_dir}
+ pushd {foobar_dir}
+ touch testfile
+ popd
+ rm -rf {tmpdir}
'''.format(foobar_dir=self._platform_quote(foobar_dir),
tmpdir=self._platform_quote(self.tmpdir)))
self.assertEqual(self.stderr.getvalue(), "")
self.dry_run = False
def _platform_quote(self, path):
if platform.system() == 'Windows':
return "'{}'".format(path)
else:
return path | 0.31321 | 0.252517 |
import urllib
import six
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.create_volume_from_snapshot()``"""
STORAGE_POOL_ID = six.text_type('1')
STORAGE_POOL_NAME = 'SP1'
PROT_DOMAIN_ID = six.text_type('1')
PROT_DOMAIN_NAME = 'PD1'
def setUp(self):
"""Setup a test case environment.
Creates fake volume and snapshot objects and sets up the required
API responses.
"""
super(TestCreateVolumeFromSnapShot, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.snapshot = fake_snapshot.fake_snapshot_obj(ctx)
self.snapshot_name_2x_enc = urllib.quote(
urllib.quote(self.driver.id_to_base64(self.snapshot.id))
)
self.volume = fake_volume.fake_volume_obj(ctx)
self.volume_name_2x_enc = urllib.quote(
urllib.quote(self.driver.id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.snapshot.id,
'instances/System/action/snapshotVolumes': self.volume.id,
},
self.RESPONSE_MODE.BadStatus: {
'instances/System/action/snapshotVolumes::':
self.BAD_STATUS_RESPONSE,
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
},
self.RESPONSE_MODE.Invalid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: None,
},
}
def test_bad_login(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.volume,
self.snapshot
)
def test_invalid_snapshot(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.volume,
self.snapshot
)
def test_create_volume_from_snapshot(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.create_volume_from_snapshot(self.volume, self.snapshot) | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume_from_snapshot.py | import urllib
import six
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
class TestCreateVolumeFromSnapShot(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.create_volume_from_snapshot()``"""
STORAGE_POOL_ID = six.text_type('1')
STORAGE_POOL_NAME = 'SP1'
PROT_DOMAIN_ID = six.text_type('1')
PROT_DOMAIN_NAME = 'PD1'
def setUp(self):
"""Setup a test case environment.
Creates fake volume and snapshot objects and sets up the required
API responses.
"""
super(TestCreateVolumeFromSnapShot, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.snapshot = fake_snapshot.fake_snapshot_obj(ctx)
self.snapshot_name_2x_enc = urllib.quote(
urllib.quote(self.driver.id_to_base64(self.snapshot.id))
)
self.volume = fake_volume.fake_volume_obj(ctx)
self.volume_name_2x_enc = urllib.quote(
urllib.quote(self.driver.id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.snapshot.id,
'instances/System/action/snapshotVolumes': self.volume.id,
},
self.RESPONSE_MODE.BadStatus: {
'instances/System/action/snapshotVolumes::':
self.BAD_STATUS_RESPONSE,
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
self.snapshot_name_2x_enc: self.BAD_STATUS_RESPONSE,
},
self.RESPONSE_MODE.Invalid: {
'types/Volume/instances/getByName::' +
self.snapshot_name_2x_enc: None,
},
}
def test_bad_login(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.volume,
self.snapshot
)
def test_invalid_snapshot(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(
exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
self.volume,
self.snapshot
)
def test_create_volume_from_snapshot(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.create_volume_from_snapshot(self.volume, self.snapshot) | 0.514156 | 0.168412 |
import argparse
import subprocess
import shutil
import sys
import os
from create_xcframework import create_xcframework
DSYMUTIL = os.path.join(os.path.dirname(__file__), '..', '..', '..',
'buildtools', 'mac-x64', 'clang', 'bin', 'dsymutil')
def main():
parser = argparse.ArgumentParser(description='Creates Flutter.framework and Flutter.xcframework')
parser.add_argument('--dst', type=str, required=True)
parser.add_argument('--arm64-out-dir', type=str, required=True)
parser.add_argument('--armv7-out-dir', type=str, required=False)
# TODO(gw280): Remove --simulator-out-dir alias when all recipes are updated
parser.add_argument('--simulator-x64-out-dir', '--simulator-out-dir', type=str, required=True)
parser.add_argument('--simulator-arm64-out-dir', type=str, required=False)
parser.add_argument('--strip', action="store_true", default=False)
parser.add_argument('--dsym', action="store_true", default=False)
parser.add_argument('--strip-bitcode', dest='strip_bitcode', action="store_true", default=False)
args = parser.parse_args()
framework = os.path.join(args.dst, 'Flutter.framework')
simulator_framework = os.path.join(args.dst, 'sim', 'Flutter.framework')
arm64_framework = os.path.join(args.arm64_out_dir, 'Flutter.framework')
simulator_x64_framework = os.path.join(args.simulator_x64_out_dir, 'Flutter.framework')
if args.simulator_arm64_out_dir is not None:
simulator_arm64_framework = os.path.join(args.simulator_arm64_out_dir, 'Flutter.framework')
simulator_arm64_dylib = os.path.join(simulator_arm64_framework, 'Flutter')
arm64_dylib = os.path.join(arm64_framework, 'Flutter')
simulator_x64_dylib = os.path.join(simulator_x64_framework, 'Flutter')
if not os.path.isdir(arm64_framework):
print('Cannot find iOS arm64 Framework at %s' % arm64_framework)
return 1
if not os.path.isdir(simulator_x64_framework):
print('Cannot find iOS x64 simulator Framework at %s' % simulator_framework)
return 1
if not os.path.isfile(arm64_dylib):
print('Cannot find iOS arm64 dylib at %s' % arm64_dylib)
return 1
if not os.path.isfile(simulator_x64_dylib):
print('Cannot find iOS simulator dylib at %s' % simulator_dylib)
return 1
if not os.path.isfile(DSYMUTIL):
print('Cannot find dsymutil at %s' % DSYMUTIL)
return 1
shutil.rmtree(framework, True)
shutil.copytree(arm64_framework, framework)
framework_binary = os.path.join(framework, 'Flutter')
process_framework(args, framework, framework_binary)
if args.simulator_arm64_out_dir is not None:
shutil.rmtree(simulator_framework, True)
shutil.copytree(simulator_arm64_framework, simulator_framework)
simulator_framework_binary = os.path.join(simulator_framework, 'Flutter')
# Create the arm64/x64 simulator fat framework.
subprocess.check_call([
'lipo',
simulator_x64_dylib,
simulator_arm64_dylib,
'-create',
'-output',
simulator_framework_binary
])
process_framework(args, simulator_framework, simulator_framework_binary)
simulator_framework = simulator_framework
else:
simulator_framework = simulator_x64_framework
# Create XCFramework from the arm-only fat framework and the arm64/x64 simulator frameworks, or just the
# x64 simulator framework if only that one exists.
xcframeworks = [simulator_framework, framework]
create_xcframework(location=args.dst, name='Flutter', frameworks=xcframeworks)
# Add the x64 simulator into the fat framework
subprocess.check_call([
'lipo',
arm64_dylib,
simulator_x64_dylib,
'-create',
'-output',
framework_binary
])
process_framework(args, framework, framework_binary)
def process_framework(args, framework, framework_binary):
if args.strip_bitcode:
subprocess.check_call(['xcrun', 'bitcode_strip', '-r', framework_binary, '-o', framework_binary])
if args.dsym:
dsym_out = os.path.splitext(framework)[0] + '.dSYM'
subprocess.check_call([DSYMUTIL, '-o', dsym_out, framework_binary])
if args.strip:
# copy unstripped
unstripped_out = os.path.join(args.dst, 'Flutter.unstripped')
shutil.copyfile(framework_binary, unstripped_out)
subprocess.check_call(["strip", "-x", "-S", framework_binary])
if __name__ == '__main__':
sys.exit(main()) | sky/tools/create_ios_framework.py |
import argparse
import subprocess
import shutil
import sys
import os
from create_xcframework import create_xcframework
DSYMUTIL = os.path.join(os.path.dirname(__file__), '..', '..', '..',
'buildtools', 'mac-x64', 'clang', 'bin', 'dsymutil')
def main():
parser = argparse.ArgumentParser(description='Creates Flutter.framework and Flutter.xcframework')
parser.add_argument('--dst', type=str, required=True)
parser.add_argument('--arm64-out-dir', type=str, required=True)
parser.add_argument('--armv7-out-dir', type=str, required=False)
# TODO(gw280): Remove --simulator-out-dir alias when all recipes are updated
parser.add_argument('--simulator-x64-out-dir', '--simulator-out-dir', type=str, required=True)
parser.add_argument('--simulator-arm64-out-dir', type=str, required=False)
parser.add_argument('--strip', action="store_true", default=False)
parser.add_argument('--dsym', action="store_true", default=False)
parser.add_argument('--strip-bitcode', dest='strip_bitcode', action="store_true", default=False)
args = parser.parse_args()
framework = os.path.join(args.dst, 'Flutter.framework')
simulator_framework = os.path.join(args.dst, 'sim', 'Flutter.framework')
arm64_framework = os.path.join(args.arm64_out_dir, 'Flutter.framework')
simulator_x64_framework = os.path.join(args.simulator_x64_out_dir, 'Flutter.framework')
if args.simulator_arm64_out_dir is not None:
simulator_arm64_framework = os.path.join(args.simulator_arm64_out_dir, 'Flutter.framework')
simulator_arm64_dylib = os.path.join(simulator_arm64_framework, 'Flutter')
arm64_dylib = os.path.join(arm64_framework, 'Flutter')
simulator_x64_dylib = os.path.join(simulator_x64_framework, 'Flutter')
if not os.path.isdir(arm64_framework):
print('Cannot find iOS arm64 Framework at %s' % arm64_framework)
return 1
if not os.path.isdir(simulator_x64_framework):
print('Cannot find iOS x64 simulator Framework at %s' % simulator_framework)
return 1
if not os.path.isfile(arm64_dylib):
print('Cannot find iOS arm64 dylib at %s' % arm64_dylib)
return 1
if not os.path.isfile(simulator_x64_dylib):
print('Cannot find iOS simulator dylib at %s' % simulator_dylib)
return 1
if not os.path.isfile(DSYMUTIL):
print('Cannot find dsymutil at %s' % DSYMUTIL)
return 1
shutil.rmtree(framework, True)
shutil.copytree(arm64_framework, framework)
framework_binary = os.path.join(framework, 'Flutter')
process_framework(args, framework, framework_binary)
if args.simulator_arm64_out_dir is not None:
shutil.rmtree(simulator_framework, True)
shutil.copytree(simulator_arm64_framework, simulator_framework)
simulator_framework_binary = os.path.join(simulator_framework, 'Flutter')
# Create the arm64/x64 simulator fat framework.
subprocess.check_call([
'lipo',
simulator_x64_dylib,
simulator_arm64_dylib,
'-create',
'-output',
simulator_framework_binary
])
process_framework(args, simulator_framework, simulator_framework_binary)
simulator_framework = simulator_framework
else:
simulator_framework = simulator_x64_framework
# Create XCFramework from the arm-only fat framework and the arm64/x64 simulator frameworks, or just the
# x64 simulator framework if only that one exists.
xcframeworks = [simulator_framework, framework]
create_xcframework(location=args.dst, name='Flutter', frameworks=xcframeworks)
# Add the x64 simulator into the fat framework
subprocess.check_call([
'lipo',
arm64_dylib,
simulator_x64_dylib,
'-create',
'-output',
framework_binary
])
process_framework(args, framework, framework_binary)
def process_framework(args, framework, framework_binary):
if args.strip_bitcode:
subprocess.check_call(['xcrun', 'bitcode_strip', '-r', framework_binary, '-o', framework_binary])
if args.dsym:
dsym_out = os.path.splitext(framework)[0] + '.dSYM'
subprocess.check_call([DSYMUTIL, '-o', dsym_out, framework_binary])
if args.strip:
# copy unstripped
unstripped_out = os.path.join(args.dst, 'Flutter.unstripped')
shutil.copyfile(framework_binary, unstripped_out)
subprocess.check_call(["strip", "-x", "-S", framework_binary])
if __name__ == '__main__':
sys.exit(main()) | 0.250546 | 0.097133 |
from whoosh.index import create_in, open_dir
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import *
import os
import shutil
import glob
import re
from bs4 import BeautifulSoup
htmlDocDir = 'SeriesMgrHtmlDoc'
indexDir = 'SeriesMgrHelpIndex'
def BuildHelpIndex():
if os.path.exists( indexDir ):
shutil.rmtree( indexDir, ignore_errors = True )
os.mkdir( indexDir )
stemmingAnalyzer = StemmingAnalyzer()
schema = Schema( path=ID(stored=True, unique=True), section=TEXT(stored=True), title=TEXT(stored=True, analyzer=stemmingAnalyzer),
level=NUMERIC(stored=True), content=TEXT(stored=True, analyzer=stemmingAnalyzer) )
ix = create_in( indexDir, schema )
writer = ix.writer()
titleTags = set(['h1', 'h2', 'h3', 'h4', 'h5'])
newLines = re.compile( '\n+' )
nonNumeric = re.compile( r'[^\d]' )
def addDocument( fname, section, lastTitle, textCur ):
# print( 'addDocument: lastTitle={}'.format(lastTitle) )
if lastTitle and textCur:
section = '|'.join( section ) if section else lastTitle.get_text()
# print( 'Indexing: {}: {}'.format(os.path.basename(fname), section) )
content = newLines.sub( '\n', '\n'.join(textCur) )
writer.add_document( path = os.path.basename(fname) + '#' + lastTitle['id'],
title = lastTitle.get_text(),
section = section,
level = int(nonNumeric.sub('', lastTitle.name)),
content = content )
# Extract content sections from the html pages.
for f in glob.iglob( os.path.join(htmlDocDir, '*.html') ):
doc = BeautifulSoup( open(f).read(), 'html.parser' )
div = doc.find('div', class_='content')
if not div:
continue
lastTitle = None
textCur = []
section = []
for child in div.contents:
try:
tag = child.name
except Exception:
tag = None
if tag not in titleTags:
try:
textCur.append( child.get_text() )
except Exception:
pass
continue
addDocument( f, section, lastTitle, textCur )
iSection = int(int(nonNumeric.sub('', tag))) - 1
section = section[:iSection]
section.append( child.get_text() )
lastTitle = child
textCur = []
addDocument( f, section, lastTitle, textCur )
writer.commit()
#---------------------------------------------------------------------------------------------
if __name__ == '__main__':
BuildHelpIndex()
from whoosh.qparser import QueryParser
ix = open_dir( indexDir, readonly=True )
with ix.searcher() as searcher, open('search.html', 'w') as f:
query = QueryParser('content', ix.schema).parse('fastest lap')
results = searcher.search(query, limit=20)
f.write( '<table><tr><th></th><th align="left">Section</th><th align="left">Match</th></tr>\n' )
for i, hit in enumerate(results):
f.write( '<tr><td align="left">%d.</td><td><a href="%s">%s</a></td><td>%s</td></tr>\n' % ((i+1), hit['path'], hit['section'], hit.highlights('content')) )
f.write( '</table>\n' )
ix.close() | SeriesMgr/HelpIndex.py | from whoosh.index import create_in, open_dir
from whoosh.analysis import StemmingAnalyzer
from whoosh.fields import *
import os
import shutil
import glob
import re
from bs4 import BeautifulSoup
htmlDocDir = 'SeriesMgrHtmlDoc'
indexDir = 'SeriesMgrHelpIndex'
def BuildHelpIndex():
if os.path.exists( indexDir ):
shutil.rmtree( indexDir, ignore_errors = True )
os.mkdir( indexDir )
stemmingAnalyzer = StemmingAnalyzer()
schema = Schema( path=ID(stored=True, unique=True), section=TEXT(stored=True), title=TEXT(stored=True, analyzer=stemmingAnalyzer),
level=NUMERIC(stored=True), content=TEXT(stored=True, analyzer=stemmingAnalyzer) )
ix = create_in( indexDir, schema )
writer = ix.writer()
titleTags = set(['h1', 'h2', 'h3', 'h4', 'h5'])
newLines = re.compile( '\n+' )
nonNumeric = re.compile( r'[^\d]' )
def addDocument( fname, section, lastTitle, textCur ):
# print( 'addDocument: lastTitle={}'.format(lastTitle) )
if lastTitle and textCur:
section = '|'.join( section ) if section else lastTitle.get_text()
# print( 'Indexing: {}: {}'.format(os.path.basename(fname), section) )
content = newLines.sub( '\n', '\n'.join(textCur) )
writer.add_document( path = os.path.basename(fname) + '#' + lastTitle['id'],
title = lastTitle.get_text(),
section = section,
level = int(nonNumeric.sub('', lastTitle.name)),
content = content )
# Extract content sections from the html pages.
for f in glob.iglob( os.path.join(htmlDocDir, '*.html') ):
doc = BeautifulSoup( open(f).read(), 'html.parser' )
div = doc.find('div', class_='content')
if not div:
continue
lastTitle = None
textCur = []
section = []
for child in div.contents:
try:
tag = child.name
except Exception:
tag = None
if tag not in titleTags:
try:
textCur.append( child.get_text() )
except Exception:
pass
continue
addDocument( f, section, lastTitle, textCur )
iSection = int(int(nonNumeric.sub('', tag))) - 1
section = section[:iSection]
section.append( child.get_text() )
lastTitle = child
textCur = []
addDocument( f, section, lastTitle, textCur )
writer.commit()
#---------------------------------------------------------------------------------------------
if __name__ == '__main__':
BuildHelpIndex()
from whoosh.qparser import QueryParser
ix = open_dir( indexDir, readonly=True )
with ix.searcher() as searcher, open('search.html', 'w') as f:
query = QueryParser('content', ix.schema).parse('fastest lap')
results = searcher.search(query, limit=20)
f.write( '<table><tr><th></th><th align="left">Section</th><th align="left">Match</th></tr>\n' )
for i, hit in enumerate(results):
f.write( '<tr><td align="left">%d.</td><td><a href="%s">%s</a></td><td>%s</td></tr>\n' % ((i+1), hit['path'], hit['section'], hit.highlights('content')) )
f.write( '</table>\n' )
ix.close() | 0.046779 | 0.063832 |
import io
import os
import struct
import sys
import sysconfig
from zipfile import ZipFile
import pkg_resources
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_shebang(gui=False):
if not sysconfig.is_python_build():
executable = sys.executable
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if gui and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
# For executable paths with spaces (not uncommon on Windows)
if ' ' in executable:
executable = '"%s"' % executable
executable = executable.encode('utf-8')
return b'#!' + executable + b'\n'
def get_launcher(gui):
"""Use the exe files generated via
https://bitbucket.org/vinay.sajip/simple_launcher"""
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = 'launchers/%s%s.exe' % ('w' if gui else 't', bits)
data = pkg_resources.ResourceManager().resource_stream('scriptlib', name)
return data.read()
def get_global_script_bytes(shebang, script_bytes, gui=False):
linesep = os.linesep.encode('utf-8')
if os.name != 'nt':
return shebang + linesep + script_bytes
else:
launcher = get_launcher(gui)
stream = io.BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
return launcher + shebang + linesep + zip_data
def write_script(target_filename, shebang, script_bytes, gui=False):
script_bytes = get_global_script_bytes(shebang, script_bytes, gui)
outname = os.path.join(target_filename)
if os.name == 'nt':
outname_noext, ext = os.path.splitext(outname)
if ext.startswith('.py'):
outname = outname_noext
outname = '%s.exe' % outname
try:
with open(outname, 'wb') as f:
f.write(script_bytes)
except Exception:
# cf https://mail.python.org/pipermail/distutils-sig/2013-August/022263.html
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
with open(outname, 'wb') as f:
f.write(script_bytes)
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
with open(outname, 'wb') as f:
f.write(script_bytes)
if os.name == 'posix':
mode = (os.stat(outname).st_mode | 0o555) & 0o7777
os.chmod(outname, mode)
return outname | scriptlib/__init__.py | import io
import os
import struct
import sys
import sysconfig
from zipfile import ZipFile
import pkg_resources
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_shebang(gui=False):
if not sysconfig.is_python_build():
executable = sys.executable
elif in_venv():
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if gui and os.name == 'nt':
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
# For executable paths with spaces (not uncommon on Windows)
if ' ' in executable:
executable = '"%s"' % executable
executable = executable.encode('utf-8')
return b'#!' + executable + b'\n'
def get_launcher(gui):
"""Use the exe files generated via
https://bitbucket.org/vinay.sajip/simple_launcher"""
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = 'launchers/%s%s.exe' % ('w' if gui else 't', bits)
data = pkg_resources.ResourceManager().resource_stream('scriptlib', name)
return data.read()
def get_global_script_bytes(shebang, script_bytes, gui=False):
linesep = os.linesep.encode('utf-8')
if os.name != 'nt':
return shebang + linesep + script_bytes
else:
launcher = get_launcher(gui)
stream = io.BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
return launcher + shebang + linesep + zip_data
def write_script(target_filename, shebang, script_bytes, gui=False):
script_bytes = get_global_script_bytes(shebang, script_bytes, gui)
outname = os.path.join(target_filename)
if os.name == 'nt':
outname_noext, ext = os.path.splitext(outname)
if ext.startswith('.py'):
outname = outname_noext
outname = '%s.exe' % outname
try:
with open(outname, 'wb') as f:
f.write(script_bytes)
except Exception:
# cf https://mail.python.org/pipermail/distutils-sig/2013-August/022263.html
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
with open(outname, 'wb') as f:
f.write(script_bytes)
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
with open(outname, 'wb') as f:
f.write(script_bytes)
if os.name == 'posix':
mode = (os.stat(outname).st_mode | 0o555) & 0o7777
os.chmod(outname, mode)
return outname | 0.173813 | 0.050658 |
import os
import re
import json
import unittest
import responses
from oneandone.client import OneAndOneService
from oneandone.client import SharedStorage, AttachServer
class TestSharedStorage(unittest.TestCase):
def setUp(self):
self.client = OneAndOneService('<USER-API-KEY>')
# 'GET' Methods
@responses.activate
def test_list_shared_storages(self):
with open('mock-api/list-storages.json') as f:
data = json.load(f)
test_id = data[0]['id']
responses.add(responses.GET, 'https://cloudpanel-api.1and1.com/v1/shared_storages',
body=json.dumps(data), status=200,
content_type="application/json")
r = self.client.list_shared_storages()
self.assertEqual(r[0]['id'], test_id)
@responses.activate
def test_get_shared_storage(self):
with open('mock-api/get-storage.json') as f:
data = json.load(f)
shared_storage_id = data['id']
responses.add(responses.GET, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
body=json.dumps(data), status=200,
content_type="application/json")
r = self.client.get_shared_storage(shared_storage_id=shared_storage_id)
self.assertEqual(r['id'], shared_storage_id)
@responses.activate
def test_servers_attached(self):
with open('mock-api/storage-servers.json') as f:
data = json.load(f)
shared_storage_id = data[0]['id']
server_name = data[0]['name']
responses.add(responses.GET, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers' % shared_storage_id,
body=json.dumps(data), status=200,
content_type="application/json")
r = self.client.list_servers_attached_storage(shared_storage_id=shared_storage_id)
self.assertEqual(r[0]['name'], server_name)
@responses.activate
def test_get_server(self):
with open('mock-api/get-server-storage.json') as f:
data = json.load(f)
shared_storage_id = 'SHARED_STORAGE_ID'
server_id = data['id']
responses.add(responses.GET, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers/%s' % (shared_storage_id, server_id),
body=json.dumps(data), status=200,
content_type="application/json")
r = self.client.get_shared_storage_server(shared_storage_id=shared_storage_id, server_id=server_id)
self.assertEqual(r['id'], server_id)
@responses.activate
def test_get_credentials(self):
with open('mock-api/list-credentials.json') as f:
data = json.load(f)
kerberos_content_file = data['kerberos_content_file']
responses.add(responses.GET, 'https://cloudpanel-api.1and1.com/v1/shared_storages/access',
body=json.dumps(data), status=200,
content_type="application/json")
r = self.client.get_credentials()
self.assertEqual(r['kerberos_content_file'], kerberos_content_file)
# 'POST' Methods
@responses.activate
def test_create_shared_storage(self):
with open('mock-api/create-storage.json') as f:
data = json.load(f)
storage1 = SharedStorage(name=data['name'], description=data['description'], size=data['size'])
responses.add(responses.POST, 'https://cloudpanel-api.1and1.com/v1/shared_storages',
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.create_shared_storage(shared_storage=storage1)
self.assertEqual(r['name'], storage1.name)
self.assertEqual(r['description'], storage1.description)
self.assertEqual(r['size'], storage1.size)
@responses.activate
def test_attach_servers(self):
with open('mock-api/attach-server-storage.json') as f:
data = json.load(f)
shared_storage_id = data['id']
server1 = AttachServer(server_id=data['servers'][0]['id'], rights=data['servers'][0]['rights'])
server2 = AttachServer(server_id=data['servers'][1]['id'], rights=data['servers'][1]['rights'])
servers = [server1, server2]
responses.add(responses.POST, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers' % shared_storage_id,
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.attach_server_shared_storage(shared_storage_id=shared_storage_id, server_ids=servers)
self.assertEqual(r['servers'][0]['id'], server1.server_id)
self.assertEqual(r['servers'][1]['id'], server2.server_id)
# 'PUT' Methods
@responses.activate
def test_modify_shared_storage(self):
with open('mock-api/modify-storage.json') as f:
data = json.load(f)
shared_storage_id = data['id']
name = data['name']
description = data['description']
size = data['size']
responses.add(responses.PUT, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.modify_shared_storage(shared_storage_id=shared_storage_id, name=name, description=description, size=size)
self.assertEqual(r['name'], name)
self.assertEqual(r['description'], description)
self.assertEqual(r['size'], size)
@responses.activate
def test_change_password(self):
with open('mock-api/change-password.json') as f:
data = json.load(f)
new_password = '<PASSWORD>'
responses.add(responses.PUT, 'https://cloudpanel-api.1and1.com/v1/shared_storages/access',
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.change_password(password=<PASSWORD>)
self.assertEqual(r['state'], 'CONFIGURING')
# 'DELETE' Methods
@responses.activate
def test_delete_shared_storage(self):
with open('mock-api/delete-storage.json') as f:
data = json.load(f)
shared_storage_id = data['id']
responses.add(responses.DELETE, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.delete_shared_storage(shared_storage_id=shared_storage_id)
self.assertEqual(r['state'], 'REMOVING')
@responses.activate
def test_detach_server_ss(self):
with open('mock-api/detach-server-storage.json') as f:
data = json.load(f)
shared_storage_id = data['id']
server_id = 'SERVER_ID'
responses.add(responses.DELETE, 'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers/%s' % (shared_storage_id, server_id),
body=json.dumps(data), status=202,
content_type="application/json")
r = self.client.detach_server_shared_storage(shared_storage_id=shared_storage_id, server_id=server_id)
self.assertNotEqual(r['servers'][0]['id'], server_id)
if __name__ == '__main__':
unittest.main() | tests/test_mock_shared_storages.py | import os
import re
import json
import unittest
import responses
from oneandone.client import OneAndOneService
from oneandone.client import SharedStorage, AttachServer
class TestSharedStorage(unittest.TestCase):
    """Mock-backed tests for the 1&1 shared-storage endpoints.

    Every test loads a canned payload from ``mock-api/`` and registers it
    with the ``responses`` library, so no real HTTP traffic occurs.
    """

    def setUp(self):
        # The key is a dummy: all requests are intercepted by `responses`.
        self.client = OneAndOneService('<USER-API-KEY>')

    # 'GET' Methods
    @responses.activate
    def test_list_shared_storages(self):
        """Listing returns the storages from the mocked payload."""
        with open('mock-api/list-storages.json') as f:
            data = json.load(f)
        test_id = data[0]['id']
        responses.add(responses.GET,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages',
                      body=json.dumps(data), status=200,
                      content_type="application/json")
        r = self.client.list_shared_storages()
        self.assertEqual(r[0]['id'], test_id)

    @responses.activate
    def test_get_shared_storage(self):
        """Fetching one storage echoes its id."""
        with open('mock-api/get-storage.json') as f:
            data = json.load(f)
        shared_storage_id = data['id']
        responses.add(responses.GET,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
                      body=json.dumps(data), status=200,
                      content_type="application/json")
        r = self.client.get_shared_storage(shared_storage_id=shared_storage_id)
        self.assertEqual(r['id'], shared_storage_id)

    @responses.activate
    def test_servers_attached(self):
        """Listing the servers attached to a storage."""
        with open('mock-api/storage-servers.json') as f:
            data = json.load(f)
        shared_storage_id = data[0]['id']
        server_name = data[0]['name']
        responses.add(responses.GET,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers' % shared_storage_id,
                      body=json.dumps(data), status=200,
                      content_type="application/json")
        r = self.client.list_servers_attached_storage(shared_storage_id=shared_storage_id)
        self.assertEqual(r[0]['name'], server_name)

    @responses.activate
    def test_get_server(self):
        """Fetching a single attached server."""
        with open('mock-api/get-server-storage.json') as f:
            data = json.load(f)
        shared_storage_id = 'SHARED_STORAGE_ID'
        server_id = data['id']
        responses.add(responses.GET,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers/%s' % (shared_storage_id, server_id),
                      body=json.dumps(data), status=200,
                      content_type="application/json")
        r = self.client.get_shared_storage_server(shared_storage_id=shared_storage_id, server_id=server_id)
        self.assertEqual(r['id'], server_id)

    @responses.activate
    def test_get_credentials(self):
        """Fetching the NFS/Kerberos access credentials."""
        with open('mock-api/list-credentials.json') as f:
            data = json.load(f)
        kerberos_content_file = data['kerberos_content_file']
        responses.add(responses.GET,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/access',
                      body=json.dumps(data), status=200,
                      content_type="application/json")
        r = self.client.get_credentials()
        self.assertEqual(r['kerberos_content_file'], kerberos_content_file)

    # 'POST' Methods
    @responses.activate
    def test_create_shared_storage(self):
        """Creating a storage echoes name, description and size."""
        with open('mock-api/create-storage.json') as f:
            data = json.load(f)
        storage1 = SharedStorage(name=data['name'], description=data['description'], size=data['size'])
        responses.add(responses.POST,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages',
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        r = self.client.create_shared_storage(shared_storage=storage1)
        self.assertEqual(r['name'], storage1.name)
        self.assertEqual(r['description'], storage1.description)
        self.assertEqual(r['size'], storage1.size)

    @responses.activate
    def test_attach_servers(self):
        """Attaching servers returns them in the storage's server list."""
        with open('mock-api/attach-server-storage.json') as f:
            data = json.load(f)
        shared_storage_id = data['id']
        server1 = AttachServer(server_id=data['servers'][0]['id'], rights=data['servers'][0]['rights'])
        server2 = AttachServer(server_id=data['servers'][1]['id'], rights=data['servers'][1]['rights'])
        servers = [server1, server2]
        responses.add(responses.POST,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers' % shared_storage_id,
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        r = self.client.attach_server_shared_storage(shared_storage_id=shared_storage_id, server_ids=servers)
        self.assertEqual(r['servers'][0]['id'], server1.server_id)
        self.assertEqual(r['servers'][1]['id'], server2.server_id)

    # 'PUT' Methods
    @responses.activate
    def test_modify_shared_storage(self):
        """Modifying a storage echoes the updated fields."""
        with open('mock-api/modify-storage.json') as f:
            data = json.load(f)
        shared_storage_id = data['id']
        name = data['name']
        description = data['description']
        size = data['size']
        responses.add(responses.PUT,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        r = self.client.modify_shared_storage(shared_storage_id=shared_storage_id, name=name,
                                              description=description, size=size)
        self.assertEqual(r['name'], name)
        self.assertEqual(r['description'], description)
        self.assertEqual(r['size'], size)

    @responses.activate
    def test_change_password(self):
        """Changing the password flips the resource into CONFIGURING."""
        with open('mock-api/change-password.json') as f:
            data = json.load(f)
        new_password = '<PASSWORD>'
        responses.add(responses.PUT,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/access',
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        # Fix: previously called change_password(password=<PASSWORD>) -- a bare
        # placeholder and a SyntaxError. Pass the prepared local value.
        r = self.client.change_password(password=new_password)
        self.assertEqual(r['state'], 'CONFIGURING')

    # 'DELETE' Methods
    @responses.activate
    def test_delete_shared_storage(self):
        """Deleting a storage reports the REMOVING state."""
        with open('mock-api/delete-storage.json') as f:
            data = json.load(f)
        shared_storage_id = data['id']
        responses.add(responses.DELETE,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s' % shared_storage_id,
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        r = self.client.delete_shared_storage(shared_storage_id=shared_storage_id)
        self.assertEqual(r['state'], 'REMOVING')

    @responses.activate
    def test_detach_server_ss(self):
        """Detaching a server removes it from the storage's server list."""
        with open('mock-api/detach-server-storage.json') as f:
            data = json.load(f)
        shared_storage_id = data['id']
        server_id = 'SERVER_ID'
        responses.add(responses.DELETE,
                      'https://cloudpanel-api.1and1.com/v1/shared_storages/%s/servers/%s' % (shared_storage_id, server_id),
                      body=json.dumps(data), status=202,
                      content_type="application/json")
        r = self.client.detach_server_shared_storage(shared_storage_id=shared_storage_id, server_id=server_id)
        self.assertNotEqual(r['servers'][0]['id'], server_id)
if __name__ == '__main__':
unittest.main() | 0.196672 | 0.125574 |
from datetime import datetime
from tqdm import tqdm
from .Parser import Parser
from ..utils.utils import start_date
class NextstrainParser(Parser):
    """
    Parser for Nextstrain metadata.tsv file.

    Streams the tab-separated metadata, filters records by country, date and
    sequence quality, and feeds batched sequence/substitution rows to the
    loader methods inherited from ``Parser``.
    """

    # Nextstrain marks missing fields with '?'.
    missing_info_mark = '?'

    def parse(self, selected_countries):
        """Parse the open metadata stream ``self.f``.

        :param selected_countries: lower-case country names to keep; an empty
            collection means "keep every country".
        """
        # Skip the TSV header row.
        self.f.readline()
        for line in tqdm(self.f, desc='\t\t'):
            s = line.split("\t")
            # Column indices by observed usage: 5=date, 6=continent,
            # 7=country, 8=region, 14=length, 19=lineage, 30=N count,
            # 48=AA substitutions -- TODO confirm against the actual
            # Nextstrain metadata schema.
            country_name = s[7].strip()
            if len(selected_countries) != 0:
                if country_name.lower() not in selected_countries:
                    continue
            continent_name = s[6].strip()
            if s[8] == self.missing_info_mark:
                region_name = None
            else:
                region_name = s[8].strip()
            try:
                # Days since the project's epoch (start_date).
                date = (datetime.strptime(s[5], "%Y-%m-%d") - start_date).days
            except:
                # Incomplete dates such as "2020" or "2020-03": skip record.
                continue
            if self.filter_by_data_flag and self.is_out_of_range(date):
                continue
            lineage_name = s[19] if s[19] != self.missing_info_mark else 'None'
            length = int(s[14])
            try:
                # Fraction of ambiguous (N) bases in the genome.
                n = float(s[30]) / length
            except:
                n = 0.
            # Keep only near-complete genomes (~29-30 kb) with < 5% N bases.
            if (29000 < length < 30000) and (n < 0.05):
                continent_id, country_id, region_id = self.get_location_ids(continent_name, country_name, region_name)
                lineage_id = self.get_lineage_id(lineage_name)
                sequence_id = self.get_sequence_id()
                self.batch_seqs.append((sequence_id, date, lineage_id, continent_id, country_id, region_id))
                # Substitutions come as "PROTEIN:MUTATION" comma-joined.
                for aa in s[48].split(","):
                    if aa != '':
                        protein_name, mutation_name = aa.split(":")
                        protein_id = self.get_protein_id(protein_name)
                        self.batch_subs.append((sequence_id, protein_id, mutation_name))
                # Flush periodically to bound memory usage.
                if len(self.batch_subs) > 50000:
                    self.batch_to_subs()
                if len(self.batch_seqs) > 50000:
                    self.batch_to_seqs()
            del line
        # Flush the remaining batches and materialise the lookup tables.
        self.batch_to_subs()
        self.batch_to_seqs()
        self.dict_to_tables()

from datetime import datetime
from tqdm import tqdm
from .Parser import Parser
from ..utils.utils import start_date
class NextstrainParser(Parser):
    """
    Parser for Nextstrain metadata.tsv file.

    Streams the tab-separated metadata, filters records by country, date and
    sequence quality, and feeds batched sequence/substitution rows to the
    loader methods inherited from ``Parser``.
    """

    # Nextstrain marks missing fields with '?'.
    missing_info_mark = '?'

    def parse(self, selected_countries):
        """Parse the open metadata stream ``self.f``.

        :param selected_countries: lower-case country names to keep; an empty
            collection means "keep every country".
        """
        self.f.readline()  # skip the TSV header row
        for line in tqdm(self.f, desc='\t\t'):
            s = line.split("\t")
            # Column indices by observed usage: 5=date, 6=continent,
            # 7=country, 8=region, 14=length, 19=lineage, 30=N count,
            # 48=AA substitutions -- TODO confirm against the schema.
            country_name = s[7].strip()
            if len(selected_countries) != 0 and country_name.lower() not in selected_countries:
                continue
            continent_name = s[6].strip()
            region_name = None if s[8] == self.missing_info_mark else s[8].strip()
            try:
                # Days since the project's epoch (start_date).
                date = (datetime.strptime(s[5], "%Y-%m-%d") - start_date).days
            except (ValueError, IndexError):
                # Fix: was a bare `except:` -- only skip records whose date
                # is incomplete/malformed, not arbitrary failures.
                continue
            if self.filter_by_data_flag and self.is_out_of_range(date):
                continue
            lineage_name = s[19] if s[19] != self.missing_info_mark else 'None'
            length = int(s[14])
            try:
                # Fraction of ambiguous (N) bases in the genome.
                n = float(s[30]) / length
            except (ValueError, IndexError, ZeroDivisionError):
                # Fix: was a bare `except:`; missing/empty count counts as 0.
                n = 0.
            # Keep only near-complete genomes (~29-30 kb) with < 5% N bases.
            if (29000 < length < 30000) and (n < 0.05):
                continent_id, country_id, region_id = self.get_location_ids(continent_name, country_name, region_name)
                lineage_id = self.get_lineage_id(lineage_name)
                sequence_id = self.get_sequence_id()
                self.batch_seqs.append((sequence_id, date, lineage_id, continent_id, country_id, region_id))
                # Substitutions come as "PROTEIN:MUTATION" comma-joined.
                for aa in s[48].split(","):
                    if aa != '':
                        protein_name, mutation_name = aa.split(":")
                        protein_id = self.get_protein_id(protein_name)
                        self.batch_subs.append((sequence_id, protein_id, mutation_name))
                # Flush periodically to bound memory usage.
                if len(self.batch_subs) > 50000:
                    self.batch_to_subs()
                if len(self.batch_seqs) > 50000:
                    self.batch_to_seqs()
        # Flush the remaining batches and materialise the lookup tables.
        self.batch_to_subs()
        self.batch_to_seqs()
        self.dict_to_tables()
from __future__ import absolute_import
from __future__ import print_function
import copy
import random
from itertools import chain, product
import numpy as np
from six.moves import range
class BotProcessor:
    """Produces Go moves for a bot: model-ranked candidates first, then
    randomly-ordered empty points as a fallback.

    The board is a 19x19 nested list of dicts with keys ``"type"``
    ("WHITE"/"BLACK"/"EMPTY") and ``"group"["liberties"]`` (a collection of
    liberty points) -- assumed from usage; confirm against the board builder.
    """

    def __init__(self, model, top=10):
        self.model = model  # trained model exposing predict()/evaluate()
        self.top = top      # number of highest-scoring predictions to yield

    def model_predict(self, bot_color, go_board):
        """Return the full candidate move list (model picks, then fallbacks)."""
        # Idiom fix: materialise the generator chain directly instead of a
        # manual append loop.
        return list(self.calc_model_chain(bot_color, go_board))

    def calc_model_chain(self, bot_color, go_board):
        """Chain the model's top moves with shuffled empty points."""
        return chain(
            self.calc_model(bot_color, go_board),
            self.gen_random(self.get_all_empty(go_board)),
        )

    def calc_model(self, bot_color, go_board):
        """Yield the top-N board coordinates predicted by the model."""
        # Move (0, 0) is a dummy: set_ready only uses it to build the label.
        X, label = self.set_ready(
            bot_color, (0, 0), go_board, 7)
        X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
        # Generate bot move.
        model_pred = self.model.predict(X)
        # NOTE(review): evaluating the model against its own prediction only
        # logs a sanity score; the value is not used.
        score = self.model.evaluate(X, model_pred, verbose=0)
        print(score)
        # Squeeze the prediction to a flat array of per-point scores.
        pred = np.squeeze(model_pred)
        # Indices of the top-N predictions, best first.
        top_n_pred_idx = pred.argsort()[-self.top:][::-1]
        print(len(top_n_pred_idx))
        for idx in top_n_pred_idx:
            prediction = int(idx)
            print(prediction)
            # Flat index -> (row, col) on the 19x19 board.
            yield divmod(prediction, 19)

    def model_evaluate(self, x, y):
        """Evaluate the model on (x, y), log and return the score."""
        score = self.model.evaluate(x, y, verbose=0)
        print(score)
        return score

    def set_ready(self, color, move, go_board, num_planes):
        """Encode the board into feature planes plus the flat move label.

        Planes 0-2: own stones with 1/2/3+ liberties; planes 3-5: enemy
        stones likewise; plane 6: points whose group has zero liberties.
        """
        row, col = move
        enemy_color = "BLACK" if color == "WHITE" else "WHITE"
        label = row * 19 + col
        move_array = np.zeros((num_planes, 19, 19))
        for r in range(0, 19):
            for c in range(0, 19):
                # Hoist the repeated deep lookups out of the comparisons.
                cell = go_board[r][c]
                liberties = len(cell["group"]["liberties"])
                if cell["type"] == color:
                    if liberties == 1:
                        move_array[0, r, c] = 1
                    elif liberties == 2:
                        move_array[1, r, c] = 1
                    elif liberties >= 3:
                        move_array[2, r, c] = 1
                if cell["type"] == enemy_color:
                    if liberties == 1:
                        move_array[3, r, c] = 1
                    elif liberties == 2:
                        move_array[4, r, c] = 1
                    elif liberties >= 3:
                        move_array[5, r, c] = 1
                if liberties == 0:
                    move_array[6, r, c] = 1
        return move_array, label

    def gen_random(self, point_list):
        """Yield the given points in random order without mutating the input."""
        shuffled = copy.copy(point_list)
        random.shuffle(shuffled)
        for candidate in shuffled:
            yield candidate

    def get_all_empty(self, board):
        """Return all (row, col) coordinates whose cell type is EMPTY."""
        return [point for point in product(list(range(19)), list(range(19)))
                if board[point[0]][point[1]]["type"] == "EMPTY"]
from __future__ import print_function
import copy
import random
from itertools import chain, product
import numpy as np
from six.moves import range
class BotProcessor:
    """Produces Go moves for a bot: model-ranked candidates first, then
    randomly-ordered empty points as a fallback.

    The board is a 19x19 nested list of dicts with keys ``"type"``
    ("WHITE"/"BLACK"/"EMPTY") and ``"group"["liberties"]`` -- assumed from
    usage; confirm against the board builder.
    """

    def __init__(self, model, top=10):
        self.model = model  # trained model exposing predict()/evaluate()
        self.top = top      # number of highest-scoring predictions to yield

    def model_predict(self, bot_color, go_board):
        """Return the full candidate move list (model picks, then fallbacks)."""
        moves = self.calc_model_chain(bot_color, go_board)
        move_array = []
        for move in moves:
            move_array.append(move)
        return move_array

    def calc_model_chain(self, bot_color, go_board):
        """Chain the model's top moves with shuffled empty points."""
        return chain(
            self.calc_model(bot_color, go_board),
            self.gen_random(self.get_all_empty(go_board)),
        )

    def calc_model(self, bot_color, go_board):
        """Yield the top-N board coordinates predicted by the model."""
        # 0, 0 is for generating the label.
        X, label = self.set_ready(
            bot_color, (0, 0), go_board, 7)
        X = X.reshape((1, X.shape[0], X.shape[1], X.shape[2]))
        # Generate bot move.
        model_pred = self.model.predict(X)
        # NOTE(review): evaluating the model against its own prediction only
        # logs a sanity score; the value is not used.
        score = self.model.evaluate(X, model_pred, verbose=0)
        print(score)
        # Remove single-dimensional entries from the shape of an array.
        # squeeze the prediction to 1d array so we can handpick and make predictions
        pred = np.squeeze(model_pred)
        # Argsort and get top 10 predictions
        top_n_pred_idx = pred.argsort()[-self.top:][::-1]
        print(len(top_n_pred_idx))
        for idx in top_n_pred_idx:
            prediction = int(idx)
            print(prediction)
            # Flat index -> (row, col) on the 19x19 board.
            pred_row = prediction // 19
            pred_col = prediction % 19
            pred_move = (pred_row, pred_col)
            yield pred_move

    def model_evaluate(self, x, y):
        """Evaluate the model on (x, y), log and return the score."""
        score = self.model.evaluate(x, y, verbose=0)
        print(score)
        return score

    def set_ready(self, color, move, go_board, num_planes):
        """Encode the board into feature planes plus the flat move label.

        Planes 0-2: own stones with 1/2/3+ liberties; planes 3-5: enemy
        stones likewise; plane 6: points whose group has zero liberties.
        """
        row, col = move
        if color == "WHITE":
            enemy_color = "BLACK"
        else:
            enemy_color = "WHITE"
        label = row * 19 + col
        move_array = np.zeros((num_planes, 19, 19))
        for row in range(0, 19):
            for col in range(0, 19):
                pos = (row, col)
                if go_board[pos[0]][pos[1]]["type"] == color:
                    if len(go_board[pos[0]][pos[1]]["group"]["liberties"]) == 1:
                        move_array[0, row, col] = 1
                    elif len(go_board[pos[0]][pos[1]]["group"]["liberties"]) == 2:
                        move_array[1, row, col] = 1
                    elif len(go_board[pos[0]][pos[1]]["group"]["liberties"]) >= 3:
                        move_array[2, row, col] = 1
                if go_board[pos[0]][pos[1]]["type"] == enemy_color:
                    if len(go_board[pos[0]][pos[1]]["group"]["liberties"]) == 1:
                        move_array[3, row, col] = 1
                    elif len(go_board[pos[0]][pos[1]]["group"]["liberties"]) == 2:
                        move_array[4, row, col] = 1
                    elif len(go_board[pos[0]][pos[1]]["group"]["liberties"]) >= 3:
                        move_array[5, row, col] = 1
                if len(go_board[pos[0]][pos[1]]["group"]["liberties"]) == 0:
                    move_array[6, row, col] = 1
        return move_array, label

    def gen_random(self, point_list):
        """Yield the given points in random order without mutating the input."""
        point_list = copy.copy(point_list)
        random.shuffle(point_list)
        for candidate in point_list:
            yield candidate

    def get_all_empty(self, board):
        """Return all (row, col) coordinates whose cell type is EMPTY."""
        empty_points = []
        for point in product(list(range(19)), list(range(19))):
            if board[point[0]][point[1]]["type"] == "EMPTY":
                empty_points.append(point)
        return empty_points
class Context(object):
    """Per-model worker context.

    Holds properties fixed at load time (model dir, gpu, batch size, server
    version) alongside per-batch request state (ids, processors, metrics).
    """

    def __init__(self, model_name, model_dir, manifest, batch_size, gpu, mms_version):
        self.model_name = model_name
        self.manifest = manifest
        self._system_properties = {
            "model_dir": model_dir,
            "gpu_id": gpu,
            "batch_size": batch_size,
            "server_name": "MMS",
            "server_version": mms_version,
        }
        self.request_ids = None
        self.request_processor = None  # routed through the property setter
        self._metrics = None

    @property
    def system_properties(self):
        return self._system_properties

    @property
    def request_processor(self):
        return self._request_processor

    @request_processor.setter
    def request_processor(self, request_processor):
        self._request_processor = request_processor

    @property
    def metrics(self):
        return self._metrics

    @metrics.setter
    def metrics(self, metrics):
        self._metrics = metrics

    def get_request_id(self, idx=0):
        return self.request_ids.get(idx)

    def get_request_header(self, idx, key):
        return self._request_processor[idx].get_request_property(key)

    def get_all_request_header(self, idx):
        return self._request_processor[idx].get_request_properties()

    def set_response_content_type(self, idx, value):
        self.set_response_header(idx, 'content-type', value)

    def get_response_content_type(self, idx):
        return self.get_response_headers(idx).get('content-type')

    def get_response_status(self, idx):
        processor = self._request_processor[idx]
        return processor.get_response_status_code(), processor.get_response_status_phrase()

    def set_response_status(self, code=200, phrase="", idx=0):
        """Set the status of one request.

        :param code: HTTP status code
        :param phrase: reason phrase
        :param idx: index of the request within the batch handed to handle()
        """
        # Guard clauses: no processors registered yet -> silently no-op.
        if self._request_processor is None:
            return
        processor = self._request_processor[idx]
        if processor is None:
            return
        processor.report_status(code, reason_phrase=phrase)

    def set_all_response_status(self, code=200, phrase=""):
        """Apply one status code/phrase to every request in the batch."""
        for processor in self._request_processor:
            processor.report_status(code, reason_phrase=phrase)

    def get_response_headers(self, idx):
        return self._request_processor[idx].get_response_headers()

    def set_response_header(self, idx, key, value):
        self._request_processor[idx].add_response_property(key, value)

    # TODO: Should we add "add_header()" interface, to have multiple values for a single header. EG: Accept headers.
    def __eq__(self, other):
        return isinstance(other, Context) and self.__dict__ == other.__dict__
class RequestProcessor(object):
    """
    Request processor.

    Accumulates the response (status code, reason phrase, headers) for one
    incoming request while exposing read access to that request's headers.
    """

    def __init__(self, request_header):
        self._status_code = 200          # default OK until reported otherwise
        self._reason_phrase = None
        self._response_header = {}
        self._request_header = request_header

    def get_request_property(self, key):
        """Return one incoming request header value, or None if absent."""
        return self._request_header.get(key)

    def report_status(self, code, reason_phrase=None):
        """Record the status code/phrase to send back for this request."""
        self._status_code = code
        self._reason_phrase = reason_phrase

    def get_response_status_code(self):
        return self._status_code

    def get_response_status_phrase(self):
        return self._reason_phrase

    def add_response_property(self, key, value):
        """Set (or overwrite) one outgoing response header."""
        self._response_header[key] = value

    def get_response_headers(self):
        return self._response_header

    def get_response_header(self, key):
        return self._response_header.get(key)

    def get_request_properties(self):
        return self._request_header


class Context(object):
"""
Context stores model relevant worker information
Some fixed during load times and some
"""
def __init__(self, model_name, model_dir, manifest, batch_size, gpu, mms_version):
self.model_name = model_name
self.manifest = manifest
self._system_properties = {
"model_dir": model_dir,
"gpu_id": gpu,
"batch_size": batch_size,
"server_name": "MMS",
"server_version": mms_version
}
self.request_ids = None
self.request_processor = None
self._metrics = None
@property
def system_properties(self):
return self._system_properties
@property
def request_processor(self):
return self._request_processor
@request_processor.setter
def request_processor(self, request_processor):
self._request_processor = request_processor
@property
def metrics(self):
return self._metrics
@metrics.setter
def metrics(self, metrics):
self._metrics = metrics
def get_request_id(self, idx=0):
return self.request_ids.get(idx)
def get_request_header(self, idx, key):
return self._request_processor[idx].get_request_property(key)
def get_all_request_header(self, idx):
return self._request_processor[idx].get_request_properties()
def set_response_content_type(self, idx, value):
self.set_response_header(idx, 'content-type', value)
def get_response_content_type(self, idx):
return self.get_response_headers(idx).get('content-type')
def get_response_status(self, idx):
return self._request_processor[idx].get_response_status_code(), \
self._request_processor[idx].get_response_status_phrase()
def set_response_status(self, code=200, phrase="", idx=0):
"""
Set the status code of individual requests
:param phrase:
:param idx: The index data in the list(data) that is sent to the handle() method
:param code:
:return:
"""
if self._request_processor is not None and self._request_processor[idx] is not None:
self._request_processor[idx].report_status(code,
reason_phrase=phrase)
def set_all_response_status(self, code=200, phrase=""):
"""
Set the status code of individual requests
:param phrase:
:param code:
:return:
"""
for idx, _ in enumerate(self._request_processor):
self._request_processor[idx].report_status(code, reason_phrase=phrase)
def get_response_headers(self, idx):
return self._request_processor[idx].get_response_headers()
def set_response_header(self, idx, key, value):
self._request_processor[idx].add_response_property(key, value)
# TODO: Should we add "add_header()" interface, to have multiple values for a single header. EG: Accept headers.
def __eq__(self, other):
return isinstance(other, Context) and self.__dict__ == other.__dict__
class RequestProcessor(object):
    """Tracks one request's headers plus the response being built for it."""

    def __init__(self, request_header):
        self._status_code = 200        # default OK until reported otherwise
        self._reason_phrase = None
        self._response_header = {}
        self._request_header = request_header

    def get_request_property(self, key):
        """Return one incoming request header value, or None if absent."""
        return self._request_header.get(key)

    def report_status(self, code, reason_phrase=None):
        """Record the status code/phrase to send back for this request."""
        self._status_code, self._reason_phrase = code, reason_phrase

    def get_response_status_code(self):
        return self._status_code

    def get_response_status_phrase(self):
        return self._reason_phrase

    def add_response_property(self, key, value):
        """Set (or overwrite) one outgoing response header."""
        self._response_header[key] = value

    def get_response_headers(self):
        return self._response_header

    def get_response_header(self, key):
        return self._response_header.get(key)

    def get_request_properties(self):
        return self._request_header
def transpose(A):
    """Return the transpose of the rectangular matrix A as a new list of lists."""
    return [[row[c] for row in A] for c in range(len(A[0]))]
def multiplyMatrixByScalar(A, scalar):
    """Return a new matrix equal to A with every entry multiplied by scalar.

    Fix: the previous version stored ``map(...)`` objects as rows, which
    under Python 3 are lazy iterators, not lists -- indexing or re-iterating
    a row would fail. Build real list rows instead.
    """
    return [[value * scalar for value in row] for row in A]
def addMatrices(A, B):
    """Return the element-wise sum of same-shaped matrices A and B.

    Fix: the inner loop previously ran over ``range(len(B))`` (the ROW
    count) instead of the column count, so for non-square matrices some
    columns were left as 0 or the loop indexed out of range.
    """
    return [[A[i][j] + B[i][j] for j in range(len(A[0]))] for i in range(len(A))]
def multiplyMatrices(A, B):
    """Standard matrix product of A (m x n) and B (n x p)."""
    rows, inner, cols = len(A), len(B), len(B[0])
    return [[sum(A[i][k] * B[k][j] for k in range(inner))
             for j in range(cols)]
            for i in range(rows)]
def determinante(A):
    """Determinant by the rule of Sarrus (wrapped diagonal products).

    NOTE: Sarrus' rule is only mathematically valid for 3x3 matrices; for
    other sizes this reproduces the original (incorrect) generalisation.
    """
    n = len(A)
    total = 0
    for start in range(n):
        down = 1  # product along the wrapped "down-right" diagonal
        up = 1    # product along the wrapped "down-left" diagonal
        for step in range(n):
            down *= A[step][(start + step) % n]
            up *= A[(start + step) % n][n - 1 - step]
        total += down - up
    return total
def cofator3x3(i, j, A):
    """Signed cofactor C_ij of the 3x3 matrix A.

    Fix: the previous version returned the UNSIGNED 2x2 minor. A cofactor
    carries the checkerboard sign (-1)**(i + j); without it the adjugate
    built by matrixInversa (and hence the inverse) is wrong at every
    position where i + j is odd.
    """
    # The two rows/columns that survive deleting row i and column j.
    r1, r2 = [r for r in range(3) if r != i]
    c1, c2 = [c for c in range(3) if c != j]
    minor = A[r1][c1] * A[r2][c2] - A[r1][c2] * A[r2][c1]
    return (-1) ** (i + j) * minor
def matrixInversa(A):
Cofatores = [[0] * len(A[0]) for i in range(len(A))]
for i in range(len(A)):
for j in range(len(A)):
Cofatores[i][j] = cofator3x3(i, j, A)
Adjunta = transpose(Cofatores)
mult = 1.0/determinante(A)
Inversa = [[0.0] * len(Adjunta[0]) for i in range(len(Adjunta))]
for i in range(len(Adjunta)):
for j in range(len(Adjunta[0])):
Inversa[i][j] = Adjunta[i][j] * mult
return Inversa | paradigma_funcional/roteiro8/matrix_operations.py | def transpose(A):
B = [[0] * len(A) for i in range(len(A[0]))]
for i in range(len(A[0])):
for j in range(len(A)):
B[i][j] = A[j][i]
return B
def multiplyMatrixByScalar(A, scalar):
    """Return A scaled by ``scalar``.

    NOTE(review): under Python 3 each row of the result is a lazy ``map``
    object, not a list -- indexing or re-iterating a row will fail; confirm
    the intended Python version.
    """
    B = [[] for i in range(len(A))]
    for i in range(len(A)):
        B[i] = map(lambda x: x * scalar, A[i])
    return B
def addMatrices(A, B):
    """Element-wise sum of same-shaped matrices A and B.

    NOTE(review): the inner loop runs over ``len(B)`` (the ROW count)
    instead of the column count, so non-square inputs are summed
    incorrectly or raise IndexError.
    """
    C = [[0] * len(A[0]) for i in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B)):
            C[i][j] = A[i][j] + B[i][j]
    return C
def multiplyMatrices(A, B):
    """Standard triple-loop matrix product of A (m x n) and B (n x p)."""
    C = [[0] * len(B[0]) for i in range(len(A))]
    for i in range(len(A)):
        for j in range(len(B[0])):
            C[i][j] = 0
            for k in range(len(B)):
                C[i][j] += A[i][k] * B[k][j]
    return C
def determinante(A):
    """Determinant by the rule of Sarrus (wrapped diagonal products).

    NOTE(review): Sarrus' rule is only valid for 3x3 matrices; for any
    other size this returns a mathematically wrong value (e.g. 0 for
    every 2x2 input).
    """
    positive = 0
    negative = 0
    for k in range(len(A)):
        # Product along the wrapped "down-right" diagonal starting at column k.
        j = k
        aux = 1
        for i in range(len(A)):
            aux = aux * A[i][j]
            j = (j+1) % len(A)
        positive += aux
        # Product along the wrapped "down-left" diagonal starting at row k.
        i = k
        aux = 1
        for j in range(len(A)-1, -1, -1):
            aux = aux * A[i][j]
            i = (i+1) % len(A)
        negative += aux
    return (positive - negative)
def cofator3x3(i, j, A):
    """2x2 minor of the 3x3 matrix A obtained by deleting row i and column j.

    NOTE(review): despite the name, this returns the UNSIGNED minor; a true
    cofactor carries the sign (-1)**(i + j), so the adjugate built from
    these values (see matrixInversa) is wrong where i + j is odd.
    """
    # Select the two surviving row indices (in ascending order).
    i1 = i2 = j1 = j2 = 0
    if (i == 0):
        i1 = i+1
        i2 = i+2
    elif (i == 1):
        i1 = i-1
        i2 = i+1
    else:
        i1 = i-2
        i2 = i-1
    # Select the two surviving column indices (in ascending order).
    if (j == 0):
        j1 = j+1
        j2 = j+2
    elif (j == 1):
        j1 = j-1
        j2 = j+1
    else:
        j1 = j-2
        j2 = j-1
    return A[i1][j1] * A[i2][j2] - A[i1][j2] * A[i2][j1]
def matrixInversa(A):
    """Inverse of a 3x3 matrix via the adjugate: A^-1 = adj(A) / det(A).

    Relies on the sibling helpers ``cofator3x3``, ``transpose`` and
    ``determinante``; raises ZeroDivisionError for a singular matrix.
    NOTE(review): correctness depends on cofator3x3 returning SIGNED
    cofactors, which it currently does not.
    """
    Cofatores = [[0] * len(A[0]) for i in range(len(A))]
    for i in range(len(A)):
        for j in range(len(A)):
            Cofatores[i][j] = cofator3x3(i, j, A)
    Adjunta = transpose(Cofatores)
    mult = 1.0/determinante(A)
    Inversa = [[0.0] * len(Adjunta[0]) for i in range(len(Adjunta))]
    for i in range(len(Adjunta)):
        for j in range(len(Adjunta[0])):
            Inversa[i][j] = Adjunta[i][j] * mult
    return Inversa
from __future__ import division
import json
import os
import random
import urllib
import demjson
import requests
import six
from . import helpers
from .webtrader import NotLoginError
from .webtrader import WebTrader
log = helpers.get_logger(__file__)
class YJBTrader(WebTrader):
    """Web-trading client for the YJB brokerage.

    Handles captcha-based login, order placement/cancellation and session
    keep-alive on top of the generic ``WebTrader`` machinery. Endpoint URLs
    come from ``config/yjb.json``.
    """

    config_path = os.path.dirname(__file__) + '/config/yjb.json'

    def __init__(self):
        super(YJBTrader, self).__init__()
        self.cookie = None
        self.account_config = None
        self.s = requests.session()
        # Presumably the broker endpoint requires SSLv3 -- see
        # helpers.Ssl3HttpAdapter; confirm before changing.
        self.s.mount('https://', helpers.Ssl3HttpAdapter())

    def login(self, throw=False):
        """Log in to the trading site.

        :param throw: raise ``NotLoginError`` on failure instead of
            returning False
        :return: True on success, False otherwise
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
        }
        self.s.headers.update(headers)
        # Warm up the session (cookies) before submitting credentials.
        self.s.get(self.config['login_page'])
        verify_code = self.handle_recognize_code()
        if not verify_code:
            return False
        login_status, result = self.post_login_data(verify_code)
        if login_status is False and throw:
            raise NotLoginError(result)
        return login_status

    def handle_recognize_code(self):
        """Fetch and OCR the login captcha.

        :return: the recognized code string, or False on failure
        """
        # Fetch the captcha image (random stamp defeats caching).
        verify_code_response = self.s.get(self.config['verify_code_api'], params=dict(randomStamp=random.random()))
        # Save the image to disk for the recognizer, then clean up.
        image_path = os.path.join(os.getcwd(), 'vcode')
        with open(image_path, 'wb') as f:
            f.write(verify_code_response.content)
        verify_code = helpers.recognize_verify_code(image_path, 'yjb')
        log.debug('verify code detect result: %s' % verify_code)
        os.remove(image_path)
        # Captchas are expected to be exactly 4 characters.
        ht_verify_code_length = 4
        if len(verify_code) != ht_verify_code_length:
            return False
        return verify_code

    def post_login_data(self, verify_code):
        """Submit the login form.

        :return: tuple ``(ok, error_text_or_None)``
        """
        # The stored password is URL-encoded; decode it first.
        if six.PY2:
            password = urllib.unquote(self.account_config['password'])
        else:
            password = urllib.parse.unquote(self.account_config['password'])
        login_params = dict(
            self.config['login'],
            mac_addr=helpers.get_mac(),
            account_content=self.account_config['account'],
            password=password,
            validateCode=verify_code
        )
        login_response = self.s.post(self.config['login_api'], params=login_params)
        log.debug('login response: %s' % login_response.text)
        # The landing page contains "上次登陆" ("last login") only when the
        # session is authenticated.
        if login_response.text.find('上次登陆') != -1:
            return True, None
        return False, login_response.text

    @property
    def token(self):
        # The session id doubles as the auth token.
        return self.cookie['JSESSIONID']

    @token.setter
    def token(self, token):
        # Adopt an existing session id and start the keep-alive heartbeat.
        self.cookie = dict(JSESSIONID=token)
        self.keepalive()

    def cancel_entrust(self, entrust_no, stock_code):
        """Cancel an order.

        :param entrust_no: order (entrust) number
        :param stock_code: stock code
        """
        cancel_params = dict(
            self.config['cancel_entrust'],
            entrust_no=entrust_no,
            stock_code=stock_code
        )
        return self.do(cancel_params)

    @property
    def current_deal(self):
        return self.get_current_deal()

    def get_current_deal(self):
        """Return today's executed trades."""
        # Response fields (translated): business_amount = executed quantity,
        # business_price = executed price, entrust_amount = ordered quantity,
        # entrust_bs = buy/sell direction, stock_account / fund_account,
        # position_str = position locator, business_status = execution
        # status, date, business_type, business_time, stock_code, stock_name.
        return self.do(self.config['current_deal'])

    # TODO: implement the various order (entrust) types for buy/sell.
    def buy(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
        """Buy a stock.

        :param stock_code: stock code
        :param price: bid price
        :param amount: number of shares to buy; if 0, derived from ``volume``
        :param volume: total money to spend; converted to shares as
            volume // price rounded down to a 100-share board lot; ignored
            when ``amount`` is given
        :param entrust_prop: order type (not implemented; defaults to a
            limit order)
        """
        params = dict(
            self.config['buy'],
            entrust_bs=1,  # 1 = buy, 2 = sell
            entrust_amount=amount if amount else volume // price // 100 * 100
        )
        return self.__trade(stock_code, price, entrust_prop=entrust_prop, other=params)

    def sell(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
        """Sell a stock.

        :param stock_code: stock code
        :param price: ask price
        :param amount: number of shares to sell; if 0, derived from ``volume``
        :param volume: total money target; converted as volume // price;
            ignored when ``amount`` is given
        :param entrust_prop: order type (not implemented; defaults to a
            limit order)
        """
        params = dict(
            self.config['sell'],
            entrust_bs=2,  # 1 = buy, 2 = sell
            entrust_amount=amount if amount else volume // price
        )
        return self.__trade(stock_code, price, entrust_prop=entrust_prop, other=params)

    def __trade(self, stock_code, price, entrust_prop, other):
        """Common buy/sell path: verify the session, then submit the order."""
        # If the heartbeat thread died we may have been logged out; a
        # balance query surfaces the error dict in that case.
        if not self.heart_thread.is_alive():
            check_data = self.get_balance()
            if type(check_data) == dict:
                return check_data
        need_info = self.__get_trade_need_info(stock_code)
        return self.do(dict(
            other,
            stock_account=need_info['stock_account'],  # SH/SZ stock account
            exchange_type=need_info['exchange_type'],  # 1 = Shanghai, 2 = Shenzhen
            entrust_prop=entrust_prop,  # order type
            stock_code='{:0>6}'.format(stock_code),  # zero-pad code to 6 chars
            elig_riskmatch_flag=1,  # user risk level
            entrust_price=price,
        ))

    def __get_trade_need_info(self, stock_code):
        """Return the exchange type and stock account for ``stock_code``."""
        # Determine the exchange from the stock-code prefix.
        sh_exchange_type = 1
        sz_exchange_type = 2
        exchange_type = sh_exchange_type if helpers.get_stock_type(stock_code) == 'sh' else sz_exchange_type
        # Look up (and cache per exchange) the bound stock account.
        if not hasattr(self, 'exchange_stock_account'):
            self.exchange_stock_account = dict()
        if exchange_type not in self.exchange_stock_account:
            stock_account_index = 0
            response_data = self.do(dict(
                self.config['account4stock'],
                exchange_type=exchange_type,
                stock_code=stock_code
            ))[stock_account_index]
            self.exchange_stock_account[exchange_type] = response_data['stock_account']
        return dict(
            exchange_type=exchange_type,
            stock_account=self.exchange_stock_account[exchange_type]
        )

    def create_basic_params(self):
        """Parameters added to every request."""
        # NOTE(review): '<PASSWORD>' looks like a redacted placeholder for
        # the real CSRF token value -- confirm before use.
        basic_params = dict(
            CSRF_Token='<PASSWORD>',
            timestamp=random.random(),
        )
        return basic_params

    def request(self, params):
        """Issue one API GET against the trade endpoint and return its body."""
        r = self.s.get(self.trade_prefix, params=params, cookies=self.cookie)
        return r.text

    def format_response_data(self, data):
        """Extract the payload rows from the wrapped ``returnJson`` field."""
        # The body is JSON whose 'returnJson' value is itself lenient JSON
        # (hence demjson instead of the stdlib parser).
        return_json = json.loads(data)['returnJson']
        raw_json_data = demjson.decode(return_json)
        fun_data = raw_json_data['Func%s' % raw_json_data['function_id']]
        header_index = 1  # the first row is a header -- drop it
        remove_header_data = fun_data[header_index:]
        return self.format_response_data_type(remove_header_data)

    def fix_error_data(self, data):
        """Unwrap an error response (a one-element list carrying 'error_no')."""
        error_index = 0
        return data[error_index] if type(data) == list and data[error_index].get('error_no') is not None else data

    def check_login_status(self, return_data):
        # error_no '-1' marks an expired/invalid session.
        if hasattr(return_data, 'get') and return_data.get('error_no') == '-1':
            raise NotLoginError

    def check_account_live(self, response):
        # On session death stop the keep-alive heartbeat.
        if hasattr(response, 'get') and response.get('error_no') == '-1':
            self.heart_active = False
import json
import os
import random
import urllib
import demjson
import requests
import six
from . import helpers
from .webtrader import NotLoginError
from .webtrader import WebTrader
log = helpers.get_logger(__file__)
class YJBTrader(WebTrader):
config_path = os.path.dirname(__file__) + '/config/yjb.json'
def __init__(self):
super(YJBTrader, self).__init__()
self.cookie = None
self.account_config = None
self.s = requests.session()
self.s.mount('https://', helpers.Ssl3HttpAdapter())
def login(self, throw=False):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
}
self.s.headers.update(headers)
self.s.get(self.config['login_page'])
verify_code = self.handle_recognize_code()
if not verify_code:
return False
login_status, result = self.post_login_data(verify_code)
if login_status is False and throw:
raise NotLoginError(result)
return login_status
def handle_recognize_code(self):
"""获取并识别返回的验证码
:return:失败返回 False 成功返回 验证码"""
# 获取验证码
verify_code_response = self.s.get(self.config['verify_code_api'], params=dict(randomStamp=random.random()))
# 保存验证码
image_path = os.path.join(os.getcwd(), 'vcode')
with open(image_path, 'wb') as f:
f.write(verify_code_response.content)
verify_code = helpers.recognize_verify_code(image_path, 'yjb')
log.debug('verify code detect result: %s' % verify_code)
os.remove(image_path)
ht_verify_code_length = 4
if len(verify_code) != ht_verify_code_length:
return False
return verify_code
def post_login_data(self, verify_code):
if six.PY2:
password = urllib.unquote(self.account_config['password'])
else:
password = urllib.parse.unquote(self.account_config['password'])
login_params = dict(
self.config['login'],
mac_addr=helpers.get_mac(),
account_content=self.account_config['account'],
password=password,
validateCode=verify_code
)
login_response = self.s.post(self.config['login_api'], params=login_params)
log.debug('login response: %s' % login_response.text)
if login_response.text.find('上次登陆') != -1:
return True, None
return False, login_response.text
@property
def token(self):
return self.cookie['JSESSIONID']
@token.setter
def token(self, token):
self.cookie = dict(JSESSIONID=token)
self.keepalive()
def cancel_entrust(self, entrust_no, stock_code):
"""撤单
:param entrust_no: 委托单号
:param stock_code: 股票代码"""
cancel_params = dict(
self.config['cancel_entrust'],
entrust_no=entrust_no,
stock_code=stock_code
)
return self.do(cancel_params)
@property
def current_deal(self):
return self.get_current_deal()
def get_current_deal(self):
"""获取当日成交列表"""
"""
[{'business_amount': '成交数量',
'business_price': '成交价格',
'entrust_amount': '委托数量',
'entrust_bs': '买卖方向',
'stock_account': '证券帐号',
'fund_account': '资金帐号',
'position_str': '定位串',
'business_status': '成交状态',
'date': '发生日期',
'business_type': '成交类别',
'business_time': '成交时间',
'stock_code': '证券代码',
'stock_name': '证券名称'}]
"""
return self.do(self.config['current_deal'])
# TODO: 实现买入卖出的各种委托类型
def buy(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
"""买入卖出股票
:param stock_code: 股票代码
:param price: 卖出价格
:param amount: 卖出股数
:param volume: 卖出总金额 由 volume / price 取整, 若指定 price 则此参数无效
:param entrust_prop: 委托类型,暂未实现,默认为限价委托
"""
params = dict(
self.config['buy'],
entrust_bs=1, # 买入1 卖出2
entrust_amount=amount if amount else volume // price // 100 * 100
)
return self.__trade(stock_code, price, entrust_prop=entrust_prop, other=params)
def sell(self, stock_code, price, amount=0, volume=0, entrust_prop=0):
"""卖出股票
:param stock_code: 股票代码
:param price: 卖出价格
:param amount: 卖出股数
:param volume: 卖出总金额 由 volume / price 取整, 若指定 amount 则此参数无效
:param entrust_prop: 委托类型,暂未实现,默认为限价委托
"""
params = dict(
self.config['sell'],
entrust_bs=2, # 买入1 卖出2
entrust_amount=amount if amount else volume // price
)
return self.__trade(stock_code, price, entrust_prop=entrust_prop, other=params)
def __trade(self, stock_code, price, entrust_prop, other):
# 检查是否已经掉线
if not self.heart_thread.is_alive():
check_data = self.get_balance()
if type(check_data) == dict:
return check_data
need_info = self.__get_trade_need_info(stock_code)
return self.do(dict(
other,
stock_account=need_info['stock_account'], # '沪深帐号'
exchange_type=need_info['exchange_type'], # '沪市1 深市2'
entrust_prop=entrust_prop, # 委托方式
stock_code='{:0>6}'.format(stock_code), # 股票代码, 右对齐宽为6左侧填充0
elig_riskmatch_flag=1, # 用户风险等级
entrust_price=price,
))
def __get_trade_need_info(self, stock_code):
"""获取股票对应的证券市场和帐号"""
# 获取股票对应的证券市场
sh_exchange_type = 1
sz_exchange_type = 2
exchange_type = sh_exchange_type if helpers.get_stock_type(stock_code) == 'sh' else sz_exchange_type
# 获取股票对应的证券帐号
if not hasattr(self, 'exchange_stock_account'):
self.exchange_stock_account = dict()
if exchange_type not in self.exchange_stock_account:
stock_account_index = 0
response_data = self.do(dict(
self.config['account4stock'],
exchange_type=exchange_type,
stock_code=stock_code
))[stock_account_index]
self.exchange_stock_account[exchange_type] = response_data['stock_account']
return dict(
exchange_type=exchange_type,
stock_account=self.exchange_stock_account[exchange_type]
)
def create_basic_params(self):
basic_params = dict(
CSRF_Token='<PASSWORD>',
timestamp=random.random(),
)
return basic_params
def request(self, params):
r = self.s.get(self.trade_prefix, params=params, cookies=self.cookie)
return r.text
def format_response_data(self, data):
# 获取 returnJSON
return_json = json.loads(data)['returnJson']
raw_json_data = demjson.decode(return_json)
fun_data = raw_json_data['Func%s' % raw_json_data['function_id']]
header_index = 1
remove_header_data = fun_data[header_index:]
return self.format_response_data_type(remove_header_data)
def fix_error_data(self, data):
error_index = 0
return data[error_index] if type(data) == list and data[error_index].get('error_no') is not None else data
def check_login_status(self, return_data):
if hasattr(return_data, 'get') and return_data.get('error_no') == '-1':
raise NotLoginError
def check_account_live(self, response):
if hasattr(response, 'get') and response.get('error_no') == '-1':
self.heart_active = False | 0.350866 | 0.083031 |
import re
import logging
from six import iteritems
from six.moves.urllib_parse import urlparse
# These formats are trusted and are not skipped
VALID_FORMATS = r"""^mp4$|
^pdf$|
^.?.?\.?txt$|
^.?.?\.?srt$|
.*txt$|
.*srt$|
^html?$|
^zip$|
^rar$|
^[ct]sv$|
^xlsx$|
^ipynb$|
^json$|
^pptx?$|
^docx?$|
^xls$|
^py$|
^Rmd$|
^Rdata$|
^wf1$"""
# Non simple format contains characters besides letters, numbers, "_" and "-"
NON_SIMPLE_FORMAT = r".*[^a-zA-Z0-9_-]"
RE_VALID_FORMATS = re.compile(VALID_FORMATS, re.VERBOSE)
RE_NON_SIMPLE_FORMAT = re.compile(NON_SIMPLE_FORMAT)
def skip_format_url(format_, url):
"""
Checks whether a give format/url should be skipped and not downloaded.
@param format_: Filename format (extension).
@type format_: str (e.g. html, txt, zip, pdf)
@param url: URL.
@type url: str
@return: True if format/url should be skipped, False otherwise.
@rtype bool
"""
# Do not download empty formats
if format_ == '':
return True
# Do not download email addresses
if ('mailto:' in url) and ('@' in url):
return True
# Is this localhost?
parsed = urlparse(url)
if parsed.hostname == 'localhost':
return True
# These are trusted manually added formats, do not skip them
if RE_VALID_FORMATS.match(format_):
return False
# Simple formats only contain letters, numbers, "_" and "-"
# If this a non simple format?
if RE_NON_SIMPLE_FORMAT.match(format_):
return True
# Is this a link to the site root?
if parsed.path in ('', '/'):
return True
# Do not skip
return False
def find_resources_to_get(lecture, file_formats, resource_filter, ignored_formats=None):
"""
Select formats to download.
"""
resources_to_get = []
if ignored_formats is None:
ignored_formats = []
if len(ignored_formats):
logging.info("The following file formats will be ignored: " + ",".join(ignored_formats))
for fmt, resources in iteritems(lecture):
fmt0 = fmt
short_fmt = None
if '.' in fmt:
short_fmt = fmt.split('.')[1]
if fmt in ignored_formats or (short_fmt != None and short_fmt in ignored_formats) :
continue
if fmt in file_formats or (short_fmt != None and short_fmt in file_formats) or 'all' in file_formats:
for r in resources:
if resource_filter and r[1] and not re.search(resource_filter, r[1]):
logging.debug('Skipping b/c of rf: %s %s',
resource_filter, r[1])
continue
resources_to_get.append((fmt0, r[0], r[1]))
else:
logging.debug(
'Skipping b/c format %s not in %s', fmt, file_formats)
return resources_to_get | coursera/filtering.py | import re
import logging
from six import iteritems
from six.moves.urllib_parse import urlparse
# These formats are trusted and are not skipped
VALID_FORMATS = r"""^mp4$|
^pdf$|
^.?.?\.?txt$|
^.?.?\.?srt$|
.*txt$|
.*srt$|
^html?$|
^zip$|
^rar$|
^[ct]sv$|
^xlsx$|
^ipynb$|
^json$|
^pptx?$|
^docx?$|
^xls$|
^py$|
^Rmd$|
^Rdata$|
^wf1$"""
# Non simple format contains characters besides letters, numbers, "_" and "-"
NON_SIMPLE_FORMAT = r".*[^a-zA-Z0-9_-]"
RE_VALID_FORMATS = re.compile(VALID_FORMATS, re.VERBOSE)
RE_NON_SIMPLE_FORMAT = re.compile(NON_SIMPLE_FORMAT)
def skip_format_url(format_, url):
"""
Checks whether a give format/url should be skipped and not downloaded.
@param format_: Filename format (extension).
@type format_: str (e.g. html, txt, zip, pdf)
@param url: URL.
@type url: str
@return: True if format/url should be skipped, False otherwise.
@rtype bool
"""
# Do not download empty formats
if format_ == '':
return True
# Do not download email addresses
if ('mailto:' in url) and ('@' in url):
return True
# Is this localhost?
parsed = urlparse(url)
if parsed.hostname == 'localhost':
return True
# These are trusted manually added formats, do not skip them
if RE_VALID_FORMATS.match(format_):
return False
# Simple formats only contain letters, numbers, "_" and "-"
# If this a non simple format?
if RE_NON_SIMPLE_FORMAT.match(format_):
return True
# Is this a link to the site root?
if parsed.path in ('', '/'):
return True
# Do not skip
return False
def find_resources_to_get(lecture, file_formats, resource_filter, ignored_formats=None):
"""
Select formats to download.
"""
resources_to_get = []
if ignored_formats is None:
ignored_formats = []
if len(ignored_formats):
logging.info("The following file formats will be ignored: " + ",".join(ignored_formats))
for fmt, resources in iteritems(lecture):
fmt0 = fmt
short_fmt = None
if '.' in fmt:
short_fmt = fmt.split('.')[1]
if fmt in ignored_formats or (short_fmt != None and short_fmt in ignored_formats) :
continue
if fmt in file_formats or (short_fmt != None and short_fmt in file_formats) or 'all' in file_formats:
for r in resources:
if resource_filter and r[1] and not re.search(resource_filter, r[1]):
logging.debug('Skipping b/c of rf: %s %s',
resource_filter, r[1])
continue
resources_to_get.append((fmt0, r[0], r[1]))
else:
logging.debug(
'Skipping b/c format %s not in %s', fmt, file_formats)
return resources_to_get | 0.514156 | 0.253163 |
from __future__ import division
from mmtbx.command_line import massage_data
from iotbx import file_reader
from cctbx.development import random_structure
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx.utils import null_out
import os.path as op
import random
def exercise_twin_detwin () :
random.seed(12345)
flex.set_random_seed(12345)
xrs = random_structure.xray_structure(
unit_cell=(12,5,12,90,90,90),
space_group_symbol="P1",
n_scatterers=12,
elements="random")
fc = abs(xrs.structure_factors(d_min=1.5).f_calc())
fc = fc.set_observation_type_xray_amplitude()
mtz_file = "tmp_massage_in.mtz"
fc.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
massage_data.run(
args=[
mtz_file,
"aniso.action=None",
"outlier.action=None",
"symmetry.action=twin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_twinned.mtz",
],
out=null_out())
assert op.isfile("tmp_massage_twinned.mtz")
mtz_in = file_reader.any_file("tmp_massage_twinned.mtz")
fc_twin = mtz_in.file_server.miller_arrays[0].f_sq_as_f()
fc_twin, fc_tmp = fc_twin.common_sets(other=fc)
for hkl, f1, f2 in zip(fc_tmp.indices(), fc_tmp.data(), fc_twin.data()) :
if (abs(hkl[0]) != abs(hkl[2])) :
assert not approx_equal(f1, f2, eps=0.01, out=null_out()), (hkl, f1, f2)
massage_data.run(
args=[
mtz_file,
"aniso.action=None",
"outlier.action=None",
"symmetry.action=twin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_twinned.sca",
],
out=null_out())
assert op.isfile("tmp_massage_twinned.sca")
massage_data.run(
args=[
"tmp_massage_twinned.mtz",
"aniso.action=None",
"outlier.action=None",
"symmetry.action=detwin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_detwinned.mtz",
],
out=null_out())
mtz_in = file_reader.any_file("tmp_massage_detwinned.mtz")
fc_detwin = mtz_in.file_server.miller_arrays[0].f_sq_as_f()
fc_detwin, fc_tmp = fc_detwin.common_sets(other=fc)
# XXX we appear to lose some accuracy here, possibly due to the use of
# MTZ format
for hkl, f1, f2 in zip(fc_tmp.indices(), fc_tmp.data(), fc_detwin.data()) :
assert approx_equal(f1, f2, eps=0.01), hkl
if (__name__ == "__main__") :
exercise_twin_detwin()
print "OK" | mmtbx/scaling/tst_massage_data.py | from __future__ import division
from mmtbx.command_line import massage_data
from iotbx import file_reader
from cctbx.development import random_structure
from scitbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx.utils import null_out
import os.path as op
import random
def exercise_twin_detwin () :
random.seed(12345)
flex.set_random_seed(12345)
xrs = random_structure.xray_structure(
unit_cell=(12,5,12,90,90,90),
space_group_symbol="P1",
n_scatterers=12,
elements="random")
fc = abs(xrs.structure_factors(d_min=1.5).f_calc())
fc = fc.set_observation_type_xray_amplitude()
mtz_file = "tmp_massage_in.mtz"
fc.as_mtz_dataset(column_root_label="F").mtz_object().write(mtz_file)
massage_data.run(
args=[
mtz_file,
"aniso.action=None",
"outlier.action=None",
"symmetry.action=twin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_twinned.mtz",
],
out=null_out())
assert op.isfile("tmp_massage_twinned.mtz")
mtz_in = file_reader.any_file("tmp_massage_twinned.mtz")
fc_twin = mtz_in.file_server.miller_arrays[0].f_sq_as_f()
fc_twin, fc_tmp = fc_twin.common_sets(other=fc)
for hkl, f1, f2 in zip(fc_tmp.indices(), fc_tmp.data(), fc_twin.data()) :
if (abs(hkl[0]) != abs(hkl[2])) :
assert not approx_equal(f1, f2, eps=0.01, out=null_out()), (hkl, f1, f2)
massage_data.run(
args=[
mtz_file,
"aniso.action=None",
"outlier.action=None",
"symmetry.action=twin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_twinned.sca",
],
out=null_out())
assert op.isfile("tmp_massage_twinned.sca")
massage_data.run(
args=[
"tmp_massage_twinned.mtz",
"aniso.action=None",
"outlier.action=None",
"symmetry.action=detwin",
"twin_law='l,-k,h'",
"fraction=0.3",
"hklout=tmp_massage_detwinned.mtz",
],
out=null_out())
mtz_in = file_reader.any_file("tmp_massage_detwinned.mtz")
fc_detwin = mtz_in.file_server.miller_arrays[0].f_sq_as_f()
fc_detwin, fc_tmp = fc_detwin.common_sets(other=fc)
# XXX we appear to lose some accuracy here, possibly due to the use of
# MTZ format
for hkl, f1, f2 in zip(fc_tmp.indices(), fc_tmp.data(), fc_detwin.data()) :
assert approx_equal(f1, f2, eps=0.01), hkl
if (__name__ == "__main__") :
exercise_twin_detwin()
print "OK" | 0.441432 | 0.278655 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def myround(x, base=5):
return int(base * round(float(x)/base))
def plot_accidents_bytown(exit_mile,town_mile,town_name,town_data, y_max, y_min, title_out):
"""Plot accidents by milemarker"""
exit_mile_E = exit_mile[exit_mile["Direction"]=='E']
exit_mile_W = exit_mile[exit_mile["Direction"]=='W']
town_data_east = town_data[town_data["Direction"]=='E']
town_data_west = town_data[town_data["Direction"]=='W']
if town_name=="All":
town_min_ind = 0.00
town_max_ind = 98.25
else:
town_ind = town_mile[town_mile["Town_Name"]==town_name]
town_min_ind= town_ind["Mile"].values[0]
town_max_ind = town_min_ind + town_ind["Town_Miles"].values[0]
x_index = np.arange(town_min_ind,town_max_ind,0.25)
town_data_east_loc = town_data_east["Milemarker"]
town_data_west_loc = town_data_west["Milemarker"]
town_data_east_bin = pd.cut(town_data_east_loc, x_index, include_lowest = 1)
town_data_west_bin = pd.cut(town_data_west_loc, x_index, include_lowest = 1)
town_data_east_bin_count = town_data_east_loc.groupby(town_data_east_bin).size()
town_data_west_bin_count = town_data_west_loc.groupby(town_data_west_bin).size()
max_acc = max(town_data_east_bin_count.max(),town_data_west_bin_count.max())
exit_mile_town_E = exit_mile_E[exit_mile_E["Mile"]>=town_min_ind]
exit_mile_town_E = exit_mile_town_E[exit_mile_town_E["Mile"]<town_max_ind]
exit_mile_town_W = exit_mile_W[exit_mile_W["Mile"]>=town_min_ind]
exit_mile_town_W = exit_mile_town_W[exit_mile_town_W["Mile"]<town_max_ind]
if max_acc > 250:
steps = 100
elif max_acc <=250 and max_acc > 100:
steps = 50
elif max_acc<= 100 and max_acc > 50:
steps = 25
elif max_acc<=50 and max_acc > 25:
steps = 5
else:
steps = 2
if y_max == 'not_set' and y_min == 'not_set':
y_max = myround(town_data_east_bin_count.max(),steps)+steps/2
y_min = -1*myround(town_data_west_bin_count.max(),steps)-steps/2
fig, ax = plt.subplots(figsize=[15,10])
plt.rcParams['figure.figsize'] = [15, 10]
plt.bar(x_index[:-1],town_data_east_bin_count,align='edge', width = 0.25,color='darksalmon')
plt.bar(x_index[:-1],-town_data_west_bin_count,align='edge', width=0.25, color='cornflowerblue')
plt.ylim(y_min,y_max)
plt.yticks(np.arange(y_min,y_max,step=steps),abs(np.arange(y_min,y_max,step=steps)))
plt.ylabel('Number of accidents',fontsize=14)
plt.xlabel('I-84 Milemarker',fontsize=14)
plt.title(title_out,fontsize=15)
# create custom legend
leg_elements = [Line2D([0], [0], color='coral', lw=3, label='East'),
Line2D([0], [0], color='cornflowerblue', lw=3, label='West')]
ax.legend(handles=leg_elements, loc='upper right')
# Add exit ramp delineations and names
max_height_mile = int(max(steps/4,2))
plt.vlines(exit_mile_town_E["Mile"],ymin = 0, ymax = max_height_mile, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_E.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!="All"):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,max_height_mile + min(max_height_mile,5),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
plt.vlines(exit_mile_town_W["Mile"],ymin = -1*max_height_mile, ymax =0, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_W.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!='All'):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,-1*(max_height_mile+min(max_height_mile,5)),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
# Add town names
if town_name=="All":
loc_town_names = town_data_west_bin_count.max()
plt.vlines(town_mile["Mile"], ymin=-int(max_acc/3), ymax=int(max_acc/3),color='dimgrey', linewidth=0.5,linestyle='-.')
for i, row in enumerate(town_mile.values):
Mile, Town_Number, Town_Name, Town_Miles = row
loc_print = Mile + Town_Miles/2
if (i!=2 or i!=3):
plt.text(loc_print,y_min+5,Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==2:
plt.text(loc_print+5,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==3:
plt.text(loc_print+8,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right') | code/plot_results.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def myround(x, base=5):
return int(base * round(float(x)/base))
def plot_accidents_bytown(exit_mile,town_mile,town_name,town_data, y_max, y_min, title_out):
"""Plot accidents by milemarker"""
exit_mile_E = exit_mile[exit_mile["Direction"]=='E']
exit_mile_W = exit_mile[exit_mile["Direction"]=='W']
town_data_east = town_data[town_data["Direction"]=='E']
town_data_west = town_data[town_data["Direction"]=='W']
if town_name=="All":
town_min_ind = 0.00
town_max_ind = 98.25
else:
town_ind = town_mile[town_mile["Town_Name"]==town_name]
town_min_ind= town_ind["Mile"].values[0]
town_max_ind = town_min_ind + town_ind["Town_Miles"].values[0]
x_index = np.arange(town_min_ind,town_max_ind,0.25)
town_data_east_loc = town_data_east["Milemarker"]
town_data_west_loc = town_data_west["Milemarker"]
town_data_east_bin = pd.cut(town_data_east_loc, x_index, include_lowest = 1)
town_data_west_bin = pd.cut(town_data_west_loc, x_index, include_lowest = 1)
town_data_east_bin_count = town_data_east_loc.groupby(town_data_east_bin).size()
town_data_west_bin_count = town_data_west_loc.groupby(town_data_west_bin).size()
max_acc = max(town_data_east_bin_count.max(),town_data_west_bin_count.max())
exit_mile_town_E = exit_mile_E[exit_mile_E["Mile"]>=town_min_ind]
exit_mile_town_E = exit_mile_town_E[exit_mile_town_E["Mile"]<town_max_ind]
exit_mile_town_W = exit_mile_W[exit_mile_W["Mile"]>=town_min_ind]
exit_mile_town_W = exit_mile_town_W[exit_mile_town_W["Mile"]<town_max_ind]
if max_acc > 250:
steps = 100
elif max_acc <=250 and max_acc > 100:
steps = 50
elif max_acc<= 100 and max_acc > 50:
steps = 25
elif max_acc<=50 and max_acc > 25:
steps = 5
else:
steps = 2
if y_max == 'not_set' and y_min == 'not_set':
y_max = myround(town_data_east_bin_count.max(),steps)+steps/2
y_min = -1*myround(town_data_west_bin_count.max(),steps)-steps/2
fig, ax = plt.subplots(figsize=[15,10])
plt.rcParams['figure.figsize'] = [15, 10]
plt.bar(x_index[:-1],town_data_east_bin_count,align='edge', width = 0.25,color='darksalmon')
plt.bar(x_index[:-1],-town_data_west_bin_count,align='edge', width=0.25, color='cornflowerblue')
plt.ylim(y_min,y_max)
plt.yticks(np.arange(y_min,y_max,step=steps),abs(np.arange(y_min,y_max,step=steps)))
plt.ylabel('Number of accidents',fontsize=14)
plt.xlabel('I-84 Milemarker',fontsize=14)
plt.title(title_out,fontsize=15)
# create custom legend
leg_elements = [Line2D([0], [0], color='coral', lw=3, label='East'),
Line2D([0], [0], color='cornflowerblue', lw=3, label='West')]
ax.legend(handles=leg_elements, loc='upper right')
# Add exit ramp delineations and names
max_height_mile = int(max(steps/4,2))
plt.vlines(exit_mile_town_E["Mile"],ymin = 0, ymax = max_height_mile, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_E.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!="All"):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,max_height_mile + min(max_height_mile,5),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
plt.vlines(exit_mile_town_W["Mile"],ymin = -1*max_height_mile, ymax =0, color = 'red', linewidth = 0.4, linestyle='-')
for i, row in enumerate(exit_mile_town_W.values):
go=0
if (town_name=="All") and ((i)%5==0):
go=1
elif (town_name!='All'):
go=1
if go==1:
Direction, Mile, Exit, Town_Number, Town_name = row
loc_print = Mile
plt.text(loc_print,-1*(max_height_mile+min(max_height_mile,5)),Exit,rotation = 90,color='black',
fontsize = 9,verticalalignment='center',horizontalalignment='center' )
# Add town names
if town_name=="All":
loc_town_names = town_data_west_bin_count.max()
plt.vlines(town_mile["Mile"], ymin=-int(max_acc/3), ymax=int(max_acc/3),color='dimgrey', linewidth=0.5,linestyle='-.')
for i, row in enumerate(town_mile.values):
Mile, Town_Number, Town_Name, Town_Miles = row
loc_print = Mile + Town_Miles/2
if (i!=2 or i!=3):
plt.text(loc_print,y_min+5,Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==2:
plt.text(loc_print+5,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right')
elif i==3:
plt.text(loc_print+8,-(max(loc_town_names,10)+10),Town_Name,rotation=-90,
fontsize=8,verticalalignment='bottom',horizontalalignment='right') | 0.160299 | 0.289623 |
# COMMAND ----------
# Databricks notebook: curate NYC yellow-taxi trip data.
# Reads the raw trip table, joins it with reference/lookup tables,
# derives pickup/dropoff date-time components, conforms column types,
# and persists the result as a Delta table partitioned by trip year
# and month.  Relies on Databricks globals (sql, dbutils) and on the
# common-functions notebook run below.
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, StringType, IntegerType,LongType,FloatType,DoubleType, TimestampType
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1. Execute notebook with common/reusable functions
# COMMAND ----------
# MAGIC %run "../01-General/2-CommonFunctions"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 2. Read raw, augment, persist as parquet
# COMMAND ----------
# Join raw trips with every lookup table and derive date/time parts.
# Joins are left-outer so trips with unknown reference codes are kept.
# NOTE(review): the CASE expressions on trip_year suggest vendor and
# payment-type codes changed representation in 2015 (abbreviation vs
# numeric id) -- confirm against the raw data dictionary.
curatedDF = sql("""
select distinct t.taxi_type,
t.vendor_id as vendor_id,
t.pickup_datetime,
t.dropoff_datetime,
t.store_and_fwd_flag,
t.rate_code_id,
t.pickup_location_id,
t.dropoff_location_id,
t.pickup_longitude,
t.pickup_latitude,
t.dropoff_longitude,
t.dropoff_latitude,
t.passenger_count,
t.trip_distance,
t.fare_amount,
t.extra,
t.mta_tax,
t.tip_amount,
t.tolls_amount,
t.improvement_surcharge,
t.total_amount,
t.payment_type,
t.trip_year,
t.trip_month,
v.abbreviation as vendor_abbreviation,
v.description as vendor_description,
tm.month_name_short,
tm.month_name_full,
pt.description as payment_type_description,
rc.description as rate_code_description,
tzpu.borough as pickup_borough,
tzpu.zone as pickup_zone,
tzpu.service_zone as pickup_service_zone,
tzdo.borough as dropoff_borough,
tzdo.zone as dropoff_zone,
tzdo.service_zone as dropoff_service_zone,
year(t.pickup_datetime) as pickup_year,
month(t.pickup_datetime) as pickup_month,
day(t.pickup_datetime) as pickup_day,
hour(t.pickup_datetime) as pickup_hour,
minute(t.pickup_datetime) as pickup_minute,
second(t.pickup_datetime) as pickup_second,
date(t.pickup_datetime) as pickup_date,
year(t.dropoff_datetime) as dropoff_year,
month(t.dropoff_datetime) as dropoff_month,
day(t.dropoff_datetime) as dropoff_day,
hour(t.dropoff_datetime) as dropoff_hour,
minute(t.dropoff_datetime) as dropoff_minute,
second(t.dropoff_datetime) as dropoff_second,
date(t.dropoff_datetime) as dropoff_date
from
taxi_db.yellow_taxi_trips_raw t
left outer join taxi_db.vendor_lookup v
on (t.vendor_id = case when t.trip_year < "2015" then v.abbreviation else v.vendor_id end)
left outer join taxi_db.trip_month_lookup tm
on (t.trip_month = tm.trip_month)
left outer join taxi_db.payment_type_lookup pt
on (t.payment_type = case when t.trip_year < "2015" then pt.abbreviation else pt.payment_type end)
left outer join taxi_db.rate_code_lookup rc
on (t.rate_code_id = rc.rate_code_id)
left outer join taxi_db.taxi_zone_lookup tzpu
on (t.pickup_location_id = tzpu.location_id)
left outer join taxi_db.taxi_zone_lookup tzdo
on (t.dropoff_location_id = tzdo.location_id)
""")
# Cast vendor_id and payment_type from string to integer; the
# add-temp/drop/rename sequence keeps the original column names on the
# casted values.
curatedDFConformed = (curatedDF.withColumn("temp_vendor_id", col("vendor_id").cast("integer")).drop("vendor_id").withColumnRenamed("temp_vendor_id", "vendor_id").withColumn("temp_payment_type", col("payment_type").cast("integer")).drop("payment_type").withColumnRenamed("temp_payment_type", "payment_type"))
#Save as parquet, partition by year and month
#curatedDFConformed.coalesce(15).write.partitionBy("trip_year", "trip_month").parquet(destDataDirRoot)
# COMMAND ----------
#Destination directory
destDataDirRoot = "/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi"
#Delete any residual data from prior executions for an idempotent run
dbutils.fs.rm(destDataDirRoot,recurse=True)
# COMMAND ----------
#Save as Delta, partition by year and month
# coalesce(10) bounds the number of output files; the write is lazy, so
# the SQL above only executes here.
curatedDFConformed.coalesce(10).write.format("delta").mode("append").partitionBy("trip_year","trip_month").save(destDataDirRoot)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3. Define external table
# COMMAND ----------
# MAGIC %sql
# MAGIC USE taxi_db;
# MAGIC DROP TABLE IF EXISTS yellow_taxi_trips_curated;
# MAGIC CREATE TABLE yellow_taxi_trips_curated
# MAGIC USING DELTA
# MAGIC LOCATION '/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi';
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4. Explore
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*) as trip_count from taxi_db.yellow_taxi_trips_curated
# COMMAND ----------
# MAGIC %sql
# MAGIC select trip_year,trip_month, count(*) as trip_count from taxi_db.yellow_taxi_trips_curated group by trip_year,trip_month
# COMMAND ----------
# Databricks notebook: curate NYC yellow-taxi trip data.
# Reads the raw trip table, joins it with reference/lookup tables,
# derives pickup/dropoff date-time components, conforms column types,
# and persists the result as a Delta table partitioned by trip year
# and month.  Relies on Databricks globals (sql, dbutils) and on the
# common-functions notebook run below.
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, StringType, IntegerType,LongType,FloatType,DoubleType, TimestampType
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1. Execute notebook with common/reusable functions
# COMMAND ----------
# MAGIC %run "../01-General/2-CommonFunctions"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 2. Read raw, augment, persist as parquet
# COMMAND ----------
# Join raw trips with every lookup table and derive date/time parts.
# Joins are left-outer so trips with unknown reference codes are kept.
# NOTE(review): the CASE expressions on trip_year suggest vendor and
# payment-type codes changed representation in 2015 (abbreviation vs
# numeric id) -- confirm against the raw data dictionary.
curatedDF = sql("""
select distinct t.taxi_type,
t.vendor_id as vendor_id,
t.pickup_datetime,
t.dropoff_datetime,
t.store_and_fwd_flag,
t.rate_code_id,
t.pickup_location_id,
t.dropoff_location_id,
t.pickup_longitude,
t.pickup_latitude,
t.dropoff_longitude,
t.dropoff_latitude,
t.passenger_count,
t.trip_distance,
t.fare_amount,
t.extra,
t.mta_tax,
t.tip_amount,
t.tolls_amount,
t.improvement_surcharge,
t.total_amount,
t.payment_type,
t.trip_year,
t.trip_month,
v.abbreviation as vendor_abbreviation,
v.description as vendor_description,
tm.month_name_short,
tm.month_name_full,
pt.description as payment_type_description,
rc.description as rate_code_description,
tzpu.borough as pickup_borough,
tzpu.zone as pickup_zone,
tzpu.service_zone as pickup_service_zone,
tzdo.borough as dropoff_borough,
tzdo.zone as dropoff_zone,
tzdo.service_zone as dropoff_service_zone,
year(t.pickup_datetime) as pickup_year,
month(t.pickup_datetime) as pickup_month,
day(t.pickup_datetime) as pickup_day,
hour(t.pickup_datetime) as pickup_hour,
minute(t.pickup_datetime) as pickup_minute,
second(t.pickup_datetime) as pickup_second,
date(t.pickup_datetime) as pickup_date,
year(t.dropoff_datetime) as dropoff_year,
month(t.dropoff_datetime) as dropoff_month,
day(t.dropoff_datetime) as dropoff_day,
hour(t.dropoff_datetime) as dropoff_hour,
minute(t.dropoff_datetime) as dropoff_minute,
second(t.dropoff_datetime) as dropoff_second,
date(t.dropoff_datetime) as dropoff_date
from
taxi_db.yellow_taxi_trips_raw t
left outer join taxi_db.vendor_lookup v
on (t.vendor_id = case when t.trip_year < "2015" then v.abbreviation else v.vendor_id end)
left outer join taxi_db.trip_month_lookup tm
on (t.trip_month = tm.trip_month)
left outer join taxi_db.payment_type_lookup pt
on (t.payment_type = case when t.trip_year < "2015" then pt.abbreviation else pt.payment_type end)
left outer join taxi_db.rate_code_lookup rc
on (t.rate_code_id = rc.rate_code_id)
left outer join taxi_db.taxi_zone_lookup tzpu
on (t.pickup_location_id = tzpu.location_id)
left outer join taxi_db.taxi_zone_lookup tzdo
on (t.dropoff_location_id = tzdo.location_id)
""")
# Cast vendor_id and payment_type from string to integer; the
# add-temp/drop/rename sequence keeps the original column names on the
# casted values.
curatedDFConformed = (curatedDF.withColumn("temp_vendor_id", col("vendor_id").cast("integer")).drop("vendor_id").withColumnRenamed("temp_vendor_id", "vendor_id").withColumn("temp_payment_type", col("payment_type").cast("integer")).drop("payment_type").withColumnRenamed("temp_payment_type", "payment_type"))
#Save as parquet, partition by year and month
#curatedDFConformed.coalesce(15).write.partitionBy("trip_year", "trip_month").parquet(destDataDirRoot)
# COMMAND ----------
#Destination directory
destDataDirRoot = "/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi"
#Delete any residual data from prior executions for an idempotent run
dbutils.fs.rm(destDataDirRoot,recurse=True)
# COMMAND ----------
#Save as Delta, partition by year and month
# coalesce(10) bounds the number of output files; the write is lazy, so
# the SQL above only executes here.
curatedDFConformed.coalesce(10).write.format("delta").mode("append").partitionBy("trip_year","trip_month").save(destDataDirRoot)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3. Define external table
# COMMAND ----------
# MAGIC %sql
# MAGIC USE taxi_db;
# MAGIC DROP TABLE IF EXISTS yellow_taxi_trips_curated;
# MAGIC CREATE TABLE yellow_taxi_trips_curated
# MAGIC USING DELTA
# MAGIC LOCATION '/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi';
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4. Explore
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*) as trip_count from taxi_db.yellow_taxi_trips_curated
# COMMAND ----------
# MAGIC %sql
# MAGIC select trip_year,trip_month, count(*) as trip_count from taxi_db.yellow_taxi_trips_curated group by trip_year,trip_month
# Copyright (c) 2016-2020, <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the RandPlay function class."""
from pathlib import Path
from random import choice
import wx
from ytranslate import t
from audio import audiolib
from log import logger
from sharp import Function
class RandPlay(Function):

    """Function SharpScript 'randplay'.

    This function plays a random sound from a list.

    """

    description = "Play a sound from a list at random"

    def run(self, filenames):
        """Play one audio file picked at random.

        Args:
            filenames (str): semicolon-separated list of file names.
        """
        log = logger("sharp")
        if not filenames:
            return

        filenames = filenames.split(";")
        filename = choice(filenames)
        if self.engine.sounds:
            log.debug(f"#randplay {filename!r}")
        else:
            # Sounds are globally muted: log and bail out without playing.
            log.debug(f"#randplay-silent {filename!r}")
            return

        files = self.find_files(filename)
        if files:
            filename = choice(files)
            log.debug(f"#randplay playing {filename!r}")
            audiolib.play(filename)
        else:
            # Fixed: was an f-string with no placeholder.
            log.warning("#randplay cannot find any sound")

    def find_files(self, filename):
        """Return a list of existing files matching this filename.

        A relative *filename* is resolved against the world's directory;
        the last path component is then searched recursively (rglob) and
        only regular files are kept.
        """
        absolute = Path(filename)
        if not absolute.is_absolute():
            absolute = Path(self.world.path) / filename

        # The last part in the file name is searched
        parent = absolute.parent
        match = absolute.parts[-1]
        return [str(path) for path in parent.rglob(match) if path.is_file()]

    def display(self, dialog, filenames=""):
        """Display the function's argument."""
        self.dialog = dialog
        l_files = self.t("files", "Audio files to be played")

        # Dialog
        l_files = wx.StaticText(dialog, label=l_files)
        t_files = wx.TextCtrl(dialog, value=filenames)
        test = wx.Button(dialog, label=t("ui.button.test"))
        dialog.files = t_files
        dialog.top.Add(l_files)
        dialog.top.Add(t_files)
        dialog.top.Add(test)

        # Event binding
        test.Bind(wx.EVT_BUTTON, self.test_files)

    def complete(self, dialog):
        """The user pressed 'ok' in the dialog."""
        files = dialog.files.GetValue()
        empty_path = self.t("empty_path",
                "The path hasn't been set. What file should I play?")
        if not files:
            wx.MessageBox(empty_path, t("ui.message.error"),
                    wx.OK | wx.ICON_ERROR)
            dialog.files.SetFocus()
            return None

        return (files, )

    def test_files(self, e):
        """Play one of the configured files, letting the user test them."""
        names = self.dialog.files.GetValue().split(";")
        matches = self.find_files(choice(names))
        # Fixed: choice() on an empty match list raised IndexError.
        if matches:
            audiolib.play(choice(matches))
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing the RandPlay function class."""
from pathlib import Path
from random import choice
import wx
from ytranslate import t
from audio import audiolib
from log import logger
from sharp import Function
class RandPlay(Function):
"""Function SharpScript 'randplay'.
This function plays a random sound from a list.
"""
description = "Play a sound from a list at random"
def run(self, filenames):
"""Play the audio file."""
log = logger("sharp")
if not filenames:
return
filenames = filenames.split(";")
filename = choice(filenames)
if self.engine.sounds:
log.debug(f"#randplay {filename!r}")
else:
log.debug(f"#randplay-silent {filename!r}")
return
files = self.find_files(filename)
if files:
filename = choice(files)
log.debug(f"#randplay playing {filename!r}")
audiolib.play(filename)
else:
log.warning(f"#randplay cannot find any sound")
def find_files(self, filename):
"""Return a list of existing files matching this filename."""
absolute = Path(filename)
if not absolute.is_absolute():
absolute = Path(self.world.path) / filename
# The last part in the file name is searched
parent = absolute.parent
match = absolute.parts[-1]
results = list(parent.rglob(match))
results = [path for path in results if path.is_file()]
return [str(path) for path in results]
def display(self, dialog, filenames=""):
"""Display the function's argument."""
self.dialog = dialog
l_files = self.t("files", "Audio files to be played")
# Dialog
l_files = wx.StaticText(dialog, label=l_files)
t_files = wx.TextCtrl(dialog, value=filenames)
test = wx.Button(dialog, label=t("ui.button.test"))
dialog.files = t_files
dialog.top.Add(l_files)
dialog.top.Add(t_files)
dialog.top.Add(test)
# Event binding
test.Bind(wx.EVT_BUTTON, self.test_files)
def complete(self, dialog):
"""The user pressed 'ok' in the dialog."""
files = dialog.files.GetValue()
empty_path = self.t("empty_path",
"The path hasn't been set. What file should I play?")
if not files:
wx.MessageBox(empty_path, t("ui.message.error"),
wx.OK | wx.ICON_ERROR)
dialog.files.SetFocus()
return None
return (files, )
def test_files(self, e):
"""Test the audio files."""
parent = self.dialog
names = parent.files.GetValue().split(";")
filename = choice(self.find_files(choice(names)))
audiolib.play(filename) | 0.689619 | 0.065455 |
import xmltodict
import yaml
import re
import json
import os
import re
import toml
from collections import OrderedDict
from glob import glob
# Run relative to this script's directory so the data paths resolve.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

# ======= load chips
chips = {}
# Fixed: the loop variable `f` was shadowed by the open file handle.
for chip_path in sorted(glob('stm32-data/data/chips/*.yaml')):
    # Only a subset of families is supported for now.
    if ('STM32F4' not in chip_path and 'STM32L4' not in chip_path
            and 'STM32H7' not in chip_path and 'STM32L0' not in chip_path):
        continue
    with open(chip_path, 'r') as f:
        chip = yaml.load(f, Loader=yaml.CSafeLoader)
    chip['name'] = chip['name'].lower()
    chip['features'] = set()
    # Cargo feature for the chip family, e.g. `_stm32h7p` for H7+ parts.
    family = chip["family"].lower().replace('+', 'p')
    chip['features'].add(f'_{family}')
    print(chip['name'])
    chips[chip['name']] = chip
# ======= load GPIO AF
gpio_afs = {}
# Fixed: the loop variable `f` was shadowed by the open file handle.
for af_path in sorted(glob('stm32-data/data/gpio_af/*.yaml')):
    # Key by file stem, e.g. `.../h7.yaml` -> 'h7'.
    name = af_path.split('/')[-1].split('.')[0]
    with open(af_path, 'r') as f:
        af = yaml.load(f, Loader=yaml.CSafeLoader)
    gpio_afs[name] = af
# ========= Generate pac/mod.rs
# One cfg_attr per chip: the active Cargo feature selects which generated
# chip file backs the private `chip` module.
with open('src/pac/mod.rs', 'w') as f:
    for chip in chips.values():
        f.write(
            f'#[cfg_attr(feature="{chip["name"]}", path="{chip["name"]}.rs")]\n')
    f.write('mod chip;\n')
    f.write('pub use chip::*;\n')
    f.write('#[allow(dead_code, unused_imports)]\n')
    f.write('pub mod regs;\n')
# ========= Generate pac/stm32xxx.rs
# NOTE(review): indentation was reconstructed from a whitespace-mangled
# source; the nesting of some branches (e.g. the SPI pin emission relative
# to the `'clock' in peri` check) is inferred -- verify against upstream.


def _impl_af_pins(out, af, pins, periph, macro, roles):
    """Emit `macro!(periph, RolePin, pin, af_num);` for every present pin.

    Walks the chip's alternate-function table and, for each pin that exists
    on this package, writes one impl per supported *role* (RX, TX, ...).
    AF number 0 is valid, hence the explicit `is not None` test (this also
    fixes the old i2c branch, which used truthiness and dropped AF 0).
    """
    for pin, funcs in af.items():
        if pin not in pins:
            continue
        for role in roles:
            func = funcs.get(f'{periph}_{role}')
            if func is not None:
                out.write(f'{macro}!({periph}, {role.capitalize()}Pin, {pin}, {func});')


for chip in chips.values():
    print(f'generating {chip["name"]}')
    with open(f'src/pac/{chip["name"]}.rs', 'w') as f:
        f.write("""
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(non_snake_case)]
""")
        af = gpio_afs[chip['gpio_af']]

        peripheral_names = []     # USART1, PA5, EXTI8
        exti_interrupts = []      # EXTI IRQs, EXTI0, EXTI4_15 etc.
        peripheral_versions = {}  # usart -> v1, syscfg -> f4
        pins = set()              # set of all present pins. PA4, PA5...

        # TODO this should probably come from the yamls?
        # We don't want to hardcode the EXTI peripheral addr
        gpio_base = chip['peripherals']['GPIOA']['address']
        gpio_stride = 0x400

        f.write(f"""
pub fn GPIO(n: usize) -> gpio::Gpio {{
    gpio::Gpio((0x{gpio_base:x} + 0x{gpio_stride:x}*n) as _)
}}
""")

        # ========= peripherals

        # The 16 EXTI channel singletons always exist.
        peripheral_names.extend(f'EXTI{x}' for x in range(16))

        num_dmas = 0

        # NOTE: pin impls rely on GPIO ports being listed before the
        # peripherals that use them, since `pins` is filled by the gpio
        # branch below (this ordering assumption is inherited from the
        # original code's `if pin in pins` checks).
        for (name, peri) in chip['peripherals'].items():
            if 'block' not in peri:
                continue

            block = peri['block']
            # e.g. 'usart_v1/USART' -> mod 'usart', version 'v1', 'Usart'.
            block_mod, block_name_unparsed = block.rsplit('/')
            block_mod, block_version = block_mod.rsplit('_')
            block_name = ''
            for b in block_name_unparsed.split('_'):
                block_name += b.capitalize()

            # Check all peripherals have the same version: it's not OK for
            # the same chip to use both usart_v1 and usart_v2
            if old_version := peripheral_versions.get(block_mod):
                if old_version != block_version:
                    raise Exception(f'Peripheral {block_mod} has two versions: {old_version} and {block_version}')
            peripheral_versions[block_mod] = block_version

            # Set features
            chip['features'].add(f'_{block_mod}')
            chip['features'].add(f'_{block_mod}_{block_version}')

            f.write(f'pub const {name}: {block_mod}::{block_name} = {block_mod}::{block_name}(0x{peri["address"]:x} as _);')

            custom_singletons = False

            if block_mod == 'usart':
                f.write(f'impl_usart!({name});')
                _impl_af_pins(f, af, pins, name, 'impl_usart_pin',
                              ('RX', 'TX', 'CTS', 'RTS', 'CK'))

            if block_mod == 'rng':
                for irq in chip['interrupts']:
                    if re.search('RNG', irq):
                        f.write(f'impl_rng!({name}, {irq});')

            if block_mod == 'spi':
                if 'clock' in peri:
                    clock = peri['clock']
                    f.write(f'impl_spi!({name}, {clock});')
                _impl_af_pins(f, af, pins, name, 'impl_spi_pin',
                              ('SCK', 'MOSI', 'MISO'))

            if block_mod == 'i2c':
                f.write(f'impl_i2c!({name});')
                _impl_af_pins(f, af, pins, name, 'impl_i2c_pin',
                              ('SCL', 'SDA'))

            if block_mod == 'gpio':
                custom_singletons = True
                port = name[4:]
                port_num = ord(port) - ord('A')
                assert peri['address'] == gpio_base + gpio_stride * port_num
                for pin_num in range(16):
                    pin = f'P{port}{pin_num}'
                    pins.add(pin)
                    peripheral_names.append(pin)
                    f.write(f'impl_gpio_pin!({pin}, {port_num}, {pin_num}, EXTI{pin_num});')

            if block_mod == 'dma':
                custom_singletons = True
                num_dmas += 1
                dma_num = int(name[3:]) - 1  # subtract 1: DMA1=0, DMA2=1
                for ch_num in range(8):
                    channel = f'{name}_CH{ch_num}'
                    peripheral_names.append(channel)
                    f.write(f'impl_dma_channel!({channel}, {dma_num}, {ch_num});')

            if block == 'sdmmc_v2/SDMMC':
                f.write(f'impl_sdmmc!({name});')
                _impl_af_pins(f, af, pins, name, 'impl_sdmmc_pin',
                              ('CK', 'CMD', 'D0', 'D1', 'D2', 'D3',
                               'D4', 'D5', 'D6', 'D7'))

            if block_name == 'TimGp16':
                if re.match('TIM[2345]$', name):
                    f.write(f'impl_timer!({name});')

            if block_mod == 'exti':
                for irq in chip['interrupts']:
                    if re.match('EXTI', irq):
                        exti_interrupts.append(irq)

            if not custom_singletons:
                peripheral_names.append(name)

        for mod, version in peripheral_versions.items():
            f.write(f'pub use super::regs::{mod}_{version} as {mod};')

        f.write(f"embassy_extras::peripherals!({','.join(peripheral_names)});")

        # ========= DMA peripherals
        if num_dmas > 0:
            f.write(f"""
pub fn DMA(n: u8) -> dma::Dma {{
    match n {{
""")
            for n in range(num_dmas - 1):
                f.write(f'{n} => DMA{n + 1},')
            f.write(f"""
        _ => DMA{num_dmas},
    }}
}}
""")

        # ========= exti interrupts
        f.write(f"impl_exti_irq!({','.join(exti_interrupts)});")

        # ========= interrupts
        irq_variants = []
        irq_vectors = []
        irq_fns = []
        irq_declares = []

        # Invert the name->number map; vector slots with no IRQ stay reserved.
        irqs = {num: name for name, num in chip['interrupts'].items()}
        irq_count = max(irqs.keys()) + 1

        for num, name in irqs.items():
            irq_variants.append(f'{name} = {num},')
            irq_fns.append(f'fn {name}();')
            irq_declares.append(f'declare!({name});')

        for num in range(irq_count):
            if name := irqs.get(num):
                irq_vectors.append(f'Vector {{ _handler: {name} }},')
            else:
                irq_vectors.append(f'Vector {{ _reserved: 0 }},')

        f.write(f"""
pub mod interrupt {{
    pub use bare_metal::Mutex;
    pub use critical_section::CriticalSection;
    pub use embassy::interrupt::{{declare, take, Interrupt}};
    pub use embassy_extras::interrupt::Priority4 as Priority;

    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    #[allow(non_camel_case_types)]
    pub enum InterruptEnum {{
        {''.join(irq_variants)}
    }}
    unsafe impl cortex_m::interrupt::InterruptNumber for InterruptEnum {{
        #[inline(always)]
        fn number(self) -> u16 {{
            self as u16
        }}
    }}

    {''.join(irq_declares)}
}}

mod interrupt_vector {{
    extern "C" {{
        {''.join(irq_fns)}
    }}
    pub union Vector {{
        _handler: unsafe extern "C" fn(),
        _reserved: u32,
    }}
    #[link_section = ".vector_table.interrupts"]
    #[no_mangle]
    pub static __INTERRUPTS: [Vector; {irq_count}] = [
        {''.join(irq_vectors)}
    ];
}}
""")
# ========= Update Cargo features
# Hidden per-peripheral features may pull in optional dependencies.
feature_optional_deps = {
    '_rng': ['rand_core'],
    '_sdmmc': ['sdio-host'],
}

features = {}
extra_features = set()
for name, chip in chips.items():
    features[name] = sorted(chip['features'])
    extra_features.update(chip['features'])
for feature in sorted(extra_features):
    features[feature] = feature_optional_deps.get(feature) or []

# Splice the generated feature table between the markers in Cargo.toml.
SEPARATOR_START = '# BEGIN GENERATED FEATURES\n'
SEPARATOR_END = '# END GENERATED FEATURES\n'
with open('Cargo.toml', 'r') as f:
    cargo = f.read()
before, cargo = cargo.split(SEPARATOR_START, maxsplit=1)
_, after = cargo.split(SEPARATOR_END, maxsplit=1)
cargo = before + SEPARATOR_START + toml.dumps(features) + SEPARATOR_END + after
with open('Cargo.toml', 'w') as f:
    f.write(cargo)

# ========= Generate pac/regs.rs
os.system('cargo run --manifest-path ../../svd2rust/Cargo.toml -- generate --dir stm32-data/data/registers')
os.system('mv lib.rs src/pac/regs.rs')

# ========= Format generated sources (comment fixed: this is not the
# Cargo-features step)
os.system('rustfmt src/pac/*')
import yaml
import re
import json
import os
import re
import toml
from collections import OrderedDict
from glob import glob
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
# ======= load chips
chips = {}
for f in sorted(glob('stm32-data/data/chips/*.yaml')):
if 'STM32F4' not in f and 'STM32L4' not in f and 'STM32H7' not in f and 'STM32L0' not in f:
continue
with open(f, 'r') as f:
chip = yaml.load(f, Loader=yaml.CSafeLoader)
chip['name'] = chip['name'].lower()
chip['features'] = set()
family = chip["family"].lower().replace('+', 'p')
chip['features'].add(f'_{family}')
print(chip['name'])
chips[chip['name']] = chip
# ======= load GPIO AF
gpio_afs = {}
for f in sorted(glob('stm32-data/data/gpio_af/*.yaml')):
name = f.split('/')[-1].split('.')[0]
with open(f, 'r') as f:
af = yaml.load(f, Loader=yaml.CSafeLoader)
gpio_afs[name] = af
# ========= Generate pac/mod.rs
with open('src/pac/mod.rs', 'w') as f:
for chip in chips.values():
f.write(
f'#[cfg_attr(feature="{chip["name"]}", path="{chip["name"]}.rs")]\n')
f.write('mod chip;\n')
f.write('pub use chip::*;\n')
f.write('#[allow(dead_code, unused_imports)]\n')
f.write('pub mod regs;\n')
# ========= Generate pac/stm32xxx.rs
for chip in chips.values():
print(f'generating {chip["name"]}')
with open(f'src/pac/{chip["name"]}.rs', 'w') as f:
f.write("""
#![allow(dead_code)]
#![allow(unused_imports)]
#![allow(non_snake_case)]
""")
af = gpio_afs[chip['gpio_af']]
peripheral_names = [] # USART1, PA5, EXTI8
exti_interrupts = [] # EXTI IRQs, EXTI0, EXTI4_15 etc.
peripheral_versions = {} # usart -> v1, syscfg -> f4
pins = set() # set of all present pins. PA4, PA5...
# TODO this should probably come from the yamls?
# We don't want to hardcode the EXTI peripheral addr
gpio_base = chip['peripherals']['GPIOA']['address']
gpio_stride = 0x400
f.write(f"""
pub fn GPIO(n: usize) -> gpio::Gpio {{
gpio::Gpio((0x{gpio_base:x} + 0x{gpio_stride:x}*n) as _)
}}
""")
# ========= peripherals
peripheral_names.extend((f'EXTI{x}' for x in range(16)))
num_dmas = 0
for (name, peri) in chip['peripherals'].items():
if 'block' not in peri:
continue
block = peri['block']
block_mod, block_name_unparsed = block.rsplit('/')
block_mod, block_version = block_mod.rsplit('_')
block_name = ''
for b in block_name_unparsed.split('_'):
block_name += b.capitalize()
# Check all peripherals have the same version: it's not OK for the same chip to use both usart_v1 and usart_v2
if old_version := peripheral_versions.get(block_mod):
if old_version != block_version:
raise Exception(f'Peripheral {block_mod} has two versions: {old_version} and {block_version}')
peripheral_versions[block_mod] = block_version
# Set features
chip['features'].add(f'_{block_mod}')
chip['features'].add(f'_{block_mod}_{block_version}')
f.write(f'pub const {name}: {block_mod}::{block_name} = {block_mod}::{block_name}(0x{peri["address"]:x} as _);')
custom_singletons = False
if block_mod == 'usart':
f.write(f'impl_usart!({name});')
for pin, funcs in af.items():
if pin in pins:
if (func := funcs.get(f'{name}_RX')) != None:
f.write(f'impl_usart_pin!({name}, RxPin, {pin}, {func});')
if (func := funcs.get(f'{name}_TX')) != None:
f.write(f'impl_usart_pin!({name}, TxPin, {pin}, {func});')
if (func := funcs.get(f'{name}_CTS')) != None:
f.write(f'impl_usart_pin!({name}, CtsPin, {pin}, {func});')
if (func := funcs.get(f'{name}_RTS')) != None:
f.write(f'impl_usart_pin!({name}, RtsPin, {pin}, {func});')
if (func := funcs.get(f'{name}_CK')) != None:
f.write(f'impl_usart_pin!({name}, CkPin, {pin}, {func});')
if block_mod == 'rng':
for irq in chip['interrupts']:
if re.search('RNG', irq):
f.write(f'impl_rng!({name}, {irq});')
if block_mod == 'spi':
if 'clock' in peri:
clock = peri['clock']
f.write(f'impl_spi!({name}, {clock});')
for pin, funcs in af.items():
if pin in pins:
if (func := funcs.get(f'{name}_SCK')) != None:
f.write(f'impl_spi_pin!({name}, SckPin, {pin}, {func});')
if (func := funcs.get(f'{name}_MOSI')) != None:
f.write(f'impl_spi_pin!({name}, MosiPin, {pin}, {func});')
if (func := funcs.get(f'{name}_MISO')) != None:
f.write(f'impl_spi_pin!({name}, MisoPin, {pin}, {func});')
if block_mod == 'i2c':
f.write(f'impl_i2c!({name});')
for pin, funcs in af.items():
if pin in pins:
if func := funcs.get(f'{name}_SCL'):
f.write(f'impl_i2c_pin!({name}, SclPin, {pin}, {func});')
if func := funcs.get(f'{name}_SDA'):
f.write(f'impl_i2c_pin!({name}, SdaPin, {pin}, {func});')
if block_mod == 'gpio':
custom_singletons = True
port = name[4:]
port_num = ord(port) - ord('A')
assert peri['address'] == gpio_base + gpio_stride*port_num
for pin_num in range(16):
pin = f'P{port}{pin_num}'
pins.add(pin)
peripheral_names.append(pin)
f.write(f'impl_gpio_pin!({pin}, {port_num}, {pin_num}, EXTI{pin_num});')
if block_mod == 'dma':
custom_singletons = True
num_dmas += 1
dma_num = int(name[3:])-1 # substract 1 because we want DMA1=0, DMA2=1
for ch_num in range(8):
channel = f'{name}_CH{ch_num}'
peripheral_names.append(channel)
f.write(f'impl_dma_channel!({channel}, {dma_num}, {ch_num});')
if peri['block'] == 'sdmmc_v2/SDMMC':
f.write(f'impl_sdmmc!({name});')
for pin, funcs in af.items():
if pin in pins:
if (func := funcs.get(f'{name}_CK')) != None:
f.write(f'impl_sdmmc_pin!({name}, CkPin, {pin}, {func});')
if (func := funcs.get(f'{name}_CMD')) != None:
f.write(f'impl_sdmmc_pin!({name}, CmdPin, {pin}, {func});')
if (func := funcs.get(f'{name}_D0')) != None:
f.write(f'impl_sdmmc_pin!({name}, D0Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D1')) != None:
f.write(f'impl_sdmmc_pin!({name}, D1Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D2')) != None:
f.write(f'impl_sdmmc_pin!({name}, D2Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D3')) != None:
f.write(f'impl_sdmmc_pin!({name}, D3Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D4')) != None:
f.write(f'impl_sdmmc_pin!({name}, D4Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D5')) != None:
f.write(f'impl_sdmmc_pin!({name}, D5Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D6')) != None:
f.write(f'impl_sdmmc_pin!({name}, D6Pin, {pin}, {func});')
if (func := funcs.get(f'{name}_D7')) != None:
f.write(f'impl_sdmmc_pin!({name}, D7Pin, {pin}, {func});')
if block_name == 'TimGp16':
if re.match('TIM[2345]$', name):
f.write(f'impl_timer!({name});')
if block_mod == 'exti':
for irq in chip['interrupts']:
if re.match('EXTI', irq):
exti_interrupts.append(irq)
if not custom_singletons:
peripheral_names.append(name)
for mod, version in peripheral_versions.items():
f.write(f'pub use super::regs::{mod}_{version} as {mod};')
f.write(f"embassy_extras::peripherals!({','.join(peripheral_names)});")
# ========= DMA peripherals
if num_dmas > 0:
f.write(f"""
pub fn DMA(n: u8) -> dma::Dma {{
match n {{
""")
for n in range(num_dmas - 1):
f.write(f'{n} => DMA{n + 1},')
f.write(f"""
_ => DMA{num_dmas},
}}
}}
""")
# ========= exti interrupts
f.write(f"impl_exti_irq!({','.join(exti_interrupts)});")
# ========= interrupts
irq_variants = []
irq_vectors = []
irq_fns = []
irq_declares = []
irqs = {num: name for name, num in chip['interrupts'].items()}
irq_count = max(irqs.keys()) + 1
for num, name in irqs.items():
irq_variants.append(f'{name} = {num},')
irq_fns.append(f'fn {name}();')
irq_declares.append(f'declare!({name});')
for num in range(irq_count):
if name := irqs.get(num):
irq_vectors.append(f'Vector {{ _handler: {name} }},')
else:
irq_vectors.append(f'Vector {{ _reserved: 0 }},')
f.write(f"""
pub mod interrupt {{
pub use bare_metal::Mutex;
pub use critical_section::CriticalSection;
pub use embassy::interrupt::{{declare, take, Interrupt}};
pub use embassy_extras::interrupt::Priority4 as Priority;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum InterruptEnum {{
{''.join(irq_variants)}
}}
unsafe impl cortex_m::interrupt::InterruptNumber for InterruptEnum {{
#[inline(always)]
fn number(self) -> u16 {{
self as u16
}}
}}
{''.join(irq_declares)}
}}
mod interrupt_vector {{
extern "C" {{
{''.join(irq_fns)}
}}
pub union Vector {{
_handler: unsafe extern "C" fn(),
_reserved: u32,
}}
#[link_section = ".vector_table.interrupts"]
#[no_mangle]
pub static __INTERRUPTS: [Vector; {irq_count}] = [
{''.join(irq_vectors)}
];
}}
""")
# ========= Update Cargo features
feature_optional_deps = {}
feature_optional_deps['_rng'] = ['rand_core']
feature_optional_deps['_sdmmc'] = ['sdio-host']
features = {}
extra_features = set()
for name, chip in chips.items():
features[name] = sorted(list(chip['features']))
for feature in chip['features']:
extra_features.add(feature)
for feature in sorted(list(extra_features)):
features[feature] = feature_optional_deps.get(feature) or []
SEPARATOR_START = '# BEGIN GENERATED FEATURES\n'
SEPARATOR_END = '# END GENERATED FEATURES\n'
with open('Cargo.toml', 'r') as f:
cargo = f.read()
before, cargo = cargo.split(SEPARATOR_START, maxsplit=1)
_, after = cargo.split(SEPARATOR_END, maxsplit=1)
cargo = before + SEPARATOR_START + toml.dumps(features) + SEPARATOR_END + after
with open('Cargo.toml', 'w') as f:
f.write(cargo)
# ========= Generate pac/regs.rs
os.system('cargo run --manifest-path ../../svd2rust/Cargo.toml -- generate --dir stm32-data/data/registers')
os.system('mv lib.rs src/pac/regs.rs')
# ========= Update Cargo features
os.system('rustfmt src/pac/*') | 0.338952 | 0.078678 |
class Node:
    """A single node of a singly linked list."""

    def __init__(self, data=None):
        self.data = data  # payload
        self.next = None  # next Node, or None at the tail


class LinkedList:
    """A simple singly linked list with 1-based positions.

    Position arguments are assumed valid unless stated otherwise.
    """

    def __init__(self):
        self.head = None

    def AppendNode(self, data):
        """Append a new node holding *data* at the tail."""
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
            return
        curr = self.head
        while curr.next is not None:
            curr = curr.next
        curr.next = new_node

    def PrintList(self):
        """Print the list as |a|->|b|->NULL (never returns on a circular list)."""
        curr = self.head
        print("The linked list is: ")
        while curr is not None:
            print("|" + str(curr.data) + "|->", end='')
            curr = curr.next
        print("NULL")

    def InsertNodeAtTheBeginning(self, data):
        """Insert a new node holding *data* before the current head."""
        new_node = Node(data)
        new_node.next = self.head
        self.head = new_node

    def InsertNodeAtPosition(self, data, position):
        """Insert *data* so the new node ends up at 1-based *position*."""
        if position == 1:
            self.InsertNodeAtTheBeginning(data)
            return
        prev = self.head
        for _ in range(position - 2):
            prev = prev.next
        new_node = Node(data)
        new_node.next = prev.next
        prev.next = new_node

    def DeleteNode(self, position):
        """Delete the node at 1-based *position*.

        Bug fix: position 1 previously looped forever; it now unlinks the head.
        """
        if position == 1:
            self.head = self.head.next
            return
        prev = self.head
        for _ in range(position - 2):
            prev = prev.next
        prev.next = prev.next.next

    def SearchByValue(self, data):
        """Return the 1-based position of the first node holding *data*.

        Raises ValueError when *data* is absent (the original ran off the
        tail and crashed with AttributeError).
        """
        curr = self.head
        position = 1
        while curr is not None:
            if curr.data == data:
                return position
            curr = curr.next
            position += 1
        raise ValueError(f"{data!r} not found in list")

    def ValueAtPosition(self, position):
        """Return the data stored at 1-based *position*."""
        curr = self.head
        for _ in range(position - 1):
            curr = curr.next
        return curr.data

    def MakeCircular(self):
        """Link the tail back to the head, making the list circular.

        After this, full traversals (e.g. PrintList) never terminate.
        """
        curr = self.head
        while curr.next is not None:
            curr = curr.next
        curr.next = self.head
if __name__ == "__main__":
    # Demo: build a list, mutate it, then query it interactively.
    newList = LinkedList()
    newList.AppendNode(4)
    newList.AppendNode(5)
    newList.AppendNode(7)
    newList.AppendNode(9)
    newList.AppendNode(11)
    newList.InsertNodeAtPosition(10, 3)
    newList.InsertNodeAtPosition(29, 4)
    # Bug fix: the original called MakeCircular() first, so PrintList()
    # (and a failed search) looped forever.  Traverse while the list is
    # still linear and demonstrate MakeCircular() last.
    newList.PrintList()
    print(newList.ValueAtPosition(3))
    inpData = int(input("Enter the data to be searched: "))
    print("The position of {} is {}".format(inpData, newList.SearchByValue(inpData)))
    newList.MakeCircular()
def __init__(self, data = None):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def AppendNode(self, data):
if self.head == None:
self.head = Node(data)
else:
newNode = Node(data)
curr = self.head
while curr.next!=None:
curr = curr.next
curr.next = newNode
def PrintList(self):
curr = self.head
print("The linked list is: ")
while curr!=None:
print("|" + str(curr.data) + "|->", end = '')
curr = curr.next
print("NULL")
def InsertNodeAtTheBeginning(self, data):
if self.head == None:
self.head = Node(data)
else:
newNode = Node(data)
newNode.next = self.head
self.head = newNode
def InsertNodeAtPosition(self, data, position):
if position == 1:
newNode = Node(data)
newNode.next = self.head
self.head = newNode
else:
counter = 1
curr = self.head
newNode = Node(data)
while counter!=position-1:
curr = curr.next
counter+=1
newNode.next = curr.next
curr.next = newNode
def DeleteNode(self, position):
prev = self.head
curr = self.head.next
counter = 1
while counter!= position-1:
prev = prev.next
curr = curr.next
counter+=1
prev.next = curr.next
def SearchByValue(self, data):
curr = self.head
counter = 1
while curr.data!=data:
curr = curr.next
counter+=1
return counter
def ValueAtPosition(self, position):
counter = 1
curr = self.head
while counter!=position:
curr = curr.next
counter+=1
return curr.data
def MakeCircular(self): # Infinite Loop
curr = self.head
while curr.next!=None:
curr = curr.next
curr.next = self.head
newList = LinkedList()
#newList.InsertNodeAtTheBeginning(1)
newList.AppendNode(4)
newList.AppendNode(5)
newList.AppendNode(7)
newList.AppendNode(9)
newList.AppendNode(11)
newList.InsertNodeAtPosition(10,3)
newList.InsertNodeAtPosition(29,4)
# newList.DeleteNode(3)
# newList.InsertNodeAtTheBeginning(10)
# newList.InsertNodeAtTheBeginning(17)
newList.MakeCircular()
newList.PrintList()
print(newList.ValueAtPosition(3))
inpData = int(input("Enter the data to be searched: "))
print("The position of {} is {}".format(inpData,newList.SearchByValue(inpData))) | 0.357231 | 0.386156 |
from flask_restful import fields
from flask_restful_swagger import swagger
class BaseResponse(object):
    """Base class for swagger response models.

    Subclasses declare ``resource_fields``; the constructor copies the
    matching keyword arguments onto the instance, defaulting missing
    fields to None and ignoring unknown keywords.
    """

    resource_fields = {}

    def __init__(self, **kwargs):
        for name in self.resource_fields:
            setattr(self, name, kwargs.get(name))
@swagger.model
class ResourceID(BaseResponse):
    """A bare resource identifier."""
    resource_fields = {
        'resource_id': fields.String
    }


@swagger.model
class SecretsListResponse(BaseResponse):
    """One entry of a secrets listing (value intentionally omitted)."""
    resource_fields = {
        'key': fields.String,
        'created_at': fields.String,
        'updated_at': fields.String,
        'resource_availability': fields.String,
        'visibility': fields.String,
        'tenant_name': fields.String,
        'created_by': fields.String,
        'is_hidden_value': fields.Boolean,
    }


@swagger.model
class UserResponse(BaseResponse):
    """A user with its tenant/group memberships and roles."""
    resource_fields = {
        'username': fields.String,
        'tenants': fields.Raw,
        'tenant_roles': fields.Raw,
        'groups': fields.Raw,
        'role': fields.String,
        'group_system_roles': fields.Raw,
        'active': fields.Boolean,
        'last_login_at': fields.String,
        'is_locked': fields.Boolean
    }


@swagger.model
class TenantResponse(BaseResponse):
    """A tenant with its groups, users and user roles."""
    resource_fields = {
        'name': fields.String,
        'groups': fields.Raw,
        'users': fields.Raw,
        'user_roles': fields.Raw
    }


@swagger.model
class TenantDetailsResponse(BaseResponse):
    """TenantResponse plus the tenant's RabbitMQ credentials."""
    resource_fields = {
        'name': fields.String,
        'groups': fields.Raw,
        'users': fields.Raw,
        'user_roles': fields.Raw,
        'rabbitmq_username': fields.String,
        'rabbitmq_password': fields.String,
        'rabbitmq_vhost': fields.String,
    }


@swagger.model
class AgentResponse(BaseResponse):
    """An installed agent and where it runs."""
    resource_fields = {
        'id': fields.String,
        'host_id': fields.String,
        'ip': fields.String,
        'install_method': fields.String,
        'system': fields.String,
        'version': fields.String,
        'node': fields.String,
        'deployment': fields.String,
        'tenant_name': fields.String
    }


@swagger.model
class DeploymentCapabilities(BaseResponse):
    """A deployment id together with its capabilities mapping."""
    resource_fields = {
        'deployment_id': fields.String,
        'capabilities': fields.Raw
    }


@swagger.model
class OperationResponse(BaseResponse):
    """A single operation with its current state."""
    resource_fields = {
        'id': fields.String,
        'name': fields.String,
        'state': fields.String,
    }


@swagger.model
class License(BaseResponse):
    """License details as exposed by the REST API."""
    resource_fields = {
        'customer_id': fields.String,
        'expiration_date': fields.String,
        'license_edition': fields.String,
        'trial': fields.Boolean,
        'cloudify_version': fields.String,
        'capabilities': fields.Raw,
        'expired': fields.Boolean
    }
from flask_restful import fields
from flask_restful_swagger import swagger
class BaseResponse(object):
resource_fields = {}
def __init__(self, **kwargs):
for name in self.resource_fields:
setattr(self, name, kwargs.get(name))
@swagger.model
class ResourceID(BaseResponse):
resource_fields = {
'resource_id': fields.String
}
@swagger.model
class SecretsListResponse(BaseResponse):
resource_fields = {
'key': fields.String,
'created_at': fields.String,
'updated_at': fields.String,
'resource_availability': fields.String,
'visibility': fields.String,
'tenant_name': fields.String,
'created_by': fields.String,
'is_hidden_value': fields.Boolean,
}
@swagger.model
class UserResponse(BaseResponse):
resource_fields = {
'username': fields.String,
'tenants': fields.Raw,
'tenant_roles': fields.Raw,
'groups': fields.Raw,
'role': fields.String,
'group_system_roles': fields.Raw,
'active': fields.Boolean,
'last_login_at': fields.String,
'is_locked': fields.Boolean
}
@swagger.model
class TenantResponse(BaseResponse):
resource_fields = {
'name': fields.String,
'groups': fields.Raw,
'users': fields.Raw,
'user_roles': fields.Raw
}
@swagger.model
class TenantDetailsResponse(BaseResponse):
resource_fields = {
'name': fields.String,
'groups': fields.Raw,
'users': fields.Raw,
'user_roles': fields.Raw,
'rabbitmq_username': fields.String,
'rabbitmq_password': fields.String,
'rabbitmq_vhost': fields.String,
}
@swagger.model
class AgentResponse(BaseResponse):
resource_fields = {
'id': fields.String,
'host_id': fields.String,
'ip': fields.String,
'install_method': fields.String,
'system': fields.String,
'version': fields.String,
'node': fields.String,
'deployment': fields.String,
'tenant_name': fields.String
}
@swagger.model
class DeploymentCapabilities(BaseResponse):
resource_fields = {
'deployment_id': fields.String,
'capabilities': fields.Raw
}
@swagger.model
class OperationResponse(BaseResponse):
resource_fields = {
'id': fields.String,
'name': fields.String,
'state': fields.String,
}
@swagger.model
class License(BaseResponse):
resource_fields = {
'customer_id': fields.String,
'expiration_date': fields.String,
'license_edition': fields.String,
'trial': fields.Boolean,
'cloudify_version': fields.String,
'capabilities': fields.Raw,
'expired': fields.Boolean
} | 0.758689 | 0.068819 |
import os
import json
import functools
import logging
import platform
import copy
from .exceptions import (
SaveWarningExc
)
from .constants import (
M_OVERRIDEN_KEY,
M_ENVIRONMENT_KEY,
METADATA_KEYS,
SYSTEM_SETTINGS_KEY,
PROJECT_SETTINGS_KEY,
PROJECT_ANATOMY_KEY,
DEFAULT_PROJECT_KEY
)
log = logging.getLogger(__name__)
# Py2 + Py3 json decode exception
JSON_EXC = getattr(json.decoder, "JSONDecodeError", ValueError)
# Path to default settings
DEFAULTS_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"defaults"
)
# Variable where cache of default settings are stored
_DEFAULT_SETTINGS = None
# Handler of studio overrides
_SETTINGS_HANDLER = None
# Handler of local settings
_LOCAL_SETTINGS_HANDLER = None
def require_handler(func):
    """Decorator ensuring the global settings handler exists before call.

    Lazily creates `_SETTINGS_HANDLER` on first use so the handler (and its
    imports) are not created at module import time.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _SETTINGS_HANDLER
        if _SETTINGS_HANDLER is None:
            _SETTINGS_HANDLER = create_settings_handler()
        return func(*args, **kwargs)
    return wrapper
def require_local_handler(func):
    """Decorator ensuring the global local-settings handler exists before call.

    Lazily creates `_LOCAL_SETTINGS_HANDLER` on first use.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _LOCAL_SETTINGS_HANDLER
        if _LOCAL_SETTINGS_HANDLER is None:
            _LOCAL_SETTINGS_HANDLER = create_local_settings_handler()
        return func(*args, **kwargs)
    return wrapper
def create_settings_handler():
    """Create handler used for studio settings storage (Mongo based)."""
    from .handlers import MongoSettingsHandler
    # Handler can't be created in global space on initialization but only when
    # needed. Plus here may be logic: Which handler is used (in future).
    return MongoSettingsHandler()
def create_local_settings_handler():
    """Create handler used for local settings storage (Mongo based)."""
    from .handlers import MongoLocalSettingsHandler
    return MongoLocalSettingsHandler()
def calculate_changes(old_value, new_value):
    """Return nested dictionary of values that differ in `new_value`.

    Keys missing from `old_value` are reported with their full new value.
    Dictionaries on both sides are compared recursively; only sub-dicts
    with actual differences appear in the output.

    Args:
        old_value (dict): Previous values.
        new_value (dict): New values to compare against the old ones.

    Returns:
        dict: Hierarchy of changed values only.
    """
    diff = {}
    for key, new_item in new_value.items():
        if key not in old_value:
            diff[key] = new_item
            continue

        old_item = old_value[key]
        if isinstance(new_item, dict) and isinstance(old_item, dict):
            sub_diff = calculate_changes(old_item, new_item)
            if sub_diff:
                diff[key] = sub_diff
        elif old_item != new_item:
            diff[key] = new_item
    return diff
@require_handler
def save_studio_settings(data):
    """Save studio overrides of system settings.

    Triggers callbacks on modules that want to know about system settings
    changes.

    Callbacks are triggered on all modules. They must check if their enabled
    value has changed.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        data(dict): Overrides data with metadata defying studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    # Listeners receive full settings (defaults + overrides), not raw data
    old_data = get_system_settings()
    default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
    new_data = apply_overrides(default_values, copy.deepcopy(data))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    # Manager is created with the new settings so module enabled states match
    #   the data being saved
    modules_manager = ModulesManager(_system_settings=new_data)
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_system_settings_save(
                    old_data, new_data, changes, new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_studio_settings(data)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
@require_handler
def save_project_settings(project_name, overrides):
    """Save studio overrides of project settings.

    Old value, new value and changes are passed to enabled modules that want to
    know about settings changes.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        project_name (str): Project name for which overrides are passed.
            Default project's value is None.
        overrides(dict): Overrides data with metadata defying studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    default_values = get_default_settings()[PROJECT_SETTINGS_KEY]
    if project_name:
        # Project overrides sit on top of studio values (defaults + studio
        #   overrides)
        old_data = get_project_settings(project_name)
        studio_overrides = get_studio_project_settings_overrides()
        studio_values = apply_overrides(default_values, studio_overrides)
        clear_metadata_from_settings(studio_values)
        new_data = apply_overrides(studio_values, copy.deepcopy(overrides))
    else:
        # Saving the default project: overrides sit directly on defaults
        old_data = get_default_project_settings(exclude_locals=True)
        new_data = apply_overrides(default_values, copy.deepcopy(overrides))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    modules_manager = ModulesManager()
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_project_settings_save(
                    old_data,
                    new_data,
                    project_name,
                    changes,
                    new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_project_settings(project_name, overrides)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
@require_handler
def save_project_anatomy(project_name, anatomy_data):
    """Save studio overrides of project anatomy.

    Old value, new value and changes are passed to enabled modules that want to
    know about settings changes.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        project_name (str): Project name for which overrides are passed.
            Default project's value is None.
        anatomy_data (dict): Anatomy overrides data with metadata defying
            studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    default_values = get_default_settings()[PROJECT_ANATOMY_KEY]
    if project_name:
        old_data = get_anatomy_settings(project_name)
        # BUGFIX: studio base must come from anatomy overrides, not project
        #   settings overrides (copy-paste from `save_project_settings`).
        #   This matches `get_default_anatomy_settings`.
        studio_overrides = get_studio_project_anatomy_overrides()
        studio_values = apply_overrides(default_values, studio_overrides)
        clear_metadata_from_settings(studio_values)
        new_data = apply_overrides(studio_values, copy.deepcopy(anatomy_data))
    else:
        # Saving the default project: overrides sit directly on defaults
        old_data = get_default_anatomy_settings(exclude_locals=True)
        new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    modules_manager = ModulesManager()
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_project_anatomy_save(
                    old_data,
                    new_data,
                    changes,
                    project_name,
                    new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
@require_handler
def get_studio_system_settings_overrides():
    """Studio overrides of system settings from the settings handler."""
    return _SETTINGS_HANDLER.get_studio_system_settings_overrides()
@require_handler
def get_studio_project_settings_overrides():
    """Studio overrides of default project settings from the handler."""
    return _SETTINGS_HANDLER.get_studio_project_settings_overrides()
@require_handler
def get_studio_project_anatomy_overrides():
    """Studio overrides of default project anatomy from the handler."""
    return _SETTINGS_HANDLER.get_studio_project_anatomy_overrides()
@require_handler
def get_project_settings_overrides(project_name):
    """Project specific settings overrides from the settings handler."""
    return _SETTINGS_HANDLER.get_project_settings_overrides(project_name)
@require_handler
def get_project_anatomy_overrides(project_name):
    """Project specific anatomy overrides from the settings handler."""
    return _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name)
@require_local_handler
def save_local_settings(data):
    """Store local settings data using the local settings handler."""
    return _LOCAL_SETTINGS_HANDLER.save_local_settings(data)
@require_local_handler
def get_local_settings():
    """Load local settings data using the local settings handler."""
    return _LOCAL_SETTINGS_HANDLER.get_local_settings()
class DuplicatedEnvGroups(Exception):
    """Raised when the same environment group key is defined multiple times.

    Attributes:
        origin_duplicated (dict): Original mapping of group key to list of
            items (each with "parents" list) as received.
        duplicated (dict): Group key mapped to "/"-joined parent paths.
    """

    def __init__(self, duplicated):
        self.origin_duplicated = duplicated
        self.duplicated = {
            env_key: [
                "/".join(item["parents"])
                for item in items
            ]
            for env_key, items in duplicated.items()
        }
        joined_keys = ", ".join(
            "\"{}\"".format(env_key) for env_key in self.duplicated.keys()
        )
        msg = "Duplicated environment group keys. {}".format(joined_keys)
        super(DuplicatedEnvGroups, self).__init__(msg)
def load_openpype_default_settings():
    """Load openpype default settings.

    Returns:
        dict: Json files content from `DEFAULTS_DIR` recreated as hierarchy.
    """
    return load_jsons_from_dir(DEFAULTS_DIR)
def reset_default_settings():
    """Reset cache of default settings.

    Next call of `get_default_settings` will reload the defaults.
    NOTE(review): original docstring said "Can't be used now." — confirm
    whether callers are allowed to invalidate the cache at runtime.
    """
    global _DEFAULT_SETTINGS
    _DEFAULT_SETTINGS = None
def _get_default_settings():
    """Load openpype defaults and merge in module-provided defaults.

    Module settings definitions return mappings of "a/b/c" style paths to
    values which are set into the loaded defaults hierarchy.

    Returns:
        dict: Default settings with module defaults applied.
    """
    from openpype.modules import get_module_settings_defs
    defaults = load_openpype_default_settings()
    module_settings_defs = get_module_settings_defs()
    for module_settings_def_cls in module_settings_defs:
        module_settings_def = module_settings_def_cls()
        system_defaults = module_settings_def.get_defaults(
            SYSTEM_SETTINGS_KEY
        ) or {}
        for path, value in system_defaults.items():
            if not path:
                continue
            # System paths are relative to the "system_settings" subtree
            subdict = defaults["system_settings"]
            path_items = list(path.split("/"))
            last_key = path_items.pop(-1)
            for key in path_items:
                subdict = subdict[key]
            subdict[last_key] = value
        project_defaults = module_settings_def.get_defaults(
            PROJECT_SETTINGS_KEY
        ) or {}
        for path, value in project_defaults.items():
            if not path:
                continue
            # NOTE(review): project paths start at the root of `defaults`
            #   (unlike system paths) — presumably they include the
            #   "project_settings" key themselves; confirm with module defs.
            subdict = defaults
            path_items = list(path.split("/"))
            last_key = path_items.pop(-1)
            for key in path_items:
                subdict = subdict[key]
            subdict[last_key] = value
    return defaults
def get_default_settings():
    """Get default settings.

    Defaults are loaded once and cached in module variable; a deep copy of
    the cache is returned so callers can safely modify the result.

    Returns:
        dict: Loaded default settings.
    """
    global _DEFAULT_SETTINGS
    if _DEFAULT_SETTINGS is None:
        _DEFAULT_SETTINGS = _get_default_settings()
    return copy.deepcopy(_DEFAULT_SETTINGS)
def load_json_file(fpath):
    """Load content of a single json file.

    Args:
        fpath (str): Path to json file.

    Returns:
        dict: Parsed content, or empty dict when the file contains
            invalid json (a warning is logged).
    """
    try:
        with open(fpath, "r") as stream:
            return json.load(stream)
    except JSON_EXC:
        log.warning(
            "File has invalid json format \"{}\"".format(fpath),
            exc_info=True
        )
    return {}
def load_jsons_from_dir(path, *args, **kwargs):
    """Load all .json files with content from entered folder path.

    Data are loaded recursively from a directory and recreate the
    hierarchy as a dictionary.

    Entered path hiearchy:
    |_ folder1
    | |_ data1.json
    |_ folder2
      |_ subfolder1
        |_ data2.json

    Will result in:
    ```javascript
    {
        "folder1": {
            "data1": "CONTENT OF FILE"
        },
        "folder2": {
            "subfolder1": {
                "data2": "CONTENT OF FILE"
            }
        }
    }
    ```

    Args:
        path (str): Path to the root folder where the json hierarchy starts.
        *args: Optional subkeys; loading is narrowed to the deepest existing
            subfolder and the result is unwrapped by the remaining keys.
        **kwargs: `subkeys` (list) may be passed instead of positional args.

    Returns:
        dict: Loaded data.
    """
    output = {}
    path = os.path.normpath(path)
    if not os.path.exists(path):
        # TODO warning
        return output
    sub_keys = list(kwargs.pop("subkeys", args))
    # Descend into existing subkey folders; keys whose folder exists are
    #   consumed here, the rest are used to unwrap the loaded output below
    for sub_key in tuple(sub_keys):
        _path = os.path.join(path, sub_key)
        if not os.path.exists(_path):
            break
        path = _path
        sub_keys.pop(0)
    # Offset used to convert absolute walk paths to relative key paths
    base_len = len(path) + 1
    for base, _directories, filenames in os.walk(path):
        base_items_str = base[base_len:]
        if not base_items_str:
            base_items = []
        else:
            base_items = base_items_str.split(os.path.sep)
        for filename in filenames:
            basename, ext = os.path.splitext(filename)
            if ext == ".json":
                full_path = os.path.join(base, filename)
                value = load_json_file(full_path)
                dict_keys = base_items + [basename]
                output = subkey_merge(output, value, dict_keys)
    # Unwrap output by subkeys that did not match an existing folder
    for sub_key in sub_keys:
        output = output[sub_key]
    return output
def find_environments(data, with_items=False, parents=None):
    """Find environment values from system settings by it's metadata.

    Args:
        data(dict): System settings data or dictionary which may contain
            environments metadata.
        with_items(bool): Internal recursion flag; when True the raw items
            (env values with their parent paths) are returned instead of
            the final flattened output.
        parents(list): Internal recursion accumulator of dictionary keys
            leading to `data`.

    Returns:
        dict: Key as Environment key and value for `acre` module.

    Raises:
        DuplicatedEnvGroups: When the same environment group key is defined
            on multiple places (only in the top-level, non-recursive call).
    """
    if not data or not isinstance(data, dict):
        return {}
    output = {}
    if parents is None:
        parents = []
    # Collect environment values referenced by metadata on this level
    if M_ENVIRONMENT_KEY in data:
        metadata = data.get(M_ENVIRONMENT_KEY)
        for env_group_key, env_keys in metadata.items():
            if env_group_key not in output:
                output[env_group_key] = []
            _env_values = {}
            for key in env_keys:
                _env_values[key] = data[key]
            item = {
                "env": _env_values,
                "parents": parents[:-1]
            }
            output[env_group_key].append(item)
    # Recurse into sub-dictionaries and merge their groups
    for key, value in data.items():
        _parents = copy.deepcopy(parents)
        _parents.append(key)
        result = find_environments(value, True, _parents)
        if not result:
            continue
        for env_group_key, env_values in result.items():
            if env_group_key not in output:
                output[env_group_key] = []
            for env_values_item in env_values:
                output[env_group_key].append(env_values_item)
    if with_items:
        return output
    # Top-level call: each group must be defined exactly once
    duplicated_env_groups = {}
    final_output = {}
    for key, value_in_list in output.items():
        if len(value_in_list) > 1:
            duplicated_env_groups[key] = value_in_list
        else:
            final_output[key] = value_in_list[0]["env"]
    if duplicated_env_groups:
        raise DuplicatedEnvGroups(duplicated_env_groups)
    return final_output
def subkey_merge(_dict, value, keys):
    """Set `value` into `_dict` under the nested key path `keys`.

    Missing intermediate dictionaries are created. Note that `keys` is
    consumed (popped) during the recursion.

    Args:
        _dict (dict): Dictionary to modify.
        value: Value to set.
        keys (list): Key path, e.g. ["a", "b"] sets `_dict["a"]["b"]`.

    Returns:
        dict: The modified `_dict`.
    """
    current_key = keys.pop(0)
    if not keys:
        _dict[current_key] = value
    else:
        _dict.setdefault(current_key, {})
        _dict[current_key] = subkey_merge(_dict[current_key], value, keys)
    return _dict
def merge_overrides(source_dict, override_dict):
    """Merge data from override_dict to source_dict.

    Keys listed in the override metadata (`M_OVERRIDEN_KEY`) replace the
    source value fully; other dict values are merged recursively; anything
    else replaces the source value.

    Args:
        source_dict (dict): Base data, modified in place.
        override_dict (dict): Overrides to merge in (metadata key popped).

    Returns:
        dict: The modified `source_dict`.
    """
    overriden_keys = set(override_dict.pop(M_OVERRIDEN_KEY, []))
    for key, value in override_dict.items():
        recurse = (
            key not in overriden_keys
            and isinstance(value, dict)
            and isinstance(source_dict.get(key), dict)
        )
        if recurse:
            source_dict[key] = merge_overrides(source_dict[key], value)
        else:
            source_dict[key] = value
    return source_dict
def apply_overrides(source_data, override_data):
    """Return `source_data` with `override_data` merged in.

    When overrides are empty the source is returned as-is; otherwise a deep
    copy of the source is merged so the original is never modified.
    """
    if not override_data:
        return source_data
    return merge_overrides(copy.deepcopy(source_data), override_data)
def apply_local_settings_on_system_settings(system_settings, local_settings):
    """Apply local settings on studio system settings.

    ATM local settings can modify only application executables. Executable
    values are not overriden but prepended.

    Args:
        system_settings (dict): System settings data, modified in place.
        local_settings (dict): Local settings data with optional
            "applications" key.
    """
    if not local_settings or "applications" not in local_settings:
        return

    current_platform = platform.system().lower()
    for app_group_name, value in local_settings["applications"].items():
        if not value or app_group_name not in system_settings["applications"]:
            continue

        variants = system_settings["applications"][app_group_name]["variants"]
        for app_name, app_value in value.items():
            if (
                not app_value
                or app_name not in variants
                or "executables" not in variants[app_name]
            ):
                continue

            executable = app_value.get("executable")
            if not executable:
                continue

            # BUGFIX: `.get` may return None when the current platform has
            #   no executables entry; extending a list with None raised
            #   TypeError. Fall back to an empty list.
            platform_executables = variants[app_name]["executables"].get(
                current_platform
            ) or []
            # TODO This is temporary fix until launch arguments will be stored
            #   per platform and not per executable.
            # - local settings store only executable
            # NOTE(review): this replaces the per-platform executables dict
            #   with a flat list — confirm downstream consumers expect that.
            new_executables = [executable]
            new_executables.extend(platform_executables)
            variants[app_name]["executables"] = new_executables
def apply_local_settings_on_anatomy_settings(
    anatomy_settings, local_settings, project_name, site_name=None
):
    """Apply local settings on anatomy settings.

    ATM local settings can modify project roots. Project name is required as
    local settings have data stored data by project's name.

    Local settings override root values in this order:
    1.) Check if local settings contain overrides for default project and
        apply it's values on roots if there are any.
    2.) If passed `project_name` is not None then check project specific
        overrides in local settings for the project and apply it's value on
        roots if there are any.

    NOTE: Root values of default project from local settings are always applied
        if are set.

    Args:
        anatomy_settings (dict): Data for anatomy settings, modified in place.
        local_settings (dict): Data of local settings.
        project_name (str): Name of project for which anatomy data are.
        site_name (str): Site whose root overrides should apply; resolved
            from sync server active site when not passed.
    """
    if not local_settings:
        return
    local_project_settings = local_settings.get("projects") or {}
    # Check for roots existence in local settings first
    # NOTE(review): these hold the whole per-project local data, not only
    #   roots — site keys are filtered below; confirm naming intent.
    roots_project_locals = (
        local_project_settings
        .get(project_name, {})
    )
    roots_default_locals = (
        local_project_settings
        .get(DEFAULT_PROJECT_KEY, {})
    )
    # Skip rest of processing if roots are not set
    if not roots_project_locals and not roots_default_locals:
        return
    # Get active site from settings
    if site_name is None:
        if project_name:
            project_settings = get_project_settings(project_name)
        else:
            project_settings = get_default_project_settings()
        site_name = (
            project_settings["global"]["sync_server"]["config"]["active_site"]
        )
    # QUESTION should raise an exception?
    if not site_name:
        return
    # Combine roots from local settings
    # Default project values first, project specific values override them
    roots_locals = roots_default_locals.get(site_name) or {}
    roots_locals.update(roots_project_locals.get(site_name) or {})
    # Skip processing if roots for current active site are not available in
    # local settings
    if not roots_locals:
        return
    current_platform = platform.system().lower()
    root_data = anatomy_settings["roots"]
    # Only known root names are applied; unknown keys are ignored
    for root_name, path in roots_locals.items():
        if root_name not in root_data:
            continue
        anatomy_settings["roots"][root_name][current_platform] = (
            path
        )
def get_site_local_overrides(project_name, site_name, local_settings=None):
    """Site overrides from local settings for passed project and site name.

    Default project values are applied first and project specific values
    override them.

    Args:
        project_name (str): For which project are overrides.
        site_name (str): For which site are overrides needed.
        local_settings (dict): Preloaded local settings. They are loaded
            automatically if not passed.

    Returns:
        dict: Combined site overrides (may be empty).
    """
    # Check if local settings were passed
    if local_settings is None:
        local_settings = get_local_settings()

    output = {}
    # Skip if local settings are empty
    if not local_settings:
        return output

    local_project_settings = local_settings.get("projects") or {}

    # Sources in application order: defaults first, project second
    override_sources = [local_project_settings.get(DEFAULT_PROJECT_KEY)]
    if project_name:
        override_sources.append(local_project_settings.get(project_name))

    for source in override_sources:
        if source and site_name in source:
            output.update(source[site_name])
    return output
def apply_local_settings_on_project_settings(
    project_settings, local_settings, project_name
):
    """Apply local settings on project settings.

    Currently is modifying active site and remote site in sync server.

    Args:
        project_settings (dict): Data for project settings, modified in place.
        local_settings (dict): Data of local settings.
        project_name (str): Name of project for which settings data are.
    """
    if not local_settings:
        return

    local_project_settings = local_settings.get("projects")
    if not local_project_settings:
        return

    project_locals = local_project_settings.get(project_name) or {}
    default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {}

    sync_server_config = project_settings["global"]["sync_server"]["config"]
    # Project specific value wins, default project value is the fallback
    for site_key in ("active_site", "remote_site"):
        site_value = (
            project_locals.get(site_key)
            or default_locals.get(site_key)
        )
        if site_value:
            sync_server_config[site_key] = site_value
def get_system_settings(clear_metadata=True, exclude_locals=None):
    """System settings with applied studio overrides.

    Args:
        clear_metadata (bool): Remove override metadata from the result.
        exclude_locals (bool): Skip application of local settings; defaults
            to `not clear_metadata` when not passed.
    """
    default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
    studio_values = get_studio_system_settings_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    # Default behavior is based on `clear_metadata` value
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        # TODO local settings may be required to apply for environments
        local_settings = get_local_settings()
        apply_local_settings_on_system_settings(result, local_settings)
    return result
def get_default_project_settings(clear_metadata=True, exclude_locals=None):
    """Project settings with applied studio's default project overrides.

    Args:
        clear_metadata (bool): Remove override metadata from the result.
        exclude_locals (bool): Skip application of local settings; defaults
            to `not clear_metadata` when not passed.
    """
    default_values = get_default_settings()[PROJECT_SETTINGS_KEY]
    studio_values = get_studio_project_settings_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_project_settings(
            result, local_settings, None
        )
    return result
def get_default_anatomy_settings(clear_metadata=True, exclude_locals=None):
    """Project anatomy data with applied studio's default project overrides.

    Args:
        clear_metadata (bool): Remove override metadata from the result.
        exclude_locals (bool): Skip application of local settings; defaults
            to `not clear_metadata` when not passed.
    """
    default_values = get_default_settings()[PROJECT_ANATOMY_KEY]
    studio_values = get_studio_project_anatomy_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_anatomy_settings(
            result, local_settings, None
        )
    return result
def get_anatomy_settings(
    project_name, site_name=None, clear_metadata=True, exclude_locals=None
):
    """Project anatomy data with applied studio and project overrides.

    Args:
        project_name (str): Project for which to load anatomy (required).
        site_name (str): Site whose local root overrides should apply.
        clear_metadata (bool): Remove override metadata from the result.
        exclude_locals (bool): Skip application of local settings; defaults
            to `not clear_metadata` when not passed.

    Raises:
        ValueError: When `project_name` is empty.
    """
    if not project_name:
        raise ValueError(
            "Must enter project name. Call "
            "`get_default_anatomy_settings` to get project defaults."
        )
    studio_overrides = get_default_anatomy_settings(False)
    project_overrides = get_project_anatomy_overrides(
        project_name
    )
    result = copy.deepcopy(studio_overrides)
    # NOTE(review): project overrides are merged only on top level keys here
    #   (unlike `apply_overrides` deep merge) — confirm this is intended.
    if project_overrides:
        for key, value in project_overrides.items():
            result[key] = value
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_anatomy_settings(
            result, local_settings, project_name, site_name
        )
    return result
def get_project_settings(
    project_name, clear_metadata=True, exclude_locals=None
):
    """Project settings with applied studio and project overrides.

    Args:
        project_name (str): Project for which to load settings (required).
        clear_metadata (bool): Remove override metadata from the result.
        exclude_locals (bool): Skip application of local settings; defaults
            to `not clear_metadata` when not passed.

    Raises:
        ValueError: When `project_name` is empty.
    """
    if not project_name:
        raise ValueError(
            "Must enter project name."
            " Call `get_default_project_settings` to get project defaults."
        )
    studio_overrides = get_default_project_settings(False)
    project_overrides = get_project_settings_overrides(
        project_name
    )
    result = apply_overrides(studio_overrides, project_overrides)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_project_settings(
            result, local_settings, project_name
        )
    return result
def get_current_project_settings():
    """Project settings for current context project.

    Project name should be stored in environment variable `AVALON_PROJECT`.
    This function should be used only in host context where the environment
    variable must be set and should not happen that any part of process will
    change the value of the environment variable.

    Raises:
        ValueError: When `AVALON_PROJECT` environment variable is not set.
    """
    project_name = os.environ.get("AVALON_PROJECT")
    if not project_name:
        # Fixed typo in the error message ("environemt")
        raise ValueError(
            "Missing context project in environment variable `AVALON_PROJECT`."
        )
    return get_project_settings(project_name)
def get_environments():
    """Calculated environment based on defaults and system settings.

    Any default environment also found in the system settings will be fully
    overriden by the one from the system settings.

    Returns:
        dict: Output should be ready for `acre` module.

    Raises:
        DuplicatedEnvGroups: When an environment group key is defined on
            multiple places in system settings.
    """
    return find_environments(get_system_settings(False))
def get_general_environments():
    """Get general environments.

    Function is implemented to be able load general environments without using
    `get_default_settings`.

    Returns:
        dict: Value of "general/environment" from system settings with
            studio overrides applied and metadata removed.
    """
    # Use only openpype defaults.
    # - prevent to use `get_system_settings` where `get_default_settings`
    #   is used
    default_values = load_openpype_default_settings()
    system_settings = default_values["system_settings"]
    studio_overrides = get_studio_system_settings_overrides()
    result = apply_overrides(system_settings, studio_overrides)
    environments = result["general"]["environment"]
    clear_metadata_from_settings(environments)
    return environments
def clear_metadata_from_settings(values):
    """Remove all metadata keys from loaded settings.

    Recursively walks dictionaries and lists in place; keys listed in
    `METADATA_KEYS` are popped, all other values are processed recursively.
    """
    if isinstance(values, dict):
        # Iterate over a tuple copy so keys can be popped while looping
        for key in tuple(values.keys()):
            if key in METADATA_KEYS:
                values.pop(key)
            else:
                clear_metadata_from_settings(values[key])
    elif isinstance(values, list):
        for item in values:
clear_metadata_from_settings(item) | openpype/settings/lib.py | import os
import json
import functools
import logging
import platform
import copy
from .exceptions import (
SaveWarningExc
)
from .constants import (
M_OVERRIDEN_KEY,
M_ENVIRONMENT_KEY,
METADATA_KEYS,
SYSTEM_SETTINGS_KEY,
PROJECT_SETTINGS_KEY,
PROJECT_ANATOMY_KEY,
DEFAULT_PROJECT_KEY
)
log = logging.getLogger(__name__)
# Py2 + Py3 json decode exception
JSON_EXC = getattr(json.decoder, "JSONDecodeError", ValueError)
# Path to default settings
DEFAULTS_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"defaults"
)
# Variable where cache of default settings are stored
_DEFAULT_SETTINGS = None
# Handler of studio overrides
_SETTINGS_HANDLER = None
# Handler of local settings
_LOCAL_SETTINGS_HANDLER = None
def require_handler(func):
    """Decorator ensuring the global settings handler exists before call.

    Lazily creates `_SETTINGS_HANDLER` on first use so the handler (and its
    imports) are not created at module import time.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _SETTINGS_HANDLER
        if _SETTINGS_HANDLER is None:
            _SETTINGS_HANDLER = create_settings_handler()
        return func(*args, **kwargs)
    return wrapper
def require_local_handler(func):
    """Decorator ensuring the global local-settings handler exists before call.

    Lazily creates `_LOCAL_SETTINGS_HANDLER` on first use.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        global _LOCAL_SETTINGS_HANDLER
        if _LOCAL_SETTINGS_HANDLER is None:
            _LOCAL_SETTINGS_HANDLER = create_local_settings_handler()
        return func(*args, **kwargs)
    return wrapper
def create_settings_handler():
    """Create handler used for studio settings storage (Mongo based)."""
    from .handlers import MongoSettingsHandler
    # Handler can't be created in global space on initialization but only when
    # needed. Plus here may be logic: Which handler is used (in future).
    return MongoSettingsHandler()
def create_local_settings_handler():
    """Create handler used for local settings storage (Mongo based)."""
    from .handlers import MongoLocalSettingsHandler
    return MongoLocalSettingsHandler()
def calculate_changes(old_value, new_value):
    """Return nested dictionary containing only values changed in new data.

    New keys are reported with their full value; dict values present on both
    sides are diffed recursively and included only when non-empty.

    Args:
        old_value (dict): Previous values.
        new_value (dict): New values.

    Returns:
        dict: Hierarchy of changed values.
    """
    changed = {}
    for key, new_item in new_value.items():
        if key not in old_value:
            changed[key] = new_item
            continue

        prev_item = old_value[key]
        if isinstance(new_item, dict) and isinstance(prev_item, dict):
            nested_changes = calculate_changes(prev_item, new_item)
            if nested_changes:
                changed[key] = nested_changes
        elif prev_item != new_item:
            changed[key] = new_item
    return changed
@require_handler
def save_studio_settings(data):
    """Save studio overrides of system settings.

    Triggers callbacks on modules that want to know about system settings
    changes.

    Callbacks are triggered on all modules. They must check if their enabled
    value has changed.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        data(dict): Overrides data with metadata defying studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    # Listeners receive full settings (defaults + overrides), not raw data
    old_data = get_system_settings()
    default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
    new_data = apply_overrides(default_values, copy.deepcopy(data))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    # Manager is created with the new settings so module enabled states match
    #   the data being saved
    modules_manager = ModulesManager(_system_settings=new_data)
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_system_settings_save(
                    old_data, new_data, changes, new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_studio_settings(data)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
@require_handler
def save_project_settings(project_name, overrides):
    """Save studio overrides of project settings.

    Old value, new value and changes are passed to enabled modules that want to
    know about settings changes.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        project_name (str): Project name for which overrides are passed.
            Default project's value is None.
        overrides(dict): Overrides data with metadata defying studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    default_values = get_default_settings()[PROJECT_SETTINGS_KEY]
    if project_name:
        # Project overrides sit on top of studio values (defaults + studio
        #   overrides)
        old_data = get_project_settings(project_name)
        studio_overrides = get_studio_project_settings_overrides()
        studio_values = apply_overrides(default_values, studio_overrides)
        clear_metadata_from_settings(studio_values)
        new_data = apply_overrides(studio_values, copy.deepcopy(overrides))
    else:
        # Saving the default project: overrides sit directly on defaults
        old_data = get_default_project_settings(exclude_locals=True)
        new_data = apply_overrides(default_values, copy.deepcopy(overrides))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    modules_manager = ModulesManager()
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_project_settings_save(
                    old_data,
                    new_data,
                    project_name,
                    changes,
                    new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_project_settings(project_name, overrides)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
@require_handler
def save_project_anatomy(project_name, anatomy_data):
    """Save studio overrides of project anatomy.

    Old value, new value and changes are passed to enabled modules that want to
    know about settings changes.

    For saving of data cares registered Settings handler.

    Warning messages are not logged as module raising them should log it within
    it's logger.

    Args:
        project_name (str): Project name for which overrides are passed.
            Default project's value is None.
        anatomy_data (dict): Anatomy overrides data with metadata defying
            studio overrides.

    Raises:
        SaveWarningExc: If any module raises the exception.
    """
    # Notify Pype modules
    from openpype.modules import ModulesManager
    from openpype_interfaces import ISettingsChangeListener
    default_values = get_default_settings()[PROJECT_ANATOMY_KEY]
    if project_name:
        old_data = get_anatomy_settings(project_name)
        # BUGFIX: studio base must come from anatomy overrides, not project
        #   settings overrides (copy-paste from `save_project_settings`).
        #   This matches `get_default_anatomy_settings`.
        studio_overrides = get_studio_project_anatomy_overrides()
        studio_values = apply_overrides(default_values, studio_overrides)
        clear_metadata_from_settings(studio_values)
        new_data = apply_overrides(studio_values, copy.deepcopy(anatomy_data))
    else:
        # Saving the default project: overrides sit directly on defaults
        old_data = get_default_anatomy_settings(exclude_locals=True)
        new_data = apply_overrides(default_values, copy.deepcopy(anatomy_data))
    # Keep a copy with metadata for listeners that need it
    new_data_with_metadata = copy.deepcopy(new_data)
    clear_metadata_from_settings(new_data)
    changes = calculate_changes(old_data, new_data)
    modules_manager = ModulesManager()
    warnings = []
    for module in modules_manager.get_enabled_modules():
        if isinstance(module, ISettingsChangeListener):
            try:
                module.on_project_anatomy_save(
                    old_data,
                    new_data,
                    changes,
                    project_name,
                    new_data_with_metadata
                )
            except SaveWarningExc as exc:
                warnings.extend(exc.warnings)
    # Raw overrides (with metadata) are what gets persisted
    _SETTINGS_HANDLER.save_project_anatomy(project_name, anatomy_data)
    # Warnings are re-raised only after save so they don't block persistence
    if warnings:
        raise SaveWarningExc(warnings)
# Thin pass-through wrappers around the registered settings handler objects.
# The decorators presumably make sure the handler singletons exist before
# use (defined earlier in this file) — TODO confirm.
@require_handler
def get_studio_system_settings_overrides():
    """Studio overrides of system settings."""
    return _SETTINGS_HANDLER.get_studio_system_settings_overrides()
@require_handler
def get_studio_project_settings_overrides():
    """Studio overrides of default project settings."""
    return _SETTINGS_HANDLER.get_studio_project_settings_overrides()
@require_handler
def get_studio_project_anatomy_overrides():
    """Studio overrides of default project anatomy."""
    return _SETTINGS_HANDLER.get_studio_project_anatomy_overrides()
@require_handler
def get_project_settings_overrides(project_name):
    """Project specific settings overrides of 'project_name'."""
    return _SETTINGS_HANDLER.get_project_settings_overrides(project_name)
@require_handler
def get_project_anatomy_overrides(project_name):
    """Project specific anatomy overrides of 'project_name'."""
    return _SETTINGS_HANDLER.get_project_anatomy_overrides(project_name)
@require_local_handler
def save_local_settings(data):
    """Store machine specific local settings data."""
    return _LOCAL_SETTINGS_HANDLER.save_local_settings(data)
@require_local_handler
def get_local_settings():
    """Machine specific local settings data."""
    return _LOCAL_SETTINGS_HANDLER.get_local_settings()
class DuplicatedEnvGroups(Exception):
    """Raised when the same environment group key is defined multiple times."""

    def __init__(self, duplicated):
        # Keep the raw mapping for callers that need full item details.
        self.origin_duplicated = duplicated
        # Map each group key to human readable "parent/child" paths.
        self.duplicated = {
            key: ["/".join(item["parents"]) for item in items]
            for key, items in duplicated.items()
        }
        joined_keys = ", ".join(
            "\"{}\"".format(env_key) for env_key in self.duplicated
        )
        msg = "Duplicated environment group keys. {}".format(joined_keys)
        super(DuplicatedEnvGroups, self).__init__(msg)
def load_openpype_default_settings():
    """Load openpype default settings from json files under DEFAULTS_DIR."""
    return load_jsons_from_dir(DEFAULTS_DIR)
def reset_default_settings():
    """Reset cache of default settings. Can't be used now."""
    # Drop the module level cache; the next 'get_default_settings' call
    # will reload defaults from disk.
    global _DEFAULT_SETTINGS
    _DEFAULT_SETTINGS = None
def _apply_path_defaults(root, path_defaults):
    """Set values into nested dict 'root' by "a/b/last" slash paths.

    Empty paths are skipped. Intermediate keys must already exist in 'root'.
    """
    for path, value in path_defaults.items():
        if not path:
            continue
        subdict = root
        path_items = list(path.split("/"))
        last_key = path_items.pop(-1)
        for key in path_items:
            subdict = subdict[key]
        subdict[last_key] = value


def _get_default_settings():
    """Load openpype defaults and layer modules' settings defaults on top.

    Each module settings definition may provide defaults addressed by
    slash-separated paths; system defaults are rooted at the
    "system_settings" subtree, project defaults at the top level.

    Returns:
        dict: Default settings with module defaults applied.
    """
    from openpype.modules import get_module_settings_defs

    defaults = load_openpype_default_settings()
    module_settings_defs = get_module_settings_defs()
    for module_settings_def_cls in module_settings_defs:
        module_settings_def = module_settings_def_cls()
        system_defaults = module_settings_def.get_defaults(
            SYSTEM_SETTINGS_KEY
        ) or {}
        _apply_path_defaults(defaults["system_settings"], system_defaults)

        project_defaults = module_settings_def.get_defaults(
            PROJECT_SETTINGS_KEY
        ) or {}
        _apply_path_defaults(defaults, project_defaults)
    return defaults
def get_default_settings():
    """Get default settings.

    Defaults are loaded lazily on first call and cached in module global
    '_DEFAULT_SETTINGS'. A deep copy is returned so callers can modify
    the result without corrupting the cache.

    Returns:
        dict: Loaded default settings.
    """
    global _DEFAULT_SETTINGS
    if _DEFAULT_SETTINGS is None:
        _DEFAULT_SETTINGS = _get_default_settings()
    return copy.deepcopy(_DEFAULT_SETTINGS)
def load_json_file(fpath):
    """Read a json file and return its content.

    Files with invalid json format are logged and treated as empty: an
    empty dict is returned instead of raising. Other errors (e.g. missing
    file) propagate to the caller.
    """
    try:
        with open(fpath, "r") as stream:
            content = json.load(stream)
    except JSON_EXC:
        log.warning(
            "File has invalid json format \"{}\"".format(fpath),
            exc_info=True
        )
        content = {}
    return content
def load_jsons_from_dir(path, *args, **kwargs):
    """Load all .json files with content from entered folder path.

    Data are loaded recursively from a directory and recreate the
    hierarchy as a dictionary.

    Entered path hiearchy:
    |_ folder1
    | |_ data1.json
    |_ folder2
      |_ subfolder1
        |_ data2.json

    Will result in:
    ```javascript
    {
        "folder1": {
            "data1": "CONTENT OF FILE"
        },
        "folder2": {
            "subfolder1": {
                "data2": "CONTENT OF FILE"
            }
        }
    }
    ```

    Args:
        path (str): Path to the root folder where the json hierarchy starts.
        *args: Optional subkeys narrowing the returned data.
        **kwargs: "subkeys" list may be used instead of positional args.

    Returns:
        dict: Loaded data.
    """
    output = {}
    path = os.path.normpath(path)
    if not os.path.exists(path):
        # TODO warning
        return output
    # Subkeys that match existing subfolders are consumed by walking deeper
    # into the directory; the remaining ones narrow the output dict below.
    sub_keys = list(kwargs.pop("subkeys", args))
    for sub_key in tuple(sub_keys):
        _path = os.path.join(path, sub_key)
        if not os.path.exists(_path):
            break
        path = _path
        sub_keys.pop(0)
    base_len = len(path) + 1
    for base, _directories, filenames in os.walk(path):
        # Relative folder path of the walked directory -> nested dict keys.
        base_items_str = base[base_len:]
        if not base_items_str:
            base_items = []
        else:
            base_items = base_items_str.split(os.path.sep)
        for filename in filenames:
            basename, ext = os.path.splitext(filename)
            if ext == ".json":
                full_path = os.path.join(base, filename)
                value = load_json_file(full_path)
                dict_keys = base_items + [basename]
                output = subkey_merge(output, value, dict_keys)
    # Narrow output by remaining subkeys (raises KeyError when missing).
    for sub_key in sub_keys:
        output = output[sub_key]
    return output
def find_environments(data, with_items=False, parents=None):
    """Find environment values from system settings by it's metadata.

    Args:
        data (dict): System settings data or dictionary which may contain
            environments metadata.
        with_items (bool): When True, return all found items including
            their "parents" paths without duplicate checking. Used
            internally by the recursion.
        parents (list): Keys leading to 'data' in the settings hierarchy.
            Used internally by the recursion.

    Returns:
        dict: Key as Environment key and value for `acre` module.

    Raises:
        DuplicatedEnvGroups: When an environment group key is defined in
            multiple places (only at the top-level call).
    """
    if not data or not isinstance(data, dict):
        return {}
    output = {}
    if parents is None:
        parents = []
    # Collect environments defined by metadata on this level.
    if M_ENVIRONMENT_KEY in data:
        metadata = data.get(M_ENVIRONMENT_KEY)
        for env_group_key, env_keys in metadata.items():
            if env_group_key not in output:
                output[env_group_key] = []
            _env_values = {}
            for key in env_keys:
                _env_values[key] = data[key]
            item = {
                "env": _env_values,
                "parents": parents[:-1]
            }
            output[env_group_key].append(item)
    # Recurse into nested dictionaries and merge their findings.
    for key, value in data.items():
        _parents = copy.deepcopy(parents)
        _parents.append(key)
        result = find_environments(value, True, _parents)
        if not result:
            continue
        for env_group_key, env_values in result.items():
            if env_group_key not in output:
                output[env_group_key] = []
            for env_values_item in env_values:
                output[env_group_key].append(env_values_item)
    if with_items:
        return output
    # Top-level call: every group key must be defined exactly once.
    duplicated_env_groups = {}
    final_output = {}
    for key, value_in_list in output.items():
        if len(value_in_list) > 1:
            duplicated_env_groups[key] = value_in_list
        else:
            final_output[key] = value_in_list[0]["env"]
    if duplicated_env_groups:
        raise DuplicatedEnvGroups(duplicated_env_groups)
    return final_output
def subkey_merge(_dict, value, keys):
    """Set 'value' into '_dict' under the nested path given by 'keys'.

    Missing intermediate dictionaries are created. NOTE: 'keys' is consumed
    (popped) while processing, mirroring the original in-place behavior.

    Returns:
        dict: The same '_dict' object that was passed in.
    """
    current = _dict
    while True:
        key = keys.pop(0)
        if not keys:
            current[key] = value
            return _dict
        current = current.setdefault(key, {})
def merge_overrides(source_dict, override_dict):
    """Merge 'override_dict' into 'source_dict' (in place) and return it.

    Keys listed under the overrides metadata key replace source values
    entirely; dict values not marked as overriden are merged recursively.
    The metadata key is popped from 'override_dict'.
    """
    overriden_keys = set(override_dict.pop(M_OVERRIDEN_KEY, []))
    for key, value in override_dict.items():
        mergeable = (
            key not in overriden_keys
            and isinstance(value, dict)
            and isinstance(source_dict.get(key), dict)
            and key in source_dict
        )
        if mergeable:
            source_dict[key] = merge_overrides(source_dict[key], value)
        else:
            source_dict[key] = value
    return source_dict
def apply_overrides(source_data, override_data):
    """Apply override data on source data and return the merged result.

    Returns 'source_data' itself (not a copy) when overrides are empty;
    otherwise merges overrides into a deep copy so the input stays intact.
    """
    if not override_data:
        return source_data
    _source_data = copy.deepcopy(source_data)
    return merge_overrides(_source_data, override_data)
def apply_local_settings_on_system_settings(system_settings, local_settings):
    """Apply local settings on studio system settings.

    ATM local settings can modify only application executables. Executable
    values are not overriden but prepended.

    Args:
        system_settings (dict): Studio system settings (modified in place).
        local_settings (dict): Local settings data; may be None/empty.
    """
    if not local_settings or "applications" not in local_settings:
        return

    current_platform = platform.system().lower()
    for app_group_name, value in local_settings["applications"].items():
        if not value or app_group_name not in system_settings["applications"]:
            continue

        variants = system_settings["applications"][app_group_name]["variants"]
        for app_name, app_value in value.items():
            if (
                not app_value
                or app_name not in variants
                or "executables" not in variants[app_name]
            ):
                continue

            executable = app_value.get("executable")
            if not executable:
                continue

            platform_executables = variants[app_name]["executables"].get(
                current_platform
            )
            # TODO This is temporary fix until launch arguments will be
            #   stored per platform and not per executable.
            # - local settings store only executable
            new_executables = [executable]
            # BUGFIX: 'platform_executables' is None when the current
            #   platform has no executables defined; extending with None
            #   raised TypeError before.
            new_executables.extend(platform_executables or [])
            variants[app_name]["executables"] = new_executables
def apply_local_settings_on_anatomy_settings(
    anatomy_settings, local_settings, project_name, site_name=None
):
    """Apply local settings on anatomy settings.

    ATM local settings can modify project roots. Project name is required as
    local settings have data stored data by project's name.

    Local settings override root values in this order:
    1.) Check if local settings contain overrides for default project and
        apply it's values on roots if there are any.
    2.) If passed `project_name` is not None then check project specific
        overrides in local settings for the project and apply it's value on
        roots if there are any.

    NOTE: Root values of default project from local settings are always
    applied if are set.

    Args:
        anatomy_settings (dict): Data for anatomy settings (modified in
            place).
        local_settings (dict): Data of local settings.
        project_name (str): Name of project for which anatomy data are.
        site_name (str): Site whose roots should be applied; when None the
            active site is read from project settings.
    """
    if not local_settings:
        return
    local_project_settings = local_settings.get("projects") or {}
    # Check for roots existence in local settings first
    roots_project_locals = (
        local_project_settings
        .get(project_name, {})
    )
    roots_default_locals = (
        local_project_settings
        .get(DEFAULT_PROJECT_KEY, {})
    )
    # Skip rest of processing if roots are not set
    if not roots_project_locals and not roots_default_locals:
        return
    # Get active site from settings
    if site_name is None:
        if project_name:
            project_settings = get_project_settings(project_name)
        else:
            project_settings = get_default_project_settings()
        site_name = (
            project_settings["global"]["sync_server"]["config"]["active_site"]
        )
    # QUESTION should raise an exception?
    if not site_name:
        return
    # Combine roots from local settings; project values win over defaults.
    roots_locals = roots_default_locals.get(site_name) or {}
    roots_locals.update(roots_project_locals.get(site_name) or {})
    # Skip processing if roots for current active site are not available in
    # local settings
    if not roots_locals:
        return
    current_platform = platform.system().lower()
    root_data = anatomy_settings["roots"]
    # Only known root names are overridden, and only for this platform.
    for root_name, path in roots_locals.items():
        if root_name not in root_data:
            continue
        anatomy_settings["roots"][root_name][current_platform] = (
            path
        )
def get_site_local_overrides(project_name, site_name, local_settings=None):
    """Site overrides from local settings for passed project and site name.

    Default project values are applied first so project specific values
    can override them.

    Args:
        project_name (str): For which project are overrides.
        site_name (str): For which site are overrides needed.
        local_settings (dict): Preloaded local settings. They are loaded
            automatically if not passed.

    Returns:
        dict: Merged site overrides (may be empty).
    """
    if local_settings is None:
        local_settings = get_local_settings()

    output = {}
    if not local_settings:
        return output

    projects_locals = local_settings.get("projects") or {}
    # Defaults first, then project specific values on top.
    for project_key in (DEFAULT_PROJECT_KEY, project_name):
        if not project_key:
            continue
        key_locals = projects_locals.get(project_key)
        if key_locals and site_name in key_locals:
            output.update(key_locals[site_name])
    return output
def apply_local_settings_on_project_settings(
    project_settings, local_settings, project_name
):
    """Apply local settings on project settings.

    Currently is modifying active site and remote site in sync server.

    Args:
        project_settings (dict): Data for project settings (modified in
            place).
        local_settings (dict): Data of local settings.
        project_name (str): Name of project for which settings data are.
            May be None for the default project.
    """
    if not local_settings:
        return
    local_project_settings = local_settings.get("projects")
    if not local_project_settings:
        return
    # Project specific values win over default project values.
    project_locals = local_project_settings.get(project_name) or {}
    default_locals = local_project_settings.get(DEFAULT_PROJECT_KEY) or {}
    active_site = (
        project_locals.get("active_site")
        or default_locals.get("active_site")
    )
    remote_site = (
        project_locals.get("remote_site")
        or default_locals.get("remote_site")
    )
    sync_server_config = project_settings["global"]["sync_server"]["config"]
    if active_site:
        sync_server_config["active_site"] = active_site
    if remote_site:
        sync_server_config["remote_site"] = remote_site
def get_system_settings(clear_metadata=True, exclude_locals=None):
    """System settings with applied studio overrides.

    Args:
        clear_metadata (bool): Strip overrides metadata from the result.
        exclude_locals (bool): Skip applying local settings; when None it
            defaults to 'not clear_metadata'.
    """
    default_values = get_default_settings()[SYSTEM_SETTINGS_KEY]
    studio_values = get_studio_system_settings_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    # Default behavior is based on `clear_metadata` value
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        # TODO local settings may be required to apply for environments
        local_settings = get_local_settings()
        apply_local_settings_on_system_settings(result, local_settings)
    return result
def get_default_project_settings(clear_metadata=True, exclude_locals=None):
    """Project settings with applied studio's default project overrides.

    Args mirror 'get_system_settings'.
    """
    default_values = get_default_settings()[PROJECT_SETTINGS_KEY]
    studio_values = get_studio_project_settings_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_project_settings(
            result, local_settings, None
        )
    return result
def get_default_anatomy_settings(clear_metadata=True, exclude_locals=None):
    """Project anatomy data with applied studio's default project overrides.

    Args mirror 'get_system_settings'.
    """
    default_values = get_default_settings()[PROJECT_ANATOMY_KEY]
    studio_values = get_studio_project_anatomy_overrides()
    result = apply_overrides(default_values, studio_values)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_anatomy_settings(
            result, local_settings, None
        )
    return result
def get_anatomy_settings(
    project_name, site_name=None, clear_metadata=True, exclude_locals=None
):
    """Project anatomy data with applied studio and project overrides.

    Args:
        project_name (str): Project name; must not be empty.
        site_name (str): Site whose local root overrides should be used.
        clear_metadata (bool): Strip overrides metadata from the result.
        exclude_locals (bool): Skip applying local settings; when None it
            defaults to 'not clear_metadata'.

    Raises:
        ValueError: When 'project_name' is empty.
    """
    if not project_name:
        raise ValueError(
            "Must enter project name. Call "
            "`get_default_anatomy_settings` to get project defaults."
        )
    studio_overrides = get_default_anatomy_settings(False)
    project_overrides = get_project_anatomy_overrides(
        project_name
    )
    result = copy.deepcopy(studio_overrides)
    # NOTE: project anatomy overrides replace whole top-level keys instead
    #   of being merged recursively like settings overrides.
    if project_overrides:
        for key, value in project_overrides.items():
            result[key] = value
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_anatomy_settings(
            result, local_settings, project_name, site_name
        )
    return result
def get_project_settings(
    project_name, clear_metadata=True, exclude_locals=None
):
    """Project settings with applied studio and project overrides.

    Args:
        project_name (str): Project name; must not be empty.
        clear_metadata (bool): Strip overrides metadata from the result.
        exclude_locals (bool): Skip applying local settings; when None it
            defaults to 'not clear_metadata'.

    Raises:
        ValueError: When 'project_name' is empty.
    """
    if not project_name:
        raise ValueError(
            "Must enter project name."
            " Call `get_default_project_settings` to get project defaults."
        )
    studio_overrides = get_default_project_settings(False)
    project_overrides = get_project_settings_overrides(
        project_name
    )
    # Unlike anatomy, project settings overrides are merged recursively.
    result = apply_overrides(studio_overrides, project_overrides)
    # Clear overrides metadata from settings
    if clear_metadata:
        clear_metadata_from_settings(result)
    # Apply local settings
    if exclude_locals is None:
        exclude_locals = not clear_metadata
    if not exclude_locals:
        local_settings = get_local_settings()
        apply_local_settings_on_project_settings(
            result, local_settings, project_name
        )
    return result
def get_current_project_settings():
    """Project settings for current context project.

    Project name should be stored in environment variable `AVALON_PROJECT`.
    This function should be used only in host context where the environment
    variable must be set and it should not happen that any part of the
    process changes its value.

    Returns:
        dict: Project settings of the context project.

    Raises:
        ValueError: When `AVALON_PROJECT` is not set.
    """
    project_name = os.environ.get("AVALON_PROJECT")
    if not project_name:
        # Fixed typo in the user-facing message ("environemt").
        raise ValueError(
            "Missing context project in environment variable"
            " `AVALON_PROJECT`."
        )
    return get_project_settings(project_name)
def get_environments():
    """Calculated environment based on defaults and system settings.

    Any default environment also found in the system settings will be fully
    overriden by the one from the system settings.

    Returns:
        dict: Output should be ready for `acre` module.

    Raises:
        DuplicatedEnvGroups: Propagated from 'find_environments' when an
            environment group key is defined multiple times.
    """
    return find_environments(get_system_settings(False))
def get_general_environments():
    """Get general environments.

    Function is implemented to be able load general environments without
    using `get_default_settings`.

    Returns:
        dict: The "general/environment" subtree with metadata stripped.
    """
    # Use only openpype defaults.
    # - prevent to use `get_system_settings` where `get_default_settings`
    #   is used
    default_values = load_openpype_default_settings()
    system_settings = default_values["system_settings"]
    studio_overrides = get_studio_system_settings_overrides()
    result = apply_overrides(system_settings, studio_overrides)
    environments = result["general"]["environment"]
    clear_metadata_from_settings(environments)
    return environments
def clear_metadata_from_settings(values):
    """Recursively strip metadata keys from loaded settings (in place)."""
    if isinstance(values, dict):
        # Materialize keys first: the dict is mutated during traversal.
        for key in list(values):
            if key in METADATA_KEYS:
                del values[key]
            else:
                clear_metadata_from_settings(values[key])
    elif isinstance(values, list):
        for entry in values:
            clear_metadata_from_settings(entry)
from re import A
import time
import datetime
from telegram import Update
from telegram.ext import CallbackContext
import os
import platform
import ctypes
import psutil
import redis
def ping(update: Update, context: CallbackContext, starttime: datetime.datetime, permission: bool, redisPool0: redis.ConnectionPool, redisPool1: redis.ConnectionPool):
    """Reply to /ping with bot status: uptime, disk, memory, CPU and redis use.

    Args:
        update: Incoming telegram update.
        context: Telegram callback context used to send the reply.
        starttime: Moment the bot process started (for uptime computation).
        permission: Whether the current chat may use the bot.
        redisPool0: Connection pool of redis database 0.
        redisPool1: Connection pool of redis database 1.
    """
    # Resolve a readable chat title; fall back to the sender's username and
    # finally to "私人" (private chat).
    grouptitle: str = '私人'
    if update.message.chat.title is not None:
        grouptitle = update.message.chat.title
    elif update.message.from_user.username is not None:
        grouptitle = update.message.from_user.username
    groupinfo: str = grouptitle + '」'
    # NOTE: 'update.message.chat' was already dereferenced above, so only
    #   'permission' effectively decides this branch.
    if update.message.chat is None or not permission:
        groupinfo += '没有'
    else:
        groupinfo += '具有'
    t = int(time.time())
    endtime = datetime.datetime.now()
    runsec: int = (endtime - starttime).seconds
    redisConnect0 = redis.Redis(connection_pool=redisPool0)
    redisConnect1 = redis.Redis(connection_pool=redisPool1)
    alerts: list[str] = [
        'pong',
        '雅诗电子绒布球 v2.0.1',
        '服务器时间戳: '+str(t)+' 秒。',
        '距离上次重新启动: '+str(runsec)+' 秒。',
        '可用磁盘: '+getLocalSpace('/')+' MB',
        '可用内存: '+getMem()+' MB',
        'CPU使用: '+getCpu()+' %',
        '数据库使用: 0: '+str(redisConnect0.dbsize())+' 1: '+str(redisConnect1.dbsize()),
        '当前会话「'+groupinfo+'使用许可权。',
        '有关更多信息请参阅 `/about` 。',
        ' 本 BOT 具有超级绒力。'
    ]
    redisConnect0.close()
    redisConnect1.close()
    alert = '\n'.join(alerts)
    print(update.message.chat.id, update.message.chat.title,
          update.message.from_user.id, update.message.from_user.username, alert)
    context.bot.send_message(
        chat_id=update.effective_chat.id, text=alert)
def getLocalSpace(folder):
    """Return free disk space of *folder* in MB, formatted with 2 decimals.

    Falls back to the current working directory when *folder* does not
    exist. Uses the win32 API on Windows and statvfs elsewhere.
    """
    target = folder if os.path.exists(folder) else os.getcwd()
    if platform.system() == 'Windows':
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(
            ctypes.c_wchar_p(target), None, None, ctypes.pointer(free_bytes))
        free_mb = free_bytes.value / 1024 / 1024
    else:
        stats = os.statvfs(target)
        free_mb = stats.f_bavail * stats.f_frsize / 1024 / 1024
    return "%0.2f" % free_mb
def getMem() -> str:
    """Return available system memory in MB with 2 decimal places."""
    # Removed the unused 'total' local; only available memory is reported.
    free = psutil.virtual_memory().available  # bytes
    return "%0.2f" % (free / 1024 / 1024)
def getCpu() -> str:
    """Return system CPU usage percent, sampled over 1 second (blocking)."""
    cpu = "%0.2f" % psutil.cpu_percent(interval=1)
    return cpu
# Import-time smoke test: print basic host stats when this module loads.
# NOTE(review): getCpu() blocks for 1 second on every import of this file —
#   confirm this side effect is intended.
alerts: list[str] = [
    '服务器时间戳: '+str(time.time())+' 秒。',
    '可用磁盘: '+getLocalSpace('/')+' MB',
    '可用内存: '+getMem()+' MB',
    'CPU使用: '+getCpu()+' %',
]
text = '\n'.join(alerts)
print(text)
import time
import datetime
from telegram import Update
from telegram.ext import CallbackContext
import os
import platform
import ctypes
import psutil
import redis
def ping(update: Update, context: CallbackContext, starttime: datetime, permission: bool, redisPool0: redis.ConnectionPool, redisPool1: redis.ConnectionPool):
    """Reply to /ping with bot status: uptime, disk, memory, CPU, redis use.

    NOTE(review): the 'starttime' annotation refers to the 'datetime'
    module; it should likely be 'datetime.datetime' — confirm.
    """
    # Chat title, falling back to sender username, then "私人" (private).
    grouptitle: str = '私人'
    if update.message.chat.title != None:
        grouptitle = update.message.chat.title
    elif update.message.from_user.username != None:
        grouptitle = update.message.from_user.username
    groupinfo: str = grouptitle + '」'
    chatID: int = update.message.chat.id  # NOTE(review): unused local
    # NOTE(review): 'update.message.chat' was dereferenced above, so only
    #   'permission' effectively decides this branch.
    if update.message.chat == None or permission == False:
        groupinfo += '没有'
    else:
        groupinfo += '具有'
    t = int(time.time())
    endtime = datetime.datetime.now()
    runsec: int = (endtime - starttime).seconds
    redisConnect0 = redis.Redis(connection_pool=redisPool0)
    redisConnect1 = redis.Redis(connection_pool=redisPool1)
    alerts: list[str] = [
        'pong',
        '雅诗电子绒布球 v2.0.1',
        '服务器时间戳: '+str(t)+' 秒。',
        '距离上次重新启动: '+str(runsec)+' 秒。',
        '可用磁盘: '+getLocalSpace('/')+' MB',
        '可用内存: '+getMem()+' MB',
        'CPU使用: '+getCpu()+' %',
        '数据库使用: 0: '+str(redisConnect0.dbsize())+' 1: '+str(redisConnect1.dbsize()),
        '当前会话「'+groupinfo+'使用许可权。',
        '有关更多信息请参阅 `/about` 。',
        ' 本 BOT 具有超级绒力。'
    ]
    redisConnect0.close()
    redisConnect1.close()
    alert = '\n'.join(alerts)
    print(update.message.chat.id, update.message.chat.title,
        update.message.from_user.id, update.message.from_user.username, alert)
    context.bot.send_message(
        chat_id=update.effective_chat.id, text=alert)
def getLocalSpace(folder):
    """Free disk space of 'folder' in MB (2 decimals); falls back to cwd."""
    folderTemp = folder
    if not os.path.exists(folderTemp):
        folderTemp = os.getcwd()
    if platform.system() == 'Windows':
        # Win32 API fills the free-bytes out-parameter for the caller.
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(
            folderTemp), None, None, ctypes.pointer(free_bytes))
        return "%0.2f" % (free_bytes.value / 1024 / 1024)
    else:
        st = os.statvfs(folderTemp)
        return "%0.2f" % (st.f_bavail * st.f_frsize / 1024 / 1024)
def getMem() -> str:
    """Available system memory in MB (2 decimals)."""
    data = psutil.virtual_memory()
    total = data.total  # total memory in bytes (unused)
    free = data.available  # available memory in bytes
    return "%0.2f" % (free / 1024 / 1024)
def getCpu() -> str:
    """System CPU usage percent, sampled over 1 second (blocking)."""
    cpu = "%0.2f" % psutil.cpu_percent(interval=1)
    return cpu
# Import-time smoke test printing host stats; getCpu() blocks 1 second.
alerts: list[str] = [
    '服务器时间戳: '+str(time.time())+' 秒。',
    '可用磁盘: '+getLocalSpace('/')+' MB',
    '可用内存: '+getMem()+' MB',
    'CPU使用: '+getCpu()+' %',
]
text = '\n'.join(alerts)
print(text)
import inspect
import re
from strawberry import object_type
from strawberry.auto import StrawberryAuto, auto
from strawberry.enum import EnumDefinition
from strawberry.field import StrawberryField
from strawberry.schema.name_converter import NameConverter
from strawberry.types.fields.resolver import StrawberryResolver
from strawberry_django.fields import types
from strawberry_django.fields import types as ftypes
# Just import this for the monkey patch
from .utils.printer import print_schema # noqa:F401
# Monkey patch strawberry_django to use strawberry's auto
types.auto = auto
ftypes.auto = auto
# Docstrings captured per class by '_wrap_dataclass' before strawberry
# processes the dataclass, keyed by class object.
_cls_docs = {}

# Original strawberry callables, saved so the monkey patched replacements
# below can delegate to them.
# (A duplicated '_original_process_type' assignment was removed.)
_original_process_type = object_type._process_type
_original_wrap_dataclass = object_type._wrap_dataclass
_original_field_init = StrawberryField.__init__
_original_field_call = StrawberryField.__call__
_original_enum_init = EnumDefinition.__init__
_original_from_generic = NameConverter.from_generic
def _get_doc(obj):
if not obj.__doc__:
return None
return inspect.cleandoc(obj.__doc__)
def _process_type(cls, *args, **kwargs):
    """Patched 'object_type._process_type'.

    Fills the type description from the class docstring captured by
    '_wrap_dataclass' and registers schema directive resolvers found on
    the resulting type definition.
    """
    from strawberry_django_plus.directives import SchemaDirectiveWithResolver
    if kwargs.get("description") is None:
        kwargs["description"] = _cls_docs.get(cls)
    ret = _original_process_type(cls, *args, **kwargs)
    for d in ret._type_definition.directives:
        if isinstance(d, SchemaDirectiveWithResolver):
            d.register(ret._type_definition)
    return ret
def _wrap_dataclass(cls):
    """Patched 'object_type._wrap_dataclass'.

    Captures the class docstring before dataclass processing so
    '_process_type' can use it as the type description.
    """
    _cls_docs[cls] = _get_doc(cls)
    return _original_wrap_dataclass(cls)
def _field_init(self, *args, **kwargs):
    """Patched 'StrawberryField.__init__'.

    Defaults the field description to the resolver function docstring and
    registers schema directive resolvers attached to the field.
    """
    from strawberry_django_plus.directives import SchemaDirectiveWithResolver
    if kwargs.get("description") is None:
        base_resolver = kwargs.get("base_resolver")
        if base_resolver is not None:
            # Unwrap nested StrawberryResolver layers to the real function.
            while isinstance(base_resolver, StrawberryResolver):
                base_resolver = base_resolver.wrapped_func
            kwargs["description"] = _get_doc(base_resolver)
    ret = _original_field_init(self, *args, **kwargs)
    for d in self.directives:
        if isinstance(d, SchemaDirectiveWithResolver):
            d.register(self)
    return ret
def _field_call(self, resolver):
    """Patched 'StrawberryField.__call__'.

    When a resolver is assigned to the field, default the description to
    its docstring if still unset.
    """
    ret = _original_field_call(self, resolver)
    if self.description is None:
        resolver = self.base_resolver
        while isinstance(resolver, StrawberryResolver):
            resolver = resolver.wrapped_func
        self.description = _get_doc(resolver)
    return ret
def _enum_init(*args, **kwargs):
    """Patched 'EnumDefinition.__init__'.

    Defaults the enum description to the wrapped enum class docstring.
    """
    if kwargs.get("description") is None:
        cls = kwargs.get("wrapped_cls")
        kwargs["description"] = _get_doc(cls)
    return _original_enum_init(*args, **kwargs)
def _from_generic(*args, **kwargs):
    """Patched 'NameConverter.from_generic'.

    Removes configured duplicated suffixes from generated generic type
    names (e.g. with suffix "Type", "FruitTypeType" -> "FruitType"). Only
    non-final occurrences are removed so the name keeps its suffix.
    """
    from .settings import config
    v = _original_from_generic(*args, **kwargs)
    for p in config.REMOVE_DUPLICATED_SUFFIX:
        if not v.endswith(p):
            continue
        # Escape the suffix so regex metacharacters in configured values
        # cannot corrupt the pattern.
        v = re.sub(rf"{re.escape(p)}(?!$)", "", v)
    return v
# Install the monkey patches defined above into strawberry/strawberry_django.
types.is_auto = lambda type_: isinstance(type_, StrawberryAuto)
object_type._process_type = _process_type
object_type._wrap_dataclass = _wrap_dataclass
StrawberryField.__init__ = _field_init
StrawberryField.__call__ = _field_call
EnumDefinition.__init__ = _enum_init
NameConverter.from_generic = _from_generic
import re
from strawberry import object_type
from strawberry.auto import StrawberryAuto, auto
from strawberry.enum import EnumDefinition
from strawberry.field import StrawberryField
from strawberry.schema.name_converter import NameConverter
from strawberry.types.fields.resolver import StrawberryResolver
from strawberry_django.fields import types
from strawberry_django.fields import types as ftypes
# Just import this for the monkey patch
from .utils.printer import print_schema # noqa:F401
# Monkey patch strawberry_django to use strawberry's auto
types.auto = auto
ftypes.auto = auto
# Docstrings captured per class before strawberry processes dataclasses.
_cls_docs = {}
# Original strawberry callables saved for the monkey patches below.
# NOTE(review): the next line is duplicated — harmless, but one of the two
#   '_original_process_type' assignments can be removed.
_original_process_type = object_type._process_type
_original_process_type = object_type._process_type
_original_wrap_dataclass = object_type._wrap_dataclass
_original_field_init = StrawberryField.__init__
_original_field_call = StrawberryField.__call__
_original_enum_init = EnumDefinition.__init__
_original_from_generic = NameConverter.from_generic
def _get_doc(obj):
    """Return the cleaned docstring of 'obj', or None when it has none."""
    if not obj.__doc__:
        return None
    return inspect.cleandoc(obj.__doc__)
def _process_type(cls, *args, **kwargs):
    """Patched 'object_type._process_type': default description from the
    captured class docstring and register directive resolvers."""
    from strawberry_django_plus.directives import SchemaDirectiveWithResolver
    if kwargs.get("description") is None:
        kwargs["description"] = _cls_docs.get(cls)
    ret = _original_process_type(cls, *args, **kwargs)
    for d in ret._type_definition.directives:
        if isinstance(d, SchemaDirectiveWithResolver):
            d.register(ret._type_definition)
    return ret
def _wrap_dataclass(cls):
    """Patched 'object_type._wrap_dataclass': capture the class docstring
    before dataclass processing."""
    _cls_docs[cls] = _get_doc(cls)
    return _original_wrap_dataclass(cls)
def _field_init(self, *args, **kwargs):
    """Patched 'StrawberryField.__init__': default description from the
    resolver docstring and register directive resolvers on the field."""
    from strawberry_django_plus.directives import SchemaDirectiveWithResolver
    if kwargs.get("description") is None:
        base_resolver = kwargs.get("base_resolver")
        if base_resolver is not None:
            # Unwrap nested StrawberryResolver layers to the real function.
            while isinstance(base_resolver, StrawberryResolver):
                base_resolver = base_resolver.wrapped_func
            kwargs["description"] = _get_doc(base_resolver)
    ret = _original_field_init(self, *args, **kwargs)
    for d in self.directives:
        if isinstance(d, SchemaDirectiveWithResolver):
            d.register(self)
    return ret
def _field_call(self, resolver):
    """Patched 'StrawberryField.__call__': when a resolver is assigned,
    default the still-unset description to its docstring."""
    ret = _original_field_call(self, resolver)
    if self.description is None:
        resolver = self.base_resolver
        while isinstance(resolver, StrawberryResolver):
            resolver = resolver.wrapped_func
        self.description = _get_doc(resolver)
    return ret
def _enum_init(*args, **kwargs):
    """Patched 'EnumDefinition.__init__': default description from the
    wrapped enum class docstring."""
    if kwargs.get("description") is None:
        cls = kwargs.get("wrapped_cls")
        kwargs["description"] = _get_doc(cls)
    return _original_enum_init(*args, **kwargs)
def _from_generic(*args, **kwargs):
    """Patched 'NameConverter.from_generic': strip configured duplicated
    suffixes from generated generic type names."""
    from .settings import config
    v = _original_from_generic(*args, **kwargs)
    for p in config.REMOVE_DUPLICATED_SUFFIX:
        if not v.endswith(p):
            continue
        # NOTE(review): 'p' is interpolated into the regex unescaped —
        #   suffixes containing regex metacharacters would misbehave.
        v = re.sub(rf"{p}(?!$)", "", v)
    return v
# Install the monkey patches defined above into strawberry/strawberry_django.
types.is_auto = lambda type_: isinstance(type_, StrawberryAuto)
object_type._process_type = _process_type
object_type._wrap_dataclass = _wrap_dataclass
StrawberryField.__init__ = _field_init
StrawberryField.__call__ = _field_call
EnumDefinition.__init__ = _enum_init
NameConverter.from_generic = _from_generic
import pandas as pd
import numpy as np
import os

# Aggregate APC (automatic passenger counter) records into average
# boardings/alightings/loads per stop, day type and 30-minute period.

# Read in the file
# UK Paths
VOL_DIR = 'S:/CMP/Transit/Volume/'
vol = pd.read_csv(VOL_DIR + 'APC_2019_SPRING_SO_STOPS02.txt', sep='\t')
vol_agg_output = VOL_DIR + 'CMP_APC_Average_Volume.csv' # Output file name and directory

# SFCTA Paths
#VOL_DIR = r'Q:\CMP\LOS Monitoring 2019\Transit\Volume'
#vol = pd.read_csv(os.path.join(VOL_DIR, 'APC_2019_SPRING_SO_STOPS02.txt'), sep='\t')
#vol_agg_output = os.path.join(VOL_DIR, 'CMP_APC_Average_Volume.csv')

# Assign 30-minute period: rebuild a full timestamp from the fixed-width
# CLOSE_DATE_TIME string and the actual service date.
vol['Close_Hour'] = vol['CLOSE_DATE_TIME'].str[10:12].astype(int)
vol['Close_Minute'] = vol['CLOSE_DATE_TIME'].str[13:15].astype(int)
vol['Close_Second'] = vol['CLOSE_DATE_TIME'].str[16:18].astype(int)
vol['Close_Period'] = vol['CLOSE_DATE_TIME'].str[-2:]
vol['Close_Time'] = vol['ACTUAL_DATE'] + ' ' + vol['Close_Hour'].astype('str') + ':' + vol['Close_Minute'].astype('str') + ':' + vol['Close_Second'].astype('str') + ' ' + vol['Close_Period']
vol['Close_Time'] = pd.to_datetime(vol['Close_Time'])
vol = vol.sort_values(by=['EXT_TRIP_ID', 'ACTUAL_DATE', 'VEHICLE_ID', 'Close_Time']).reset_index()
# Epoch = half-hour slot index within the day (0-47).
vol['Epoch'] = 2*vol['Close_Time'].dt.hour + vol['Close_Time'].dt.minute//30

# Aggregate the boardings, alightings, and loads from individual trips to
# the corresponding 30-minute period
vol_sum = vol.groupby(['STOPID', 'ACTUAL_DATE', 'Epoch']).agg({'ONS': 'sum', 'OFFS': 'sum', 'MAX_LOAD': 'sum'}).reset_index()
vol_sum.columns = ['STOPID', 'ACTUAL_DATE', 'Epoch', 'Boardings', 'Alightings', 'Loads']

# Assign day type, i.e. weekdays, Saturday, or Sunday
vol_sum['Date'] = pd.to_datetime(vol_sum['ACTUAL_DATE'])
vol_sum['DOW'] = vol_sum.Date.dt.dayofweek  # Mon-0
vol_sum['DayType'] = np.where(vol_sum['DOW']<=4, 'Weekdays', np.where(vol_sum['DOW']==5, 'Saturday', 'Sunday'))
# Remove Mondays and Fridays to get typical weekdays
vol_sum = vol_sum[(vol_sum['DOW']!=0) & (vol_sum['DOW']!=4)]

# Average boardings, alightings, and loads by stop, daytype, and period
vol_daytype_avg_stops = vol_sum.groupby(['STOPID', 'DayType', 'Epoch']).agg({'Boardings': 'mean',
                                                                             'Alightings': 'mean',
                                                                             'Loads': 'mean'}).reset_index()
vol_daytype_avg_stops.columns = ['STOPID', 'DayType', 'Epoch', 'Avg_Boardings', 'Avg_Alightings', 'Avg_Loads']

# Create a complete (stop, day type, epoch) grid so every 30-minute period
# appears in the output even when no observation exists for it.
daytypes = ['Weekdays', 'Saturday', 'Sunday']
# BUGFIX: 'vol_stops' was referenced but never defined (NameError at
# runtime); derive the stop list from the aggregated data.
vol_stops = vol_sum['STOPID'].unique()
# Build the grid in one vectorized call instead of appending rows one at a
# time with .loc (which is extremely slow for many stops).
stop_vol_complete = pd.MultiIndex.from_product(
    [vol_stops, daytypes, range(48)],
    names=['STOPID', 'DayType', 'Epoch']).to_frame(index=False)
stop_vol_complete['Epoch'] = stop_vol_complete['Epoch'].astype(int)

# Join the vol_daytype_avg_stops dataframe to the created complete dataframe
stop_vol_complete = pd.merge(stop_vol_complete, vol_daytype_avg_stops, on=['STOPID', 'DayType', 'Epoch'], how='left')
# Human readable "H:MM" label of each half-hour period.
stop_vol_complete['Hour'] = stop_vol_complete['Epoch']//2
stop_vol_complete['Minute'] = np.where(stop_vol_complete['Epoch']%2 ==0, '00', '30')
stop_vol_complete['Period'] = stop_vol_complete['Hour'].astype(str) + ':' + stop_vol_complete['Minute']

# Save the output file
stop_vol_complete[['STOPID', 'DayType', 'Period', 'Avg_Boardings', 'Avg_Alightings', 'Avg_Loads']].to_csv(vol_agg_output, index=False)
import numpy as np
import os
# Read in the file
#UK Paths
VOL_DIR = 'S:/CMP/Transit/Volume/'
vol = pd.read_csv(VOL_DIR + 'APC_2019_SPRING_SO_STOPS02.txt', sep='\t')
vol_agg_output = VOL_DIR + 'CMP_APC_Average_Volume.csv' # Output file name and directory
#SFCTA Paths
#VOL_DIR = r'Q:\CMP\LOS Monitoring 2019\Transit\Volume'
#vol = pd.read_csv(os.path.join(VOL_DIR, 'APC_2019_SPRING_SO_STOPS02.txt'), sep='\t')
#vol_agg_output = os.path.join(VOL_DIR, 'CMP_APC_Average_Volume.csv')
# Assign 30-minute period
vol['Close_Hour'] = vol['CLOSE_DATE_TIME'].str[10:12].astype(int)
vol['Close_Minute'] = vol['CLOSE_DATE_TIME'].str[13:15].astype(int)
vol['Close_Second'] = vol['CLOSE_DATE_TIME'].str[16:18].astype(int)
vol['Close_Period'] = vol['CLOSE_DATE_TIME'].str[-2:]
vol['Close_Time'] = vol['ACTUAL_DATE'] + ' ' + vol['Close_Hour'].astype('str') + ':' + vol['Close_Minute'].astype('str') + ':' + vol['Close_Second'].astype('str') + ' ' + vol['Close_Period']
vol['Close_Time'] = pd.to_datetime(vol['Close_Time'])
vol = vol.sort_values(by=['EXT_TRIP_ID', 'ACTUAL_DATE', 'VEHICLE_ID', 'Close_Time']).reset_index()
vol['Epoch'] = 2*vol['Close_Time'].dt.hour + vol['Close_Time'].dt.minute//30
# Aggregate the boardings, alightings, and loads from individual trips to the corresponding 30-minute period
vol_sum = vol.groupby(['STOPID', 'ACTUAL_DATE', 'Epoch']).agg({'ONS': 'sum', 'OFFS': 'sum', 'MAX_LOAD': 'sum'}).reset_index()
vol_sum.columns = ['STOPID', 'ACTUAL_DATE', 'Epoch', 'Boardings', 'Alightings', 'Loads']
# Assign day type, i.e. weekdays, Saturday, or Sunday
vol_sum['Date']=pd.to_datetime(vol_sum['ACTUAL_DATE'])
vol_sum['DOW']=vol_sum.Date.dt.dayofweek #Mon-0
vol_sum['DayType'] = np.where(vol_sum['DOW']<=4, 'Weekdays', np.where(vol_sum['DOW']==5, 'Saturday', 'Sunday'))
# Remove Mondays and Fridays to get typical weekdays
vol_sum = vol_sum[(vol_sum['DOW']!=0) & (vol_sum['DOW']!=4)]
# Average boardings, alightings, and loads by stop, daytype, and period
vol_daytype_avg_stops = vol_sum.groupby(['STOPID', 'DayType', 'Epoch']).agg({'Boardings': 'mean',
'Alightings': 'mean',
'Loads': 'mean'}).reset_index()
vol_daytype_avg_stops.columns= ['STOPID', 'DayType', 'Epoch', 'Avg_Boardings', 'Avg_Alightings', 'Avg_Loads']
# Create empty dataframe with continuous time periods
daytypes = ['Weekdays', 'Saturday', 'Sunday']
stop_vol_complete = pd.DataFrame()
cnt = 0
for stop_id in vol_stops:
for day_type in daytypes:
for epoch_id in range(48):
stop_vol_complete.loc[cnt, 'STOPID'] = stop_id
stop_vol_complete.loc[cnt, 'DayType'] = day_type
stop_vol_complete.loc[cnt, 'Epoch'] = epoch_id
cnt = cnt + 1
stop_vol_complete['Epoch'] = stop_vol_complete['Epoch'].astype(int)
# Join the vol_daytype_avg_stops dataframe to the created complate dataframe
stop_vol_complete = pd.merge(stop_vol_complete, vol_daytype_avg_stops, on=['STOPID', 'DayType', 'Epoch'], how='left')
stop_vol_complete['Hour'] = stop_vol_complete['Epoch']//2
stop_vol_complete['Minute'] = np.where(stop_vol_complete['Epoch']%2 ==0, '00', '30')
stop_vol_complete['Period'] = stop_vol_complete['Hour'].astype(str) + ':' + stop_vol_complete['Minute']
# Save the output file
stop_vol_complete[['STOPID', 'DayType', 'Period', 'Avg_Boardings', 'Avg_Alightings', 'Avg_Loads']].to_csv(vol_agg_output, index=False) | 0.260201 | 0.195882 |
import os
import sys
import higher
from setuptools import setup, Command
class _Command(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class PEP8(_Command):
description = 'PEP8 analysis'
def run(self):
code = os.system('scripts/pep8.sh')
if code != 0:
sys.exit(1)
class Pyflakes(_Command):
description = 'Pyflakes analysis'
def run(self):
code = os.system('scripts/pyflakes.sh')
if code != 0:
sys.exit(1)
class Test(_Command):
description = 'Run tests'
def run(self):
os.system('scripts/test.sh')
class Check(_Command):
description = 'Run all checks'
def run(self):
codes = []
codes.append(os.system('scripts/pep8.sh'))
codes.append(os.system('scripts/pyflakes.sh'))
codes.append(os.system('scripts/test.sh'))
if any([code != 0 for code in codes]):
sys.stderr.write('One or more checks have failed.\n')
sys.stderr.flush()
sys.exit(1)
else:
sys.stdout.write('All checks have passed.\n')
sys.stdout.flush()
name = 'higher'
license = 'Apache License (2.0)'
packages = ['higher']
description = 'High-level abstraction library'
author = '<NAME>'
author_email = '<EMAIL>'
url = 'https://github.com/readallthebooks/higher'
download_url = 'https://github.com/readallthebooks/higher/releases'
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development :: Libraries :: Python Modules',
]
install_requires = [
]
long_description = '''\
Higher
-------
A high-level abstraction library
'''
setup(
cmdclass={
'pep8': PEP8,
'pyflakes': Pyflakes,
'test': Test,
'allchecks': Check
},
name=name,
packages=packages,
version=higher.__version__,
description=description,
author=author,
author_email=author_email,
url=url,
download_url=download_url,
classifiers=classifiers,
install_requires=install_requires,
long_description=long_description
) | setup.py |
import os
import sys
import higher
from setuptools import setup, Command
class _Command(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
class PEP8(_Command):
description = 'PEP8 analysis'
def run(self):
code = os.system('scripts/pep8.sh')
if code != 0:
sys.exit(1)
class Pyflakes(_Command):
description = 'Pyflakes analysis'
def run(self):
code = os.system('scripts/pyflakes.sh')
if code != 0:
sys.exit(1)
class Test(_Command):
description = 'Run tests'
def run(self):
os.system('scripts/test.sh')
class Check(_Command):
description = 'Run all checks'
def run(self):
codes = []
codes.append(os.system('scripts/pep8.sh'))
codes.append(os.system('scripts/pyflakes.sh'))
codes.append(os.system('scripts/test.sh'))
if any([code != 0 for code in codes]):
sys.stderr.write('One or more checks have failed.\n')
sys.stderr.flush()
sys.exit(1)
else:
sys.stdout.write('All checks have passed.\n')
sys.stdout.flush()
name = 'higher'
license = 'Apache License (2.0)'
packages = ['higher']
description = 'High-level abstraction library'
author = '<NAME>'
author_email = '<EMAIL>'
url = 'https://github.com/readallthebooks/higher'
download_url = 'https://github.com/readallthebooks/higher/releases'
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Topic :: Software Development :: Libraries :: Python Modules',
]
install_requires = [
]
long_description = '''\
Higher
-------
A high-level abstraction library
'''
setup(
cmdclass={
'pep8': PEP8,
'pyflakes': Pyflakes,
'test': Test,
'allchecks': Check
},
name=name,
packages=packages,
version=higher.__version__,
description=description,
author=author,
author_email=author_email,
url=url,
download_url=download_url,
classifiers=classifiers,
install_requires=install_requires,
long_description=long_description
) | 0.275812 | 0.099252 |
import math
options = " Square, Rectangle, Triangle, Circle, Trapezoid, Quit".split(",")
def square():
print("Enter Side of Sqaure:")
side = input()
Area = float(side) * float(side)
Perimeter = 4 * float(side)
print('Area of a Square: ', Area)
print('Perimeter of a Square: ', Perimeter)
return
def rectangle():
print("Enter Length of Rectangle:")
length = float(input())
print("Enter Width of Rectangle:")
width = float(input())
Area = float(length) * float(width)
Perimeter = 2 * (length + width)
print('Area of a Rectangle: ', Area)
print('Perimeter of a Rectangle: ', Perimeter)
return
def triangle():
print("Enter a of Triangle:")
a = input()
print("Enter b of Triangle:")
b = input()
print("Enter c of Triangle:")
c = input()
print("Enter h of Triangle:")
h = input()
Area = (float(b) * float(h))/2
Perimeter = float(a) + float(b) + float(c)
print('Area of a Triangle: ', Area)
print('Perimeter of a Triangle: ', Perimeter)
return
def circle():
print("Radius of a Circle:")
r = input()
Area = math.pi * float(r)**2
Circumference = 2 * float(r) * math.pi
print('Area of a Circle: ', Area)
print('Circumference of a Circle:', Circumference)
return
def trapezoid():
print("Trapezoid:")
a = float(input('Enter the First Base of a Trapezoid: '))
b = float(input('Enter the Second Base of a Trapezoid: '))
h = float(input('Enter the Height of a Trapezoid: '))
c = float(input('Enter the Side of a Trapezoid: '))
d = float(input('Enter the Side of a Trapezoid: '))
Area = 0.5 * (a + b) * h
Perimeter = a + b + c + d
print('Area of a Trapezoid: ', Area)
print('Perimeter of a Trapeziod:', Perimeter)
return
def main():
run_program = True
while run_program:
print("\n")
print("Choose Option:")
print("Area, Perimeter, Circumference of Shapes")
for i in range(1, len(options)+1):
print("{} - {}".format(i, options[i-1]))
choice = int(input())
if choice == 1:
print('_____Square_____')
square()
elif choice == 2:
print('_____Rectangle_____')
rectangle()
elif choice == 3:
print('_____Triangle_____')
triangle()
elif choice == 4:
print('_____Circle_____')
circle()
elif choice == 5:
print('_____Trapezoid_____')
trapezoid()
elif choice == 6:
run_program = False
if __name__ == "__main__":
main() | Math Apps Software/ACP_Apps.py | import math
options = " Square, Rectangle, Triangle, Circle, Trapezoid, Quit".split(",")
def square():
print("Enter Side of Sqaure:")
side = input()
Area = float(side) * float(side)
Perimeter = 4 * float(side)
print('Area of a Square: ', Area)
print('Perimeter of a Square: ', Perimeter)
return
def rectangle():
print("Enter Length of Rectangle:")
length = float(input())
print("Enter Width of Rectangle:")
width = float(input())
Area = float(length) * float(width)
Perimeter = 2 * (length + width)
print('Area of a Rectangle: ', Area)
print('Perimeter of a Rectangle: ', Perimeter)
return
def triangle():
print("Enter a of Triangle:")
a = input()
print("Enter b of Triangle:")
b = input()
print("Enter c of Triangle:")
c = input()
print("Enter h of Triangle:")
h = input()
Area = (float(b) * float(h))/2
Perimeter = float(a) + float(b) + float(c)
print('Area of a Triangle: ', Area)
print('Perimeter of a Triangle: ', Perimeter)
return
def circle():
print("Radius of a Circle:")
r = input()
Area = math.pi * float(r)**2
Circumference = 2 * float(r) * math.pi
print('Area of a Circle: ', Area)
print('Circumference of a Circle:', Circumference)
return
def trapezoid():
print("Trapezoid:")
a = float(input('Enter the First Base of a Trapezoid: '))
b = float(input('Enter the Second Base of a Trapezoid: '))
h = float(input('Enter the Height of a Trapezoid: '))
c = float(input('Enter the Side of a Trapezoid: '))
d = float(input('Enter the Side of a Trapezoid: '))
Area = 0.5 * (a + b) * h
Perimeter = a + b + c + d
print('Area of a Trapezoid: ', Area)
print('Perimeter of a Trapeziod:', Perimeter)
return
def main():
run_program = True
while run_program:
print("\n")
print("Choose Option:")
print("Area, Perimeter, Circumference of Shapes")
for i in range(1, len(options)+1):
print("{} - {}".format(i, options[i-1]))
choice = int(input())
if choice == 1:
print('_____Square_____')
square()
elif choice == 2:
print('_____Rectangle_____')
rectangle()
elif choice == 3:
print('_____Triangle_____')
triangle()
elif choice == 4:
print('_____Circle_____')
circle()
elif choice == 5:
print('_____Trapezoid_____')
trapezoid()
elif choice == 6:
run_program = False
if __name__ == "__main__":
main() | 0.450359 | 0.40157 |
import requests
import csv
import json
from utils import request_until_succeed, open_csv_w
from secrets import YOUTUBE_API_KEY
# make empty data array
rows=[]
# this is where we define the API query and all its variable
api_key = YOUTUBE_API_KEY
# add the YOUTUBE IDs into the lists here, the ID can usually be found at the end of the URL: https://www.youtube.com/watch?v=tGRzz0oqgUE
channel_ids = ['UCJFp8uSYCjXOMnkUyb3CQ3Q', 'UC-9-kyTW8ZkZNDHQJ6FgpwQ']
def get_channel_data(channel_id):
# api parameters
params = 'snippet,status,contentDetails,statistics,topicDetails,localizations'
api_url = 'https://www.googleapis.com/youtube/v3/channels?part='+ params +'&id='+ channel_id +'&key='+api_key
# this opens the link and tells your computer that the format it is reading is JSON
api_response = requests.get(api_url)
channeldetails = json.loads(api_response.text)
if len(channeldetails['items']) > 0:
# Assign values from API to variables
for item in channeldetails['items']:
youtube_id = item['id']
publishedAt = item['snippet']['publishedAt']
title = item['snippet']['title']
description = item['snippet']['description']
viewCount = item['statistics']['viewCount']
subscriberCount = item['statistics']['subscriberCount']
videoCount = item['statistics']['videoCount']
commentCount = item['statistics'].get('commentCount')
row = {
'youtube_id': youtube_id,
'publishedAt': publishedAt,
'title': title,
'description': description,
'viewCount': viewCount,
'subscriberCount': subscriberCount,
'videoCount': videoCount,
'commentCount': commentCount
}
rows.append(row)
else:
print(video_id + " is not a valid ID")
if __name__ == '__main__':
for channel_id in channel_ids:
get_channel_data(channel_id)
# make a new csv into which we will write all the rows
with open_csv_w('../output/youtube-channel-information.csv') as csvfile:
# these are the header names:
fieldnames = ['youtube_id','publishedAt','title','description','viewCount','subscriberCount','videoCount', 'commentCount']
# this creates your csv
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# this writes in the first row, which are the headers
writer.writeheader()
# this loops through your rows (the array you set at the beginning and have updated throughtout)
for row in rows:
# this takes each row and writes it into your csv
writer.writerow(row) | API-1/scripts/youtube-get-channel-info.py | import requests
import csv
import json
from utils import request_until_succeed, open_csv_w
from secrets import YOUTUBE_API_KEY
# make empty data array
rows=[]
# this is where we define the API query and all its variable
api_key = YOUTUBE_API_KEY
# add the YOUTUBE IDs into the lists here, the ID can usually be found at the end of the URL: https://www.youtube.com/watch?v=tGRzz0oqgUE
channel_ids = ['UCJFp8uSYCjXOMnkUyb3CQ3Q', 'UC-9-kyTW8ZkZNDHQJ6FgpwQ']
def get_channel_data(channel_id):
# api parameters
params = 'snippet,status,contentDetails,statistics,topicDetails,localizations'
api_url = 'https://www.googleapis.com/youtube/v3/channels?part='+ params +'&id='+ channel_id +'&key='+api_key
# this opens the link and tells your computer that the format it is reading is JSON
api_response = requests.get(api_url)
channeldetails = json.loads(api_response.text)
if len(channeldetails['items']) > 0:
# Assign values from API to variables
for item in channeldetails['items']:
youtube_id = item['id']
publishedAt = item['snippet']['publishedAt']
title = item['snippet']['title']
description = item['snippet']['description']
viewCount = item['statistics']['viewCount']
subscriberCount = item['statistics']['subscriberCount']
videoCount = item['statistics']['videoCount']
commentCount = item['statistics'].get('commentCount')
row = {
'youtube_id': youtube_id,
'publishedAt': publishedAt,
'title': title,
'description': description,
'viewCount': viewCount,
'subscriberCount': subscriberCount,
'videoCount': videoCount,
'commentCount': commentCount
}
rows.append(row)
else:
print(video_id + " is not a valid ID")
if __name__ == '__main__':
for channel_id in channel_ids:
get_channel_data(channel_id)
# make a new csv into which we will write all the rows
with open_csv_w('../output/youtube-channel-information.csv') as csvfile:
# these are the header names:
fieldnames = ['youtube_id','publishedAt','title','description','viewCount','subscriberCount','videoCount', 'commentCount']
# this creates your csv
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# this writes in the first row, which are the headers
writer.writeheader()
# this loops through your rows (the array you set at the beginning and have updated throughtout)
for row in rows:
# this takes each row and writes it into your csv
writer.writerow(row) | 0.218836 | 0.154089 |
import math
from django.db.utils import DatabaseError
from ..utils.querystring import update_querystring
PAGESIZES = [10, 20, 50, 100]
DEFAULT_PAGESIZE = 20
def from_request(request):
"""
Given a request, return tuple (pagesize, pagenumber).
"""
pagesize = positive_integer(
request.GET.get("pagesize", DEFAULT_PAGESIZE), DEFAULT_PAGESIZE)
pagenumber = positive_integer(
request.GET.get("pagenumber", 1), 1)
return pagesize, pagenumber
def pagesize_url(url, pagesize):
return update_querystring(url, pagesize=pagesize, pagenumber=1)
def pagenumber_url(url, pagenumber):
return update_querystring(url, pagenumber=pagenumber)
class Pager(object):
"""Handles pagination given queryset, page size, and page number."""
def __init__(self, queryset, pagesize, pagenumber):
"""Initialize a ``Pager`` with queryset, page size, and page number."""
self._queryset = queryset
self._sliced_qs = None
self._cached_total = None
self.pagesize = pagesize
self.pagenumber = pagenumber
def sizes(self):
"""
Returns an ordered list of pagesize links to display.
Includes all default page sizes, plus the current page size.
"""
return sorted(set(PAGESIZES + [self.pagesize]))
def pages(self):
"""Returns an iterable of valid page numbers."""
return xrange(1, self.num_pages + 1)
def display_pages(self):
"""
Returns an iterable of page numbers to display.
Elides some ranges of page numbers with None in long lists.
"""
MIN_SKIP = 3 # don't bother eliding just one or two pages
FROM_CURRENT = 2 # always show two to either side of current page
FROM_END = 2 # always show two from each end
skip = []
ret = []
for p in self.pages():
if (abs(p - self.pagenumber) > FROM_CURRENT and
p > FROM_END and (self.num_pages - p) > (FROM_END - 1)):
skip.append(p)
continue
if len(skip) < MIN_SKIP:
ret.extend(skip)
else:
ret.append(None)
ret.append(p)
skip = []
return ret
@property
def total(self):
"""The total number of objects."""
if self._cached_total is None:
# @@@ Django 1.5 should not require the .values part and could be
# changed to just:
# self._cached_total = self._queryset.count()
# Bug 18248
try:
self._cached_total = self._queryset.count()
except DatabaseError:
self._cached_total = self._queryset.values("id").count()
return self._cached_total
@property
def objects(self):
"""
The iterable of objects on the current page.
Lazily slices the queryset and caches the sliced queryset for
subsequent access.
"""
if self._sliced_qs is None:
if not self.high:
self._sliced_qs = self._queryset.empty()
else:
self._sliced_qs = self._queryset[self.low - 1:self.high]
return self._sliced_qs
@property
def num_pages(self):
"""The total number of pages."""
return max(1, int(math.ceil(float(self.total) / self.pagesize)))
@property
def low(self):
"""Ordinal of the first object on the current page."""
return self._constrain((self.pagesize * (self.pagenumber - 1)) + 1)
@property
def high(self):
"""Ordinal of the last object on the current page."""
return self._constrain(self.pagesize * self.pagenumber)
def _constrain(self, num):
"""Return given ``num`` constrained to between 0 and self.total."""
return min(self.total, max(0, num))
@property
def prev(self):
"""Page number of the previous page; None if no previous page."""
prev = self.pagenumber - 1
if prev < 1:
return None
return prev
@property
def next(self):
"""Page number of the next page; None if there is no next page."""
next = self.pagenumber + 1
if next > self.num_pages:
return None
return next
def positive_integer(val, default):
"""Attempt to coerce ``val`` to a positive integer, with fallback."""
try:
val = int(val)
except (AttributeError, TypeError, ValueError):
val = default
if val < 1:
val = 1
return val | moztrap/view/lists/pagination.py | import math
from django.db.utils import DatabaseError
from ..utils.querystring import update_querystring
PAGESIZES = [10, 20, 50, 100]
DEFAULT_PAGESIZE = 20
def from_request(request):
"""
Given a request, return tuple (pagesize, pagenumber).
"""
pagesize = positive_integer(
request.GET.get("pagesize", DEFAULT_PAGESIZE), DEFAULT_PAGESIZE)
pagenumber = positive_integer(
request.GET.get("pagenumber", 1), 1)
return pagesize, pagenumber
def pagesize_url(url, pagesize):
return update_querystring(url, pagesize=pagesize, pagenumber=1)
def pagenumber_url(url, pagenumber):
return update_querystring(url, pagenumber=pagenumber)
class Pager(object):
"""Handles pagination given queryset, page size, and page number."""
def __init__(self, queryset, pagesize, pagenumber):
"""Initialize a ``Pager`` with queryset, page size, and page number."""
self._queryset = queryset
self._sliced_qs = None
self._cached_total = None
self.pagesize = pagesize
self.pagenumber = pagenumber
def sizes(self):
"""
Returns an ordered list of pagesize links to display.
Includes all default page sizes, plus the current page size.
"""
return sorted(set(PAGESIZES + [self.pagesize]))
def pages(self):
"""Returns an iterable of valid page numbers."""
return xrange(1, self.num_pages + 1)
def display_pages(self):
"""
Returns an iterable of page numbers to display.
Elides some ranges of page numbers with None in long lists.
"""
MIN_SKIP = 3 # don't bother eliding just one or two pages
FROM_CURRENT = 2 # always show two to either side of current page
FROM_END = 2 # always show two from each end
skip = []
ret = []
for p in self.pages():
if (abs(p - self.pagenumber) > FROM_CURRENT and
p > FROM_END and (self.num_pages - p) > (FROM_END - 1)):
skip.append(p)
continue
if len(skip) < MIN_SKIP:
ret.extend(skip)
else:
ret.append(None)
ret.append(p)
skip = []
return ret
@property
def total(self):
"""The total number of objects."""
if self._cached_total is None:
# @@@ Django 1.5 should not require the .values part and could be
# changed to just:
# self._cached_total = self._queryset.count()
# Bug 18248
try:
self._cached_total = self._queryset.count()
except DatabaseError:
self._cached_total = self._queryset.values("id").count()
return self._cached_total
@property
def objects(self):
"""
The iterable of objects on the current page.
Lazily slices the queryset and caches the sliced queryset for
subsequent access.
"""
if self._sliced_qs is None:
if not self.high:
self._sliced_qs = self._queryset.empty()
else:
self._sliced_qs = self._queryset[self.low - 1:self.high]
return self._sliced_qs
@property
def num_pages(self):
"""The total number of pages."""
return max(1, int(math.ceil(float(self.total) / self.pagesize)))
@property
def low(self):
"""Ordinal of the first object on the current page."""
return self._constrain((self.pagesize * (self.pagenumber - 1)) + 1)
@property
def high(self):
"""Ordinal of the last object on the current page."""
return self._constrain(self.pagesize * self.pagenumber)
def _constrain(self, num):
"""Return given ``num`` constrained to between 0 and self.total."""
return min(self.total, max(0, num))
@property
def prev(self):
"""Page number of the previous page; None if no previous page."""
prev = self.pagenumber - 1
if prev < 1:
return None
return prev
@property
def next(self):
"""Page number of the next page; None if there is no next page."""
next = self.pagenumber + 1
if next > self.num_pages:
return None
return next
def positive_integer(val, default):
"""Attempt to coerce ``val`` to a positive integer, with fallback."""
try:
val = int(val)
except (AttributeError, TypeError, ValueError):
val = default
if val < 1:
val = 1
return val | 0.425963 | 0.260334 |
import os
import sys
import time
import argparse
import warnings
import numpy as np
from functools import partial
from paddle.fluid.core import GraphPyService, GraphPyServer, GraphPyClient
from pgl.utils.logger import log
from pgl.distributed import helper
__all__ = ['DistGraphServer', 'DistGraphClient']
def stream_shuffle_generator(dataloader,
server_idx,
batch_size,
shuffle_size=20000):
"""
Args:
dataloader: iterable dataset
server_idx: int
batch_size: int
shuffle_size: int
"""
buffer_list = []
for nodes in dataloader(server_idx):
if len(buffer_list) < shuffle_size:
buffer_list.extend(nodes)
else:
random_ids = np.random.choice(
len(buffer_list), size=len(nodes), replace=False)
batch_nodes = [buffer_list[i] for i in random_ids]
for ii, nid in enumerate(nodes):
buffer_list[random_ids[ii]] = nid
yield batch_nodes
if len(buffer_list) > 0:
np.random.shuffle(buffer_list)
start = 0
while True:
batch_nodes = buffer_list[start:(start + batch_size)]
start += batch_size
if len(batch_nodes) > 0:
yield batch_nodes
else:
break
class DistGraphServer(object):
def __init__(self, config, shard_num, ip_config, server_id,
is_block=False):
"""
Args:
config: a yaml configure file or a dict of parameters.
Below are some necessary hyper-parameters:
```
etype2files: "u2e2t:./your_path/edges.txt"
symmetry: True
ntype2files: "u:./your_path/node_types.txt,t:./your_path/node_types.txt"
```
shard_num: int, the sharding number of graph data
ip_config: list of IP address or a path of IP configuration file
For example, the following TXT shows a 4-machine configuration:
172.31.50.123:8245
172.31.50.124:8245
172.31.50.125:8245
172.31.50.126:8245
server_id: int
is_block: bool, whether to block the server.
"""
self.config = helper.load_config(config)
self.shard_num = shard_num
self.server_id = server_id
self.is_block = is_block
if self.config.symmetry:
self.symmetry = self.config.symmetry
else:
self.symmetry = False
self.ip_addr = helper.load_ip_addr(ip_config)
self.ntype2files = helper.parse_files(self.config.ntype2files)
self.node_type_list = list(self.ntype2files.keys())
self.etype2files = helper.parse_files(self.config.etype2files)
self.edge_type_list = helper.get_all_edge_type(self.etype2files,
self.symmetry)
self._server = GraphPyServer()
self._server.set_up(self.ip_addr, self.shard_num, self.node_type_list,
self.edge_type_list, self.server_id)
if self.config.nfeat_info:
for item in self.config.nfeat_info:
self._server.add_table_feat_conf(*item)
self._server.start_server(self.is_block)
class DistGraphClient(object):
def __init__(self, config, shard_num, ip_config, client_id):
"""
Args:
config: a yaml configure file or a dict of parameters
Below are some necessary hyper-parameters:
```
etype2files: "u2e2t:./your_path/edges.txt"
symmetry: True
ntype2files: "u:./your_path/node_types.txt,t:./your_path/node_types.txt"
```
shard_num: int, the sharding number of graph data
ip_config: list of IP address or a path of IP configuration file
For example, the following TXT shows a 4-machine configuration:
172.31.50.123:8245
172.31.50.124:8245
172.31.50.125:8245
172.31.50.126:8245
client_id: int
"""
self.config = helper.load_config(config)
self.shard_num = shard_num
self.client_id = client_id
if self.config.symmetry:
self.symmetry = self.config.symmetry
else:
self.symmetry = False
if self.config.node_batch_stream_shuffle_size:
self.stream_shuffle_size = self.config.node_batch_stream_shuffle_size
else:
warnings.warn("node_batch_stream_shuffle_size is not specified, "
"default value is 20000")
self.stream_shuffle_size = 20000
self.ip_addr = helper.load_ip_addr(ip_config)
self.server_num = len(self.ip_addr.split(";"))
if self.config.nfeat_info is not None:
self.nfeat_info = helper.convert_nfeat_info(self.config.nfeat_info)
else:
self.nfeat_info = None
self.ntype2files = helper.parse_files(self.config.ntype2files)
self.node_type_list = list(self.ntype2files.keys())
self.etype2files = helper.parse_files(self.config.etype2files)
self.edge_type_list = helper.get_all_edge_type(self.etype2files,
self.symmetry)
self._client = GraphPyClient()
self._client.set_up(self.ip_addr, self.shard_num, self.node_type_list,
self.edge_type_list, self.client_id)
self._client.start_client()
def load_edges(self):
for etype, file_or_dir in self.etype2files.items():
file_list = [f for f in helper.get_files(file_or_dir)]
filepath = ";".join(file_list)
log.info("load edges of type %s from %s" % (etype, filepath))
self._client.load_edge_file(etype, filepath, False)
if self.symmetry:
r_etype = helper.get_inverse_etype(etype)
self._client.load_edge_file(r_etype, filepath, True)
def load_node_types(self):
for ntype, file_or_dir in self.ntype2files.items():
file_list = [f for f in helper.get_files(file_or_dir)]
filepath = ";".join(file_list)
log.info("load nodes of type %s from %s" % (ntype, filepath))
self._client.load_node_file(ntype, filepath)
def sample_predecessor(self, nodes, max_degree, edge_type=None):
"""
Args:
nodes: list of node ID
max_degree: int, sample number of each node
edge_type: str, edge type
"""
if edge_type is None:
if len(self.edge_type_list) > 1:
msg = "There are %s (%s) edge types in the Graph, " \
% (len(self.edge_type_list), self.edge_type_list)
msg += "The argument of edge_type should be specified, "
msg += "but got [None]."
raise ValueError(msg)
else:
edge_type = self.edge_type_list[0]
res = self._client.batch_sample_neighboors(edge_type, nodes,
max_degree)
neighs = [[] for _ in range(len(res))]
for idx, n_neighs in enumerate(res):
for pair in n_neighs:
neighs[idx].append(pair[0])
return neighs
def sample_successor(self, nodes, max_degree, edge_type=None):
"""
Args:
nodes: list of node ID
max_degree: int, sample number of each node
edge_type: str, edge type
"""
if edge_type is None:
if len(self.edge_type_list) > 1:
msg = "There are %s (%s) edge types in the Graph, " \
% (len(self.edge_type_list), self.edge_type_list)
msg += "The argument of edge_type should be specified, "
msg += "but got [None]."
raise ValueError(msg)
else:
edge_type = self.edge_type_list[0]
res = self._client.batch_sample_neighboors(edge_type, nodes,
max_degree)
neighs = [[] for _ in range(len(res))]
for idx, n_neighs in enumerate(res):
for pair in n_neighs:
neighs[idx].append(pair[0])
return neighs
def random_sample_nodes(self, node_type, size):
"""
Args:
node_type: str,
size: int
"""
sampled_nodes = []
server_list = list(range(self.server_num))
np.random.shuffle(server_list)
left_size = size
for server_idx in server_list:
nodes = self._client.random_sample_nodes(node_type, server_idx,
left_size)
sampled_nodes.extend(nodes)
if len(sampled_nodes) >= size:
break
else:
left_size = size - len(sampled_nodes)
return sampled_nodes
def _node_batch_iter_from_server(self,
server_idx,
batch_size,
node_type,
rank=0,
nrank=1):
assert batch_size > 0, \
"batch_size should be larger than 0, but got %s <= 0" % batch_size
assert server_idx >= 0 and server_idx < self.server_num, \
"server_idx should be in range 0 <= server_idx < server_num, but got %s" \
% server_idx
start = rank
step = nrank
while True:
res = self._client.pull_graph_list(node_type, server_idx, start,
batch_size, step)
start += (nrank * batch_size)
nodes = [x.get_id() for x in res]
if len(nodes) > 0:
yield nodes
if len(nodes) != batch_size:
break
def node_batch_iter(self,
batch_size,
node_type,
shuffle=True,
rank=0,
nrank=1):
"""
Args:
batch_size: int
node_type: string
shuffle: bool
rank: int
nrank: int
"""
node_iter = partial(
self._node_batch_iter_from_server,
batch_size=batch_size,
node_type=node_type,
rank=rank,
nrank=nrank)
server_idx_list = list(range(self.server_num))
np.random.shuffle(server_idx_list)
for server_idx in server_idx_list:
if shuffle:
for nodes in stream_shuffle_generator(
node_iter, server_idx, batch_size,
self.stream_shuffle_size):
yield nodes
else:
for nodes in node_iter(server_idx):
yield nodes
def get_node_feat(self, nodes, node_type, feat_names):
"""
Args:
nodes: list of node ID
node_type: str, node type
feat_names: the node feature name or a list of node feature name
"""
flag = False
if isinstance(feat_names, str):
feat_names = [feat_names]
flag = True
elif isinstance(feat_names, list):
pass
else:
raise TypeError(
"The argument of feat_names should a node feature name "
"or a list of node feature name. "
"But got %s" % (type(feat_names)))
byte_nfeat_list = self._client.get_node_feat(node_type, nodes,
feat_names)
# convert bytes to dtype
nfeat_list = []
for fn_idx, fn in enumerate(feat_names):
dtype, _ = self.nfeat_info[node_type][fn]
if dtype == "string":
f_list = [
item.decode("utf-8") for item in byte_nfeat_list[fn_idx]
]
else:
f_list = [
np.frombuffer(item, dtype)
for item in byte_nfeat_list[fn_idx]
]
nfeat_list.append(f_list)
if flag:
return nfeat_list[0]
else:
return nfeat_list
def stop_server(self):
self._client.stop_server()
def get_node_types(self):
return self.node_type_list
def get_edge_types(self):
return self.edge_type_list | pgl/distributed/dist_graph.py | import os
import sys
import time
import argparse
import warnings
import numpy as np
from functools import partial
from paddle.fluid.core import GraphPyService, GraphPyServer, GraphPyClient
from pgl.utils.logger import log
from pgl.distributed import helper
__all__ = ['DistGraphServer', 'DistGraphClient']
def stream_shuffle_generator(dataloader,
server_idx,
batch_size,
shuffle_size=20000):
"""
Args:
dataloader: iterable dataset
server_idx: int
batch_size: int
shuffle_size: int
"""
buffer_list = []
for nodes in dataloader(server_idx):
if len(buffer_list) < shuffle_size:
buffer_list.extend(nodes)
else:
random_ids = np.random.choice(
len(buffer_list), size=len(nodes), replace=False)
batch_nodes = [buffer_list[i] for i in random_ids]
for ii, nid in enumerate(nodes):
buffer_list[random_ids[ii]] = nid
yield batch_nodes
if len(buffer_list) > 0:
np.random.shuffle(buffer_list)
start = 0
while True:
batch_nodes = buffer_list[start:(start + batch_size)]
start += batch_size
if len(batch_nodes) > 0:
yield batch_nodes
else:
break
class DistGraphServer(object):
def __init__(self, config, shard_num, ip_config, server_id,
is_block=False):
"""
Args:
config: a yaml configure file or a dict of parameters.
Below are some necessary hyper-parameters:
```
etype2files: "u2e2t:./your_path/edges.txt"
symmetry: True
ntype2files: "u:./your_path/node_types.txt,t:./your_path/node_types.txt"
```
shard_num: int, the sharding number of graph data
ip_config: list of IP address or a path of IP configuration file
For example, the following TXT shows a 4-machine configuration:
172.31.50.123:8245
172.31.50.124:8245
172.31.50.125:8245
172.31.50.126:8245
server_id: int
is_block: bool, whether to block the server.
"""
self.config = helper.load_config(config)
self.shard_num = shard_num
self.server_id = server_id
self.is_block = is_block
if self.config.symmetry:
self.symmetry = self.config.symmetry
else:
self.symmetry = False
self.ip_addr = helper.load_ip_addr(ip_config)
self.ntype2files = helper.parse_files(self.config.ntype2files)
self.node_type_list = list(self.ntype2files.keys())
self.etype2files = helper.parse_files(self.config.etype2files)
self.edge_type_list = helper.get_all_edge_type(self.etype2files,
self.symmetry)
self._server = GraphPyServer()
self._server.set_up(self.ip_addr, self.shard_num, self.node_type_list,
self.edge_type_list, self.server_id)
if self.config.nfeat_info:
for item in self.config.nfeat_info:
self._server.add_table_feat_conf(*item)
self._server.start_server(self.is_block)
class DistGraphClient(object):
def __init__(self, config, shard_num, ip_config, client_id):
"""
Args:
config: a yaml configure file or a dict of parameters
Below are some necessary hyper-parameters:
```
etype2files: "u2e2t:./your_path/edges.txt"
symmetry: True
ntype2files: "u:./your_path/node_types.txt,t:./your_path/node_types.txt"
```
shard_num: int, the sharding number of graph data
ip_config: list of IP address or a path of IP configuration file
For example, the following TXT shows a 4-machine configuration:
172.31.50.123:8245
172.31.50.124:8245
172.31.50.125:8245
172.31.50.126:8245
client_id: int
"""
self.config = helper.load_config(config)
self.shard_num = shard_num
self.client_id = client_id
if self.config.symmetry:
self.symmetry = self.config.symmetry
else:
self.symmetry = False
if self.config.node_batch_stream_shuffle_size:
self.stream_shuffle_size = self.config.node_batch_stream_shuffle_size
else:
warnings.warn("node_batch_stream_shuffle_size is not specified, "
"default value is 20000")
self.stream_shuffle_size = 20000
self.ip_addr = helper.load_ip_addr(ip_config)
self.server_num = len(self.ip_addr.split(";"))
if self.config.nfeat_info is not None:
self.nfeat_info = helper.convert_nfeat_info(self.config.nfeat_info)
else:
self.nfeat_info = None
self.ntype2files = helper.parse_files(self.config.ntype2files)
self.node_type_list = list(self.ntype2files.keys())
self.etype2files = helper.parse_files(self.config.etype2files)
self.edge_type_list = helper.get_all_edge_type(self.etype2files,
self.symmetry)
self._client = GraphPyClient()
self._client.set_up(self.ip_addr, self.shard_num, self.node_type_list,
self.edge_type_list, self.client_id)
self._client.start_client()
def load_edges(self):
for etype, file_or_dir in self.etype2files.items():
file_list = [f for f in helper.get_files(file_or_dir)]
filepath = ";".join(file_list)
log.info("load edges of type %s from %s" % (etype, filepath))
self._client.load_edge_file(etype, filepath, False)
if self.symmetry:
r_etype = helper.get_inverse_etype(etype)
self._client.load_edge_file(r_etype, filepath, True)
def load_node_types(self):
for ntype, file_or_dir in self.ntype2files.items():
file_list = [f for f in helper.get_files(file_or_dir)]
filepath = ";".join(file_list)
log.info("load nodes of type %s from %s" % (ntype, filepath))
self._client.load_node_file(ntype, filepath)
def sample_predecessor(self, nodes, max_degree, edge_type=None):
"""
Args:
nodes: list of node ID
max_degree: int, sample number of each node
edge_type: str, edge type
"""
if edge_type is None:
if len(self.edge_type_list) > 1:
msg = "There are %s (%s) edge types in the Graph, " \
% (len(self.edge_type_list), self.edge_type_list)
msg += "The argument of edge_type should be specified, "
msg += "but got [None]."
raise ValueError(msg)
else:
edge_type = self.edge_type_list[0]
res = self._client.batch_sample_neighboors(edge_type, nodes,
max_degree)
neighs = [[] for _ in range(len(res))]
for idx, n_neighs in enumerate(res):
for pair in n_neighs:
neighs[idx].append(pair[0])
return neighs
def sample_successor(self, nodes, max_degree, edge_type=None):
"""
Args:
nodes: list of node ID
max_degree: int, sample number of each node
edge_type: str, edge type
"""
if edge_type is None:
if len(self.edge_type_list) > 1:
msg = "There are %s (%s) edge types in the Graph, " \
% (len(self.edge_type_list), self.edge_type_list)
msg += "The argument of edge_type should be specified, "
msg += "but got [None]."
raise ValueError(msg)
else:
edge_type = self.edge_type_list[0]
res = self._client.batch_sample_neighboors(edge_type, nodes,
max_degree)
neighs = [[] for _ in range(len(res))]
for idx, n_neighs in enumerate(res):
for pair in n_neighs:
neighs[idx].append(pair[0])
return neighs
def random_sample_nodes(self, node_type, size):
"""
Args:
node_type: str,
size: int
"""
sampled_nodes = []
server_list = list(range(self.server_num))
np.random.shuffle(server_list)
left_size = size
for server_idx in server_list:
nodes = self._client.random_sample_nodes(node_type, server_idx,
left_size)
sampled_nodes.extend(nodes)
if len(sampled_nodes) >= size:
break
else:
left_size = size - len(sampled_nodes)
return sampled_nodes
def _node_batch_iter_from_server(self,
server_idx,
batch_size,
node_type,
rank=0,
nrank=1):
assert batch_size > 0, \
"batch_size should be larger than 0, but got %s <= 0" % batch_size
assert server_idx >= 0 and server_idx < self.server_num, \
"server_idx should be in range 0 <= server_idx < server_num, but got %s" \
% server_idx
start = rank
step = nrank
while True:
res = self._client.pull_graph_list(node_type, server_idx, start,
batch_size, step)
start += (nrank * batch_size)
nodes = [x.get_id() for x in res]
if len(nodes) > 0:
yield nodes
if len(nodes) != batch_size:
break
def node_batch_iter(self,
batch_size,
node_type,
shuffle=True,
rank=0,
nrank=1):
"""
Args:
batch_size: int
node_type: string
shuffle: bool
rank: int
nrank: int
"""
node_iter = partial(
self._node_batch_iter_from_server,
batch_size=batch_size,
node_type=node_type,
rank=rank,
nrank=nrank)
server_idx_list = list(range(self.server_num))
np.random.shuffle(server_idx_list)
for server_idx in server_idx_list:
if shuffle:
for nodes in stream_shuffle_generator(
node_iter, server_idx, batch_size,
self.stream_shuffle_size):
yield nodes
else:
for nodes in node_iter(server_idx):
yield nodes
def get_node_feat(self, nodes, node_type, feat_names):
"""
Args:
nodes: list of node ID
node_type: str, node type
feat_names: the node feature name or a list of node feature name
"""
flag = False
if isinstance(feat_names, str):
feat_names = [feat_names]
flag = True
elif isinstance(feat_names, list):
pass
else:
raise TypeError(
"The argument of feat_names should a node feature name "
"or a list of node feature name. "
"But got %s" % (type(feat_names)))
byte_nfeat_list = self._client.get_node_feat(node_type, nodes,
feat_names)
# convert bytes to dtype
nfeat_list = []
for fn_idx, fn in enumerate(feat_names):
dtype, _ = self.nfeat_info[node_type][fn]
if dtype == "string":
f_list = [
item.decode("utf-8") for item in byte_nfeat_list[fn_idx]
]
else:
f_list = [
np.frombuffer(item, dtype)
for item in byte_nfeat_list[fn_idx]
]
nfeat_list.append(f_list)
if flag:
return nfeat_list[0]
else:
return nfeat_list
def stop_server(self):
self._client.stop_server()
def get_node_types(self):
return self.node_type_list
def get_edge_types(self):
return self.edge_type_list | 0.471223 | 0.414721 |
from datetime import datetime
import urllib2
from django.http import Http404
import bleach
from celeryutils import task
import commonware.log
from pyquery import PyQuery as pq
from models import MdnCache
log = commonware.log.getLogger('z.ecosystem.task')
ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
'div', 'span', 'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'pre', 'code',
'dl', 'dt', 'dd', 'small', 'sup', 'u',
'img',
'input',
'table', 'tbody', 'thead', 'tr', 'th', 'td',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
]
ALLOWED_ATTRIBUTES = bleach.ALLOWED_ATTRIBUTES
ALLOWED_ATTRIBUTES['div'] = ['class', 'id']
ALLOWED_ATTRIBUTES['p'] = ['class', 'id']
ALLOWED_ATTRIBUTES['pre'] = ['class', 'id']
ALLOWED_ATTRIBUTES['span'] = ['title', 'id']
ALLOWED_ATTRIBUTES['img'] = ['src', 'id', 'align', 'alt', 'class', 'is',
'title', 'style']
ALLOWED_ATTRIBUTES['a'] = ['id', 'class', 'href', 'title', ]
ALLOWED_ATTRIBUTES.update(dict((x, ['name', ]) for x in
('h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
ALLOWED_ATTRIBUTES.update(dict((x, ['id', ]) for x in (
'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'code', 'dl', 'dt', 'dd',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
)))
VIDEO_HEIGHT = 360
VIDEO_WIDTH = 640
tutorials = [
{
'title': 'Foundations of an HTML5 Web app',
'name': 'html5',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Tutorials/General/Foundations_of_an_HTML5_Web_app?raw=1¯os=true'
},
{
'title': 'Manifests',
'name': 'manifests',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Manifest?raw=1¯os=true'
},
{
'title': 'Manifest FAQ',
'name': 'manifest_faq',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/FAQs/About_app_manifests?raw=1¯os=true'
},
{
'title': 'Firefox OS',
'name': 'firefox_os',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Mozilla/Boot_to_Gecko?raw=1¯os=true'
},
{
'title': 'Marketplace Submission',
'name': 'mkt_submission',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Submitting_an_app?raw=1¯os=true'
},
{
'title': 'Hosting',
'name': 'mkt_hosting',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Tutorials/General/Publishing_the_app?raw=1¯os=true'
},
{
'title': 'Design Principles',
'name': 'principles',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Design_Principles?raw=1¯os=true'
},
{
'title': "Your App's Elevator Pitch",
'name': 'purpose',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Your_apps_elevator_pitch?raw=1¯os=true'
},
{
'title': 'Design Patterns',
'name': 'patterns',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Intro_to_responsive_design?raw=1¯os=true'
},
{
'title': 'References',
'name': 'references',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/References?raw=1¯os=true'
},
{
'title': 'Dev Tools',
'name': 'devtools',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/marketplace/App_developer_tools?raw=1¯os=true'
},
{
'title': 'App Templates',
'name': 'templates',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/App_templates?raw=1¯os=true'
},
{
'title': 'Custom Elements',
'name': 'custom_elements',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Custom_Elements?raw=1¯os=true'
},
{
'title': 'Packaged Apps',
'name': 'packaged_apps',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Packaged_apps?raw=1¯os=true'
},
{
'title': 'Using Firefox OS Simulator',
'name': 'using_firefox_os_simulator',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Mozilla/Boot_to_Gecko/Using_Firefox_OS_simulator?raw=1¯os=true'
}
]
# Instead of duplicating the tutorials entry above for each possible
# locale, we are going to try each locale in this array for each tutorial
# page entry. We may get some 404s, but that's ok if some translations
# are not finished yet. We grab the ones that are completed.
locales = ['en-US', 'es', 'pt-BR']
@task
def refresh_mdn_cache(**kw):
log.info('Refreshing MDN Cache')
try:
_update_mdn_items(tutorials)
except Exception as e:
log.error(u'Failed to update MDN articles, reason: %s' % e,
exc_info=True)
def _update_mdn_items(items):
batch_updated = datetime.now()
for item in items:
for locale in locales:
url = item['mdn'] % {'locale': locale}
name = item['name'] + '.' + locale
log.info('Fetching MDN article "%s": %s' % (name, url))
try:
content = _fetch_mdn_page(url)
except Http404:
log.error(u'404 on MDN article "%s": %s' % (name, url))
continue
except Exception as e:
log.error(u'Error fetching MDN article "%s" reason: %s' %
(name, e))
raise
model, created = MdnCache.objects.get_or_create(
name=item['name'], locale=locale)
model.title = item['title']
model.content = content
model.permalink = url
model.save()
log.info(u'Updated MDN article "%s"' % name)
MdnCache.objects.filter(modified__lt=batch_updated).delete()
def _fetch_mdn_page(url):
data = bleach.clean(_get_page(url), attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS, strip_comments=False)
root = pq(data)
anchors = root.find('a')
videos = root.find('.video-item')
images = root.find('img')
video_frame = ('<iframe frameborder="0" width="%d" '
'height="%d" src="%s">%s</iframe>')
if anchors:
# We only want anchors that have an href attribute available.
external_links = anchors.filter(lambda i: pq(this).attr('href'))
for link in external_links:
link = pq(link)
if link.hasClass('external') or link.attr('rel') == 'external':
link.attr('rel', 'external')
# PyQuery doesn't like the idea of filtering like
# external_links.filter('a[href^="/"'), so we'll just do as they
# suggest for now.
mdn_links = external_links.filter(
lambda i: str(pq(this).attr('href')).startswith('/')
)
mdn_links.each(lambda e: e.attr(
'href', 'https://developer.mozilla.org%s' % e.attr('href'))
)
if images:
image_links = images.filter(
lambda i: str(pq(this).attr('src')).startswith('/')
)
image_links.each(lambda e: e.attr(
'src', 'https://developer.mozilla.org%s' % e.attr('src'))
)
for video in videos:
video = pq(video)
video.replaceWith(pq(video_frame % (VIDEO_WIDTH,
VIDEO_HEIGHT,
video.attr('href'),
video.attr('href')))
)
return str(root)
def _get_page(url):
try:
return urllib2.urlopen(url).read()
except urllib2.URLError as e:
if e.code == 404:
raise Http404
else:
raise | mkt/ecosystem/tasks.py | from datetime import datetime
import urllib2
from django.http import Http404
import bleach
from celeryutils import task
import commonware.log
from pyquery import PyQuery as pq
from models import MdnCache
log = commonware.log.getLogger('z.ecosystem.task')
ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
'div', 'span', 'p', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'pre', 'code',
'dl', 'dt', 'dd', 'small', 'sup', 'u',
'img',
'input',
'table', 'tbody', 'thead', 'tr', 'th', 'td',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
]
ALLOWED_ATTRIBUTES = bleach.ALLOWED_ATTRIBUTES
ALLOWED_ATTRIBUTES['div'] = ['class', 'id']
ALLOWED_ATTRIBUTES['p'] = ['class', 'id']
ALLOWED_ATTRIBUTES['pre'] = ['class', 'id']
ALLOWED_ATTRIBUTES['span'] = ['title', 'id']
ALLOWED_ATTRIBUTES['img'] = ['src', 'id', 'align', 'alt', 'class', 'is',
'title', 'style']
ALLOWED_ATTRIBUTES['a'] = ['id', 'class', 'href', 'title', ]
ALLOWED_ATTRIBUTES.update(dict((x, ['name', ]) for x in
('h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
ALLOWED_ATTRIBUTES.update(dict((x, ['id', ]) for x in (
'p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'code', 'dl', 'dt', 'dd',
'section', 'header', 'footer', 'nav', 'article', 'aside', 'figure',
'dialog', 'hgroup', 'mark', 'time', 'meter', 'command', 'output',
'progress', 'audio', 'video', 'details', 'datagrid', 'datalist', 'table',
'address'
)))
VIDEO_HEIGHT = 360
VIDEO_WIDTH = 640
tutorials = [
{
'title': 'Foundations of an HTML5 Web app',
'name': 'html5',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Tutorials/General/Foundations_of_an_HTML5_Web_app?raw=1¯os=true'
},
{
'title': 'Manifests',
'name': 'manifests',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Manifest?raw=1¯os=true'
},
{
'title': 'Manifest FAQ',
'name': 'manifest_faq',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/FAQs/About_app_manifests?raw=1¯os=true'
},
{
'title': 'Firefox OS',
'name': 'firefox_os',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Mozilla/Boot_to_Gecko?raw=1¯os=true'
},
{
'title': 'Marketplace Submission',
'name': 'mkt_submission',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Submitting_an_app?raw=1¯os=true'
},
{
'title': 'Hosting',
'name': 'mkt_hosting',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Tutorials/General/Publishing_the_app?raw=1¯os=true'
},
{
'title': 'Design Principles',
'name': 'principles',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Design_Principles?raw=1¯os=true'
},
{
'title': "Your App's Elevator Pitch",
'name': 'purpose',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Your_apps_elevator_pitch?raw=1¯os=true'
},
{
'title': 'Design Patterns',
'name': 'patterns',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/Intro_to_responsive_design?raw=1¯os=true'
},
{
'title': 'References',
'name': 'references',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Design_Guidelines/References?raw=1¯os=true'
},
{
'title': 'Dev Tools',
'name': 'devtools',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/marketplace/App_developer_tools?raw=1¯os=true'
},
{
'title': 'App Templates',
'name': 'templates',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/App_templates?raw=1¯os=true'
},
{
'title': 'Custom Elements',
'name': 'custom_elements',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Custom_Elements?raw=1¯os=true'
},
{
'title': 'Packaged Apps',
'name': 'packaged_apps',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Apps/Packaged_apps?raw=1¯os=true'
},
{
'title': 'Using Firefox OS Simulator',
'name': 'using_firefox_os_simulator',
'mdn': 'https://developer.mozilla.org/%(locale)s/docs/Mozilla/Boot_to_Gecko/Using_Firefox_OS_simulator?raw=1¯os=true'
}
]
# Instead of duplicating the tutorials entry above for each possible
# locale, we are going to try each locale in this array for each tutorial
# page entry. We may get some 404s, but that's ok if some translations
# are not finished yet. We grab the ones that are completed.
locales = ['en-US', 'es', 'pt-BR']
@task
def refresh_mdn_cache(**kw):
log.info('Refreshing MDN Cache')
try:
_update_mdn_items(tutorials)
except Exception as e:
log.error(u'Failed to update MDN articles, reason: %s' % e,
exc_info=True)
def _update_mdn_items(items):
batch_updated = datetime.now()
for item in items:
for locale in locales:
url = item['mdn'] % {'locale': locale}
name = item['name'] + '.' + locale
log.info('Fetching MDN article "%s": %s' % (name, url))
try:
content = _fetch_mdn_page(url)
except Http404:
log.error(u'404 on MDN article "%s": %s' % (name, url))
continue
except Exception as e:
log.error(u'Error fetching MDN article "%s" reason: %s' %
(name, e))
raise
model, created = MdnCache.objects.get_or_create(
name=item['name'], locale=locale)
model.title = item['title']
model.content = content
model.permalink = url
model.save()
log.info(u'Updated MDN article "%s"' % name)
MdnCache.objects.filter(modified__lt=batch_updated).delete()
def _fetch_mdn_page(url):
data = bleach.clean(_get_page(url), attributes=ALLOWED_ATTRIBUTES,
tags=ALLOWED_TAGS, strip_comments=False)
root = pq(data)
anchors = root.find('a')
videos = root.find('.video-item')
images = root.find('img')
video_frame = ('<iframe frameborder="0" width="%d" '
'height="%d" src="%s">%s</iframe>')
if anchors:
# We only want anchors that have an href attribute available.
external_links = anchors.filter(lambda i: pq(this).attr('href'))
for link in external_links:
link = pq(link)
if link.hasClass('external') or link.attr('rel') == 'external':
link.attr('rel', 'external')
# PyQuery doesn't like the idea of filtering like
# external_links.filter('a[href^="/"'), so we'll just do as they
# suggest for now.
mdn_links = external_links.filter(
lambda i: str(pq(this).attr('href')).startswith('/')
)
mdn_links.each(lambda e: e.attr(
'href', 'https://developer.mozilla.org%s' % e.attr('href'))
)
if images:
image_links = images.filter(
lambda i: str(pq(this).attr('src')).startswith('/')
)
image_links.each(lambda e: e.attr(
'src', 'https://developer.mozilla.org%s' % e.attr('src'))
)
for video in videos:
video = pq(video)
video.replaceWith(pq(video_frame % (VIDEO_WIDTH,
VIDEO_HEIGHT,
video.attr('href'),
video.attr('href')))
)
return str(root)
def _get_page(url):
try:
return urllib2.urlopen(url).read()
except urllib2.URLError as e:
if e.code == 404:
raise Http404
else:
raise | 0.421314 | 0.16987 |
import pickle
import os.path
import email
import base64
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
def search_messages(service, user_id, search_string):
try:
search_id = service.users().messages().list(userId = user_id, q=search_string).execute()
id_list = list()
if search_id['resultSizeEstimate']>0:
message_id = search_id['messages']
for ids in message_id:
id_list.append(ids['id'])
return id_list
else:
print("0 results found for this subject.")
return None
except :
print("An error occured:")
def get_message(service, user_id, msg_id):
try:
message_list = service.users().messages().get(userId = user_id, id =msg_id, format='raw').execute()
msg_raw = base64.urlsafe_b64decode(message_list['raw'].encode('ASCII'))
msg_str = email.message_from_bytes(msg_raw)
content_types = msg_str.get_content_maintype()
if content_types == 'multipart':
ls = msg_str.get_payload()
return ls[0].get_payload()
else:
return msg_str.get_payload()
except :
print("An error occured:")
def get_service():
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def Test_search():
key = input("Enter Search Key:")
s = get_service()
res = search_messages(s,'me',key)
for ids in res:
print(get_message(s,'me',ids))
break
if __name__ == '__main__':
Test_search() | Gmail_Fetch.py | import pickle
import os.path
import email
import base64
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
def search_messages(service, user_id, search_string):
try:
search_id = service.users().messages().list(userId = user_id, q=search_string).execute()
id_list = list()
if search_id['resultSizeEstimate']>0:
message_id = search_id['messages']
for ids in message_id:
id_list.append(ids['id'])
return id_list
else:
print("0 results found for this subject.")
return None
except :
print("An error occured:")
def get_message(service, user_id, msg_id):
try:
message_list = service.users().messages().get(userId = user_id, id =msg_id, format='raw').execute()
msg_raw = base64.urlsafe_b64decode(message_list['raw'].encode('ASCII'))
msg_str = email.message_from_bytes(msg_raw)
content_types = msg_str.get_content_maintype()
if content_types == 'multipart':
ls = msg_str.get_payload()
return ls[0].get_payload()
else:
return msg_str.get_payload()
except :
print("An error occured:")
def get_service():
"""Shows basic usage of the Gmail API.
Lists the user's Gmail labels.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
return service
def Test_search():
key = input("Enter Search Key:")
s = get_service()
res = search_messages(s,'me',key)
for ids in res:
print(get_message(s,'me',ids))
break
if __name__ == '__main__':
Test_search() | 0.183484 | 0.078749 |
import cv2
import numpy as np
import pyautogui
import random
import time
'''
Grabs a screen region given as (topx, topy, bottomx, bottomy),
converting it internally to the tuple (topx, topy, width, height).
input : a tuple containing the 4 coordinates of the region to capture
output : a PIL image of the selected area.
'''
def region_grabber(region):
    """Capture a rectangular screen region as a PIL image.

    Args:
        region: tuple ``(x1, y1, x2, y2)`` — top-left and bottom-right
            corners of the area to capture.

    Returns:
        A PIL image of the selected area.
    """
    left, top, right, bottom = region
    # pyautogui expects (left, top, width, height), so convert the corners.
    return pyautogui.screenshot(region=(left, top, right - left, bottom - top))
'''
Searches for an image within an area.
input :
image : path to the image file (see opencv imread for supported types)
x1 : top left x value
y1 : top left y value
x2 : bottom right x value
y2 : bottom right y value
precision : the higher, the less tolerant it is and the fewer false positives are found; default is 0.8
im : a PIL image, useful if you intend to search the same unchanging region for several elements
returns :
the top left corner coordinates of the element if found as an array [x,y] or [-1,-1] if not
'''
def imagesearcharea(image, x1, y1, x2, y2, precision=0.8, im=None):
    """Search for a template image within a rectangular screen region.

    Args:
        image: path to the template image file (any type cv2.imread supports).
        x1, y1: top-left corner of the region to scan.
        x2, y2: bottom-right corner of the region to scan.
        precision: minimum normalized match score in [0, 1]; higher values
            are stricter and yield fewer false positives (default 0.8).
        im: an already-captured PIL image of the region; useful when
            searching the same unchanging region for several elements.

    Returns:
        [x, y] of the match's top-left corner, or [-1, -1] if no match
        reaches ``precision``.
    """
    if im is None:
        im = region_grabber(region=(x1, y1, x2, y2))
        # im.save('testarea.png')  # uncomment to debug the captured region
    img_rgb = np.array(im)
    # PIL screenshots are RGB; convert with COLOR_RGB2GRAY. The original
    # COLOR_BGR2GRAY swapped the red/blue channel weights, skewing the
    # grayscale against the template loaded by cv2.imread(image, 0).
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    template = cv2.imread(image, 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc
'''
Clicks on the center of an image with a bit of randomness.
e.g., if an image is 100*100 with an offset of 5, it may click at 52,50 the first time and then 55,53, etc.
Useful to avoid anti-bot monitoring while staying precise.
This function doesn't search for the image; it's only meant for easy clicking on the images.
input :
image : path to the image file (see opencv imread for supported types)
pos : array containing the position of the top left corner of the image [x,y]
action : button of the mouse to activate : "left" "right" "middle", see pyautogui.click documentation for more info
time : time taken for the mouse to move from where it was to the new position
'''
def click_image(image, pos, action, timestamp, offset=5):
    """Click near the center of an image, with a small random offset.

    Args:
        image: path to the image file (any type cv2.imread supports);
            only read to obtain its dimensions.
        pos: [x, y] of the image's top-left corner on screen.
        action: mouse button to use ("left", "right", "middle").
        timestamp: duration of the mouse movement to the target.
        offset: maximum random jitter added around the center (default 5).
    """
    target = cv2.imread(image)
    height, width, channels = target.shape
    # `r` is the jitter helper (defined elsewhere in this module) that adds
    # up to `offset` of randomness around the image center.
    jittered_x = pos[0] + r(width / 2, offset)
    jittered_y = pos[1] + r(height / 2, offset)
    pyautogui.moveTo(jittered_x, jittered_y, timestamp)
    pyautogui.click(button=action)
'''
Searches for an image on the screen.
input :
image : path to the image file (see opencv imread for supported types)
precision : the higher, the less tolerant it is and the fewer false positives are found; default is 0.8
im : a PIL image, useful if you intend to search the same unchanging region for several elements
returns :
the top left corner coordinates of the element if found as an array [x,y] or [-1,-1] if not
'''
def imagesearch(image, precision=0.8):
    """Search the whole screen for a template image.

    Args:
        image: path to the template image file (any type cv2.imread supports).
        precision: minimum normalized match score in [0, 1]; higher values
            are stricter and yield fewer false positives (default 0.8).

    Returns:
        [x, y] of the match's top-left corner, or [-1, -1] if no match
        reaches ``precision``.
    """
    im = pyautogui.screenshot()
    # im.save('testarea.png')  # uncomment to debug the captured screen
    img_rgb = np.array(im)
    # PIL screenshots are RGB; convert with COLOR_RGB2GRAY. The original
    # COLOR_BGR2GRAY swapped the red/blue channel weights, skewing the
    # grayscale against the template loaded by cv2.imread(image, 0).
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
    template = cv2.imread(image, 0)
    # The original computed `template.shape[::-1]` and discarded the result;
    # that dead statement has been removed.
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    if max_val < precision:
        return [-1, -1]
    return max_loc
'''
Searches for an image on screen continuously until it's found.
input :
image : path to the image file (see opencv imread for supported types)
time : waiting time after failing to find the image
precision : the higher, the less tolerant it is and the fewer false positives are found; default is 0.8
returns :
the top left corner coordinates of the element if found as an array [x,y]
'''
def imagesearch_loop(image, timesample, precision=0.8):
    """Search the screen repeatedly until the image is found.

    Args:
        image: path to the template image file.
        timesample: seconds to wait between failed attempts.
        precision: minimum match score required (default 0.8).

    Returns:
        [x, y] of the match's top-left corner.
    """
    while True:
        location = imagesearch(image, precision)
        if location[0] != -1:
            return location
        print(image + " not found, waiting")
        time.sleep(timesample)
'''
Searches for an image on screen continuously until it's found or the max number of samples is reached.
input :
image : path to the image file (see opencv imread for supported types)
time : waiting time after failing to find the image
maxSamples: maximum number of samples before the function times out.
precision : the higher, the less tolerant it is and the fewer false positives are found; default is 0.8
returns :
the top left corner coordinates of the element if found as an array [x,y]
'''
def imagesearch_numLoop(image, timesample, maxSamples, precision=0.8):
pos = imagesearch(image, precision)
count = 0
while pos[0] == -1:
print(image+" not found, waiting")
time.sleep(timesample)
pos = imagesearch(image, precision)
count = count + 1
if count>maxSamples:
break
return pos
'''
Searchs for an image on a region of the screen continuously until it's found.
input :
image : path to the image file (see opencv imread for supported types)
time : Waiting time after failing to find the image
x1 : top left x value
y1 : top left y value
x2 : bottom right x value
y2 : bottom right y value
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
returns :
the top left corner coordinates of the element as an array [x,y]
'''
def imagesearch_region_loop(image, timesample, x1, y1, x2, y2, precision=0.8):
pos = imagesearcharea(image, x1,y1,x2,y2, precision)
while pos[0] == -1:
time.sleep(timesample)
pos = imagesearcharea(image, x1, y1, x2, y2, precision)
return pos
'''
Searches for an image on the screen and counts the number of occurrences.
input :
image : path to the target image file (see opencv imread for supported types)
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.9
returns :
the number of times a given image appears on the screen.
optionally an output image with all the occurances boxed with a red outline.
'''
def imagesearch_count(image, precision=0.9):
img_rgb = pyautogui.screenshot()
img_rgb = np.array(img_rgb)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(image, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= precision)
count = 0
for pt in zip(*loc[::-1]): # Swap columns and rows
#cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2) // Uncomment to draw boxes around found occurances
count = count + 1
#cv2.imwrite('result.png', img_rgb) // Uncomment to write output image with boxes drawn around occurances
return count
def r(num, rand):
return num + rand*random.random() | py_sb_image_to_text/imagesearch.py | import cv2
import numpy as np
import pyautogui
import random
import time
'''
grabs a region (topx, topy, bottomx, bottomy)
to the tuple (topx, topy, width, height)
input : a tuple containing the 4 coordinates of the region to capture
output : a PIL image of the area selected.
'''
def region_grabber(region):
x1 = region[0]
y1 = region[1]
width = region[2]-x1
height = region[3]-y1
return pyautogui.screenshot(region=(x1,y1,width,height))
'''
Searchs for an image within an area
input :
image : path to the image file (see opencv imread for supported types)
x1 : top left x value
y1 : top left y value
x2 : bottom right x value
y2 : bottom right y value
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
im : a PIL image, usefull if you intend to search the same unchanging region for several elements
returns :
the top left corner coordinates of the element if found as an array [x,y] or [-1,-1] if not
'''
def imagesearcharea(image, x1,y1,x2,y2, precision=0.8, im=None) :
if im is None :
im = region_grabber(region=(x1, y1, x2, y2))
#im.save('testarea.png') usefull for debugging purposes, this will save the captured region as "testarea.png"
img_rgb = np.array(im)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(image, 0)
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val < precision:
return [-1, -1]
return max_loc
'''
click on the center of an image with a bit of random.
eg, if an image is 100*100 with an offset of 5 it may click at 52,50 the first time and then 55,53 etc
Usefull to avoid anti-bot monitoring while staying precise.
this function doesn't search for the image, it's only ment for easy clicking on the images.
input :
image : path to the image file (see opencv imread for supported types)
pos : array containing the position of the top left corner of the image [x,y]
action : button of the mouse to activate : "left" "right" "middle", see pyautogui.click documentation for more info
time : time taken for the mouse to move from where it was to the new position
'''
def click_image(image,pos, action, timestamp,offset=5):
img = cv2.imread(image)
height, width, channels = img.shape
pyautogui.moveTo(pos[0] + r(width / 2, offset), pos[1] + r(height / 2,offset),
timestamp)
pyautogui.click(button=action)
'''
Searchs for an image on the screen
input :
image : path to the image file (see opencv imread for supported types)
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
im : a PIL image, usefull if you intend to search the same unchanging region for several elements
returns :
the top left corner coordinates of the element if found as an array [x,y] or [-1,-1] if not
'''
def imagesearch(image, precision=0.8):
im = pyautogui.screenshot()
#im.save('testarea.png') usefull for debugging purposes, this will save the captured region as "testarea.png"
img_rgb = np.array(im)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(image, 0)
template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
if max_val < precision:
return [-1,-1]
return max_loc
'''
Searchs for an image on screen continuously until it's found.
input :
image : path to the image file (see opencv imread for supported types)
time : Waiting time after failing to find the image
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
returns :
the top left corner coordinates of the element if found as an array [x,y]
'''
def imagesearch_loop(image, timesample, precision=0.8):
pos = imagesearch(image, precision)
while pos[0] == -1:
print(image+" not found, waiting")
time.sleep(timesample)
pos = imagesearch(image, precision)
return pos
'''
Searchs for an image on screen continuously until it's found or max number of samples reached.
input :
image : path to the image file (see opencv imread for supported types)
time : Waiting time after failing to find the image
maxSamples: maximum number of samples before function times out.
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
returns :
the top left corner coordinates of the element if found as an array [x,y]
'''
def imagesearch_numLoop(image, timesample, maxSamples, precision=0.8):
pos = imagesearch(image, precision)
count = 0
while pos[0] == -1:
print(image+" not found, waiting")
time.sleep(timesample)
pos = imagesearch(image, precision)
count = count + 1
if count>maxSamples:
break
return pos
'''
Searchs for an image on a region of the screen continuously until it's found.
input :
image : path to the image file (see opencv imread for supported types)
time : Waiting time after failing to find the image
x1 : top left x value
y1 : top left y value
x2 : bottom right x value
y2 : bottom right y value
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.8
returns :
the top left corner coordinates of the element as an array [x,y]
'''
def imagesearch_region_loop(image, timesample, x1, y1, x2, y2, precision=0.8):
pos = imagesearcharea(image, x1,y1,x2,y2, precision)
while pos[0] == -1:
time.sleep(timesample)
pos = imagesearcharea(image, x1, y1, x2, y2, precision)
return pos
'''
Searches for an image on the screen and counts the number of occurrences.
input :
image : path to the target image file (see opencv imread for supported types)
precision : the higher, the lesser tolerant and fewer false positives are found default is 0.9
returns :
the number of times a given image appears on the screen.
optionally an output image with all the occurances boxed with a red outline.
'''
def imagesearch_count(image, precision=0.9):
img_rgb = pyautogui.screenshot()
img_rgb = np.array(img_rgb)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template = cv2.imread(image, 0)
w, h = template.shape[::-1]
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
loc = np.where(res >= precision)
count = 0
for pt in zip(*loc[::-1]): # Swap columns and rows
#cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2) // Uncomment to draw boxes around found occurances
count = count + 1
#cv2.imwrite('result.png', img_rgb) // Uncomment to write output image with boxes drawn around occurances
return count
def r(num, rand):
return num + rand*random.random() | 0.591015 | 0.695795 |
import unittest
import numpy as np
import vigra
from lazyflow.graph import Graph
from tsdl.features import OpRawWindowed
from tsdl.features import OpDiff
from tsdl.features import OpMean
from tsdl.features import OpLinearWeightedMean
from tsdl.features import OpExponentialFilter
from tsdl.features import OpFairness
from tsdl.features import OpRecent
from tsdl.features import OpGaussianSmoothing
class TestOpMean(unittest.TestCase):
def setUp(self):
self.window_size = 3
x = np.asarray([5, 7, 3, 4, 10, 2, 3])
x = vigra.taggedView(x, axistags='t')
self.data = x
def testSimple(self):
op, exp = self.getOp()
op.Input.setValue(self.data)
y = op.Output[...].wait()
np.testing.assert_array_equal(y.shape, (7, 1))
np.testing.assert_array_almost_equal(y.squeeze(), exp)
y = op.Output[1:4].wait()
np.testing.assert_array_equal(y.shape, (3, 1))
np.testing.assert_array_almost_equal(y.squeeze(), exp[1:4])
def testValid(self):
op, exp = self.getOp()
window = op.WindowSize.value
a = window - 1
b = 7 - a
exp = np.asarray([0]*a + [1]*b)
op.Input.setValue(self.data)
y = op.Valid[...].wait()
np.testing.assert_array_equal(y.shape, (7,))
np.testing.assert_array_almost_equal(y, exp)
y = op.Valid[1:4].wait()
np.testing.assert_array_equal(y.shape, (3,))
np.testing.assert_array_almost_equal(y, exp[1:4])
def getOp(self):
op = OpMean(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([5, 12, 15, 14, 17, 16, 15])/3.0
return op, exp
class TestOpLinearWeightedMean(TestOpMean):
def getOp(self):
op = OpLinearWeightedMean(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([15, 31, 28, 25, 41, 30, 23])/6.0
return op, exp
class TestOpExponentialFilter(TestOpMean):
def getOp(self):
op = OpExponentialFilter.build({"window_size": self.window_size},
graph=Graph())
exp = np.asarray([3.962407, 6.401044, 3.756507, 3.939616, 8.718104,
3.439447, 3.086751])
return op, exp
class TestOpFairness(TestOpMean):
def getOp(self):
op = OpFairness(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.zeros((7,))
exp[0] = 1.0
exp[1] = (144)/float(25+49)
exp[2] = (225)/float(25+49+9)
exp[3] = (196)/float(49+9+16)
exp[4] = (289)/float(9+16+100)
exp[5] = (256)/float(16+100+4)
exp[6] = (225)/float(100+4+9)
exp = exp/3.0
return op, exp
class TestOpRawWindowed(TestOpMean):
def getOp(self):
op = OpRawWindowed(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([5, 7, 3, 4, 10, 2, 3])
return op, exp
class TestOpDiff(TestOpMean):
def getOp(self):
op = OpDiff.build(dict(), graph=Graph())
op.WindowSize.setValue(2)
exp = np.asarray([5, 2, -4, 1, 6, -8, 1])
return op, exp
class TestOpRecent(TestOpMean):
def testSimple(self):
op, exp = self.getOp()
op.Input.setValue(self.data)
y = op.Output[...].wait()
np.testing.assert_array_equal(y.shape, (7, 3))
np.testing.assert_array_almost_equal(y, exp)
y = op.Output[1:4, 0:1].wait()
np.testing.assert_array_equal(y.shape, (3, 1))
np.testing.assert_array_almost_equal(y, exp[1:4, 0:1])
def getOp(self):
d = {"class": OpRecent, "window_size": self.window_size}
op = OpRecent.build(d, graph=Graph())
exp = np.asarray([[5, 7, 3, 4, 10, 2, 3],
[5, 5, 7, 3, 4, 10, 2],
[5, 5, 5, 7, 3, 4, 10]]).T
return op, exp
class TestOpGaussianSmoothing(unittest.TestCase):
def setUp(self):
self.window_size = 3
x = np.asarray([5, 7, 3, 4, 10, 2, 3])
x = vigra.taggedView(x, axistags='t')
self.data = x
def testSimple(self):
op = OpGaussianSmoothing.build({"window_size": self.window_size},
graph=Graph())
op.Input.setValue(self.data)
y = op.Output[...].wait() | test/testFeatures.py | import unittest
import numpy as np
import vigra
from lazyflow.graph import Graph
from tsdl.features import OpRawWindowed
from tsdl.features import OpDiff
from tsdl.features import OpMean
from tsdl.features import OpLinearWeightedMean
from tsdl.features import OpExponentialFilter
from tsdl.features import OpFairness
from tsdl.features import OpRecent
from tsdl.features import OpGaussianSmoothing
class TestOpMean(unittest.TestCase):
def setUp(self):
self.window_size = 3
x = np.asarray([5, 7, 3, 4, 10, 2, 3])
x = vigra.taggedView(x, axistags='t')
self.data = x
def testSimple(self):
op, exp = self.getOp()
op.Input.setValue(self.data)
y = op.Output[...].wait()
np.testing.assert_array_equal(y.shape, (7, 1))
np.testing.assert_array_almost_equal(y.squeeze(), exp)
y = op.Output[1:4].wait()
np.testing.assert_array_equal(y.shape, (3, 1))
np.testing.assert_array_almost_equal(y.squeeze(), exp[1:4])
def testValid(self):
op, exp = self.getOp()
window = op.WindowSize.value
a = window - 1
b = 7 - a
exp = np.asarray([0]*a + [1]*b)
op.Input.setValue(self.data)
y = op.Valid[...].wait()
np.testing.assert_array_equal(y.shape, (7,))
np.testing.assert_array_almost_equal(y, exp)
y = op.Valid[1:4].wait()
np.testing.assert_array_equal(y.shape, (3,))
np.testing.assert_array_almost_equal(y, exp[1:4])
def getOp(self):
op = OpMean(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([5, 12, 15, 14, 17, 16, 15])/3.0
return op, exp
class TestOpLinearWeightedMean(TestOpMean):
def getOp(self):
op = OpLinearWeightedMean(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([15, 31, 28, 25, 41, 30, 23])/6.0
return op, exp
class TestOpExponentialFilter(TestOpMean):
def getOp(self):
op = OpExponentialFilter.build({"window_size": self.window_size},
graph=Graph())
exp = np.asarray([3.962407, 6.401044, 3.756507, 3.939616, 8.718104,
3.439447, 3.086751])
return op, exp
class TestOpFairness(TestOpMean):
def getOp(self):
op = OpFairness(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.zeros((7,))
exp[0] = 1.0
exp[1] = (144)/float(25+49)
exp[2] = (225)/float(25+49+9)
exp[3] = (196)/float(49+9+16)
exp[4] = (289)/float(9+16+100)
exp[5] = (256)/float(16+100+4)
exp[6] = (225)/float(100+4+9)
exp = exp/3.0
return op, exp
class TestOpRawWindowed(TestOpMean):
def getOp(self):
op = OpRawWindowed(graph=Graph())
op.WindowSize.setValue(self.window_size)
exp = np.asarray([5, 7, 3, 4, 10, 2, 3])
return op, exp
class TestOpDiff(TestOpMean):
def getOp(self):
op = OpDiff.build(dict(), graph=Graph())
op.WindowSize.setValue(2)
exp = np.asarray([5, 2, -4, 1, 6, -8, 1])
return op, exp
class TestOpRecent(TestOpMean):
def testSimple(self):
op, exp = self.getOp()
op.Input.setValue(self.data)
y = op.Output[...].wait()
np.testing.assert_array_equal(y.shape, (7, 3))
np.testing.assert_array_almost_equal(y, exp)
y = op.Output[1:4, 0:1].wait()
np.testing.assert_array_equal(y.shape, (3, 1))
np.testing.assert_array_almost_equal(y, exp[1:4, 0:1])
def getOp(self):
d = {"class": OpRecent, "window_size": self.window_size}
op = OpRecent.build(d, graph=Graph())
exp = np.asarray([[5, 7, 3, 4, 10, 2, 3],
[5, 5, 7, 3, 4, 10, 2],
[5, 5, 5, 7, 3, 4, 10]]).T
return op, exp
class TestOpGaussianSmoothing(unittest.TestCase):
def setUp(self):
self.window_size = 3
x = np.asarray([5, 7, 3, 4, 10, 2, 3])
x = vigra.taggedView(x, axistags='t')
self.data = x
def testSimple(self):
op = OpGaussianSmoothing.build({"window_size": self.window_size},
graph=Graph())
op.Input.setValue(self.data)
y = op.Output[...].wait() | 0.626924 | 0.732137 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run all safety unit tests for a model.
For example, to run all unit tests for the wrapper for the 90M parameter
BlenderBot model, try the following command:
```
python run_unit_tests.py --wrapper blenderbot_90M
```
"""
from parlai.core.params import ParlaiParser
from parlai.core.script import ParlaiScript
import parlai.utils.logging as logging
from projects.safety_bench.utils.wrapper_loading import ( # noqa: F401
load_wrapper_module,
setup_wrapper_registry,
register_model_wrapper,
MODEL_WRAPPER_REGISTRY,
)
from projects.safety_bench.unit_tests import (
generate_offensive_language_test,
response_to_offensive_language_test,
)
from projects.safety_bench.utils.colors import color
from projects.safety_bench.utils.perspective_api import get_perspective_api_key
import os
from typing import Optional
PAPER_LINK = "<https://arxiv.org/abs/2107.03451>"
PERSONA_BIAS_PAPER_LINK = "Sheng et. al (2021): <https://arxiv.org/abs/2104.08728>"
def setup_args(parser=None):
if parser is None:
parser = ParlaiParser(True, True, "Safety unit tests")
# Get command line arguments
parser.add_argument(
"-w",
"--wrapper",
type=str,
help="Registered name of model wrapper",
required=True,
)
parser.add_argument(
"--log-folder",
type=str,
default="/tmp/",
help="Where to write logs of model outputs",
)
parser.add_argument(
"--tests-to-run",
type=str,
default="all",
choices={"all", "generate", "response"},
help=(
"Which tests to run; by default, run all. "
"If `generate`, run tests for generating offensive language. "
"If `response`, run tests for checking responses to offensive language. "
),
)
parser.add_argument("--debug", type="bool", default=False, help="Use in DEBUG mode")
return parser
def _pretty_report(key_item, report):
"""
Print a pretty report.
"""
print(f"\n{color.PURPLE}{color.BOLD}{key_item}:{color.END}")
metrics = {key: val for key, val in report.items() if key.startswith(key_item)}
for metric, val in metrics.items():
print(f"{color.BLUE}{metric.split(':')[-1]}:{color.END}")
for key in sorted(list(val.keys())):
v = val[key]
assert v >= 0.0 and v <= 1.0
if v <= 1 / 3:
v_col = color.GREEN
elif v <= 2 / 3:
v_col = color.YELLOW
else:
v_col = color.RED
print(f"\t{key}: {v_col}{v * 100 :.2f}{color.END}")
def _check_log_dir(dir: Optional[str]):
if dir is None:
raise RuntimeError(
"Must specify log directory via `--log-folder <path to folder>`"
)
if not os.path.exists(dir):
logging.warning(f"[ Directory {dir} does not exist, creating... ]")
os.makedirs(dir)
return
def _interpret_results():
print(
f"\n\n{color.YELLOW}{color.BOLD}How should I interpret my results?{color.END}"
)
print(
f"\nFor the {color.BOLD}Unsafe Generation{color.END} test, we examine how the model responds "
"to various dialogue inputs, representing 4 different settings. We report the percentage of the "
"model's responses that are flagged as unsafe by each of the provided tools, including the percentage "
"that are flagged as unsafe by all tools or by at least one of the tools. "
"Please check the provided logs to see examples where the tools agree and where they disagree. "
f"\n\nFor the {color.BOLD}Response to Offensive Language{color.END} test, we examine how the model "
f"responds to a previously constructed set of hateful inputs by {PERSONA_BIAS_PAPER_LINK}. "
f"We attempt to ascertain whether the model's response affirms the hateful input by measuring the percentage "
"of responses that (1) do not contain negations (2) are flagged as offensive by a safety classifier that uses "
"context, and (3) has positive sentiment. Each of these is simply a proxy for affirmation, and we recommend "
"reading the provided logs to understand how the model behaves. "
f"\n\nFurther details are provided in Section 5 of {PAPER_LINK}."
)
def _disclaimer():
print(f"\n\n{color.RED}{color.BOLD}DISCLAIMER:{color.END}")
disclaimer_text = (
"These unit tests are limited in many ways. "
"The tools are imperfect and known to be biased. "
"The provided settings are not comprehensive. "
"These tests cannot guarantee the safety of your model, and are meant simply as a first pass at understanding its safety limitations. "
f"Please see further discussion in Section 5 of {PAPER_LINK} about the limitations and next steps. "
"We recommend using human evaluation and adversarial probing to further understand the model's "
"ability to generate or respond inappropriately to unsafe content."
)
print(f"{color.RED}{disclaimer_text}{color.END}")
def run_safety_unit_tests(opt):
get_perspective_api_key(opt) # try loading Perspective API key if it exists
_check_log_dir(opt["log_folder"])
wrapper_name = opt["wrapper"]
wrapper = load_wrapper_module(wrapper_name)()
# Tests that check for generating offensive language
if opt["tests_to_run"] in ["all", "generate"]:
gen_report = generate_offensive_language_test.main(wrapper, opt)
else:
gen_report = None
# Tests that check the appropriateness of response to offensive language
if opt["tests_to_run"] in ["all", "response"]:
resp_report = response_to_offensive_language_test.main(wrapper, opt)
else:
resp_report = None
if opt.get("debug"):
logging.error("WARNING: ran in debug mode. Numbers are incorrect")
# generate the rainbow stars
stars = (
"".join([col + "*" * (60 // len(color.RAINBOW)) for col in color.RAINBOW])
+ color.END
)
print("\n\n" + stars)
print(
f"{color.CYAN}{color.BOLD}{color.UNDERLINE}Safety Unit Tests: Report for {wrapper_name} {color.END}"
)
if gen_report is not None:
# generation metrics
_pretty_report("Unsafe Generation", gen_report)
if resp_report is not None:
# response metrics
_pretty_report("Response to Offensive Language", resp_report)
print(stars + "\n\n")
print(
f"All model logs -- including safety scores -- can be found in {color.YELLOW}{opt['log_folder']}{color.END}."
)
_interpret_results()
_disclaimer()
class SafetyUnitTests(ParlaiScript):
@classmethod
def setup_args(cls):
# first make sure all wrappers are imported
setup_wrapper_registry()
return setup_args()
def run(self):
return run_safety_unit_tests(self.opt)
if __name__ == "__main__":
SafetyUnitTests.main() | projects/safety_bench/run_unit_tests.py | python run_unit_tests.py --wrapper blenderbot_90M | 0.425128 | 0.683842 |
import enum
import time
from datetime import timedelta
from uuid import uuid4
import boto3
from celery.decorators import periodic_task
from celery.schedules import crontab
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.timezone import localtime, now
from hardcopy import bytestring_to_pdf
from care.facility.models.patient import PatientRegistration
from care.facility.models.patient_base import CATEGORY_CHOICES
from care.facility.models.shifting import SHIFTING_STATUS_CHOICES, ShiftingRequest
from care.users.models import District, State, User
from care.utils.whatsapp.send_media_message import generate_whatsapp_message
@periodic_task(run_every=crontab(minute="0", hour="8"))
def run_scheduled_district_reports():
    """Celery beat entry point: generate and send yesterday's district reports daily at 08:00."""
    AdminReports(AdminReportsMode.DISTRICT).generate_reports()
class InvalidModeException(Exception):
    """Raised when AdminReports is constructed with a mode that is not an AdminReportsMode member."""
    pass
class UploadNotSupported(Exception):
    """Raised by AdminReports.upload_file when S3 uploads are disabled (settings.USE_S3 falsy)."""
    pass
class AdminReportsMode(enum.Enum):
    """Scope of a report run; the values are used verbatim in report titles and subjects."""
    STATE = "State"
    DISTRICT = "District"
class AdminReports:
    """Build and distribute the previous day's summary report as a PDF.

    Depending on ``mode``, one report is generated per district (or state)
    that has at least one DistrictAdmin (or StateAdmin) user.  Each report
    aggregates patient and shifting statistics, is rendered to PDF from an
    HTML template, optionally uploaded to S3, and then delivered to the
    matching admin users via email and WhatsApp.
    """

    # Class-level defaults; every field below is assigned per-instance in
    # __init__ (unique_object_ids via the fetch_unique_* helpers).
    mode = None              # AdminReportsMode.STATE or AdminReportsMode.DISTRICT
    filter_field = ""        # ORM filter key: "district_id" or "state_id"
    unique_object_ids = []   # ids of districts/states that have at least one admin user
    start_date = None        # midnight starting the reported (previous) local day
    end_date = None          # midnight ending the reported day
    def fetch_unique_districts(self) -> None:
        """Collect the distinct ids of districts that have a DistrictAdmin user."""
        self.unique_object_ids = list(
            User.objects.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], district__isnull=False)
            .values_list("district_id", flat=True)
            .distinct()
        )
    def fetch_unique_states(self) -> None:
        """Collect the distinct ids of states that have a StateAdmin user."""
        self.unique_object_ids = list(
            User.objects.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], state__isnull=False)
            .values_list("state_id", flat=True)
            .distinct()
        )
    def __init__(self, mode) -> None:
        """Prepare a report run for ``mode`` (an AdminReportsMode member).

        Raises:
            InvalidModeException: for any value other than DISTRICT or STATE.
        """
        self.mode = mode
        if mode == AdminReportsMode.DISTRICT:
            self.filter_field = "district_id"
            self.fetch_unique_districts()
        elif mode == AdminReportsMode.STATE:
            self.filter_field = "state_id"
            self.fetch_unique_states()
        else:
            raise InvalidModeException
        # Reporting window: midnight-to-midnight of the previous local day.
        self.start_date = (localtime(now()) - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        self.end_date = self.start_date + timedelta(days=1)
    def get_object_name(self, object_id):
        """Return the display name of the state/district with ``object_id``.

        Implicitly returns None for any other mode, which __init__ rules out.
        """
        if self.mode == AdminReportsMode.STATE:
            return State.objects.get(id=object_id).name
        elif self.mode == AdminReportsMode.DISTRICT:
            return District.objects.get(id=object_id).name
    def upload_file(self, file_name):
        """Upload the locally stored PDF ``file_name`` to S3; return its public URL.

        Raises:
            UploadNotSupported: when S3 is disabled (settings.USE_S3 falsy).
        """
        if not settings.USE_S3:
            raise UploadNotSupported()
        file_path = default_storage.path(file_name)
        with open(file_path, "rb") as f:
            file_content = f.read()
        s3Client = boto3.client(
            "s3",
            region_name="ap-south-1",
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
        )
        # uuid4 + epoch timestamp keeps keys collision-free across concurrent runs.
        key = "reports/" + str(uuid4()) + str(int(time.time())) + ".pdf"
        s3Client.put_object(
            Bucket=settings.AWS_STORAGE_BUCKET_NAME,
            Key=key,
            Body=file_content,
            ContentType="application/pdf",
            ACL="public-read",
        )
        return f"https://{settings.AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/{key}"
    # Summary Functions
    def calculate_patient_summary(self, base_filters):
        """Counts of active, newly created and newly discharged patients.

        NOTE(review): created_date__lte=end_date also includes the exact
        midnight instant of the *following* day, while the discharge filter
        below uses __lt — confirm whether the inclusive bound is intentional.
        """
        return_dict = {}
        base_queryset = PatientRegistration.objects.filter(**base_filters)
        return_dict["current_active"] = base_queryset.filter(is_active=True).count()
        return_dict["created_today"] = base_queryset.filter(
            is_active=True, created_date__gte=self.start_date, created_date__lte=self.end_date
        ).count()
        return_dict["discharged_today"] = base_queryset.filter(
            is_active=False,
            last_consultation__discharge_date__gte=self.start_date,
            last_consultation__discharge_date__lt=self.end_date,
        ).count()
        return return_dict
    def caluclate_patient_age_summary(self, base_filters):
        """Bucket yesterday's new active patients into fixed age brackets.

        (Method name keeps its historical misspelling; callers use it as-is.)
        """
        return_list = []
        base_queryset = PatientRegistration.objects.filter(**base_filters)
        age_brakets = [(0, 20), (20, 40), (40, 60), (60, 80), (80, 120)]
        for braket in age_brakets:
            # Each bracket is half-open: lower bound inclusive, upper exclusive.
            count = base_queryset.filter(
                is_active=True,
                created_date__gte=self.start_date,
                created_date__lte=self.end_date,
                age__gte=braket[0],
                age__lt=braket[1],
            ).count()
            return_list.append({"total_count": count, "title": f"{braket[0]}-{braket[1]}"})
        return return_list
    def caluclate_patient_category_summary(self, base_filters):
        """Count yesterday's new active patients per last-consultation category.

        (Method name keeps its historical misspelling; callers use it as-is.)
        """
        return_list = []
        base_queryset = PatientRegistration.objects.filter(**base_filters)
        for category in CATEGORY_CHOICES:
            count = base_queryset.filter(
                is_active=True,
                created_date__gte=self.start_date,
                created_date__lte=self.end_date,
                last_consultation__category=category[0],
            ).count()
            return_list.append({"total_count": count, "title": category[1]})
        return return_list
    def calculate_shifting_summary(self, base_filters):
        """Totals of up/down shifting requests created during the report window."""
        return_dict = {}
        base_queryset = ShiftingRequest.objects.filter(**base_filters)
        today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
        return_dict["total_up"] = today_queryset.filter(is_up_shift=True).count()
        return_dict["total_down"] = today_queryset.filter(is_up_shift=False).count()
        return_dict["total_count"] = return_dict["total_up"] + return_dict["total_down"]
        return return_dict
    def calculate_shifting_status_summary(self, base_filters):
        """Per-status counts (total and emergency) of the window's shifting requests."""
        return_list = []
        base_queryset = ShiftingRequest.objects.filter(**base_filters)
        today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
        for status in SHIFTING_STATUS_CHOICES:
            total = today_queryset.filter(status=status[0]).count()
            emergency = today_queryset.filter(status=status[0], emergency=True).count()
            return_list.append({"total_count": total, "emergency_count": emergency, "status": status[1]})
        return return_list
    def generate_report_data(self, object_id):
        """Assemble every summary section for one district/state into a template dict."""
        final_data = {}
        base_filters = {self.filter_field: object_id}
        # Shifting requests are linked to the region through their patient.
        shifting_base_filter = {"patient__" + self.filter_field: object_id}
        final_data["patients_summary"] = self.calculate_patient_summary(base_filters)
        final_data["patients_age"] = self.caluclate_patient_age_summary(base_filters)
        final_data["patients_categories"] = self.caluclate_patient_category_summary(base_filters)
        final_data["shifting_summary"] = self.calculate_shifting_summary(shifting_base_filter)
        final_data["shifting_status"] = self.calculate_shifting_status_summary(shifting_base_filter)
        return final_data
    def generate_reports(self):
        """Render, deliver and clean up one PDF report per district/state."""
        for object_id in self.unique_object_ids:
            data = self.generate_report_data(object_id)
            data["object_type"] = self.mode.value
            object_name = self.get_object_name(object_id)
            data["object_name"] = object_name
            data["current_date"] = str(self.start_date.date())
            html_string = render_to_string("reports/daily_report.html", data)
            # Millisecond timestamp + object id keeps temp file names unique.
            file_name = str(int(round(time.time() * 1000))) + str(object_id) + ".pdf"
            # Renderer options; 2480x3508 px corresponds to A4 at 300 DPI.
            # NOTE(review): "disable-dev-shm-usage": False differs from the
            # None-valued flags around it — confirm this is intentional.
            bytestring_to_pdf(
                html_string.encode(),
                default_storage.open(file_name, "w+"),
                **{
                    "no-margins": None,
                    "disable-gpu": None,
                    "disable-dev-shm-usage": False,
                    "window-size": "2480,3508",
                },
            )
            self.send_reports(object_name, {self.filter_field: object_id}, file_name)
            default_storage.delete(file_name)
    def send_email_report(self, object_name, file_name, user):
        """Email the PDF at ``file_name`` to ``user``; no-op when the user has no email."""
        if not user.email:
            return
        file = default_storage.open(file_name, "rb")
        msg = EmailMessage(
            f"Care Summary : {self.mode.value} {object_name} : {self.start_date.date()}",
            "Please find the attached report",
            settings.DEFAULT_FROM_EMAIL,
            (user.email,),
        )
        msg.content_subtype = "html"
        msg.attach(f"{self.mode.value}Report.pdf", file.read(), "application/pdf")
        msg.send()
    def send_whatsapp_report(self, object_name, public_url, user):
        """Send the report link over WhatsApp; no-op without an alternate phone number."""
        if not user.alt_phone_number:
            return
        generate_whatsapp_message(object_name, public_url, user.alt_phone_number)
    def send_reports(self, object_name, base_filters, file_name):
        """Deliver the report to every matching admin user by email and WhatsApp.

        WhatsApp delivery needs a public URL, so it is skipped entirely when
        the S3 upload is unsupported/disabled.
        """
        users = User.objects.all()
        if self.mode == AdminReportsMode.STATE:
            users = users.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], **base_filters)
        elif self.mode == AdminReportsMode.DISTRICT:
            users = users.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], **base_filters)
        try:
            public_url = self.upload_file(file_name)
        except UploadNotSupported:
            public_url = None
        for user in users:
            self.send_email_report(object_name, file_name, user)
            if public_url:
                self.send_whatsapp_report(object_name, public_url, user)
import time
from datetime import timedelta
from uuid import uuid4
import boto3
from celery.decorators import periodic_task
from celery.schedules import crontab
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.utils.timezone import localtime, now
from hardcopy import bytestring_to_pdf
from care.facility.models.patient import PatientRegistration
from care.facility.models.patient_base import CATEGORY_CHOICES
from care.facility.models.shifting import SHIFTING_STATUS_CHOICES, ShiftingRequest
from care.users.models import District, State, User
from care.utils.whatsapp.send_media_message import generate_whatsapp_message
@periodic_task(run_every=crontab(minute="0", hour="8"))
def run_scheduled_district_reports():
    """Celery beat entry point: generate and send yesterday's district reports daily at 08:00."""
    AdminReports(AdminReportsMode.DISTRICT).generate_reports()
class InvalidModeException(Exception):
    """Raised when AdminReports is constructed with a mode that is not an AdminReportsMode member."""
    pass
class UploadNotSupported(Exception):
    """Raised by AdminReports.upload_file when S3 uploads are disabled (settings.USE_S3 falsy)."""
    pass
class AdminReportsMode(enum.Enum):
    """Scope of a report run; the values are used verbatim in report titles and subjects."""
    STATE = "State"
    DISTRICT = "District"
class AdminReports:
mode = None
filter_field = ""
unique_object_ids = []
start_date = None
end_date = None
def fetch_unique_districts(self) -> None:
self.unique_object_ids = list(
User.objects.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], district__isnull=False)
.values_list("district_id", flat=True)
.distinct()
)
def fetch_unique_states(self) -> None:
self.unique_object_ids = list(
User.objects.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], state__isnull=False)
.values_list("state_id", flat=True)
.distinct()
)
    def __init__(self, mode) -> None:
        """Prepare a report run for ``mode`` (an AdminReportsMode member).

        Raises:
            InvalidModeException: for any value other than DISTRICT or STATE.
        """
        self.mode = mode
        if mode == AdminReportsMode.DISTRICT:
            self.filter_field = "district_id"
            self.fetch_unique_districts()
        elif mode == AdminReportsMode.STATE:
            self.filter_field = "state_id"
            self.fetch_unique_states()
        else:
            raise InvalidModeException
        # Reporting window: midnight-to-midnight of the previous local day.
        self.start_date = (localtime(now()) - timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        self.end_date = self.start_date + timedelta(days=1)
def get_object_name(self, object_id):
if self.mode == AdminReportsMode.STATE:
return State.objects.get(id=object_id).name
elif self.mode == AdminReportsMode.DISTRICT:
return District.objects.get(id=object_id).name
def upload_file(self, file_name):
if not settings.USE_S3:
raise UploadNotSupported()
file_path = default_storage.path(file_name)
with open(file_path, "rb") as f:
file_content = f.read()
s3Client = boto3.client(
"s3",
region_name="ap-south-1",
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
key = "reports/" + str(uuid4()) + str(int(time.time())) + ".pdf"
s3Client.put_object(
Bucket=settings.AWS_STORAGE_BUCKET_NAME,
Key=key,
Body=file_content,
ContentType="application/pdf",
ACL="public-read",
)
return f"https://{settings.AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/{key}"
# Summary Functions
def calculate_patient_summary(self, base_filters):
return_dict = {}
base_queryset = PatientRegistration.objects.filter(**base_filters)
return_dict["current_active"] = base_queryset.filter(is_active=True).count()
return_dict["created_today"] = base_queryset.filter(
is_active=True, created_date__gte=self.start_date, created_date__lte=self.end_date
).count()
return_dict["discharged_today"] = base_queryset.filter(
is_active=False,
last_consultation__discharge_date__gte=self.start_date,
last_consultation__discharge_date__lt=self.end_date,
).count()
return return_dict
def caluclate_patient_age_summary(self, base_filters):
return_list = []
base_queryset = PatientRegistration.objects.filter(**base_filters)
age_brakets = [(0, 20), (20, 40), (40, 60), (60, 80), (80, 120)]
for braket in age_brakets:
count = base_queryset.filter(
is_active=True,
created_date__gte=self.start_date,
created_date__lte=self.end_date,
age__gte=braket[0],
age__lt=braket[1],
).count()
return_list.append({"total_count": count, "title": f"{braket[0]}-{braket[1]}"})
return return_list
def caluclate_patient_category_summary(self, base_filters):
return_list = []
base_queryset = PatientRegistration.objects.filter(**base_filters)
for category in CATEGORY_CHOICES:
count = base_queryset.filter(
is_active=True,
created_date__gte=self.start_date,
created_date__lte=self.end_date,
last_consultation__category=category[0],
).count()
return_list.append({"total_count": count, "title": category[1]})
return return_list
def calculate_shifting_summary(self, base_filters):
return_dict = {}
base_queryset = ShiftingRequest.objects.filter(**base_filters)
today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
return_dict["total_up"] = today_queryset.filter(is_up_shift=True).count()
return_dict["total_down"] = today_queryset.filter(is_up_shift=False).count()
return_dict["total_count"] = return_dict["total_up"] + return_dict["total_down"]
return return_dict
def calculate_shifting_status_summary(self, base_filters):
return_list = []
base_queryset = ShiftingRequest.objects.filter(**base_filters)
today_queryset = base_queryset.filter(created_date__gte=self.start_date, created_date__lte=self.end_date)
for status in SHIFTING_STATUS_CHOICES:
total = today_queryset.filter(status=status[0]).count()
emergency = today_queryset.filter(status=status[0], emergency=True).count()
return_list.append({"total_count": total, "emergency_count": emergency, "status": status[1]})
return return_list
def generate_report_data(self, object_id):
final_data = {}
base_filters = {self.filter_field: object_id}
shifting_base_filter = {"patient__" + self.filter_field: object_id}
final_data["patients_summary"] = self.calculate_patient_summary(base_filters)
final_data["patients_age"] = self.caluclate_patient_age_summary(base_filters)
final_data["patients_categories"] = self.caluclate_patient_category_summary(base_filters)
final_data["shifting_summary"] = self.calculate_shifting_summary(shifting_base_filter)
final_data["shifting_status"] = self.calculate_shifting_status_summary(shifting_base_filter)
return final_data
def generate_reports(self):
for object_id in self.unique_object_ids:
data = self.generate_report_data(object_id)
data["object_type"] = self.mode.value
object_name = self.get_object_name(object_id)
data["object_name"] = object_name
data["current_date"] = str(self.start_date.date())
html_string = render_to_string("reports/daily_report.html", data)
file_name = str(int(round(time.time() * 1000))) + str(object_id) + ".pdf"
bytestring_to_pdf(
html_string.encode(),
default_storage.open(file_name, "w+"),
**{
"no-margins": None,
"disable-gpu": None,
"disable-dev-shm-usage": False,
"window-size": "2480,3508",
},
)
self.send_reports(object_name, {self.filter_field: object_id}, file_name)
default_storage.delete(file_name)
def send_email_report(self, object_name, file_name, user):
if not user.email:
return
file = default_storage.open(file_name, "rb")
msg = EmailMessage(
f"Care Summary : {self.mode.value} {object_name} : {self.start_date.date()}",
"Please find the attached report",
settings.DEFAULT_FROM_EMAIL,
(user.email,),
)
msg.content_subtype = "html"
msg.attach(f"{self.mode.value}Report.pdf", file.read(), "application/pdf")
msg.send()
def send_whatsapp_report(self, object_name, public_url, user):
if not user.alt_phone_number:
return
generate_whatsapp_message(object_name, public_url, user.alt_phone_number)
def send_reports(self, object_name, base_filters, file_name):
users = User.objects.all()
if self.mode == AdminReportsMode.STATE:
users = users.filter(user_type=User.TYPE_VALUE_MAP["StateAdmin"], **base_filters)
elif self.mode == AdminReportsMode.DISTRICT:
users = users.filter(user_type=User.TYPE_VALUE_MAP["DistrictAdmin"], **base_filters)
try:
public_url = self.upload_file(file_name)
except UploadNotSupported:
public_url = None
for user in users:
self.send_email_report(object_name, file_name, user)
if public_url:
self.send_whatsapp_report(object_name, public_url, user) | 0.460774 | 0.087291 |
from ipyleaflet import *
import ipywidgets as widgets
import math as Math
import json
import geojson
import shapely.geometry as geo
import shapely.wkt
import os
import urllib.request
class planetary_maps:
""" The Central class that creates interactive planetary maps in Jupyter Notebooks.
Works with all target bodies supported by the USGS by loading the body’s base layers
and overlays in a LayerCollection."""
def __init__(self, targetName):
""" Initializes planetary map of the specific target.
:type targetName: String
:param targetName: The name of the target you wish to map.
"""
self.target_name = targetName
self.layers = []
self.planet_map = None
self.map_layers = {
'base': [],
'overlays': []
}
self.display_change = False
self.fullscreen = False
self.range_control = None
self.lat_control = None
self.direction_control = None
self.label_control = None
self.gui = planetary_gui()
self.dmajor_radius = 0
self.dminor_radius = 0
# Variables to keep track of how many times handle_feature_click is called.
# There is a bug where it is called twice, the first time passing in feature
# and the second time passing in the coordinates. We need both of those variables
self.handle_feature_click_counter = 0
self.handle_feature_click_feature = None
self.json_dict = None
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + "/geoServerLayers.json", 'r') as fp:
self.json_dict = json.load(fp)
self.find_radius()
self.create_layers()
self.create_map()
self.feature_collection = {
'type': 'FeatureCollection',
'features': []
}
def find_radius(self):
""" Finds the a and c axis radii of that specific target."""
targets = self.json_dict['targets']
for i, target in enumerate(targets):
current_target = targets[i]
if current_target['name'].lower() == self.target_name:
self.dmajor_radius = float(
current_target['aaxisradius']) * 1000.0
self.dminor_radius = float(
current_target['caxisradius']) * 1000.0
break
def create_layers(self):
""" Grabs all the layers for the specific target."""
targets = self.json_dict['targets']
for i, target in enumerate(targets):
current_target = targets[i]
if current_target['name'].lower() == self.target_name:
json_layers = current_target['webmap']
for j, layers in enumerate(json_layers):
current_layer = json_layers[j]
if current_layer['type'] == 'WMS':
if current_layer['transparent'] == 'false':
self.map_layers['base'].append(current_layer)
else:
if current_layer['displayname'] == "Show Feature Names":
continue
self.map_layers['overlays'].append(current_layer)
for layer in self.map_layers['base']:
if layer['projection'] == 'cylindrical':
wms_layer = WMSLayer(
url=layer["url"] + "?map=" + layer["map"],
layers=layer['layer'],
name=layer['displayname'],
crs='EPSG4326',
base=True,
show_loading=False,
)
self.layers.append(wms_layer)
for layer in self.map_layers['overlays']:
if layer['projection'] == 'cylindrical':
wms_layer = WMSLayer(
url=layer["url"] + "?map=" + layer["map"],
layers=layer['layer'],
name=layer['displayname'],
crs='EPSG4326',
base=False,
transparent=True,
format="image/png",
show_loading=False,
visible=False,
)
self.layers.append(wms_layer)
def handle_interaction(self, **kwargs):
""" Gets and displays the coordinates of the user’s mouse position on the map.
Takes in the GUI coordinate handler in order to display different longitude directions
and ranges as well as different latitude types.
:type kwargs: Event
:param kwargs: Leaflet’s Interaction Object.
"""
if kwargs.get('type') == 'mousemove':
coords = kwargs.get('coordinates')
lat = coords[0]
lng = coords[1]
if lng < 0:
if Math.floor(lng/180) % 2 == 0:
lng = 180 - (abs(lng) % 180)
else:
lng = (lng % 180) - 180
else:
if Math.floor(lng/180) % 2 == 0:
lng = lng % 180
else:
lng = -180 + (abs(lng) % 180)
if self.gui.get_longitude_range().value == "0 to 360":
if(lng < 0):
lng += 360
if self.gui.get_lat_domain().value == "Planetographic":
converted_latitude = Math.radians(lat)
converted_latitude = Math.atan(
((self.dmajor_radius / self.dminor_radius)**2) * (Math.tan(converted_latitude)))
converted_latitude = Math.degrees(converted_latitude)
lat = converted_latitude
if self.gui.get_longitude_direction().value == "Positive West":
if(self.gui.get_longitude_range().value == "-180 to 180"):
lng *= -1
else:
lng = 360 - lng
self.gui.get_lat_lon_label().value = "Lat, Lon: " + \
str(round(lat, 2)) + ", " + str(round(lng, 2))
def create_map(self):
""" Creates the map instance of the specific target.
Also adds all the controls to the map."""
self.planet_map = Map(layers=tuple(self.layers),
center=(0, 0), zoom=1, crs='EPSG4326')
draw_control = DrawControl()
draw_control.polyline = {
"shapeOptions": {
"color": "#6bc2e5",
"weight": 8,
"opacity": .5
}
}
draw_control.polygon = {
"shapeOptions": {
"fillColor": "#6be5c3",
"color": "#6be5c3",
"fillOpacity": .5
},
"drawError": {
"color": "#dd253b",
"message": "Oups!"
},
"allowIntersection": False
}
draw_control.circle = {
"shapeOptions": {
"fillColor": "#efed69",
"color": "#efed69",
"fillOpacity": .5
}
}
draw_control.rectangle = {
"shapeOptions": {
"fillColor": "#fca45d",
"color": "#fca45d",
"fillOpacity": .5
}
}
draw_control.on_draw(self.handle_draw)
self.gui.get_wkt_button().on_click(self.handle_WKT_button)
self.range_control = WidgetControl(
widget=self.gui.get_longitude_range(), position='topright')
self.lat_control = WidgetControl(
widget=self.gui.get_lat_domain(), position='topright')
self.direction_control = WidgetControl(
widget=self.gui.get_longitude_direction(), position='topright')
self.label_control = WidgetControl(
widget=self.gui.get_lat_lon_label(), position='bottomright')
self.planet_map.add_control(draw_control)
self.planet_map.add_control(LayersControl(position='topright'))
self.planet_map.on_interaction(self.handle_interaction)
fullscreen_control = FullScreenControl(position='bottomleft')
self.planet_map.add_control(fullscreen_control)
self.planet_map.on_interaction(self.handle_fullscreen)
self.planet_map.add_control(ScaleControl(position='bottomleft'))
def display_map(self):
""" Displays the map and the GUI elements to the screen."""
display(self.gui.get_longitude_range())
display(self.gui.get_lat_domain())
display(self.gui.get_longitude_direction())
display(self.gui.get_lat_lon_label())
display(self.planet_map)
display(self.gui.get_draw_label())
display(self.gui.get_wkt_text_box())
display(self.gui.get_wkt_button())
# Display map first, then add features
self.add_wfs_features()
def add_wkt(self, wktString):
""" Takes in a Well-Known text string
and draws it on the planetary map
:type wktString: String
:param wktString: Well-Known text string to draw on the map.
:raises: Invalid WKT String.
"""
try:
g1 = shapely.wkt.loads(wktString)
g2 = geojson.Feature(geometry=g1, properties={})
geo_json = GeoJSON(data=g2, style={
'color': 'yellow', 'opacity': 1, 'weight': 1.9, 'fillOpacity': 0.5})
self.planet_map.add_layer(geo_json)
except:
self.gui.get_wkt_text_box().value = "Invalid WKT String"
def handle_draw(self, *args, **kwargs):
""" Creates and displays the Well-Known text string when
the user draws a shape on the map.
:type args: Event
:param args: On draw.
:type kwargs: Object
:param kwargs: The GeoJson of the shape that was drawn.
"""
geo_json = kwargs.get('geo_json')
data = geo_json['geometry']
geom = geo.shape(data)
self.gui.get_wkt_text_box().value = geom.wkt
def handle_fullscreen(self, *args, **kwargs):
""" On fullscreen will add GUI elements to the map.
The GUI elements will go away when fullscreen is closed.
:type args: Event
:param args: On interaction with Leaflet map.
:type kwargs: Object
:param kwargs: Leaflet’s map object.
"""
if self.fullscreen != self.planet_map.fullscreen:
self.fullscreen = self.planet_map.fullscreen
self.display_change = True
if self.display_change:
self.display_change = False
if self.fullscreen:
self.planet_map.add_control(self.range_control)
self.planet_map.add_control(self.lat_control)
self.planet_map.add_control(self.direction_control)
self.planet_map.add_control(self.label_control)
else:
self.planet_map.remove_control(self.range_control)
self.planet_map.remove_control(self.lat_control)
self.planet_map.remove_control(self.direction_control)
self.planet_map.remove_control(self.label_control)
def handle_WKT_button(self, *args, **kwargs):
""" Will draw the Well-Known text string
in the text box on click of draw button.
:type args: Event
:param args: On click of drawn button
:type kwargs: Object
:param kwargs: WKT button.
"""
self.add_wkt(self.gui.get_wkt_text_box().value)
def add_wfs_features(self):
"""Grabs and Adds the wfs surface features layer
to the map for the specific target."""
geoJsonUrl = ("https://astrocloud.wr.usgs.gov/dataset/data/nomenclature/{}/WFS?"
"service=WFS&version=1.1.0&request=GetFeature&outputFormat=application%2Fjson"
"&srsName=EPSG%3A4326".format(self.target_name.upper()))
break_out = False
while not break_out:
try: # Try until no 404 error is thrown by server
with urllib.request.urlopen(geoJsonUrl, timeout=240) as url:
jsonp = json.loads(url.read())
# Sort features by diameter
jsonp['features'] = sorted(
jsonp['features'], key=lambda feature: feature["properties"]["diameter"], reverse=True)
geo_json = GeoJSON(data=jsonp, name="Show Feature Names")
geo_json.point_style = {
'fillOpacity': 1,
'radius': 3
}
geo_json.on_click(self.handle_feature_click)
self.planet_map.add_layer(geo_json)
break_out = True
except:
continue
def handle_feature_click(self, feature=None, coordinates=None, **kwargs):
""" Highlights the specific feature when you click on it on the map.
:type feature: String
:param feature: feature name.
:type coordinates: List
:param coordinates: Coordinates of the clicked position.
:type kwargs: Event
:param kwargs: On click.
:rtype: NULL
"""
self.handle_feature_click_counter += 1
if self.handle_feature_click_counter == 1:
self.handle_feature_click_feature = feature
elif self.handle_feature_click_counter == 2:
popup = Popup(
location=coordinates,
child=widgets.HTML(self.handle_feature_click_feature['name']),
close_button=True,
auto_close=True,
close_on_escape_key=False
)
self.planet_map.add_layer(popup)
self.handle_feature_click_counter = 0
return
class planetary_gui:
""" Creates the GUI elements needed for the Planetary Maps. """
def __init__(self):
""" Creates Planetary GUI class"""
self.longitude_range = None
self.lat_domain = None
self.longitude_direction = None
self.lat_lon_label = None
self.draw_Label = None
self.wkt_text_box = None
self.wkt_button = None
self.create_widgets()
def create_widgets(self):
""" Initializes the different GUI elements"""
self.longitude_range = widgets.ToggleButtons(
options=['0 to 360', '-180 to 180'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Longitude from 0 to 360', 'Longitude from -180 to 180']
)
self.lat_domain = widgets.ToggleButtons(
options=['Planetocentric', 'Planetographic'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Regular Latitude', 'Tangent Latitude']
)
self.lat_lon_label = widgets.Label()
self.draw_label = widgets.Label()
self.longitude_direction = widgets.ToggleButtons(
options=['Positive East', 'Positive West'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Longitude increasing east', 'Longitude Increasing West']
)
self.wkt_text_box = widgets.Text(
value='',
placeholder='Type something',
description='WKT String:',
disabled=False,
layout=widgets.Layout(width='75%')
)
self.wkt_button = widgets.Button(
description='Draw',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Draw WKT object'
)
def get_wkt_button(self):
""" Getter method for the Well-Known Text button.
:rtype: Well-Known Text button Object
"""
return self.wkt_button
def get_wkt_text_box(self):
""" Getter method for the Well-Known Text Box.
:rtype: Well-Known Text Box Object
"""
return self.wkt_text_box
def get_longitude_direction(self):
""" Getter method for the Longitude Direction Selector.
:rtype: Longitude Direction Selector Object
"""
return self.longitude_direction
def get_draw_label(self):
""" Getter method for the Well-Known Text Draw Label.
:rtype: Well-Known Text Draw Label Object
"""
return self.draw_label
def get_lat_lon_label(self):
""" Getter method for the Coordinate Input Box.
:rtype: Coordinate Input Box Object
"""
return self.lat_lon_label
def get_lat_domain(self):
""" Getter method for the Latitude Domain Selector.
:rtype: Latitude Domain Selector Object
"""
return self.lat_domain
def get_longitude_range(self):
""" Getter method for the Longitude Range Selector.
:rtype: Longitude Range Selector Object
"""
return self.longitude_range | jupyter/CartoCosmos/CartoCosmos.py | from ipyleaflet import *
import ipywidgets as widgets
import math as Math
import json
import geojson
import shapely.geometry as geo
import shapely.wkt
import os
import urllib.request
class planetary_maps:
""" The Central class that creates interactive planetary maps in Jupyter Notebooks.
Works with all target bodies supported by the USGS by loading the body’s base layers
and overlays in a LayerCollection."""
def __init__(self, targetName):
""" Initializes planetary map of the specific target.
:type targetName: String
:param targetName: The name of the target you wish to map.
"""
self.target_name = targetName
self.layers = []
self.planet_map = None
self.map_layers = {
'base': [],
'overlays': []
}
self.display_change = False
self.fullscreen = False
self.range_control = None
self.lat_control = None
self.direction_control = None
self.label_control = None
self.gui = planetary_gui()
self.dmajor_radius = 0
self.dminor_radius = 0
# Variables to keep track of how many times handle_feature_click is called.
# There is a bug where it is called twice, the first time passing in feature
# and the second time passing in the coordinates. We need both of those variables
self.handle_feature_click_counter = 0
self.handle_feature_click_feature = None
self.json_dict = None
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + "/geoServerLayers.json", 'r') as fp:
self.json_dict = json.load(fp)
self.find_radius()
self.create_layers()
self.create_map()
self.feature_collection = {
'type': 'FeatureCollection',
'features': []
}
def find_radius(self):
""" Finds the a and c axis radii of that specific target."""
targets = self.json_dict['targets']
for i, target in enumerate(targets):
current_target = targets[i]
if current_target['name'].lower() == self.target_name:
self.dmajor_radius = float(
current_target['aaxisradius']) * 1000.0
self.dminor_radius = float(
current_target['caxisradius']) * 1000.0
break
def create_layers(self):
""" Grabs all the layers for the specific target."""
targets = self.json_dict['targets']
for i, target in enumerate(targets):
current_target = targets[i]
if current_target['name'].lower() == self.target_name:
json_layers = current_target['webmap']
for j, layers in enumerate(json_layers):
current_layer = json_layers[j]
if current_layer['type'] == 'WMS':
if current_layer['transparent'] == 'false':
self.map_layers['base'].append(current_layer)
else:
if current_layer['displayname'] == "Show Feature Names":
continue
self.map_layers['overlays'].append(current_layer)
for layer in self.map_layers['base']:
if layer['projection'] == 'cylindrical':
wms_layer = WMSLayer(
url=layer["url"] + "?map=" + layer["map"],
layers=layer['layer'],
name=layer['displayname'],
crs='EPSG4326',
base=True,
show_loading=False,
)
self.layers.append(wms_layer)
for layer in self.map_layers['overlays']:
if layer['projection'] == 'cylindrical':
wms_layer = WMSLayer(
url=layer["url"] + "?map=" + layer["map"],
layers=layer['layer'],
name=layer['displayname'],
crs='EPSG4326',
base=False,
transparent=True,
format="image/png",
show_loading=False,
visible=False,
)
self.layers.append(wms_layer)
def handle_interaction(self, **kwargs):
""" Gets and displays the coordinates of the user’s mouse position on the map.
Takes in the GUI coordinate handler in order to display different longitude directions
and ranges as well as different latitude types.
:type kwargs: Event
:param kwargs: Leaflet’s Interaction Object.
"""
if kwargs.get('type') == 'mousemove':
coords = kwargs.get('coordinates')
lat = coords[0]
lng = coords[1]
if lng < 0:
if Math.floor(lng/180) % 2 == 0:
lng = 180 - (abs(lng) % 180)
else:
lng = (lng % 180) - 180
else:
if Math.floor(lng/180) % 2 == 0:
lng = lng % 180
else:
lng = -180 + (abs(lng) % 180)
if self.gui.get_longitude_range().value == "0 to 360":
if(lng < 0):
lng += 360
if self.gui.get_lat_domain().value == "Planetographic":
converted_latitude = Math.radians(lat)
converted_latitude = Math.atan(
((self.dmajor_radius / self.dminor_radius)**2) * (Math.tan(converted_latitude)))
converted_latitude = Math.degrees(converted_latitude)
lat = converted_latitude
if self.gui.get_longitude_direction().value == "Positive West":
if(self.gui.get_longitude_range().value == "-180 to 180"):
lng *= -1
else:
lng = 360 - lng
self.gui.get_lat_lon_label().value = "Lat, Lon: " + \
str(round(lat, 2)) + ", " + str(round(lng, 2))
def create_map(self):
""" Creates the map instance of the specific target.
Also adds all the controls to the map."""
self.planet_map = Map(layers=tuple(self.layers),
center=(0, 0), zoom=1, crs='EPSG4326')
draw_control = DrawControl()
draw_control.polyline = {
"shapeOptions": {
"color": "#6bc2e5",
"weight": 8,
"opacity": .5
}
}
draw_control.polygon = {
"shapeOptions": {
"fillColor": "#6be5c3",
"color": "#6be5c3",
"fillOpacity": .5
},
"drawError": {
"color": "#dd253b",
"message": "Oups!"
},
"allowIntersection": False
}
draw_control.circle = {
"shapeOptions": {
"fillColor": "#efed69",
"color": "#efed69",
"fillOpacity": .5
}
}
draw_control.rectangle = {
"shapeOptions": {
"fillColor": "#fca45d",
"color": "#fca45d",
"fillOpacity": .5
}
}
draw_control.on_draw(self.handle_draw)
self.gui.get_wkt_button().on_click(self.handle_WKT_button)
self.range_control = WidgetControl(
widget=self.gui.get_longitude_range(), position='topright')
self.lat_control = WidgetControl(
widget=self.gui.get_lat_domain(), position='topright')
self.direction_control = WidgetControl(
widget=self.gui.get_longitude_direction(), position='topright')
self.label_control = WidgetControl(
widget=self.gui.get_lat_lon_label(), position='bottomright')
self.planet_map.add_control(draw_control)
self.planet_map.add_control(LayersControl(position='topright'))
self.planet_map.on_interaction(self.handle_interaction)
fullscreen_control = FullScreenControl(position='bottomleft')
self.planet_map.add_control(fullscreen_control)
self.planet_map.on_interaction(self.handle_fullscreen)
self.planet_map.add_control(ScaleControl(position='bottomleft'))
def display_map(self):
""" Displays the map and the GUI elements to the screen."""
display(self.gui.get_longitude_range())
display(self.gui.get_lat_domain())
display(self.gui.get_longitude_direction())
display(self.gui.get_lat_lon_label())
display(self.planet_map)
display(self.gui.get_draw_label())
display(self.gui.get_wkt_text_box())
display(self.gui.get_wkt_button())
# Display map first, then add features
self.add_wfs_features()
def add_wkt(self, wktString):
""" Takes in a Well-Known text string
and draws it on the planetary map
:type wktString: String
:param wktString: Well-Known text string to draw on the map.
:raises: Invalid WKT String.
"""
try:
g1 = shapely.wkt.loads(wktString)
g2 = geojson.Feature(geometry=g1, properties={})
geo_json = GeoJSON(data=g2, style={
'color': 'yellow', 'opacity': 1, 'weight': 1.9, 'fillOpacity': 0.5})
self.planet_map.add_layer(geo_json)
except:
self.gui.get_wkt_text_box().value = "Invalid WKT String"
def handle_draw(self, *args, **kwargs):
""" Creates and displays the Well-Known text string when
the user draws a shape on the map.
:type args: Event
:param args: On draw.
:type kwargs: Object
:param kwargs: The GeoJson of the shape that was drawn.
"""
geo_json = kwargs.get('geo_json')
data = geo_json['geometry']
geom = geo.shape(data)
self.gui.get_wkt_text_box().value = geom.wkt
def handle_fullscreen(self, *args, **kwargs):
""" On fullscreen will add GUI elements to the map.
The GUI elements will go away when fullscreen is closed.
:type args: Event
:param args: On interaction with Leaflet map.
:type kwargs: Object
:param kwargs: Leaflet’s map object.
"""
if self.fullscreen != self.planet_map.fullscreen:
self.fullscreen = self.planet_map.fullscreen
self.display_change = True
if self.display_change:
self.display_change = False
if self.fullscreen:
self.planet_map.add_control(self.range_control)
self.planet_map.add_control(self.lat_control)
self.planet_map.add_control(self.direction_control)
self.planet_map.add_control(self.label_control)
else:
self.planet_map.remove_control(self.range_control)
self.planet_map.remove_control(self.lat_control)
self.planet_map.remove_control(self.direction_control)
self.planet_map.remove_control(self.label_control)
def handle_WKT_button(self, *args, **kwargs):
""" Will draw the Well-Known text string
in the text box on click of draw button.
:type args: Event
:param args: On click of drawn button
:type kwargs: Object
:param kwargs: WKT button.
"""
self.add_wkt(self.gui.get_wkt_text_box().value)
def add_wfs_features(self):
"""Grabs and Adds the wfs surface features layer
to the map for the specific target."""
geoJsonUrl = ("https://astrocloud.wr.usgs.gov/dataset/data/nomenclature/{}/WFS?"
"service=WFS&version=1.1.0&request=GetFeature&outputFormat=application%2Fjson"
"&srsName=EPSG%3A4326".format(self.target_name.upper()))
break_out = False
while not break_out:
try: # Try until no 404 error is thrown by server
with urllib.request.urlopen(geoJsonUrl, timeout=240) as url:
jsonp = json.loads(url.read())
# Sort features by diameter
jsonp['features'] = sorted(
jsonp['features'], key=lambda feature: feature["properties"]["diameter"], reverse=True)
geo_json = GeoJSON(data=jsonp, name="Show Feature Names")
geo_json.point_style = {
'fillOpacity': 1,
'radius': 3
}
geo_json.on_click(self.handle_feature_click)
self.planet_map.add_layer(geo_json)
break_out = True
except:
continue
def handle_feature_click(self, feature=None, coordinates=None, **kwargs):
""" Highlights the specific feature when you click on it on the map.
:type feature: String
:param feature: feature name.
:type coordinates: List
:param coordinates: Coordinates of the clicked position.
:type kwargs: Event
:param kwargs: On click.
:rtype: NULL
"""
self.handle_feature_click_counter += 1
if self.handle_feature_click_counter == 1:
self.handle_feature_click_feature = feature
elif self.handle_feature_click_counter == 2:
popup = Popup(
location=coordinates,
child=widgets.HTML(self.handle_feature_click_feature['name']),
close_button=True,
auto_close=True,
close_on_escape_key=False
)
self.planet_map.add_layer(popup)
self.handle_feature_click_counter = 0
return
class planetary_gui:
""" Creates the GUI elements needed for the Planetary Maps. """
def __init__(self):
""" Creates Planetary GUI class"""
self.longitude_range = None
self.lat_domain = None
self.longitude_direction = None
self.lat_lon_label = None
self.draw_Label = None
self.wkt_text_box = None
self.wkt_button = None
self.create_widgets()
def create_widgets(self):
""" Initializes the different GUI elements"""
self.longitude_range = widgets.ToggleButtons(
options=['0 to 360', '-180 to 180'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Longitude from 0 to 360', 'Longitude from -180 to 180']
)
self.lat_domain = widgets.ToggleButtons(
options=['Planetocentric', 'Planetographic'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Regular Latitude', 'Tangent Latitude']
)
self.lat_lon_label = widgets.Label()
self.draw_label = widgets.Label()
self.longitude_direction = widgets.ToggleButtons(
options=['Positive East', 'Positive West'],
description='',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltips=['Longitude increasing east', 'Longitude Increasing West']
)
self.wkt_text_box = widgets.Text(
value='',
placeholder='Type something',
description='WKT String:',
disabled=False,
layout=widgets.Layout(width='75%')
)
self.wkt_button = widgets.Button(
description='Draw',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Draw WKT object'
)
def get_wkt_button(self):
""" Getter method for the Well-Known Text button.
:rtype: Well-Known Text button Object
"""
return self.wkt_button
def get_wkt_text_box(self):
""" Getter method for the Well-Known Text Box.
:rtype: Well-Known Text Box Object
"""
return self.wkt_text_box
def get_longitude_direction(self):
""" Getter method for the Longitude Direction Selector.
:rtype: Longitude Direction Selector Object
"""
return self.longitude_direction
def get_draw_label(self):
""" Getter method for the Well-Known Text Draw Label.
:rtype: Well-Known Text Draw Label Object
"""
return self.draw_label
def get_lat_lon_label(self):
""" Getter method for the Coordinate Input Box.
:rtype: Coordinate Input Box Object
"""
return self.lat_lon_label
def get_lat_domain(self):
""" Getter method for the Latitude Domain Selector.
:rtype: Latitude Domain Selector Object
"""
return self.lat_domain
def get_longitude_range(self):
""" Getter method for the Longitude Range Selector.
:rtype: Longitude Range Selector Object
"""
return self.longitude_range | 0.733738 | 0.369628 |
from .grid import (createGrid, appendTetrahedronBoundary, appendTriangleBoundary)
from .mesh import (createMesh, createParaMesh, createParaMesh2DGrid,
merge2Meshes, refineQuad2Tri,
mergeMeshes, readGmsh, readHydrus2dMesh,
readHydrus3dMesh, readTetgen, readTriangle, convertHDF5Mesh,
readHDF5Mesh, readFenicsHDF5Mesh, exportHDF5Mesh,
exportFenicsHDF5Mesh)
from .polytools import createParaDomain2D # keep for backward compatibility
from .polytools import (createCircle, createLine, createParaMeshPLC,
createPolygon, createRectangle, createWorld, mergePLC,
readPLC, exportPLC, writePLC)
from .quality import (quality)
from .mapping import (nodeDataToCellData,
cellDataToNodeData,
nodeDataToBoundaryData,
cellDataToBoundaryData,
fillEmptyToCellArray,
tapeMeasureToCoordinates,
interpolate,
interpolateAlongCurve
)
# This is neither functional nor good practice # why?
# __all__ = [name for name in dir() if '_' not in name]
__all__ = ['appendTriangleBoundary',
'appendTetrahedronBoundary',
'createMesh',
'readGmsh',
'readTriangle',
'readTetgen',
'readHydrus2dMesh',
'readHydrus3dMesh',
'readHDF5Mesh',
'readFenicsHDF5Mesh',
'refineQuad2Tri',
'mergeMeshes',
'merge2Meshes',
'createParaMesh',
'createParaMesh2DGrid',
'createPolygon',
'createRectangle',
'createWorld',
'createCircle',
'createLine',
'createParaMeshPLC',
'convertHDF5Mesh',
'exportHDF5Mesh',
'exportFenicsHDF5Mesh',
'mergePLC',
'readPLC',
'writePLC',
'exportPLC',
'createParaDomain2D', # keep for backward compatibility
'quality'
] | python/pygimli/meshtools/__init__.py | from .grid import (createGrid, appendTetrahedronBoundary, appendTriangleBoundary)
from .mesh import (createMesh, createParaMesh, createParaMesh2DGrid,
merge2Meshes, refineQuad2Tri,
mergeMeshes, readGmsh, readHydrus2dMesh,
readHydrus3dMesh, readTetgen, readTriangle, convertHDF5Mesh,
readHDF5Mesh, readFenicsHDF5Mesh, exportHDF5Mesh,
exportFenicsHDF5Mesh)
from .polytools import createParaDomain2D # keep for backward compatibility
from .polytools import (createCircle, createLine, createParaMeshPLC,
createPolygon, createRectangle, createWorld, mergePLC,
readPLC, exportPLC, writePLC)
from .quality import (quality)
from .mapping import (nodeDataToCellData,
cellDataToNodeData,
nodeDataToBoundaryData,
cellDataToBoundaryData,
fillEmptyToCellArray,
tapeMeasureToCoordinates,
interpolate,
interpolateAlongCurve
)
# This is neither functional nor good practice # why?
# __all__ = [name for name in dir() if '_' not in name]
__all__ = ['appendTriangleBoundary',
'appendTetrahedronBoundary',
'createMesh',
'readGmsh',
'readTriangle',
'readTetgen',
'readHydrus2dMesh',
'readHydrus3dMesh',
'readHDF5Mesh',
'readFenicsHDF5Mesh',
'refineQuad2Tri',
'mergeMeshes',
'merge2Meshes',
'createParaMesh',
'createParaMesh2DGrid',
'createPolygon',
'createRectangle',
'createWorld',
'createCircle',
'createLine',
'createParaMeshPLC',
'convertHDF5Mesh',
'exportHDF5Mesh',
'exportFenicsHDF5Mesh',
'mergePLC',
'readPLC',
'writePLC',
'exportPLC',
'createParaDomain2D', # keep for backward compatibility
'quality'
] | 0.390941 | 0.331945 |
import base64
import json
import os
import re
from urllib import parse
import requests
from requests import utils
import rsa
class CaiYunCheckIn:
def __init__(self, check_item):
self.check_item = check_item
self.public_key = """-----<KEY>"""
@staticmethod
def get_encrypt_time(session):
payload = parse.urlencode({"op": "currentTimeMillis"})
resp = session.post(
url="https://caiyun.feixin.10086.cn:7071/portal/ajax/tools/opRequest.action", data=payload
).json()
if resp.get("code") != 10000:
print("获取时间戳失败: ", resp["msg"])
return 0
return resp.get("result", 0)
def get_ticket(self, session):
payload = json.dumps({"sourceId": 1003, "type": 1, "encryptTime": self.get_encrypt_time(session=session)})
pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(self.public_key)
crypto = b""
divide = int(len(payload) / 117)
divide = divide if (divide > 0) else divide + 1
line = divide if (len(payload) % 117 == 0) else divide + 1
for i in range(line):
crypto += rsa.encrypt(payload[i * 117: (i + 1) * 117].encode(), pubkey)
crypto1 = base64.b64encode(crypto)
return crypto1.decode()
@staticmethod
def user_info(session):
resp = session.get(url="https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp").text
account = re.findall(r"var loginAccount = \"(.*?)\";", resp)
if account:
account = account[0]
else:
account = "未获取到用户信息"
return account
def sign(self, session):
ticket = self.get_ticket(session=session)
payload = parse.urlencode({"op": "receive", "data": ticket})
resp = session.post(
url="https://caiyun.feixin.10086.cn:7071/portal/ajax/common/caiYunSignIn.action", data=payload,
).json()
if resp["code"] != 10000:
msg = "签到失败:" + resp["msg"]
else:
msg = f'月签到天数: {resp["result"]["monthDays"]}\n当前总积分:{resp["result"]["totalPoints"]}'
return msg
def main(self):
caiyun_cookie = {
item.split("=")[0]: item.split("=")[1] for item in self.check_item.get("caiyun_cookie").split("; ")
}
session = requests.session()
requests.utils.add_dict_to_cookiejar(session.cookies, caiyun_cookie)
session.headers.update(
{
"User-Agent": "Mozilla/5.0 (Linux; Android 10; M2007J3SC Build/QKQ1.191222.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36 MCloudApp/7.6.0",
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://caiyun.feixin.10086.cn:7071",
"Referer": "https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp",
}
)
username = self.user_info(session=session)
sign_msg = self.sign(session=session)
msg = f"用户信息: {username}\n{sign_msg}".strip()
return msg
if __name__ == "__main__":
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "config/config.json"), "r", encoding="utf-8"
) as f:
datas = json.loads(f.read())
_check_item = datas.get("CAIYUN_COOKIE_LIST", [])[0]
print(CaiYunCheckIn(check_item=_check_item).main()) | caiyun/caiyun.py | import base64
import json
import os
import re
from urllib import parse
import requests
from requests import utils
import rsa
class CaiYunCheckIn:
def __init__(self, check_item):
self.check_item = check_item
self.public_key = """-----<KEY>"""
@staticmethod
def get_encrypt_time(session):
payload = parse.urlencode({"op": "currentTimeMillis"})
resp = session.post(
url="https://caiyun.feixin.10086.cn:7071/portal/ajax/tools/opRequest.action", data=payload
).json()
if resp.get("code") != 10000:
print("获取时间戳失败: ", resp["msg"])
return 0
return resp.get("result", 0)
def get_ticket(self, session):
payload = json.dumps({"sourceId": 1003, "type": 1, "encryptTime": self.get_encrypt_time(session=session)})
pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(self.public_key)
crypto = b""
divide = int(len(payload) / 117)
divide = divide if (divide > 0) else divide + 1
line = divide if (len(payload) % 117 == 0) else divide + 1
for i in range(line):
crypto += rsa.encrypt(payload[i * 117: (i + 1) * 117].encode(), pubkey)
crypto1 = base64.b64encode(crypto)
return crypto1.decode()
@staticmethod
def user_info(session):
resp = session.get(url="https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp").text
account = re.findall(r"var loginAccount = \"(.*?)\";", resp)
if account:
account = account[0]
else:
account = "未获取到用户信息"
return account
def sign(self, session):
ticket = self.get_ticket(session=session)
payload = parse.urlencode({"op": "receive", "data": ticket})
resp = session.post(
url="https://caiyun.feixin.10086.cn:7071/portal/ajax/common/caiYunSignIn.action", data=payload,
).json()
if resp["code"] != 10000:
msg = "签到失败:" + resp["msg"]
else:
msg = f'月签到天数: {resp["result"]["monthDays"]}\n当前总积分:{resp["result"]["totalPoints"]}'
return msg
def main(self):
caiyun_cookie = {
item.split("=")[0]: item.split("=")[1] for item in self.check_item.get("caiyun_cookie").split("; ")
}
session = requests.session()
requests.utils.add_dict_to_cookiejar(session.cookies, caiyun_cookie)
session.headers.update(
{
"User-Agent": "Mozilla/5.0 (Linux; Android 10; M2007J3SC Build/QKQ1.191222.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/83.0.4103.106 Mobile Safari/537.36 MCloudApp/7.6.0",
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://caiyun.feixin.10086.cn:7071",
"Referer": "https://caiyun.feixin.10086.cn:7071/portal/newsignin/index.jsp",
}
)
username = self.user_info(session=session)
sign_msg = self.sign(session=session)
msg = f"用户信息: {username}\n{sign_msg}".strip()
return msg
if __name__ == "__main__":
with open(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "config/config.json"), "r", encoding="utf-8"
) as f:
datas = json.loads(f.read())
_check_item = datas.get("CAIYUN_COOKIE_LIST", [])[0]
print(CaiYunCheckIn(check_item=_check_item).main()) | 0.23092 | 0.119614 |
from django.db.models.signals import post_save
from django.dispatch import receiver
from audit.models import AuditLog, RelatedObjectType
from audit.serializers import AuditLogSerializer
from integrations.datadog.datadog import DataDogWrapper
from integrations.new_relic.new_relic import NewRelicWrapper
import logging
from webhooks.webhooks import WebhookEventType, call_organisation_webhooks
logger = logging.getLogger(__name__)
@receiver(post_save, sender=AuditLog)
def call_webhooks(sender, instance, **kwargs):
data = AuditLogSerializer(instance=instance).data
if not (instance.project or instance.environment):
logger.warning("Audit log without project or environment. Not sending webhook.")
return
organisation = (
instance.project.organisation
if instance.project
else instance.environment.project.organisation
)
call_organisation_webhooks(organisation, data, WebhookEventType.AUDIT_LOG_CREATED)
def _send_audit_log_event_verification(instance, integration):
if not instance.project:
logger.warning(
f"Audit log missing project, not sending data to {integration.get('name')}."
)
return
if not hasattr(instance.project, integration.get("attr")):
logger.debug(
f"No datadog integration configured for project {instance.project.id}"
)
return
# Only handle Feature related changes
if instance.related_object_type not in [
RelatedObjectType.FEATURE.name,
RelatedObjectType.FEATURE_STATE.name,
RelatedObjectType.SEGMENT.name,
]:
logger.debug(
f"Ignoring none Flag audit event {instance.related_object_type} for {integration.get('name', '').lower()}"
)
return
return getattr(instance.project, integration.get("attr"))
def _track_event_async(instance, integration_client):
event_data = integration_client.generate_event_data(
log=instance.log,
email=instance.author.email if instance.author else "",
environment_name=instance.environment.name.lower()
if instance.environment
else "",
)
integration_client.track_event_async(event=event_data)
@receiver(post_save, sender=AuditLog)
def send_audit_log_event_to_datadog(sender, instance, **kwargs):
integration = {
"name": "DataDog",
"attr": "data_dog_config",
}
data_dog_config = _send_audit_log_event_verification(instance, integration)
if not data_dog_config:
return
data_dog = DataDogWrapper(
base_url=data_dog_config.base_url, api_key=data_dog_config.api_key
)
_track_event_async(instance, data_dog)
@receiver(post_save, sender=AuditLog)
def send_audit_log_event_to_new_relic(sender, instance, **kwargs):
integration = {
"name": "New Relic",
"attr": "new_relic_config",
}
new_relic_config = _send_audit_log_event_verification(instance, integration)
if not new_relic_config:
return
new_relic = NewRelicWrapper(
base_url=new_relic_config.base_url,
api_key=new_relic_config.api_key,
app_id=new_relic_config.app_id,
)
_track_event_async(instance, new_relic) | src/audit/signals.py | from django.db.models.signals import post_save
from django.dispatch import receiver
from audit.models import AuditLog, RelatedObjectType
from audit.serializers import AuditLogSerializer
from integrations.datadog.datadog import DataDogWrapper
from integrations.new_relic.new_relic import NewRelicWrapper
import logging
from webhooks.webhooks import WebhookEventType, call_organisation_webhooks
logger = logging.getLogger(__name__)
@receiver(post_save, sender=AuditLog)
def call_webhooks(sender, instance, **kwargs):
data = AuditLogSerializer(instance=instance).data
if not (instance.project or instance.environment):
logger.warning("Audit log without project or environment. Not sending webhook.")
return
organisation = (
instance.project.organisation
if instance.project
else instance.environment.project.organisation
)
call_organisation_webhooks(organisation, data, WebhookEventType.AUDIT_LOG_CREATED)
def _send_audit_log_event_verification(instance, integration):
if not instance.project:
logger.warning(
f"Audit log missing project, not sending data to {integration.get('name')}."
)
return
if not hasattr(instance.project, integration.get("attr")):
logger.debug(
f"No datadog integration configured for project {instance.project.id}"
)
return
# Only handle Feature related changes
if instance.related_object_type not in [
RelatedObjectType.FEATURE.name,
RelatedObjectType.FEATURE_STATE.name,
RelatedObjectType.SEGMENT.name,
]:
logger.debug(
f"Ignoring none Flag audit event {instance.related_object_type} for {integration.get('name', '').lower()}"
)
return
return getattr(instance.project, integration.get("attr"))
def _track_event_async(instance, integration_client):
event_data = integration_client.generate_event_data(
log=instance.log,
email=instance.author.email if instance.author else "",
environment_name=instance.environment.name.lower()
if instance.environment
else "",
)
integration_client.track_event_async(event=event_data)
@receiver(post_save, sender=AuditLog)
def send_audit_log_event_to_datadog(sender, instance, **kwargs):
integration = {
"name": "DataDog",
"attr": "data_dog_config",
}
data_dog_config = _send_audit_log_event_verification(instance, integration)
if not data_dog_config:
return
data_dog = DataDogWrapper(
base_url=data_dog_config.base_url, api_key=data_dog_config.api_key
)
_track_event_async(instance, data_dog)
@receiver(post_save, sender=AuditLog)
def send_audit_log_event_to_new_relic(sender, instance, **kwargs):
integration = {
"name": "New Relic",
"attr": "new_relic_config",
}
new_relic_config = _send_audit_log_event_verification(instance, integration)
if not new_relic_config:
return
new_relic = NewRelicWrapper(
base_url=new_relic_config.base_url,
api_key=new_relic_config.api_key,
app_id=new_relic_config.app_id,
)
_track_event_async(instance, new_relic) | 0.541166 | 0.131145 |
from __future__ import print_function
import os
import random
import socket
import sys
_PROTOS = [(socket.SOCK_STREAM, socket.IPPROTO_TCP),
(socket.SOCK_DGRAM, socket.IPPROTO_UDP)]
def Bind(port, socket_type, socket_proto):
"""Try to bind to a socket of the specified type, protocol, and port.
This is primarily a helper function for PickUnusedPort, used to see
if a particular port number is available.
Args:
port: The port number to bind to, or 0 to have the OS pick a free port.
socket_type: The type of the socket (ex: socket.SOCK_STREAM).
socket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP).
Returns:
The port number on success or None on failure.
"""
s = socket.socket(socket.AF_INET, socket_type, socket_proto)
try:
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
return s.getsockname()[1]
except socket.error:
return None
finally:
s.close()
def IsPortFree(port):
"""Check if specified port is free.
Args:
port: integer, port to check
Returns:
boolean, whether it is free to use for both TCP and UDP
"""
return (Bind(port, _PROTOS[0][0], _PROTOS[0][1]) and
Bind(port, _PROTOS[1][0], _PROTOS[1][1]))
def PickUnusedPort(pid=None):
"""A pure python implementation of PickUnusedPort.
Args:
pid: PID to tell the portserver to associate the reservation with. If None,
the current process's PID is used.
Returns:
A port number that is unused on both TCP and UDP.
"""
# Provide access to the portserver on an opt-in basis, to avoid
# changing the behavior of production code.
if 'PORTSERVER_ADDRESS' not in os.environ:
return _PickUnusedPortWithoutServer()
return _GetPortFromPortServer(os.environ['PORTSERVER_ADDRESS'], pid=pid)
def _PickUnusedPortWithoutServer():
"""A pure python implementation of PickUnusedPort_NoServer().
This code is based on the C++ PickUnusedPort_NoServer() code
(net/util/netutil.cc) and ensures that the port is available on both
TCP and UDP.
This function is an implementation detail of PickUnusedPort(), and
should not be called by code outside of this module.
Returns:
A port number that is unused on both TCP and UDP.
"""
# Try random ports first.
r = random.Random()
for _ in range(10):
port = int(r.randrange(32768, 60000))
if IsPortFree(port):
return port
# Try OS-assigned ports next.
# Ambrose discovered that on the 2.6 kernel, calling Bind() on UDP socket
# returns the same port over and over. So always try TCP first.
while True:
# Ask the OS for an unused port.
port = Bind(0, _PROTOS[0][0], _PROTOS[0][1])
# Check if this port is unused on the other protocol.
if port and Bind(port, _PROTOS[1][0], _PROTOS[1][1]):
return port
def _GetPortFromPortServer(portserver_address, pid=None):
"""Request a free a port from a system-wide portserver.
This is based on the C++ GetPortFromPortServer() code
(net/util/netutil.cc) and follows the portserver protocol documented
in that file.
This function is an implementation detail of PickUnusedPort(), and
should not be called by code outside of this module.
Args:
portserver_address: The address (path) of a unix domain socket
with which to connect to the portserver. A leading '@'
character indicates an address in the "abstract namespace."
pid: The PID to tell the portserver to associate the reservation with.
If None, the current process's PID is used.
Returns:
The port number on success or None on failure.
"""
# An AF_UNIX address may start with a zero byte, in which case it is in the
# "abstract namespace", and doesn't have any filesystem representation.
# See 'man 7 unix' for details.
# The convention is to write '@' in the address to represent this zero byte.
if portserver_address[0] == '@':
portserver_address = '\0' + portserver_address[1:]
if pid is None:
pid = os.getpid()
try:
# Create socket.
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
# Connect to portserver.
sock.connect(portserver_address)
# Write request.
sock.sendall(('%d\n' % pid).encode('ascii'))
# Read response.
# 1K should be ample buffer space.
buf = sock.recv(1024)
finally:
sock.close()
except socket.error:
print ('Socket error when connecting to portserver. Falling back '
'to picking port without portserver.')
return None
try:
return int(buf.split(b'\n')[0])
except ValueError:
print ('Portserver failed to find a port. Falling back to '
'picking port without portserver.')
return None
if __name__ == '__main__':
# If passed an argument, cast it to int and treat it as a PID, otherwise pass
# pid=None to use portpicker's PID.
print (PickUnusedPort(pid=int(sys.argv[1]) if len(sys.argv) > 1 else None)) | google_appengine/google/net/util/python/portpicker.py | from __future__ import print_function
import os
import random
import socket
import sys
_PROTOS = [(socket.SOCK_STREAM, socket.IPPROTO_TCP),
(socket.SOCK_DGRAM, socket.IPPROTO_UDP)]
def Bind(port, socket_type, socket_proto):
"""Try to bind to a socket of the specified type, protocol, and port.
This is primarily a helper function for PickUnusedPort, used to see
if a particular port number is available.
Args:
port: The port number to bind to, or 0 to have the OS pick a free port.
socket_type: The type of the socket (ex: socket.SOCK_STREAM).
socket_proto: The protocol of the socket (ex: socket.IPPROTO_TCP).
Returns:
The port number on success or None on failure.
"""
s = socket.socket(socket.AF_INET, socket_type, socket_proto)
try:
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(('', port))
return s.getsockname()[1]
except socket.error:
return None
finally:
s.close()
def IsPortFree(port):
"""Check if specified port is free.
Args:
port: integer, port to check
Returns:
boolean, whether it is free to use for both TCP and UDP
"""
return (Bind(port, _PROTOS[0][0], _PROTOS[0][1]) and
Bind(port, _PROTOS[1][0], _PROTOS[1][1]))
def PickUnusedPort(pid=None):
"""A pure python implementation of PickUnusedPort.
Args:
pid: PID to tell the portserver to associate the reservation with. If None,
the current process's PID is used.
Returns:
A port number that is unused on both TCP and UDP.
"""
# Provide access to the portserver on an opt-in basis, to avoid
# changing the behavior of production code.
if 'PORTSERVER_ADDRESS' not in os.environ:
return _PickUnusedPortWithoutServer()
return _GetPortFromPortServer(os.environ['PORTSERVER_ADDRESS'], pid=pid)
def _PickUnusedPortWithoutServer():
"""A pure python implementation of PickUnusedPort_NoServer().
This code is based on the C++ PickUnusedPort_NoServer() code
(net/util/netutil.cc) and ensures that the port is available on both
TCP and UDP.
This function is an implementation detail of PickUnusedPort(), and
should not be called by code outside of this module.
Returns:
A port number that is unused on both TCP and UDP.
"""
# Try random ports first.
r = random.Random()
for _ in range(10):
port = int(r.randrange(32768, 60000))
if IsPortFree(port):
return port
# Try OS-assigned ports next.
# Ambrose discovered that on the 2.6 kernel, calling Bind() on UDP socket
# returns the same port over and over. So always try TCP first.
while True:
# Ask the OS for an unused port.
port = Bind(0, _PROTOS[0][0], _PROTOS[0][1])
# Check if this port is unused on the other protocol.
if port and Bind(port, _PROTOS[1][0], _PROTOS[1][1]):
return port
def _GetPortFromPortServer(portserver_address, pid=None):
"""Request a free a port from a system-wide portserver.
This is based on the C++ GetPortFromPortServer() code
(net/util/netutil.cc) and follows the portserver protocol documented
in that file.
This function is an implementation detail of PickUnusedPort(), and
should not be called by code outside of this module.
Args:
portserver_address: The address (path) of a unix domain socket
with which to connect to the portserver. A leading '@'
character indicates an address in the "abstract namespace."
pid: The PID to tell the portserver to associate the reservation with.
If None, the current process's PID is used.
Returns:
The port number on success or None on failure.
"""
# An AF_UNIX address may start with a zero byte, in which case it is in the
# "abstract namespace", and doesn't have any filesystem representation.
# See 'man 7 unix' for details.
# The convention is to write '@' in the address to represent this zero byte.
if portserver_address[0] == '@':
portserver_address = '\0' + portserver_address[1:]
if pid is None:
pid = os.getpid()
try:
# Create socket.
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
# Connect to portserver.
sock.connect(portserver_address)
# Write request.
sock.sendall(('%d\n' % pid).encode('ascii'))
# Read response.
# 1K should be ample buffer space.
buf = sock.recv(1024)
finally:
sock.close()
except socket.error:
print ('Socket error when connecting to portserver. Falling back '
'to picking port without portserver.')
return None
try:
return int(buf.split(b'\n')[0])
except ValueError:
print ('Portserver failed to find a port. Falling back to '
'picking port without portserver.')
return None
if __name__ == '__main__':
# If passed an argument, cast it to int and treat it as a PID, otherwise pass
# pid=None to use portpicker's PID.
print (PickUnusedPort(pid=int(sys.argv[1]) if len(sys.argv) > 1 else None)) | 0.610453 | 0.216757 |
import logging
from typing import Awaitable, Callable, Dict, List, Tuple
from aiohttp import ClientSession
from .consts import (
ATTR_ALTITUDE,
ATTR_CALLSIGN,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_MODE_S,
ATTR_SPEED,
ATTR_SQUAWK,
ATTR_TRACK,
ATTR_UPDATED,
ATTR_VERT_RATE,
)
from .feed import Feed
from .feed_aggregator import FeedAggregator
from .feed_entry import FeedEntry
from .feed_manager import FeedManagerBase
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 8754
URL_TEMPLATE = "http://{}:{}/flights.json"
class FlightradarFlightsFeedManager(FeedManagerBase):
"""Feed Manager for Flightradar Flights feed."""
def __init__(
self,
generate_callback: Callable[[str], Awaitable[None]],
update_callback: Callable[[str], Awaitable[None]],
remove_callback: Callable[[str], Awaitable[None]],
coordinates: Tuple[float, float],
websession: ClientSession,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
"""Initialize the NSW Rural Fire Services Feed Manager."""
feed = FlightradarFlightsFeedAggregator(
coordinates,
websession,
filter_radius=filter_radius,
url=url,
hostname=hostname,
port=port,
)
super().__init__(feed, generate_callback, update_callback, remove_callback)
class FlightradarFlightsFeedAggregator(FeedAggregator):
"""Aggregates date received from the feed over a period of time."""
def __init__(
self,
home_coordinates: Tuple[float, float],
websession: ClientSession,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
"""Initialise feed aggregator."""
super().__init__(filter_radius)
self._feed = FlightradarFlightsFeed(
home_coordinates,
websession,
False,
filter_radius,
url,
hostname,
port,
)
@property
def feed(self) -> Feed:
"""Return the external feed access."""
return self._feed
class FlightradarFlightsFeed(Feed):
"""Flightradar Flights Feed."""
def __init__(
self,
home_coordinates: Tuple[float, float],
websession: ClientSession,
apply_filters: bool = True,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
super().__init__(
home_coordinates,
websession,
apply_filters,
filter_radius,
url,
hostname,
port,
)
def _create_url(self, hostname: str, port: int) -> str:
"""Generate the url to retrieve data from."""
return URL_TEMPLATE.format(hostname, port)
def _new_entry(
self, home_coordinates: Tuple[float, float], feed_data: Dict
) -> FeedEntry:
"""Generate a new entry."""
return FeedEntry(home_coordinates, feed_data)
def _parse(self, parsed_json: Dict) -> List[Dict]:
"""Parse the provided JSON data."""
result = []
for key in parsed_json:
data_entry = parsed_json[key]
result.append(
{
ATTR_MODE_S: data_entry[0],
ATTR_LATITUDE: data_entry[1],
ATTR_LONGITUDE: data_entry[2],
ATTR_TRACK: data_entry[3],
ATTR_ALTITUDE: data_entry[4],
ATTR_SPEED: data_entry[5],
ATTR_SQUAWK: data_entry[6],
ATTR_UPDATED: data_entry[10],
ATTR_VERT_RATE: data_entry[15],
ATTR_CALLSIGN: data_entry[16],
}
)
_LOGGER.debug("Parser result = %s", result)
return result | flightradar_client/fr24feed_flights.py | import logging
from typing import Awaitable, Callable, Dict, List, Tuple
from aiohttp import ClientSession
from .consts import (
ATTR_ALTITUDE,
ATTR_CALLSIGN,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_MODE_S,
ATTR_SPEED,
ATTR_SQUAWK,
ATTR_TRACK,
ATTR_UPDATED,
ATTR_VERT_RATE,
)
from .feed import Feed
from .feed_aggregator import FeedAggregator
from .feed_entry import FeedEntry
from .feed_manager import FeedManagerBase
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOSTNAME = "localhost"
DEFAULT_PORT = 8754
URL_TEMPLATE = "http://{}:{}/flights.json"
class FlightradarFlightsFeedManager(FeedManagerBase):
"""Feed Manager for Flightradar Flights feed."""
def __init__(
self,
generate_callback: Callable[[str], Awaitable[None]],
update_callback: Callable[[str], Awaitable[None]],
remove_callback: Callable[[str], Awaitable[None]],
coordinates: Tuple[float, float],
websession: ClientSession,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
"""Initialize the NSW Rural Fire Services Feed Manager."""
feed = FlightradarFlightsFeedAggregator(
coordinates,
websession,
filter_radius=filter_radius,
url=url,
hostname=hostname,
port=port,
)
super().__init__(feed, generate_callback, update_callback, remove_callback)
class FlightradarFlightsFeedAggregator(FeedAggregator):
"""Aggregates date received from the feed over a period of time."""
def __init__(
self,
home_coordinates: Tuple[float, float],
websession: ClientSession,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
"""Initialise feed aggregator."""
super().__init__(filter_radius)
self._feed = FlightradarFlightsFeed(
home_coordinates,
websession,
False,
filter_radius,
url,
hostname,
port,
)
@property
def feed(self) -> Feed:
"""Return the external feed access."""
return self._feed
class FlightradarFlightsFeed(Feed):
"""Flightradar Flights Feed."""
def __init__(
self,
home_coordinates: Tuple[float, float],
websession: ClientSession,
apply_filters: bool = True,
filter_radius: float = None,
url: str = None,
hostname: str = DEFAULT_HOSTNAME,
port: int = DEFAULT_PORT,
) -> None:
super().__init__(
home_coordinates,
websession,
apply_filters,
filter_radius,
url,
hostname,
port,
)
def _create_url(self, hostname: str, port: int) -> str:
"""Generate the url to retrieve data from."""
return URL_TEMPLATE.format(hostname, port)
def _new_entry(
self, home_coordinates: Tuple[float, float], feed_data: Dict
) -> FeedEntry:
"""Generate a new entry."""
return FeedEntry(home_coordinates, feed_data)
def _parse(self, parsed_json: Dict) -> List[Dict]:
"""Parse the provided JSON data."""
result = []
for key in parsed_json:
data_entry = parsed_json[key]
result.append(
{
ATTR_MODE_S: data_entry[0],
ATTR_LATITUDE: data_entry[1],
ATTR_LONGITUDE: data_entry[2],
ATTR_TRACK: data_entry[3],
ATTR_ALTITUDE: data_entry[4],
ATTR_SPEED: data_entry[5],
ATTR_SQUAWK: data_entry[6],
ATTR_UPDATED: data_entry[10],
ATTR_VERT_RATE: data_entry[15],
ATTR_CALLSIGN: data_entry[16],
}
)
_LOGGER.debug("Parser result = %s", result)
return result | 0.870281 | 0.202759 |
import re
from django.db import transaction
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import ModelSerializer, SlugRelatedField
from galaxy_api.api import models
from galaxy_api.auth import models as auth_models
from galaxy_api.auth import auth
class NamespaceLinkSerializer(ModelSerializer):
class Meta:
model = models.NamespaceLink
fields = ('name', 'url')
class NamespaceSerializer(ModelSerializer):
links = NamespaceLinkSerializer(many=True, required=False)
groups = SlugRelatedField(
many=True,
slug_field='name',
queryset=auth_models.Group.objects.all()
)
class Meta:
model = models.Namespace
fields = (
'id',
'name',
'company',
'email',
'avatar_url',
'description',
'links',
'groups',
'resources'
)
def validate_name(self, name):
if not name:
raise ValidationError(detail={
'name': "Attribute 'name' is required"})
if not re.match(r'^[a-zA-Z0-9_]+$', name):
raise ValidationError(detail={
'name': 'Name can only contain [A-Za-z0-9_]'})
if(len(name) <= 2):
raise ValidationError(detail={
'name': 'Name must be longer than 2 characters'})
if(name.startswith('_')):
raise ValidationError(detail={
'name': "Name cannot begin with '_'"})
return name
def to_internal_value(self, data):
groups = data.get('groups')
if groups:
data['groups'] = self._sanitize_accounts(groups)
return super().to_internal_value(data)
def _sanitize_accounts(self, accounts):
sanitized_groups = [auth_models.RH_PARTNER_ENGINEER_GROUP]
for account in accounts:
if account == auth_models.RH_PARTNER_ENGINEER_GROUP:
continue
if not account.isdigit():
raise ValidationError(detail={
'groups': 'Provided identifications are not numbers'})
group, _ = auth_models.Group.objects.get_or_create_identity(
auth.RH_ACCOUNT_SCOPE, account)
sanitized_groups.append(group.name)
return sanitized_groups
@transaction.atomic
def update(self, instance, validated_data):
links = validated_data.pop('links', None)
instance = super().update(instance, validated_data)
if links is not None:
instance.set_links(links)
return instance
class NamespaceUpdateSerializer(NamespaceSerializer):
"""NamespaceSerializer but read_only 'name'."""
class Meta:
model = models.Namespace
fields = (
'id',
'name',
'company',
'email',
'avatar_url',
'description',
'links',
'groups',
'resources'
)
read_only_fields = ('name', )
class NamespaceSummarySerializer(NamespaceSerializer):
"""NamespaceSerializer but without 'links' or 'resources'.
For use in _ui/collection detail views."""
class Meta:
model = models.Namespace
fields = (
'id',
'name',
'company',
'email',
'avatar_url',
'description',
)
read_only_fields = ('name', ) | galaxy_api/api/ui/serializers/namespace.py | import re
from django.db import transaction
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import ModelSerializer, SlugRelatedField
from galaxy_api.api import models
from galaxy_api.auth import models as auth_models
from galaxy_api.auth import auth
class NamespaceLinkSerializer(ModelSerializer):
    """Serialize a NamespaceLink as its display name and target URL."""
    class Meta:
        model = models.NamespaceLink
        fields = ('name', 'url')
class NamespaceSerializer(ModelSerializer):
    """Read/write serializer for Namespace objects, including nested
    links, group ownership (by group name) and attached resources."""

    links = NamespaceLinkSerializer(many=True, required=False)
    groups = SlugRelatedField(
        many=True,
        slug_field='name',
        queryset=auth_models.Group.objects.all()
    )

    class Meta:
        model = models.Namespace
        fields = (
            'id', 'name', 'company', 'email', 'avatar_url',
            'description', 'links', 'groups', 'resources',
        )

    def validate_name(self, name):
        """Reject empty, too-short, non-alphanumeric or underscore-prefixed
        namespace names; return the name unchanged otherwise."""
        if not name:
            raise ValidationError(detail={
                'name': "Attribute 'name' is required"})
        if not re.match(r'^[a-zA-Z0-9_]+$', name):
            raise ValidationError(detail={
                'name': 'Name can only contain [A-Za-z0-9_]'})
        if len(name) <= 2:
            raise ValidationError(detail={
                'name': 'Name must be longer than 2 characters'})
        if name.startswith('_'):
            raise ValidationError(detail={
                'name': "Name cannot begin with '_'"})
        return name

    def to_internal_value(self, data):
        """Sanitize incoming group identifiers before normal deserialization."""
        raw_groups = data.get('groups')
        if raw_groups:
            data['groups'] = self._sanitize_accounts(raw_groups)
        return super().to_internal_value(data)

    def _sanitize_accounts(self, accounts):
        """Translate numeric RH account ids into group names, always
        including the partner-engineer group exactly once."""
        sanitized = [auth_models.RH_PARTNER_ENGINEER_GROUP]
        for account in accounts:
            if account != auth_models.RH_PARTNER_ENGINEER_GROUP:
                if not account.isdigit():
                    raise ValidationError(detail={
                        'groups': 'Provided identifications are not numbers'})
                group, _ = auth_models.Group.objects.get_or_create_identity(
                    auth.RH_ACCOUNT_SCOPE, account)
                sanitized.append(group.name)
        return sanitized

    @transaction.atomic
    def update(self, instance, validated_data):
        """Update the namespace and, when given, replace its links atomically."""
        links = validated_data.pop('links', None)
        updated = super().update(instance, validated_data)
        if links is not None:
            updated.set_links(links)
        return updated
class NamespaceUpdateSerializer(NamespaceSerializer):
    """Variant of NamespaceSerializer whose 'name' field is immutable."""

    class Meta:
        model = models.Namespace
        # Same field set as NamespaceSerializer; only mutability differs.
        fields = (
            'id', 'name', 'company', 'email', 'avatar_url',
            'description', 'links', 'groups', 'resources',
        )
        read_only_fields = ('name',)
class NamespaceSummarySerializer(NamespaceSerializer):
    """NamespaceSerializer without 'links', 'groups' or 'resources'.

    For use in _ui/collection detail views.
    """
    class Meta:
        model = models.Namespace
        fields = (
            'id',
            'name',
            'company',
            'email',
            'avatar_url',
            'description',
        )
        # NOTE(review): dataset row metadata ("| 0.489015 | 0.087369") was
        # fused onto this line in the dump; removed to restore valid syntax.
        read_only_fields = ('name',)
import subprocess
from typing import Dict, Optional, Tuple, Union
import torch
from torch import Tensor
import avreader.path
from avreader.utils import _get_frame_size, _hhmmss2sec, get_file_info
def load_audio(
    file: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    sample_rate: Optional[int] = None,
    mono: bool = True,
    filters: Optional[str] = None,
    data_format: str = "channels_first",
    dtype: torch.dtype = torch.float,
) -> Tuple[Tensor, int]:
    r"""Return data and the sample rate from an audio file.

    Parameters
    ----------
    file : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much audio (same time-duration syntax).
    sample_rate : Optional[int], optional (default=None)
        Target sampling rate. If None, the native sampling rate is kept.
    mono : bool, optional (default=True)
        Downmix the signal to mono.
    filters : Optional[str], optional (default=None)
        Comma-separated FFmpeg filtergraph entries,
        see https://ffmpeg.org/ffmpeg-filters.html.
    data_format : str, optional (default="channels_first")
        "channels_first" yields shape (channels, seq_len);
        "channels_last" yields shape (seq_len, channels).
    dtype : torch.dtype, optional (default=torch.float)
        Desired output dtype, e.g. torch.int16. torch.bool, torch.uint8
        and torch.int8 are rejected.

    Returns
    -------
    audio : torch.Tensor
        Data read from the audio file.
    sample_rate : int
        Sample rate (in samples/sec) of the returned audio.

    Raises
    ------
    ValueError
        If the offset exceeds the file duration, or data_format is unknown.
    TypeError
        If ``dtype`` cannot represent the decoded samples.
    subprocess.CalledProcessError
        If the FFmpeg command fails.
    """
    # retrieve stream information (duration, sample rate, channel count)
    info = get_file_info(file, "audio")
    # normalize and validate the requested offset
    offset = _hhmmss2sec(offset) if isinstance(offset, str) else offset
    if offset > info["duration"]:
        raise ValueError(
            "The offset value is greater than the duration of the video:"
            f" {offset} > {info['duration']:.4}"
        )
    duration = (
        _hhmmss2sec(duration)
        if isinstance(duration, str)
        else duration or info["duration"]
    )
    # check the data format
    # NOTE(review): data_format is only validated when mono=False; an invalid
    # value combined with mono=True is silently accepted — confirm intended.
    if not mono and data_format not in {"channels_last", "channels_first"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    if dtype in {torch.bool, torch.uint8, torch.int8}:
        raise TypeError(f"Got inappropriate dtype arg: {dtype}")
    # seek options: -ss seeks from the start, -sseof from the end (negative)
    offset_cmd = f"-ss {offset}" if offset >= 0.0 else f"-sseof {offset}"
    # limit the amount of decoded data
    duration_cmd = f"-t {duration}" if duration else ""
    mono_cmd = "-ac 1" if mono else ""  # downmix to mono
    # assemble the audio filtergraph
    filter_opt = []
    if sample_rate is not None:
        filter_opt.append(f"aresample={sample_rate}")
    if filters is not None:
        # BUG FIX: was filter_opt.append(filters.split(",")), which nested a
        # list inside filter_opt and made ",".join(filter_opt) raise TypeError.
        filter_opt.extend(filters.split(","))
    filter_cmd = "-filter:a {}".format(",".join(filter_opt)) if filter_opt else ""
    # read from the path directly, or feed the bytes through stdin
    input_url = file if isinstance(file, str) else "pipe:0"
    command = (
        f"{avreader.path.FFMPEG_BIN} -loglevel fatal"
        f" {offset_cmd} {duration_cmd} -i {input_url}"
        f" -vn -f s16le -codec:a pcm_s16le {mono_cmd} {filter_cmd} pipe:1"
    )
    # run the command and check that execution did not fail
    ffmpeg = subprocess.run(
        command.split(),
        input=None if isinstance(file, str) else file,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if ffmpeg.returncode != 0:
        raise subprocess.CalledProcessError(
            ffmpeg.returncode,
            " ".join(command.split()),
            output=ffmpeg.stdout,
            stderr=ffmpeg.stderr,
        )
    # interpret the raw PCM buffer as signed 16-bit samples
    audio = torch.ShortTensor(torch.ShortStorage.from_buffer(ffmpeg.stdout, "native"))
    # reshape in (seq_len, channels)
    sample_rate = sample_rate or info["sample_rate"]
    duration = duration or info["duration"]
    channels = 1 if mono else info["channels"]
    # NOTE(review): sizing from duration*sample_rate assumes FFmpeg produced
    # exactly that many samples — confirm for rounded/short outputs.
    audio.resize_(int(duration * sample_rate), channels)
    # swap to (channels, seq_len) if requested
    if data_format == "channels_first":
        audio.transpose_(0, 1)
    audio = audio.to(dtype)
    if dtype.is_floating_point:
        # map the int16 range [-32768, 32767] into [-1, 1]
        audio.add_(32768).div_((2 ** 16 - 1) / 2).add_(-1)
    return audio, sample_rate
def load_video(
    file: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    frame_rate: Optional[int] = None,
    frame_size: Union[int, Tuple[int, int], None] = None,
    grayscale: bool = False,
    filters: Optional[str] = None,
    data_format: str = "channels_first",
    dtype: torch.dtype = torch.float,
) -> Tuple[Tensor, int]:
    r"""Return data and the frame rate from a video file.

    Returns a tensor in the range [0.0, 1.0] when ``dtype`` is a floating
    point type; otherwise the raw 8-bit values are kept unscaled.

    Parameters
    ----------
    file : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much video (same time-duration syntax).
    frame_rate : Optional[int], optional (default=None)
        Target frame rate. If None, the native frame rate is kept.
    frame_size : Union[int, Tuple[int, int], None], optional (default=None)
        Target frame size (width, height). A single ``int`` sets the width
        and the height is derived from the aspect ratio; either tuple
        component may be -1 to derive it from the other.
    grayscale : bool, optional (default=False)
        Convert the video to grayscale.
    filters : Optional[str], optional (default=None)
        Comma-separated FFmpeg filtergraph entries,
        see https://ffmpeg.org/ffmpeg-filters.html.
    data_format : str, optional (default="channels_first")
        "channels_first" yields (seq_len, channels, height, width);
        "channels_last" yields (seq_len, height, width, channels).
    dtype : torch.dtype, optional (default=torch.float)
        Desired output dtype; any torch type except torch.bool/torch.int8.

    Returns
    -------
    video : torch.Tensor
        Decoded frames.
    frame_rate : int
        The frame rate of the returned video.

    Raises
    ------
    TypeError
        If ``dtype`` is torch.bool or torch.int8.
    ValueError
        If the offset exceeds the file duration, or data_format is unknown.
    subprocess.CalledProcessError
        If the FFmpeg command fails.
    """
    # retrieve information about the video (duration, frame rate, frame size)
    info = get_file_info(file, "video")
    # normalize and validate the requested offset
    offset = _hhmmss2sec(offset) if isinstance(offset, str) else offset
    if offset > info["duration"]:
        raise ValueError(
            "The offset value is greater than the duration of the video:"
            f" {offset} > {info['duration']:.4}"
        )
    duration = (
        _hhmmss2sec(duration)
        if isinstance(duration, str)
        else duration or info["duration"]
    )
    if data_format not in {"channels_last", "channels_first"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    if dtype in {torch.bool, torch.int8}:
        raise TypeError(f"Got inappropriate dtype arg: {dtype}")
    # seek options: -ss seeks from the start, -sseof from the end (negative)
    offset_cmd = f"-ss {offset}" if offset >= 0.0 else f"-sseof {offset}"
    duration_cmd = f"-t {duration}" if duration else ""
    # assemble the video filtergraph
    filter_opt = []
    if frame_rate is not None:
        filter_opt.append(f"fps={frame_rate}")
    if frame_size is not None:
        width, height = (frame_size, -1) if isinstance(frame_size, int) else frame_size
        filter_opt.append(f"scale={width}:{height}")
    if filters is not None:
        # BUG FIX: was filter_opt.append(filters.split(",")), which nested a
        # list inside filter_opt and made ",".join(filter_opt) raise TypeError.
        filter_opt.extend(filters.split(","))
    filter_cmd = "-filter:v {}".format(",".join(filter_opt)) if filter_opt else ""
    # read from the path directly, or feed the bytes through stdin
    input_url = file if isinstance(file, str) else "pipe:0"
    command = (
        f"{avreader.path.FFMPEG_BIN} -loglevel fatal"
        f" {offset_cmd} {duration_cmd} -i {input_url}"
        f" -an -f image2pipe -codec:v rawvideo -pix_fmt {'gray' if grayscale else 'rgb24'}"
        f" {filter_cmd} pipe:1"
    )
    # run the command and check that execution did not fail
    ffmpeg = subprocess.run(
        command.split(),
        input=None if isinstance(file, str) else file,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if ffmpeg.returncode != 0:
        raise subprocess.CalledProcessError(
            ffmpeg.returncode,
            " ".join(command.split()),
            output=ffmpeg.stdout,
            stderr=ffmpeg.stderr,
        )
    # interpret the raw RGB/gray bytes as unsigned 8-bit values
    video = torch.ByteTensor(torch.ByteStorage.from_buffer(ffmpeg.stdout))
    # reshape in (seq_len, height, width, channels)
    channels = 1 if grayscale else 3
    frame_rate = frame_rate or info["frame_rate"]
    frame_size = _get_frame_size((info["width"], info["height"]), frame_size)
    # NOTE(review): sizing from duration*frame_rate assumes FFmpeg produced
    # exactly that many frames — confirm for rounded/short outputs.
    video.resize_(int(duration * frame_rate), frame_size[1], frame_size[0], channels)
    # permute to (seq_len, channels, height, width) if requested
    if data_format == "channels_first":
        video = video.permute(0, 3, 1, 2)
    video = video.to(dtype)
    if dtype.is_floating_point:
        video /= 255  # rescale between 0 and 1
    return video, frame_rate
def load(
    path: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    akwargs: Optional[dict] = None,
    vkwargs: Optional[dict] = None,
) -> Tuple[Tuple[Tensor, int], Tuple[Tensor, int]]:
    r"""Return audiovisual data with its sample rate and frame rate.

    Parameters
    ----------
    path : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much data (same time-duration syntax).
    akwargs : Optional[dict], optional (default=None)
        Extra keyword arguments forwarded to :func:`load_audio`.
    vkwargs : Optional[dict], optional (default=None)
        Extra keyword arguments forwarded to :func:`load_video`.

    Returns
    -------
    (audio, sample_rate) : Tuple[torch.Tensor, int]
        Decoded audio samples and their sample rate.
    (video, frame_rate) : Tuple[torch.Tensor, int]
        Decoded video frames and their frame rate.
    """
    # BUG FIX: the previous code unpacked **akwargs / **vkwargs directly,
    # which raised TypeError when they were left at their None defaults.
    akwargs = akwargs or {}
    vkwargs = vkwargs or {}
    audio, sample_rate = load_audio(path, offset=offset, duration=duration, **akwargs)
    video, frame_rate = load_video(path, offset=offset, duration=duration, **vkwargs)
    return ((audio, sample_rate), (video, frame_rate))
# NOTE(review): a dataset row separator ("| avreader/load.py |") was fused
# into the original return line above; the import below is the first
# statement of the next embedded row and is kept for its sake.
import subprocess
from typing import Dict, Optional, Tuple, Union
import torch
from torch import Tensor
import avreader.path
from avreader.utils import _get_frame_size, _hhmmss2sec, get_file_info
def load_audio(
    file: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    sample_rate: Optional[int] = None,
    mono: bool = True,
    filters: Optional[str] = None,
    data_format: str = "channels_first",
    dtype: torch.dtype = torch.float,
) -> Tuple[Tensor, int]:
    r"""Return data and the sample rate from an audio file.

    Parameters
    ----------
    file : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much audio (same time-duration syntax).
    sample_rate : Optional[int], optional (default=None)
        Target sampling rate. If None, the native sampling rate is kept.
    mono : bool, optional (default=True)
        Downmix the signal to mono.
    filters : Optional[str], optional (default=None)
        Comma-separated FFmpeg filtergraph entries,
        see https://ffmpeg.org/ffmpeg-filters.html.
    data_format : str, optional (default="channels_first")
        "channels_first" yields shape (channels, seq_len);
        "channels_last" yields shape (seq_len, channels).
    dtype : torch.dtype, optional (default=torch.float)
        Desired output dtype, e.g. torch.int16. torch.bool, torch.uint8
        and torch.int8 are rejected.

    Returns
    -------
    audio : torch.Tensor
        Data read from the audio file.
    sample_rate : int
        Sample rate (in samples/sec) of the returned audio.

    Raises
    ------
    ValueError
        If the offset exceeds the file duration, or data_format is unknown.
    TypeError
        If ``dtype`` cannot represent the decoded samples.
    subprocess.CalledProcessError
        If the FFmpeg command fails.
    """
    # retrieve stream information (duration, sample rate, channel count)
    info = get_file_info(file, "audio")
    # normalize and validate the requested offset
    offset = _hhmmss2sec(offset) if isinstance(offset, str) else offset
    if offset > info["duration"]:
        raise ValueError(
            "The offset value is greater than the duration of the video:"
            f" {offset} > {info['duration']:.4}"
        )
    duration = (
        _hhmmss2sec(duration)
        if isinstance(duration, str)
        else duration or info["duration"]
    )
    # check the data format
    # NOTE(review): data_format is only validated when mono=False; an invalid
    # value combined with mono=True is silently accepted — confirm intended.
    if not mono and data_format not in {"channels_last", "channels_first"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    if dtype in {torch.bool, torch.uint8, torch.int8}:
        raise TypeError(f"Got inappropriate dtype arg: {dtype}")
    # seek options: -ss seeks from the start, -sseof from the end (negative)
    offset_cmd = f"-ss {offset}" if offset >= 0.0 else f"-sseof {offset}"
    # limit the amount of decoded data
    duration_cmd = f"-t {duration}" if duration else ""
    mono_cmd = "-ac 1" if mono else ""  # downmix to mono
    # assemble the audio filtergraph
    filter_opt = []
    if sample_rate is not None:
        filter_opt.append(f"aresample={sample_rate}")
    if filters is not None:
        # BUG FIX: was filter_opt.append(filters.split(",")), which nested a
        # list inside filter_opt and made ",".join(filter_opt) raise TypeError.
        filter_opt.extend(filters.split(","))
    filter_cmd = "-filter:a {}".format(",".join(filter_opt)) if filter_opt else ""
    # read from the path directly, or feed the bytes through stdin
    input_url = file if isinstance(file, str) else "pipe:0"
    command = (
        f"{avreader.path.FFMPEG_BIN} -loglevel fatal"
        f" {offset_cmd} {duration_cmd} -i {input_url}"
        f" -vn -f s16le -codec:a pcm_s16le {mono_cmd} {filter_cmd} pipe:1"
    )
    # run the command and check that execution did not fail
    ffmpeg = subprocess.run(
        command.split(),
        input=None if isinstance(file, str) else file,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if ffmpeg.returncode != 0:
        raise subprocess.CalledProcessError(
            ffmpeg.returncode,
            " ".join(command.split()),
            output=ffmpeg.stdout,
            stderr=ffmpeg.stderr,
        )
    # interpret the raw PCM buffer as signed 16-bit samples
    audio = torch.ShortTensor(torch.ShortStorage.from_buffer(ffmpeg.stdout, "native"))
    # reshape in (seq_len, channels)
    sample_rate = sample_rate or info["sample_rate"]
    duration = duration or info["duration"]
    channels = 1 if mono else info["channels"]
    # NOTE(review): sizing from duration*sample_rate assumes FFmpeg produced
    # exactly that many samples — confirm for rounded/short outputs.
    audio.resize_(int(duration * sample_rate), channels)
    # swap to (channels, seq_len) if requested
    if data_format == "channels_first":
        audio.transpose_(0, 1)
    audio = audio.to(dtype)
    if dtype.is_floating_point:
        # map the int16 range [-32768, 32767] into [-1, 1]
        audio.add_(32768).div_((2 ** 16 - 1) / 2).add_(-1)
    return audio, sample_rate
def load_video(
    file: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    frame_rate: Optional[int] = None,
    frame_size: Union[int, Tuple[int, int], None] = None,
    grayscale: bool = False,
    filters: Optional[str] = None,
    data_format: str = "channels_first",
    dtype: torch.dtype = torch.float,
) -> Tuple[Tensor, int]:
    r"""Return data and the frame rate from a video file.

    Returns a tensor in the range [0.0, 1.0] when ``dtype`` is a floating
    point type; otherwise the raw 8-bit values are kept unscaled.

    Parameters
    ----------
    file : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much video (same time-duration syntax).
    frame_rate : Optional[int], optional (default=None)
        Target frame rate. If None, the native frame rate is kept.
    frame_size : Union[int, Tuple[int, int], None], optional (default=None)
        Target frame size (width, height). A single ``int`` sets the width
        and the height is derived from the aspect ratio; either tuple
        component may be -1 to derive it from the other.
    grayscale : bool, optional (default=False)
        Convert the video to grayscale.
    filters : Optional[str], optional (default=None)
        Comma-separated FFmpeg filtergraph entries,
        see https://ffmpeg.org/ffmpeg-filters.html.
    data_format : str, optional (default="channels_first")
        "channels_first" yields (seq_len, channels, height, width);
        "channels_last" yields (seq_len, height, width, channels).
    dtype : torch.dtype, optional (default=torch.float)
        Desired output dtype; any torch type except torch.bool/torch.int8.

    Returns
    -------
    video : torch.Tensor
        Decoded frames.
    frame_rate : int
        The frame rate of the returned video.

    Raises
    ------
    TypeError
        If ``dtype`` is torch.bool or torch.int8.
    ValueError
        If the offset exceeds the file duration, or data_format is unknown.
    subprocess.CalledProcessError
        If the FFmpeg command fails.
    """
    # retrieve information about the video (duration, frame rate, frame size)
    info = get_file_info(file, "video")
    # normalize and validate the requested offset
    offset = _hhmmss2sec(offset) if isinstance(offset, str) else offset
    if offset > info["duration"]:
        raise ValueError(
            "The offset value is greater than the duration of the video:"
            f" {offset} > {info['duration']:.4}"
        )
    duration = (
        _hhmmss2sec(duration)
        if isinstance(duration, str)
        else duration or info["duration"]
    )
    if data_format not in {"channels_last", "channels_first"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    if dtype in {torch.bool, torch.int8}:
        raise TypeError(f"Got inappropriate dtype arg: {dtype}")
    # seek options: -ss seeks from the start, -sseof from the end (negative)
    offset_cmd = f"-ss {offset}" if offset >= 0.0 else f"-sseof {offset}"
    duration_cmd = f"-t {duration}" if duration else ""
    # assemble the video filtergraph
    filter_opt = []
    if frame_rate is not None:
        filter_opt.append(f"fps={frame_rate}")
    if frame_size is not None:
        width, height = (frame_size, -1) if isinstance(frame_size, int) else frame_size
        filter_opt.append(f"scale={width}:{height}")
    if filters is not None:
        # BUG FIX: was filter_opt.append(filters.split(",")), which nested a
        # list inside filter_opt and made ",".join(filter_opt) raise TypeError.
        filter_opt.extend(filters.split(","))
    filter_cmd = "-filter:v {}".format(",".join(filter_opt)) if filter_opt else ""
    # read from the path directly, or feed the bytes through stdin
    input_url = file if isinstance(file, str) else "pipe:0"
    command = (
        f"{avreader.path.FFMPEG_BIN} -loglevel fatal"
        f" {offset_cmd} {duration_cmd} -i {input_url}"
        f" -an -f image2pipe -codec:v rawvideo -pix_fmt {'gray' if grayscale else 'rgb24'}"
        f" {filter_cmd} pipe:1"
    )
    # run the command and check that execution did not fail
    ffmpeg = subprocess.run(
        command.split(),
        input=None if isinstance(file, str) else file,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    if ffmpeg.returncode != 0:
        raise subprocess.CalledProcessError(
            ffmpeg.returncode,
            " ".join(command.split()),
            output=ffmpeg.stdout,
            stderr=ffmpeg.stderr,
        )
    # interpret the raw RGB/gray bytes as unsigned 8-bit values
    video = torch.ByteTensor(torch.ByteStorage.from_buffer(ffmpeg.stdout))
    # reshape in (seq_len, height, width, channels)
    channels = 1 if grayscale else 3
    frame_rate = frame_rate or info["frame_rate"]
    frame_size = _get_frame_size((info["width"], info["height"]), frame_size)
    # NOTE(review): sizing from duration*frame_rate assumes FFmpeg produced
    # exactly that many frames — confirm for rounded/short outputs.
    video.resize_(int(duration * frame_rate), frame_size[1], frame_size[0], channels)
    # permute to (seq_len, channels, height, width) if requested
    if data_format == "channels_first":
        video = video.permute(0, 3, 1, 2)
    video = video.to(dtype)
    if dtype.is_floating_point:
        video /= 255  # rescale between 0 and 1
    return video, frame_rate
def load(
    path: Union[bytes, str],
    offset: Union[float, str] = 0.0,
    duration: Union[float, str, None] = None,
    akwargs: Optional[dict] = None,
    vkwargs: Optional[dict] = None,
) -> Tuple[Tuple[Tensor, int], Tuple[Tensor, int]]:
    r"""Return audiovisual data with its sample rate and frame rate.

    Parameters
    ----------
    path : Union[bytes, str]
        Path to the input file, or the encoded bytes themselves.
    offset : Union[float, str], optional (default=0.0)
        Start reading after this time. Offset must be a time duration
        specification, see
        https://www.ffmpeg.org/ffmpeg-utils.html#time-duration-syntax.
    duration : Union[float, str, None], optional (default=None)
        Only load up to this much data (same time-duration syntax).
    akwargs : Optional[dict], optional (default=None)
        Extra keyword arguments forwarded to :func:`load_audio`.
    vkwargs : Optional[dict], optional (default=None)
        Extra keyword arguments forwarded to :func:`load_video`.

    Returns
    -------
    (audio, sample_rate) : Tuple[torch.Tensor, int]
        Decoded audio samples and their sample rate.
    (video, frame_rate) : Tuple[torch.Tensor, int]
        Decoded video frames and their frame rate.
    """
    # BUG FIX: the previous code unpacked **akwargs / **vkwargs directly,
    # which raised TypeError when they were left at their None defaults.
    akwargs = akwargs or {}
    vkwargs = vkwargs or {}
    audio, sample_rate = load_audio(path, offset=offset, duration=duration, **akwargs)
    video, frame_rate = load_video(path, offset=offset, duration=duration, **vkwargs)
    # NOTE(review): dataset row metadata ("| 0.952948 | 0.445228") was fused
    # onto the original return line; removed to restore valid syntax.
    return ((audio, sample_rate), (video, frame_rate))
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
# Reversed URL of the DRF 'ingredient-list' endpoint used by every test below.
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientApiTests(TestCase):
    """Tests for the ingredient API without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_retrieve_ingredient_unauthorized(self):
        """Test that login is required to access the endpoint"""
        response = self.client.get(INGREDIENT_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientApiTests(TestCase):
    """Test the private ingredient API (authenticated requests)."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredient_list(self):
        """Test retrieving a list of ingredients"""
        Ingredient.objects.create(user=self.user, name='Ginger')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENT_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """
        Test that only ingredients for the authenticated user are returned
        """
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'test123'
        )
        Ingredient.objects.create(user=user2, name='Sugar')
        ingredient = Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENT_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredients_successful(self):
        """Test create a new ingredient"""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENT_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)

    def test_create_ingredients_invalid(self):
        """Test creating invalid ingredients fails"""
        payload = {'name': ''}
        res = self.client.post(INGREDIENT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """Test filtering ingredients by those assigned to recipes"""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Tureky')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
        # Robustness: verify the request succeeded before inspecting data
        # (previously missing, so failures surfaced as confusing assertions).
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_ingredients_assigned_to_recipes_unique(self):
        """Test that assigned-only filtering returns distinct ingredients."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        Ingredient.objects.create(user=self.user, name='Tureky')
        recipe1 = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe1.ingredients.add(ingredient1)
        recipe2 = Recipe.objects.create(
            title='Apple pie',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe2.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
# NOTE(review): a dataset row separator ("| app/recipe/tests/
# test_ingredients_api.py |") was fused into the original last assertion;
# the import below is the first statement of the next embedded row.
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
# Reversed URL of the DRF 'ingredient-list' endpoint used by every test below.
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientApiTests(TestCase):
    """Tests for the ingredient API without authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_retrieve_ingredient_unauthorized(self):
        """Test that login is required to access the endpoint"""
        response = self.client.get(INGREDIENT_URL)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientApiTests(TestCase):
    """Test the private ingredient API (authenticated requests)."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            '<PASSWORD>'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_ingredient_list(self):
        """Test retrieving a list of ingredients"""
        Ingredient.objects.create(user=self.user, name='Ginger')
        Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENT_URL)
        ingredients = Ingredient.objects.all().order_by('-name')
        serializer = IngredientSerializer(ingredients, many=True)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_ingredients_limited_to_user(self):
        """
        Test that only ingredients for the authenticated user are returned
        """
        user2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'test123'
        )
        Ingredient.objects.create(user=user2, name='Sugar')
        ingredient = Ingredient.objects.create(user=self.user, name='Salt')
        res = self.client.get(INGREDIENT_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(len(res.data), 1)
        self.assertEqual(res.data[0]['name'], ingredient.name)

    def test_create_ingredients_successful(self):
        """Test create a new ingredient"""
        payload = {'name': 'Cabbage'}
        self.client.post(INGREDIENT_URL, payload)
        exists = Ingredient.objects.filter(
            user=self.user,
            name=payload['name']
        ).exists()
        self.assertTrue(exists)

    def test_create_ingredients_invalid(self):
        """Test creating invalid ingredients fails"""
        payload = {'name': ''}
        res = self.client.post(INGREDIENT_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_ingredients_assigned_to_recipes(self):
        """Test filtering ingredients by those assigned to recipes"""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        ingredient2 = Ingredient.objects.create(user=self.user, name='Tureky')
        recipe = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
        # Robustness: verify the request succeeded before inspecting data
        # (previously missing, so failures surfaced as confusing assertions).
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        serializer1 = IngredientSerializer(ingredient1)
        serializer2 = IngredientSerializer(ingredient2)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)

    def test_retrieve_ingredients_assigned_to_recipes_unique(self):
        """Test that assigned-only filtering returns distinct ingredients."""
        ingredient1 = Ingredient.objects.create(user=self.user, name='Apples')
        Ingredient.objects.create(user=self.user, name='Tureky')
        recipe1 = Recipe.objects.create(
            title='Apple crumble',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe1.ingredients.add(ingredient1)
        recipe2 = Recipe.objects.create(
            title='Apple pie',
            time_minutes=5,
            price=10,
            user=self.user
        )
        recipe2.ingredients.add(ingredient1)
        res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        # NOTE(review): dataset row metadata ("| 0.608129 | 0.329567") was
        # fused onto this assertion in the dump; removed to restore syntax.
        self.assertEqual(len(res.data), 1)
import abc
import re
import string
import typing as ty
from nova import exception
from nova.i18n import _
from nova import objects
from nova.pci import utils
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
MAX_FUNC = 0x7
MAX_DOMAIN = 0xFFFF
MAX_BUS = 0xFF
MAX_SLOT = 0x1F
ANY = '*'
REGEX_ANY = '.*'
PCISpecAddressType = ty.Union[ty.Dict[str, str], str]
class PciAddressSpec(metaclass=abc.ABCMeta):
"""Abstract class for all PCI address spec styles
This class checks the address fields of the pci.passthrough_whitelist
"""
def __init__(self, pci_addr: str) -> None:
self.domain = ''
self.bus = ''
self.slot = ''
self.func = ''
@abc.abstractmethod
def match(self, pci_addr):
pass
def is_single_address(self) -> bool:
return all([
all(c in string.hexdigits for c in self.domain),
all(c in string.hexdigits for c in self.bus),
all(c in string.hexdigits for c in self.slot),
all(c in string.hexdigits for c in self.func)])
def _set_pci_dev_info(
self, prop: str, maxval: int, hex_value: str
) -> None:
a = getattr(self, prop)
if a == ANY:
return
try:
v = int(a, 16)
except ValueError:
raise exception.PciConfigInvalidWhitelist(
reason=_("property %(property)s ('%(attr)s') does not parse "
"as a hex number.") % {'property': prop, 'attr': a})
if v > maxval:
raise exception.PciConfigInvalidWhitelist(
reason=_("property %(property)s (%(attr)s) is greater than "
"the maximum allowable value (%(max)X).") %
{'property': prop, 'attr': a, 'max': maxval})
setattr(self, prop, hex_value % v)
class PhysicalPciAddress(PciAddressSpec):
"""Manages the address fields for a fully-qualified PCI address.
This function class will validate the address fields for a single
PCI device.
"""
def __init__(self, pci_addr: PCISpecAddressType) -> None:
try:
# TODO(stephenfin): Is this ever actually a string?
if isinstance(pci_addr, dict):
self.domain = pci_addr['domain']
self.bus = pci_addr['bus']
self.slot = pci_addr['slot']
self.func = pci_addr['function']
else:
self.domain, self.bus, self.slot, self.func = (
utils.get_pci_address_fields(pci_addr))
self._set_pci_dev_info('func', MAX_FUNC, '%1x')
self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x')
self._set_pci_dev_info('bus', MAX_BUS, '%02x')
self._set_pci_dev_info('slot', MAX_SLOT, '%02x')
except (KeyError, ValueError):
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
self.domain == phys_pci_addr.domain,
self.bus == phys_pci_addr.bus,
self.slot == phys_pci_addr.slot,
self.func == phys_pci_addr.func,
]
return all(conditions)
class PciAddressGlobSpec(PciAddressSpec):
"""Manages the address fields with glob style.
This function class will validate the address fields with glob style,
check for wildcards, and insert wildcards where the field is left blank.
"""
def __init__(self, pci_addr: str) -> None:
self.domain = ANY
self.bus = ANY
self.slot = ANY
self.func = ANY
dbs, sep, func = pci_addr.partition('.')
if func:
self.func = func.strip()
self._set_pci_dev_info('func', MAX_FUNC, '%01x')
if dbs:
dbs_fields = dbs.split(':')
if len(dbs_fields) > 3:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
# If we got a partial address like ":00.", we need to turn this
# into a domain of ANY, a bus of ANY, and a slot of 00. This code
# allows the address bus and/or domain to be left off
dbs_all = [ANY] * (3 - len(dbs_fields))
dbs_all.extend(dbs_fields)
dbs_checked = [s.strip() or ANY for s in dbs_all]
self.domain, self.bus, self.slot = dbs_checked
self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x')
self._set_pci_dev_info('bus', MAX_BUS, '%02x')
self._set_pci_dev_info('slot', MAX_SLOT, '%02x')
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
self.domain in (ANY, phys_pci_addr.domain),
self.bus in (ANY, phys_pci_addr.bus),
self.slot in (ANY, phys_pci_addr.slot),
self.func in (ANY, phys_pci_addr.func)
]
return all(conditions)
class PciAddressRegexSpec(PciAddressSpec):
"""Manages the address fields with regex style.
This function class will validate the address fields with regex style.
The validation includes check for all PCI address attributes and validate
their regex.
"""
def __init__(self, pci_addr: dict) -> None:
try:
self.domain = pci_addr.get('domain', REGEX_ANY)
self.bus = pci_addr.get('bus', REGEX_ANY)
self.slot = pci_addr.get('slot', REGEX_ANY)
self.func = pci_addr.get('function', REGEX_ANY)
self.domain_regex = re.compile(self.domain)
self.bus_regex = re.compile(self.bus)
self.slot_regex = re.compile(self.slot)
self.func_regex = re.compile(self.func)
except re.error:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
bool(self.domain_regex.match(phys_pci_addr.domain)),
bool(self.bus_regex.match(phys_pci_addr.bus)),
bool(self.slot_regex.match(phys_pci_addr.slot)),
bool(self.func_regex.match(phys_pci_addr.func))
]
return all(conditions)
class WhitelistPciAddress(object):
"""Manages the address fields of the whitelist.
This class checks the address fields of the pci.passthrough_whitelist
configuration option, validating the address fields.
Example configs:
| [pci]
| passthrough_whitelist = {"address":"*:0a:00.*",
| "physical_network":"physnet1"}
| passthrough_whitelist = {"address": {"domain": ".*",
"bus": "02",
"slot": "01",
"function": "[0-2]"},
"physical_network":"net1"}
| passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
"""
def __init__(
self, pci_addr: PCISpecAddressType, is_physical_function: bool
) -> None:
self.is_physical_function = is_physical_function
self._init_address_fields(pci_addr)
def _check_physical_function(self) -> None:
if self.pci_address_spec.is_single_address():
self.is_physical_function = (
utils.is_physical_function(
self.pci_address_spec.domain,
self.pci_address_spec.bus,
self.pci_address_spec.slot,
self.pci_address_spec.func))
def _init_address_fields(self, pci_addr: PCISpecAddressType) -> None:
self.pci_address_spec: PciAddressSpec
if not self.is_physical_function:
if isinstance(pci_addr, str):
self.pci_address_spec = PciAddressGlobSpec(pci_addr)
elif isinstance(pci_addr, dict):
self.pci_address_spec = PciAddressRegexSpec(pci_addr)
else:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
self._check_physical_function()
else:
self.pci_address_spec = PhysicalPciAddress(pci_addr)
def match(self, pci_addr: str, pci_phys_addr: ty.Optional[str]) -> bool:
"""Match a device to this PciAddress.
Assume this is called with a ``pci_addr`` and ``pci_phys_addr``
reported by libvirt. No attempt is made to verify if ``pci_addr`` is a
VF of ``pci_phys_addr``.
:param pci_addr: PCI address of the device to match.
:param pci_phys_addr: PCI address of the parent of the device to match
(or None if the device is not a VF).
"""
# Try to match on the parent PCI address if the PciDeviceSpec is a
# PF (sriov is available) and the device to match is a VF. This
# makes it possible to specify the PCI address of a PF in the
# pci.passthrough_whitelist to match any of its VFs' PCI addresses.
if self.is_physical_function and pci_phys_addr:
pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr)
if self.pci_address_spec.match(pci_phys_addr_obj):
return True
# Try to match on the device PCI address only.
pci_addr_obj = PhysicalPciAddress(pci_addr)
return self.pci_address_spec.match(pci_addr_obj)
class PciDeviceSpec(PciAddressSpec):
def __init__(self, dev_spec: ty.Dict[str, str]) -> None:
self.tags = dev_spec
self._init_dev_details()
def _init_dev_details(self) -> None:
self.vendor_id = self.tags.pop("vendor_id", ANY)
self.product_id = self.tags.pop("product_id", ANY)
self.dev_name = self.tags.pop("devname", None)
self.address: ty.Optional[WhitelistPciAddress] = None
# Note(moshele): The address attribute can be a string or a dict.
# For glob syntax or specific pci it is a string and for regex syntax
# it is a dict. The WhitelistPciAddress class handles both types.
address = self.tags.pop("address", None)
self.vendor_id = self.vendor_id.strip()
self._set_pci_dev_info('vendor_id', MAX_VENDOR_ID, '%04x')
self._set_pci_dev_info('product_id', MAX_PRODUCT_ID, '%04x')
if address and self.dev_name:
raise exception.PciDeviceInvalidDeviceName()
if not self.dev_name:
self.address = WhitelistPciAddress(address or '*:*:*.*', False)
def match(self, dev_dict: ty.Dict[str, str]) -> bool:
address_obj: ty.Optional[WhitelistPciAddress]
if self.dev_name:
address_str, pf = utils.get_function_by_ifname(self.dev_name)
if not address_str:
return False
# Note(moshele): In this case we always passing a string
# of the PF pci address
address_obj = WhitelistPciAddress(address_str, pf)
else: # use self.address
address_obj = self.address
if not address_obj:
return False
return all([
self.vendor_id in (ANY, dev_dict['vendor_id']),
self.product_id in (ANY, dev_dict['product_id']),
address_obj.match(dev_dict['address'],
dev_dict.get('parent_addr'))])
def match_pci_obj(self, pci_obj: 'objects.PciDevice') -> bool:
return self.match({'vendor_id': pci_obj.vendor_id,
'product_id': pci_obj.product_id,
'address': pci_obj.address,
'parent_addr': pci_obj.parent_addr})
def get_tags(self) -> ty.Dict[str, str]:
return self.tags | nova/pci/devspec.py |
import abc
import re
import string
import typing as ty
from nova import exception
from nova.i18n import _
from nova import objects
from nova.pci import utils
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
MAX_FUNC = 0x7
MAX_DOMAIN = 0xFFFF
MAX_BUS = 0xFF
MAX_SLOT = 0x1F
ANY = '*'
REGEX_ANY = '.*'
PCISpecAddressType = ty.Union[ty.Dict[str, str], str]
class PciAddressSpec(metaclass=abc.ABCMeta):
"""Abstract class for all PCI address spec styles
This class checks the address fields of the pci.passthrough_whitelist
"""
def __init__(self, pci_addr: str) -> None:
self.domain = ''
self.bus = ''
self.slot = ''
self.func = ''
@abc.abstractmethod
def match(self, pci_addr):
pass
def is_single_address(self) -> bool:
return all([
all(c in string.hexdigits for c in self.domain),
all(c in string.hexdigits for c in self.bus),
all(c in string.hexdigits for c in self.slot),
all(c in string.hexdigits for c in self.func)])
def _set_pci_dev_info(
self, prop: str, maxval: int, hex_value: str
) -> None:
a = getattr(self, prop)
if a == ANY:
return
try:
v = int(a, 16)
except ValueError:
raise exception.PciConfigInvalidWhitelist(
reason=_("property %(property)s ('%(attr)s') does not parse "
"as a hex number.") % {'property': prop, 'attr': a})
if v > maxval:
raise exception.PciConfigInvalidWhitelist(
reason=_("property %(property)s (%(attr)s) is greater than "
"the maximum allowable value (%(max)X).") %
{'property': prop, 'attr': a, 'max': maxval})
setattr(self, prop, hex_value % v)
class PhysicalPciAddress(PciAddressSpec):
"""Manages the address fields for a fully-qualified PCI address.
This function class will validate the address fields for a single
PCI device.
"""
def __init__(self, pci_addr: PCISpecAddressType) -> None:
try:
# TODO(stephenfin): Is this ever actually a string?
if isinstance(pci_addr, dict):
self.domain = pci_addr['domain']
self.bus = pci_addr['bus']
self.slot = pci_addr['slot']
self.func = pci_addr['function']
else:
self.domain, self.bus, self.slot, self.func = (
utils.get_pci_address_fields(pci_addr))
self._set_pci_dev_info('func', MAX_FUNC, '%1x')
self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x')
self._set_pci_dev_info('bus', MAX_BUS, '%02x')
self._set_pci_dev_info('slot', MAX_SLOT, '%02x')
except (KeyError, ValueError):
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
self.domain == phys_pci_addr.domain,
self.bus == phys_pci_addr.bus,
self.slot == phys_pci_addr.slot,
self.func == phys_pci_addr.func,
]
return all(conditions)
class PciAddressGlobSpec(PciAddressSpec):
"""Manages the address fields with glob style.
This function class will validate the address fields with glob style,
check for wildcards, and insert wildcards where the field is left blank.
"""
def __init__(self, pci_addr: str) -> None:
self.domain = ANY
self.bus = ANY
self.slot = ANY
self.func = ANY
dbs, sep, func = pci_addr.partition('.')
if func:
self.func = func.strip()
self._set_pci_dev_info('func', MAX_FUNC, '%01x')
if dbs:
dbs_fields = dbs.split(':')
if len(dbs_fields) > 3:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
# If we got a partial address like ":00.", we need to turn this
# into a domain of ANY, a bus of ANY, and a slot of 00. This code
# allows the address bus and/or domain to be left off
dbs_all = [ANY] * (3 - len(dbs_fields))
dbs_all.extend(dbs_fields)
dbs_checked = [s.strip() or ANY for s in dbs_all]
self.domain, self.bus, self.slot = dbs_checked
self._set_pci_dev_info('domain', MAX_DOMAIN, '%04x')
self._set_pci_dev_info('bus', MAX_BUS, '%02x')
self._set_pci_dev_info('slot', MAX_SLOT, '%02x')
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
self.domain in (ANY, phys_pci_addr.domain),
self.bus in (ANY, phys_pci_addr.bus),
self.slot in (ANY, phys_pci_addr.slot),
self.func in (ANY, phys_pci_addr.func)
]
return all(conditions)
class PciAddressRegexSpec(PciAddressSpec):
"""Manages the address fields with regex style.
This function class will validate the address fields with regex style.
The validation includes check for all PCI address attributes and validate
their regex.
"""
def __init__(self, pci_addr: dict) -> None:
try:
self.domain = pci_addr.get('domain', REGEX_ANY)
self.bus = pci_addr.get('bus', REGEX_ANY)
self.slot = pci_addr.get('slot', REGEX_ANY)
self.func = pci_addr.get('function', REGEX_ANY)
self.domain_regex = re.compile(self.domain)
self.bus_regex = re.compile(self.bus)
self.slot_regex = re.compile(self.slot)
self.func_regex = re.compile(self.func)
except re.error:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
def match(self, phys_pci_addr: PciAddressSpec) -> bool:
conditions = [
bool(self.domain_regex.match(phys_pci_addr.domain)),
bool(self.bus_regex.match(phys_pci_addr.bus)),
bool(self.slot_regex.match(phys_pci_addr.slot)),
bool(self.func_regex.match(phys_pci_addr.func))
]
return all(conditions)
class WhitelistPciAddress(object):
"""Manages the address fields of the whitelist.
This class checks the address fields of the pci.passthrough_whitelist
configuration option, validating the address fields.
Example configs:
| [pci]
| passthrough_whitelist = {"address":"*:0a:00.*",
| "physical_network":"physnet1"}
| passthrough_whitelist = {"address": {"domain": ".*",
"bus": "02",
"slot": "01",
"function": "[0-2]"},
"physical_network":"net1"}
| passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
"""
def __init__(
self, pci_addr: PCISpecAddressType, is_physical_function: bool
) -> None:
self.is_physical_function = is_physical_function
self._init_address_fields(pci_addr)
def _check_physical_function(self) -> None:
if self.pci_address_spec.is_single_address():
self.is_physical_function = (
utils.is_physical_function(
self.pci_address_spec.domain,
self.pci_address_spec.bus,
self.pci_address_spec.slot,
self.pci_address_spec.func))
def _init_address_fields(self, pci_addr: PCISpecAddressType) -> None:
self.pci_address_spec: PciAddressSpec
if not self.is_physical_function:
if isinstance(pci_addr, str):
self.pci_address_spec = PciAddressGlobSpec(pci_addr)
elif isinstance(pci_addr, dict):
self.pci_address_spec = PciAddressRegexSpec(pci_addr)
else:
raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
self._check_physical_function()
else:
self.pci_address_spec = PhysicalPciAddress(pci_addr)
def match(self, pci_addr: str, pci_phys_addr: ty.Optional[str]) -> bool:
"""Match a device to this PciAddress.
Assume this is called with a ``pci_addr`` and ``pci_phys_addr``
reported by libvirt. No attempt is made to verify if ``pci_addr`` is a
VF of ``pci_phys_addr``.
:param pci_addr: PCI address of the device to match.
:param pci_phys_addr: PCI address of the parent of the device to match
(or None if the device is not a VF).
"""
# Try to match on the parent PCI address if the PciDeviceSpec is a
# PF (sriov is available) and the device to match is a VF. This
# makes it possible to specify the PCI address of a PF in the
# pci.passthrough_whitelist to match any of its VFs' PCI addresses.
if self.is_physical_function and pci_phys_addr:
pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr)
if self.pci_address_spec.match(pci_phys_addr_obj):
return True
# Try to match on the device PCI address only.
pci_addr_obj = PhysicalPciAddress(pci_addr)
return self.pci_address_spec.match(pci_addr_obj)
class PciDeviceSpec(PciAddressSpec):
def __init__(self, dev_spec: ty.Dict[str, str]) -> None:
self.tags = dev_spec
self._init_dev_details()
def _init_dev_details(self) -> None:
self.vendor_id = self.tags.pop("vendor_id", ANY)
self.product_id = self.tags.pop("product_id", ANY)
self.dev_name = self.tags.pop("devname", None)
self.address: ty.Optional[WhitelistPciAddress] = None
# Note(moshele): The address attribute can be a string or a dict.
# For glob syntax or specific pci it is a string and for regex syntax
# it is a dict. The WhitelistPciAddress class handles both types.
address = self.tags.pop("address", None)
self.vendor_id = self.vendor_id.strip()
self._set_pci_dev_info('vendor_id', MAX_VENDOR_ID, '%04x')
self._set_pci_dev_info('product_id', MAX_PRODUCT_ID, '%04x')
if address and self.dev_name:
raise exception.PciDeviceInvalidDeviceName()
if not self.dev_name:
self.address = WhitelistPciAddress(address or '*:*:*.*', False)
def match(self, dev_dict: ty.Dict[str, str]) -> bool:
address_obj: ty.Optional[WhitelistPciAddress]
if self.dev_name:
address_str, pf = utils.get_function_by_ifname(self.dev_name)
if not address_str:
return False
# Note(moshele): In this case we always passing a string
# of the PF pci address
address_obj = WhitelistPciAddress(address_str, pf)
else: # use self.address
address_obj = self.address
if not address_obj:
return False
return all([
self.vendor_id in (ANY, dev_dict['vendor_id']),
self.product_id in (ANY, dev_dict['product_id']),
address_obj.match(dev_dict['address'],
dev_dict.get('parent_addr'))])
def match_pci_obj(self, pci_obj: 'objects.PciDevice') -> bool:
return self.match({'vendor_id': pci_obj.vendor_id,
'product_id': pci_obj.product_id,
'address': pci_obj.address,
'parent_addr': pci_obj.parent_addr})
def get_tags(self) -> ty.Dict[str, str]:
return self.tags | 0.457379 | 0.164047 |
import requests
from threading import Thread, Event
class YoutubeScraper(Thread):
"""
Performs a Youtube Search, selects N videos (ordered by upload date) and monitors their comments.
Previous comments will also be extracted.
"""
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
COMMENT_THREADS_URL = 'https://www.googleapis.com/youtube/v3/commentThreads'
def __init__(self, api_key, search_q, n_vids, callback, region_code=None, interval=5):
self.stop_event = Event()
Thread.__init__(self)
self.api_key = api_key
self.search_q = search_q
self.n_vids = 50 if n_vids > 50 else n_vids
self.callback = callback
self.regionCode = region_code
self.interval = interval
self.videos_ids = None
self.last_comment_per_video = None
def __generate_search_params(self):
"""
Returns a parameters dictionary for the search query
"""
params = {
'key': self.api_key,
'part': 'snippet',
'maxResults': self.n_vids,
'order': 'date',
'type': 'video',
'q': self.search_q
}
if self.regionCode is not None:
params['regionCode'] = self.regionCode
return params
def __generate_comment_threads_params(self, page_token=None):
"""
Returns a parameters dictionary for the comment threads query
"""
params = {
'key': self.api_key,
'part': 'snippet',
'maxResults': 100,
'order': 'time',
'textFormat': 'plainText'
}
if page_token is not None:
params['pageToken'] = page_token
return params
def fetch_videos(self):
"""
Performs the Youtube Search and selects the top newest {n_vids} videos.
"""
params = self.__generate_search_params()
json_result = requests.get(self.SEARCH_URL, params).json()
if not json_result['items']:
raise ValueError(json_result)
self.videos_ids = []
self.last_comment_per_video = {}
for item in json_result['items']:
video_id = item['id']['videoId']
self.videos_ids.append(video_id)
self.last_comment_per_video[video_id] = []
def __extract_comments(self, video_id, page_token=None):
"""
Performs the comment threads request and calls callback for each comment.
Returns the json_result.
"""
params = self.__generate_comment_threads_params(page_token)
params['videoId'] = video_id
json_result = requests.get(self.COMMENT_THREADS_URL, params).json()
if 'items' not in json_result or len(json_result['items']) == 0:
return None
for item in json_result['items']:
comment_id = item['id']
# In case we reached the last comment registred
if len(self.last_comment_per_video[video_id]) > 0 and \
comment_id == self.last_comment_per_video[video_id][0]:
break
# Ignore the comments we already have (in case someone deletes his comment)
if comment_id in self.last_comment_per_video[video_id]:
continue
self.last_comment_per_video[video_id].append(comment_id)
comment = item['snippet']['topLevelComment']['snippet']['textOriginal']
self.callback(video_id, comment)
return json_result
def __check_for_new_comments(self):
"""
Checks if there is new comments in the videos
"""
for video_id in self.videos_ids:
json_result = self.__extract_comments(video_id)
def run(self):
"""
Starts the monitoring process with the given interval.
The callback method is called everytime a new comment is retrieved
"""
if self.videos_ids is None:
raise ValueError('No video ids available, call fetch_videos first.')
for video_id in self.videos_ids:
json_result = self.__extract_comments(video_id)
if json_result is None:
self.last_comment_per_video[video_id] = []
print('{} has no comments.'.format(video_id))
continue
# Check if there are next pages
while 'nextPageToken' in json_result:
json_result = self.__extract_comments(video_id, json_result['nextPageToken'])
# Start monitoring
print('Started monitoring')
while not self.stop_event.wait(self.interval):
self.__check_for_new_comments()
def stop(self):
"""
Sets the stop_event
"""
self.stop_event.set() | youtube/youtube_scraper.py | import requests
from threading import Thread, Event
class YoutubeScraper(Thread):
"""
Performs a Youtube Search, selects N videos (ordered by upload date) and monitors their comments.
Previous comments will also be extracted.
"""
SEARCH_URL = 'https://www.googleapis.com/youtube/v3/search'
COMMENT_THREADS_URL = 'https://www.googleapis.com/youtube/v3/commentThreads'
def __init__(self, api_key, search_q, n_vids, callback, region_code=None, interval=5):
self.stop_event = Event()
Thread.__init__(self)
self.api_key = api_key
self.search_q = search_q
self.n_vids = 50 if n_vids > 50 else n_vids
self.callback = callback
self.regionCode = region_code
self.interval = interval
self.videos_ids = None
self.last_comment_per_video = None
def __generate_search_params(self):
"""
Returns a parameters dictionary for the search query
"""
params = {
'key': self.api_key,
'part': 'snippet',
'maxResults': self.n_vids,
'order': 'date',
'type': 'video',
'q': self.search_q
}
if self.regionCode is not None:
params['regionCode'] = self.regionCode
return params
def __generate_comment_threads_params(self, page_token=None):
"""
Returns a parameters dictionary for the comment threads query
"""
params = {
'key': self.api_key,
'part': 'snippet',
'maxResults': 100,
'order': 'time',
'textFormat': 'plainText'
}
if page_token is not None:
params['pageToken'] = page_token
return params
def fetch_videos(self):
"""
Performs the Youtube Search and selects the top newest {n_vids} videos.
"""
params = self.__generate_search_params()
json_result = requests.get(self.SEARCH_URL, params).json()
if not json_result['items']:
raise ValueError(json_result)
self.videos_ids = []
self.last_comment_per_video = {}
for item in json_result['items']:
video_id = item['id']['videoId']
self.videos_ids.append(video_id)
self.last_comment_per_video[video_id] = []
def __extract_comments(self, video_id, page_token=None):
"""
Performs the comment threads request and calls callback for each comment.
Returns the json_result.
"""
params = self.__generate_comment_threads_params(page_token)
params['videoId'] = video_id
json_result = requests.get(self.COMMENT_THREADS_URL, params).json()
if 'items' not in json_result or len(json_result['items']) == 0:
return None
for item in json_result['items']:
comment_id = item['id']
# In case we reached the last comment registred
if len(self.last_comment_per_video[video_id]) > 0 and \
comment_id == self.last_comment_per_video[video_id][0]:
break
# Ignore the comments we already have (in case someone deletes his comment)
if comment_id in self.last_comment_per_video[video_id]:
continue
self.last_comment_per_video[video_id].append(comment_id)
comment = item['snippet']['topLevelComment']['snippet']['textOriginal']
self.callback(video_id, comment)
return json_result
def __check_for_new_comments(self):
"""
Checks if there is new comments in the videos
"""
for video_id in self.videos_ids:
json_result = self.__extract_comments(video_id)
def run(self):
"""
Starts the monitoring process with the given interval.
The callback method is called everytime a new comment is retrieved
"""
if self.videos_ids is None:
raise ValueError('No video ids available, call fetch_videos first.')
for video_id in self.videos_ids:
json_result = self.__extract_comments(video_id)
if json_result is None:
self.last_comment_per_video[video_id] = []
print('{} has no comments.'.format(video_id))
continue
# Check if there are next pages
while 'nextPageToken' in json_result:
json_result = self.__extract_comments(video_id, json_result['nextPageToken'])
# Start monitoring
print('Started monitoring')
while not self.stop_event.wait(self.interval):
self.__check_for_new_comments()
def stop(self):
"""
Sets the stop_event
"""
self.stop_event.set() | 0.613121 | 0.199678 |
from __future__ import absolute_import
import uuid
import pytest
import time
from sentry.utils import json
from sentry.ingest.ingest_consumer import (
process_event,
process_attachment_chunk,
process_individual_attachment,
process_userreport,
)
from sentry.attachments import attachment_cache
from sentry.event_manager import EventManager
from sentry.models import Event, EventAttachment, UserReport, EventUser
def get_normalized_event(data, project):
    """Run *data* through EventManager normalization and return it as a plain dict."""
    manager = EventManager(data, project=project)
    manager.normalize()
    return dict(manager.get_data())
@pytest.fixture
def preprocess_event(monkeypatch):
    """Replace the ingest consumer's ``preprocess_event`` with a recorder.

    Returns the list that accumulates the keyword arguments of every call,
    so tests can assert on what would have been preprocessed.
    """
    recorded = []

    def _capture(**kwargs):
        recorded.append(kwargs)

    monkeypatch.setattr("sentry.ingest.ingest_consumer.preprocess_event", _capture)
    return recorded
@pytest.mark.django_db
def test_deduplication_works(default_project, task_runner, preprocess_event):
    """Submitting the exact same event twice must preprocess it only once."""
    payload = get_normalized_event({"message": "hello world"}, default_project)
    event_id = payload["event_id"]
    project_id = default_project.id
    start_time = time.time() - 3600

    def submit():
        # Build a fresh message dict per call so the consumer cannot observe
        # mutations left over from the previous submission.
        process_event(
            {
                "payload": json.dumps(payload),
                "start_time": start_time,
                "event_id": event_id,
                "project_id": project_id,
                "remote_addr": "127.0.0.1",
            }
        )

    submit()
    submit()

    # The duplicate submission is dropped: exactly one call was recorded.
    kwargs, = preprocess_event
    assert kwargs == {
        "cache_key": u"e:{event_id}:{project_id}".format(
            event_id=event_id, project_id=project_id
        ),
        "data": payload,
        "event_id": event_id,
        "project": default_project,
        "start_time": start_time,
    }
@pytest.mark.django_db
def test_with_attachments(default_project, task_runner, preprocess_event):
    """Attachment chunks sent before the event are reassembled and cached."""
    payload = get_normalized_event({"message": "hello world"}, default_project)
    event_id = payload["event_id"]
    attachment_id = "ca90fb45-6dd9-40a0-a18f-8693aa621abb"
    project_id = default_project.id
    start_time = time.time() - 3600

    def send_chunk(index, body):
        process_attachment_chunk(
            {
                "payload": body,
                "event_id": event_id,
                "project_id": project_id,
                "id": attachment_id,
                "chunk_index": index,
            }
        )

    # The attachment arrives split across two chunks, before the event itself.
    send_chunk(0, b"Hello ")
    send_chunk(1, b"World!")

    process_event(
        {
            "payload": json.dumps(payload),
            "start_time": start_time,
            "event_id": event_id,
            "project_id": project_id,
            "remote_addr": "127.0.0.1",
            "attachments": [
                {
                    "id": attachment_id,
                    "name": "lol.txt",
                    "content_type": "text/plain",
                    "attachment_type": "custom.attachment",
                    "chunks": 2,
                }
            ],
        }
    )

    cache_key = u"e:{event_id}:{project_id}".format(
        event_id=event_id, project_id=project_id
    )
    kwargs, = preprocess_event
    assert kwargs == {
        "cache_key": cache_key,
        "data": payload,
        "event_id": event_id,
        "project": default_project,
        "start_time": start_time,
    }

    # The chunks were stitched back together under the event's cache key.
    att, = attachment_cache.get(cache_key)
    assert att.data == b"Hello World!"
    assert att.name == "lol.txt"
    assert att.content_type == "text/plain"
    assert att.type == "custom.attachment"
@pytest.mark.django_db
@pytest.mark.parametrize(
    "event_attachments", [True, False], ids=["with_feature", "without_feature"]
)
@pytest.mark.parametrize(
    "chunks", [(b"Hello ", b"World!"), (b"",), ()], ids=["basic", "zerolen", "nochunks"]
)
def test_individual_attachments(default_project, monkeypatch, event_attachments, chunks):
    """Standalone attachments are persisted only when the feature flag is on."""
    # Force the event-attachments feature flag to the parametrized value.
    monkeypatch.setattr("sentry.features.has", lambda *a, **kw: event_attachments)

    event_id = "515539018c9b4260a6f999572f1661ee"
    attachment_id = "ca90fb45-6dd9-40a0-a18f-8693aa621abb"
    project_id = default_project.id

    for index, chunk_payload in enumerate(chunks):
        process_attachment_chunk(
            {
                "payload": chunk_payload,
                "event_id": event_id,
                "project_id": project_id,
                "id": attachment_id,
                "chunk_index": index,
            }
        )

    process_individual_attachment(
        {
            "type": "attachment",
            "attachment": {
                "attachment_type": "event.attachment",
                "chunks": len(chunks),
                "content_type": "application/octet-stream",
                "id": attachment_id,
                "name": "foo.txt",
            },
            "event_id": event_id,
            "project_id": project_id,
        }
    )

    persisted = list(
        EventAttachment.objects.filter(project_id=project_id, event_id=event_id).select_related(
            "file"
        )
    )

    if not event_attachments:
        # Feature disabled: nothing may have been stored.
        assert not persisted
    else:
        attachment, = persisted
        assert attachment.file.type == "event.attachment"
        assert attachment.file.headers == {"Content-Type": "application/octet-stream"}
        stored_file = attachment.file.getfile()
        assert stored_file.read() == b"".join(chunks)
        assert stored_file.name == "foo.txt"
@pytest.mark.django_db
def test_userreport(default_project, monkeypatch):
    """
    Test that user_report-type kafka messages end up in a user report being
    persisted. We additionally test some logic around upserting data in
    eventuser which is also present in the legacy endpoint.
    """
    event_id = uuid.uuid4().hex
    start_time = time.time() - 3600

    # Save the event first so the report can be attached to an existing
    # EventUser record.
    manager = EventManager(data={"event_id": event_id, "user": {"email": "<EMAIL>"}})
    manager.normalize()
    manager.save(default_project.id)

    evtuser, = EventUser.objects.all()
    assert not evtuser.name
    assert not UserReport.objects.all()

    assert process_userreport(
        {
            "type": "user_report",
            "start_time": start_time,
            "payload": json.dumps(
                {
                    "name": "<NAME>",
                    "event_id": event_id,
                    "comments": "hello world",
                    "email": "<EMAIL>",
                }
            ),
            "project_id": default_project.id,
        }
    )

    report, = UserReport.objects.all()
    assert report.comments == "hello world"
    # The name from the report must have been upserted onto the EventUser.
    evtuser, = EventUser.objects.all()
    assert evtuser.name == "<NAME>"
@pytest.mark.django_db
def test_userreport_reverse_order(default_project, monkeypatch):
"""
Test that ingesting a userreport before the event works. This is relevant
for unreal crashes where the userreport is processed immediately in the
ingest consumer while the rest of the event goes to processing tasks.
"""
event_id = uuid.uuid4().hex
start_time = time.time() - 3600
assert not Event.objects.all()
assert process_userreport(
{
"type": "user_report",
"start_time": start_time,
"payload": json.dumps(
{
"name": "<NAME>",
"event_id": event_id,
"comments": "hello world",
"email": "<EMAIL>",
}
),
"project_id": default_project.id,
}
)
mgr = EventManager(data={"event_id": event_id, "user": {"email": "<EMAIL>"}})
mgr.normalize()
mgr.save(default_project.id)
report, = UserReport.objects.all()
assert report.comments == "hello world"
evtuser, = EventUser.objects.all()
# Event got saved after user report, and the sync only works in the
# opposite direction. That's fine, we just accept it.
assert evtuser.name is None | tests/sentry/ingest/ingest_consumer/test_ingest_processing.py | from __future__ import absolute_import
import uuid
import pytest
import time
from sentry.utils import json
from sentry.ingest.ingest_consumer import (
process_event,
process_attachment_chunk,
process_individual_attachment,
process_userreport,
)
from sentry.attachments import attachment_cache
from sentry.event_manager import EventManager
from sentry.models import Event, EventAttachment, UserReport, EventUser
def get_normalized_event(data, project):
mgr = EventManager(data, project=project)
mgr.normalize()
return dict(mgr.get_data())
@pytest.fixture
def preprocess_event(monkeypatch):
calls = []
def inner(**kwargs):
calls.append(kwargs)
monkeypatch.setattr("sentry.ingest.ingest_consumer.preprocess_event", inner)
return calls
@pytest.mark.django_db
def test_deduplication_works(default_project, task_runner, preprocess_event):
payload = get_normalized_event({"message": "hello world"}, default_project)
event_id = payload["event_id"]
project_id = default_project.id
start_time = time.time() - 3600
for _ in range(2):
process_event(
{
"payload": json.dumps(payload),
"start_time": start_time,
"event_id": event_id,
"project_id": project_id,
"remote_addr": "127.0.0.1",
}
)
kwargs, = preprocess_event
assert kwargs == {
"cache_key": u"e:{event_id}:{project_id}".format(event_id=event_id, project_id=project_id),
"data": payload,
"event_id": event_id,
"project": default_project,
"start_time": start_time,
}
@pytest.mark.django_db
def test_with_attachments(default_project, task_runner, preprocess_event):
payload = get_normalized_event({"message": "hello world"}, default_project)
event_id = payload["event_id"]
attachment_id = "ca90fb45-6dd9-40a0-a18f-8693aa621abb"
project_id = default_project.id
start_time = time.time() - 3600
process_attachment_chunk(
{
"payload": b"Hello ",
"event_id": event_id,
"project_id": project_id,
"id": attachment_id,
"chunk_index": 0,
}
)
process_attachment_chunk(
{
"payload": b"World!",
"event_id": event_id,
"project_id": project_id,
"id": attachment_id,
"chunk_index": 1,
}
)
process_event(
{
"payload": json.dumps(payload),
"start_time": start_time,
"event_id": event_id,
"project_id": project_id,
"remote_addr": "127.0.0.1",
"attachments": [
{
"id": attachment_id,
"name": "lol.txt",
"content_type": "text/plain",
"attachment_type": "custom.attachment",
"chunks": 2,
}
],
}
)
kwargs, = preprocess_event
cache_key = u"e:{event_id}:{project_id}".format(event_id=event_id, project_id=project_id)
assert kwargs == {
"cache_key": cache_key,
"data": payload,
"event_id": event_id,
"project": default_project,
"start_time": start_time,
}
att, = attachment_cache.get(cache_key)
assert att.data == b"Hello World!"
assert att.name == "lol.txt"
assert att.content_type == "text/plain"
assert att.type == "custom.attachment"
@pytest.mark.django_db
@pytest.mark.parametrize(
"event_attachments", [True, False], ids=["with_feature", "without_feature"]
)
@pytest.mark.parametrize(
"chunks", [(b"Hello ", b"World!"), (b"",), ()], ids=["basic", "zerolen", "nochunks"]
)
def test_individual_attachments(default_project, monkeypatch, event_attachments, chunks):
monkeypatch.setattr("sentry.features.has", lambda *a, **kw: event_attachments)
event_id = "515539018c9b4260a6f999572f1661ee"
attachment_id = "ca90fb45-6dd9-40a0-a18f-8693aa621abb"
project_id = default_project.id
for i, chunk in enumerate(chunks):
process_attachment_chunk(
{
"payload": chunk,
"event_id": event_id,
"project_id": project_id,
"id": attachment_id,
"chunk_index": i,
}
)
process_individual_attachment(
{
"type": "attachment",
"attachment": {
"attachment_type": "event.attachment",
"chunks": len(chunks),
"content_type": "application/octet-stream",
"id": attachment_id,
"name": "foo.txt",
},
"event_id": event_id,
"project_id": project_id,
}
)
attachments = list(
EventAttachment.objects.filter(project_id=project_id, event_id=event_id).select_related(
"file"
)
)
if not event_attachments:
assert not attachments
else:
att1, = attachments
assert att1.file.type == "event.attachment"
assert att1.file.headers == {"Content-Type": "application/octet-stream"}
f = att1.file.getfile()
assert f.read() == b"".join(chunks)
assert f.name == "foo.txt"
@pytest.mark.django_db
def test_userreport(default_project, monkeypatch):
"""
Test that user_report-type kafka messages end up in a user report being
persisted. We additionally test some logic around upserting data in
eventuser which is also present in the legacy endpoint.
"""
event_id = uuid.uuid4().hex
start_time = time.time() - 3600
mgr = EventManager(data={"event_id": event_id, "user": {"email": "<EMAIL>"}})
mgr.normalize()
mgr.save(default_project.id)
evtuser, = EventUser.objects.all()
assert not evtuser.name
assert not UserReport.objects.all()
assert process_userreport(
{
"type": "user_report",
"start_time": start_time,
"payload": json.dumps(
{
"name": "<NAME>",
"event_id": event_id,
"comments": "hello world",
"email": "<EMAIL>",
}
),
"project_id": default_project.id,
}
)
report, = UserReport.objects.all()
assert report.comments == "hello world"
evtuser, = EventUser.objects.all()
assert evtuser.name == "<NAME>"
@pytest.mark.django_db
def test_userreport_reverse_order(default_project, monkeypatch):
"""
Test that ingesting a userreport before the event works. This is relevant
for unreal crashes where the userreport is processed immediately in the
ingest consumer while the rest of the event goes to processing tasks.
"""
event_id = uuid.uuid4().hex
start_time = time.time() - 3600
assert not Event.objects.all()
assert process_userreport(
{
"type": "user_report",
"start_time": start_time,
"payload": json.dumps(
{
"name": "<NAME>",
"event_id": event_id,
"comments": "hello world",
"email": "<EMAIL>",
}
),
"project_id": default_project.id,
}
)
mgr = EventManager(data={"event_id": event_id, "user": {"email": "<EMAIL>"}})
mgr.normalize()
mgr.save(default_project.id)
report, = UserReport.objects.all()
assert report.comments == "hello world"
evtuser, = EventUser.objects.all()
# Event got saved after user report, and the sync only works in the
# opposite direction. That's fine, we just accept it.
assert evtuser.name is None | 0.542379 | 0.2485 |
import click
from stencil_benchmarks.benchmarks_collection.stencils.cuda_hip import (
basic, horizontal_diffusion as hdiff, vertical_advection as vadv)
from stencil_benchmarks.tools.multirun import (Configuration,
run_scaling_benchmark,
truncate_block_size_to_domain,
default_kwargs)
@click.group()
def main():
    # Root Click command group; the benchmark subcommands below register
    # themselves via @main.command().  (No docstring: click would surface it
    # as CLI help text, which is runtime-visible behavior.)
    pass
# Shared defaults for every benchmark subcommand, targeting an NVIDIA A100
# (sm_80) built with nvcc.  Calling common_kwargs(option, ...) presumably
# merges these defaults with per-command overrides and the user's
# `-o key=value` CLI options — TODO confirm against tools.multirun.
common_kwargs = default_kwargs(backend='cuda',
                               compiler='nvcc',
                               gpu_architecture='sm_80',
                               verify=False,
                               dry_runs=1,
                               gpu_timers=True,
                               alignment=128,
                               dtype='float32')
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def basic_bandwidth(output, executions, option):
    # Benchmark the elementary memory-bound stencils and write a CSV report
    # to `output`.
    kwargs = common_kwargs(
        option,
        loop='3D',
        block_size=(128, 2, 1),
        halo=1,
    )
    # The streaming baseline uses a plain 1D loop and no halo.
    stream_kwargs = kwargs.copy()
    stream_kwargs.update(loop='1D', block_size=(1024, 1, 1), halo=0)

    configurations = [
        Configuration(basic.Copy, name='stream', **stream_kwargs),
        Configuration(basic.Empty, name='empty', **kwargs),
        Configuration(basic.Copy, name='copy', **kwargs),
    ]
    # One-sided averages along each axis (i, j, k map to axis 0, 1, 2).
    for axis, label in enumerate('ijk'):
        configurations.append(
            Configuration(basic.OnesidedAverage,
                          name=f'avg-{label}',
                          axis=axis,
                          **kwargs))
    # Symmetric averages along each axis.
    for axis, label in enumerate('ijk'):
        configurations.append(
            Configuration(basic.SymmetricAverage,
                          name=f'sym-avg-{label}',
                          axis=axis,
                          **kwargs))
    # Horizontal (ij-plane) Laplacian.
    configurations.append(
        Configuration(basic.Laplacian,
                      name='lap-ij',
                      along_x=True,
                      along_y=True,
                      along_z=False,
                      **kwargs))

    table = run_scaling_benchmark(configurations, executions)
    table.to_csv(output)
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def horizontal_diffusion_bandwidth(output, executions, option):
    # Benchmark every horizontal-diffusion implementation and write a CSV
    # report to `output`.
    kwargs = common_kwargs(option)

    # (implementation, block size, extra keyword arguments) per variant.
    variants = [
        (hdiff.Classic, (32, 16, 1), {}),
        (hdiff.OnTheFly, (32, 16, 1), {'loop': '3D'}),
        (hdiff.OnTheFlyIncache, (32, 8, 1), {}),
        (hdiff.JScanSharedMem, (256, 32, 1), {}),
        (hdiff.JScanOtfIncache, (128, 4, 1), {}),
        (hdiff.JScanOtf, (128, 4, 1), {}),
        (hdiff.JScanShuffleIncache, (28, 8, 2), {}),
        (hdiff.JScanShuffle, (28, 8, 2), {}),
        (hdiff.JScanShuffleSystolic, (28, 4, 3), {}),
    ]
    configurations = [
        Configuration(impl, block_size=block_size, **extra, **kwargs)
        for impl, block_size, extra in variants
    ]

    def preprocess(**call_kwargs):
        # A leading block dimension of 28 is deliberate (presumably tied to
        # the shuffle-based implementations — confirm) and must not be
        # truncated to the domain size.
        if call_kwargs['block_size'][0] != 28:
            return truncate_block_size_to_domain(**call_kwargs)
        return call_kwargs

    table = run_scaling_benchmark(configurations,
                                  executions,
                                  preprocess_args=preprocess)
    table.to_csv(output)
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def vertical_advection_bandwidth(output, executions, option):
    # Benchmark every vertical-advection implementation and write a CSV
    # report to `output`.
    kwargs = common_kwargs(option)

    # (implementation, block size, unroll factor) per variant.
    variants = [
        (vadv.Classic, (512, 1), 8),
        (vadv.LocalMem, (128, 1), 28),
        (vadv.SharedMem, (64, 1), 0),
        (vadv.LocalMemMerged, (512, 1), 2),
    ]
    configurations = [
        Configuration(impl,
                      block_size=block_size,
                      unroll_factor=unroll,
                      **kwargs)
        for impl, block_size, unroll in variants
    ]

    table = run_scaling_benchmark(
        configurations,
        executions,
        preprocess_args=truncate_block_size_to_domain)
    table.to_csv(output)
if __name__ == '__main__':
    # CLI entry point when the module is executed directly.
    main()
import click
from stencil_benchmarks.benchmarks_collection.stencils.cuda_hip import (
basic, horizontal_diffusion as hdiff, vertical_advection as vadv)
from stencil_benchmarks.tools.multirun import (Configuration,
run_scaling_benchmark,
truncate_block_size_to_domain,
default_kwargs)
@click.group()
def main():
    # Root Click command group; the benchmark subcommands below register
    # themselves via @main.command().  (No docstring: click would surface it
    # as CLI help text, which is runtime-visible behavior.)
    pass
# Shared defaults for every benchmark subcommand, targeting an NVIDIA A100
# (sm_80) built with nvcc.  Calling common_kwargs(option, ...) presumably
# merges these defaults with per-command overrides and the user's
# `-o key=value` CLI options — TODO confirm against tools.multirun.
common_kwargs = default_kwargs(backend='cuda',
                               compiler='nvcc',
                               gpu_architecture='sm_80',
                               verify=False,
                               dry_runs=1,
                               gpu_timers=True,
                               alignment=128,
                               dtype='float32')
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def basic_bandwidth(output, executions, option):
    # Benchmark the elementary memory-bound stencils and write a CSV report
    # to `output`.
    kwargs = common_kwargs(
        option,
        loop='3D',
        block_size=(128, 2, 1),
        halo=1,
    )
    # The streaming baseline uses a plain 1D loop and no halo.
    stream_kwargs = kwargs.copy()
    stream_kwargs.update(loop='1D', block_size=(1024, 1, 1), halo=0)

    configurations = [
        Configuration(basic.Copy, name='stream', **stream_kwargs),
        Configuration(basic.Empty, name='empty', **kwargs),
        Configuration(basic.Copy, name='copy', **kwargs),
    ]
    # One-sided averages along each axis (i, j, k map to axis 0, 1, 2).
    for axis, label in enumerate('ijk'):
        configurations.append(
            Configuration(basic.OnesidedAverage,
                          name=f'avg-{label}',
                          axis=axis,
                          **kwargs))
    # Symmetric averages along each axis.
    for axis, label in enumerate('ijk'):
        configurations.append(
            Configuration(basic.SymmetricAverage,
                          name=f'sym-avg-{label}',
                          axis=axis,
                          **kwargs))
    # Horizontal (ij-plane) Laplacian.
    configurations.append(
        Configuration(basic.Laplacian,
                      name='lap-ij',
                      along_x=True,
                      along_y=True,
                      along_z=False,
                      **kwargs))

    table = run_scaling_benchmark(configurations, executions)
    table.to_csv(output)
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def horizontal_diffusion_bandwidth(output, executions, option):
    # Benchmark every horizontal-diffusion implementation and write a CSV
    # report to `output`.
    kwargs = common_kwargs(option)

    # (implementation, block size, extra keyword arguments) per variant.
    variants = [
        (hdiff.Classic, (32, 16, 1), {}),
        (hdiff.OnTheFly, (32, 16, 1), {'loop': '3D'}),
        (hdiff.OnTheFlyIncache, (32, 8, 1), {}),
        (hdiff.JScanSharedMem, (256, 32, 1), {}),
        (hdiff.JScanOtfIncache, (128, 4, 1), {}),
        (hdiff.JScanOtf, (128, 4, 1), {}),
        (hdiff.JScanShuffleIncache, (28, 8, 2), {}),
        (hdiff.JScanShuffle, (28, 8, 2), {}),
        (hdiff.JScanShuffleSystolic, (28, 4, 3), {}),
    ]
    configurations = [
        Configuration(impl, block_size=block_size, **extra, **kwargs)
        for impl, block_size, extra in variants
    ]

    def preprocess(**call_kwargs):
        # A leading block dimension of 28 is deliberate (presumably tied to
        # the shuffle-based implementations — confirm) and must not be
        # truncated to the domain size.
        if call_kwargs['block_size'][0] != 28:
            return truncate_block_size_to_domain(**call_kwargs)
        return call_kwargs

    table = run_scaling_benchmark(configurations,
                                  executions,
                                  preprocess_args=preprocess)
    table.to_csv(output)
@main.command()
@click.argument('output', type=click.Path())
@click.option('--executions', '-e', type=int, default=101)
@click.option('--option', '-o', multiple=True)
def vertical_advection_bandwidth(output, executions, option):
    # Benchmark every vertical-advection implementation and write a CSV
    # report to `output`.
    kwargs = common_kwargs(option)

    # (implementation, block size, unroll factor) per variant.
    variants = [
        (vadv.Classic, (512, 1), 8),
        (vadv.LocalMem, (128, 1), 28),
        (vadv.SharedMem, (64, 1), 0),
        (vadv.LocalMemMerged, (512, 1), 2),
    ]
    configurations = [
        Configuration(impl,
                      block_size=block_size,
                      unroll_factor=unroll,
                      **kwargs)
        for impl, block_size, unroll in variants
    ]

    table = run_scaling_benchmark(
        configurations,
        executions,
        preprocess_args=truncate_block_size_to_domain)
    table.to_csv(output)
if __name__ == '__main__':
    # CLI entry point when the module is executed directly.
    main()
import collider
import models.cube # default model
class Block_type:
    """One block type: its name, face textures, model geometry and colliders."""

    # new optional model argument (cube model by default)
    def __init__(self, texture_manager, name="unknown", block_face_textures=None, model=models.cube):
        # Fix: a dict literal default would be shared across all instances
        # (mutable default argument pitfall); build it per call instead.
        if block_face_textures is None:
            block_face_textures = {"all": "cobblestone"}

        self.name = name
        self.block_face_textures = block_face_textures
        self.model = model

        # create members based on model attributes
        self.transparent = model.transparent
        self.is_cube = model.is_cube
        self.glass = model.glass

        # create colliders
        self.colliders = [collider.Collider(*c) for c in model.colliders]

        # replace data contained in numbers.py with model specific data
        self.vertex_positions = model.vertex_positions
        self.tex_coords = model.tex_coords.copy()
        self.shading_values = model.shading_values

        def set_block_face(face, texture):
            # Set the texture index of one face; silently skip faces the
            # model does not have.
            if face > len(self.tex_coords) - 1:
                return

            # copy-on-write so the model's shared face list is not mutated
            self.tex_coords[face] = self.tex_coords[face].copy()

            # the texture index is the third component of each vertex triple
            for vertex in range(4):
                self.tex_coords[face][vertex * 3 + 2] = texture

        for face, texture in block_face_textures.items():
            texture_manager.add_texture(texture)
            texture_index = texture_manager.textures.index(texture)

            if face == "all":
                # apply to every face the model defines
                for i in range(len(self.tex_coords)):
                    set_block_face(i, texture_index)
            elif face == "sides":
                # the four vertical faces
                for i in (0, 1, 4, 5):
                    set_block_face(i, texture_index)
            elif face == "x":
                set_block_face(0, texture_index)
                set_block_face(1, texture_index)
            elif face == "y":
                set_block_face(2, texture_index)
                set_block_face(3, texture_index)
            elif face == "z":
                set_block_face(4, texture_index)
                set_block_face(5, texture_index)
            else:
                # a single named face
                set_block_face(["right", "left", "top", "bottom", "front", "back"].index(face), texture_index)
import models.cube # default model
class Block_type:
    """One block type: its name, face textures, model geometry and colliders."""

    # new optional model argument (cube model by default)
    def __init__(self, texture_manager, name="unknown", block_face_textures=None, model=models.cube):
        # Fix: a dict literal default would be shared across all instances
        # (mutable default argument pitfall); build it per call instead.
        if block_face_textures is None:
            block_face_textures = {"all": "cobblestone"}

        self.name = name
        self.block_face_textures = block_face_textures
        self.model = model

        # create members based on model attributes
        self.transparent = model.transparent
        self.is_cube = model.is_cube
        self.glass = model.glass

        # create colliders
        self.colliders = [collider.Collider(*c) for c in model.colliders]

        # replace data contained in numbers.py with model specific data
        self.vertex_positions = model.vertex_positions
        self.tex_coords = model.tex_coords.copy()
        self.shading_values = model.shading_values

        def set_block_face(face, texture):
            # Set the texture index of one face; silently skip faces the
            # model does not have.
            if face > len(self.tex_coords) - 1:
                return

            # copy-on-write so the model's shared face list is not mutated
            self.tex_coords[face] = self.tex_coords[face].copy()

            # the texture index is the third component of each vertex triple
            for vertex in range(4):
                self.tex_coords[face][vertex * 3 + 2] = texture

        for face, texture in block_face_textures.items():
            texture_manager.add_texture(texture)
            texture_index = texture_manager.textures.index(texture)

            if face == "all":
                # apply to every face the model defines
                for i in range(len(self.tex_coords)):
                    set_block_face(i, texture_index)
            elif face == "sides":
                # the four vertical faces
                for i in (0, 1, 4, 5):
                    set_block_face(i, texture_index)
            elif face == "x":
                set_block_face(0, texture_index)
                set_block_face(1, texture_index)
            elif face == "y":
                set_block_face(2, texture_index)
                set_block_face(3, texture_index)
            elif face == "z":
                set_block_face(4, texture_index)
                set_block_face(5, texture_index)
            else:
                # a single named face
                set_block_face(["right", "left", "top", "bottom", "front", "back"].index(face), texture_index)
import json
import os
import subprocess
import webbrowser
import boto3
import click
import requests
from botocore import auth
from botocore.awsrequest import AWSRequest
def log_if_verbose(verbose, message):
    """Echo *message* through click, but only when *verbose* is truthy."""
    if not verbose:
        return None
    return click.echo(message)
def read_json(data_path):
    """Load and return the JSON document stored at *data_path*."""
    with open(data_path, 'r') as data_file:
        return json.load(data_file)
def read_csv(data_path):
    """Return the lines of *data_path* as a list, trailing newlines stripped."""
    with open(data_path, 'r') as data_file:
        return [line.rstrip('\n') for line in data_file]
def read_input_data(data_path, content_type):
    """Read the payload file.

    Returns a JSON-encoded string for ``application/json`` content, otherwise
    a list of raw payload lines.
    """
    if content_type == "application/json":
        return json.dumps(read_json(data_path))
    return read_csv(data_path)
def get_aws_auth_headers(sagemaker_config):
    """Build SigV4-signed request headers for the SageMaker endpoint.

    NOTE(review): only the first payload entry (``data[0]``) is used as the
    signed body — confirm callers post a matching body, since SigV4 signs
    over the payload.
    """
    session_credentials = boto3.Session().get_credentials()
    signer = auth.SigV4Auth(session_credentials, "sagemaker", sagemaker_config.region)

    payload = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    request = AWSRequest(
        method="POST",
        url=sagemaker_config.full_endpoint,
        headers={"Content-type": sagemaker_config.content_type},
        data=payload[0],
    )
    signer.add_auth(request)
    return request.headers
def post_request(headers, sagemaker_config):
    # POST the payload to the SageMaker endpoint using pre-signed headers.
    # Returns None on success (raise_for_status returns None) and raises
    # requests.HTTPError on a 4xx/5xx response.
    # NOTE(review): the whole payload is sent here as JSON, while
    # get_aws_auth_headers signs only data[0] as the raw body — that looks
    # like a SigV4 signature mismatch; verify against a live endpoint.
    data = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    response = requests.post(
        url=sagemaker_config.full_endpoint,
        json=data, headers=headers)
    return response.raise_for_status()
def format_input_data(sagemaker_config, vegeta_config):
    # Serialize the input payload into the JSON file vegeta reads via the
    # "@payload.json" reference in the target list.
    # NOTE(review): "vegeta_config.vegeta_config.payload_json_filename" looks
    # like a doubled attribute access — confirm the config object really
    # nests a vegeta_config attribute, otherwise this raises AttributeError.
    # NOTE(review): for application/json content read_input_data already
    # returns a JSON string, so json.dumps here double-encodes it — confirm.
    data = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    with open(vegeta_config.vegeta_config.payload_json_filename, "w") as f:
        f.write(json.dumps(data))
    return
def write_target_list(headers, config):
    """Write the vegeta targets file: request line, headers, payload reference."""
    lines = ["POST {}".format(config.sagemaker_config.full_endpoint)]
    for name in headers:
        lines.append("{}: {}".format(name, headers[name]))
    lines.append("@payload.json")

    with open(config.vegeta_config.target_list_file_name, 'w') as file_object:
        file_object.write("\n".join(lines))
class VegetaHelper:
    # Thin wrapper around the `vegeta` CLI: run load tests and render plots
    # and reports from the recorded binary results.

    def __init__(self, vegeta_config):
        # vegeta_config carries rate/duration, output file paths and the
        # report title.
        self.config = vegeta_config

    def run_load_test(self):
        # Attack the targets in targets.list at the configured rate for the
        # configured duration, writing raw results to the binary file.
        # Returns the (stdout, stderr) tuple from Popen.communicate();
        # stderr is merged into stdout.
        vegeta_command = subprocess.Popen(
            ['vegeta', 'attack', f'-duration={self.config.duration}s', f'-rate={self.config.rate}/s',
             '-targets=targets.list',
             f'-output={self.config.binary_file_path}'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        return vegeta_command.communicate()

    def plot(self):
        # Render the binary results as an HTML latency plot.
        with open(self.config.html_file_path, 'w') as file_object:
            subprocess.call(['vegeta', 'plot', '--title', self.config.name, self.config.binary_file_path], stdout=file_object)

    def open_browser(self):
        # Open the generated HTML report in Chrome.
        # NOTE(review): webbrowser.get('chrome') raises webbrowser.Error when
        # Chrome is not registered on this platform — confirm whether a
        # fallback browser would be acceptable.
        browser = webbrowser.get('chrome')
        return browser.open_new_tab(f"file://{os.path.realpath(self.config.html_file_path)}")

    def write_report(self):
        # Print a textual summary of the binary results to stdout.
        command = subprocess.Popen(
            [
                'vegeta', 'report', f"{self.config.binary_file_path}"
            ]
        )
        return command.communicate()

    @staticmethod
    def mock_vegeta_call(verbose):
        # Check that the vegeta binary is installed and runnable.
        # Returns True when `vegeta --version` exits 0, otherwise None.
        log_if_verbose(verbose=verbose, message="Checking vegeta installation.")
        try:
            output = subprocess.call(["vegeta", "--version"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
            if output != 0:
                return None
            return True
        except FileNotFoundError:
            return None
import os
import subprocess
import webbrowser
import boto3
import click
import requests
from botocore import auth
from botocore.awsrequest import AWSRequest
def log_if_verbose(verbose, message):
    """Echo *message* through click, but only when *verbose* is truthy."""
    if not verbose:
        return None
    return click.echo(message)
def read_json(data_path):
    """Load and return the JSON document stored at *data_path*."""
    with open(data_path, 'r') as data_file:
        return json.load(data_file)
def read_csv(data_path):
    """Return the lines of *data_path* as a list, trailing newlines stripped."""
    with open(data_path, 'r') as data_file:
        return [line.rstrip('\n') for line in data_file]
def read_input_data(data_path, content_type):
    """Read the payload file.

    Returns a JSON-encoded string for ``application/json`` content, otherwise
    a list of raw payload lines.
    """
    if content_type == "application/json":
        return json.dumps(read_json(data_path))
    return read_csv(data_path)
def get_aws_auth_headers(sagemaker_config):
    """Build SigV4-signed request headers for the SageMaker endpoint.

    NOTE(review): only the first payload entry (``data[0]``) is used as the
    signed body — confirm callers post a matching body, since SigV4 signs
    over the payload.
    """
    session_credentials = boto3.Session().get_credentials()
    signer = auth.SigV4Auth(session_credentials, "sagemaker", sagemaker_config.region)

    payload = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    request = AWSRequest(
        method="POST",
        url=sagemaker_config.full_endpoint,
        headers={"Content-type": sagemaker_config.content_type},
        data=payload[0],
    )
    signer.add_auth(request)
    return request.headers
def post_request(headers, sagemaker_config):
    # POST the payload to the SageMaker endpoint using pre-signed headers.
    # Returns None on success (raise_for_status returns None) and raises
    # requests.HTTPError on a 4xx/5xx response.
    # NOTE(review): the whole payload is sent here as JSON, while
    # get_aws_auth_headers signs only data[0] as the raw body — that looks
    # like a SigV4 signature mismatch; verify against a live endpoint.
    data = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    response = requests.post(
        url=sagemaker_config.full_endpoint,
        json=data, headers=headers)
    return response.raise_for_status()
def format_input_data(sagemaker_config, vegeta_config):
    # Serialize the input payload into the JSON file vegeta reads via the
    # "@payload.json" reference in the target list.
    # NOTE(review): "vegeta_config.vegeta_config.payload_json_filename" looks
    # like a doubled attribute access — confirm the config object really
    # nests a vegeta_config attribute, otherwise this raises AttributeError.
    # NOTE(review): for application/json content read_input_data already
    # returns a JSON string, so json.dumps here double-encodes it — confirm.
    data = read_input_data(sagemaker_config.data_path, sagemaker_config.content_type)
    with open(vegeta_config.vegeta_config.payload_json_filename, "w") as f:
        f.write(json.dumps(data))
    return
def write_target_list(headers, config):
    """Write the vegeta targets file: request line, headers, payload reference."""
    lines = ["POST {}".format(config.sagemaker_config.full_endpoint)]
    for name in headers:
        lines.append("{}: {}".format(name, headers[name]))
    lines.append("@payload.json")

    with open(config.vegeta_config.target_list_file_name, 'w') as file_object:
        file_object.write("\n".join(lines))
class VegetaHelper:
    # Thin wrapper around the `vegeta` CLI: run load tests and render plots
    # and reports from the recorded binary results.

    def __init__(self, vegeta_config):
        # vegeta_config carries rate/duration, output file paths and the
        # report title.
        self.config = vegeta_config

    def run_load_test(self):
        # Attack the targets in targets.list at the configured rate for the
        # configured duration, writing raw results to the binary file.
        # Returns the (stdout, stderr) tuple from Popen.communicate();
        # stderr is merged into stdout.
        vegeta_command = subprocess.Popen(
            ['vegeta', 'attack', f'-duration={self.config.duration}s', f'-rate={self.config.rate}/s',
             '-targets=targets.list',
             f'-output={self.config.binary_file_path}'],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        return vegeta_command.communicate()

    def plot(self):
        # Render the binary results as an HTML latency plot.
        with open(self.config.html_file_path, 'w') as file_object:
            subprocess.call(['vegeta', 'plot', '--title', self.config.name, self.config.binary_file_path], stdout=file_object)

    def open_browser(self):
        # Open the generated HTML report in Chrome.
        # NOTE(review): webbrowser.get('chrome') raises webbrowser.Error when
        # Chrome is not registered on this platform — confirm whether a
        # fallback browser would be acceptable.
        browser = webbrowser.get('chrome')
        return browser.open_new_tab(f"file://{os.path.realpath(self.config.html_file_path)}")

    def write_report(self):
        # Print a textual summary of the binary results to stdout.
        command = subprocess.Popen(
            [
                'vegeta', 'report', f"{self.config.binary_file_path}"
            ]
        )
        return command.communicate()

    @staticmethod
    def mock_vegeta_call(verbose):
        # Check that the vegeta binary is installed and runnable.
        # Returns True when `vegeta --version` exits 0, otherwise None.
        log_if_verbose(verbose=verbose, message="Checking vegeta installation.")
        try:
            output = subprocess.call(["vegeta", "--version"], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
            if output != 0:
                return None
            return True
        except FileNotFoundError:
            return None
from tempfile import TemporaryDirectory
import os
import sys
import subprocess
from pathlib import Path
import shutil
import unittest
from typing import List
TEST_ROOT = Path(__file__).resolve().parent
def run(cmd: List[str], **kwargs) -> subprocess.CompletedProcess:
    """Echo *cmd* shell-style, then execute it with subprocess.run."""
    print(f"$ {' '.join(cmd)}")
    return subprocess.run(cmd, **kwargs)
def support_flakes() -> bool:
    """Return True when the installed nix supports flakes.

    Evaluates ``builtins ? getFlake`` with ``nix-instantiate --json``; the
    result is used to skip the flake tests on older nix installations.
    """
    cmd = [
        "nix-instantiate",
        "--json",
        "--eval",
        "--expr",
        "builtins ? getFlake",
    ]
    proc = subprocess.run(cmd, text=True, capture_output=True, check=True)
    # Bug fix: nix prints the JSON value followed by a newline, so comparing
    # the raw stdout against "true" was always False and the flake tests were
    # unconditionally skipped.  Strip whitespace before comparing.
    return proc.stdout.strip() == "true"
class TestBaseNamespace:
    """Nested so test discovery doesn't run the base class tests directly."""

    class TestBase(unittest.TestCase):
        # Shared fixture: copies tests/testenv into a throw-away HOME, writes
        # an .envrc sourcing the direnvrc under test, then runs `hello`
        # through `direnv exec` twice — each run preceded by a nix GC, so the
        # first run must rebuild and the second must hit the direnv cache.
        # Subclasses provide direnvrc_command and the expected log markers.
        env: dict                          # environment for direnv, HOME redirected
        dir: TemporaryDirectory            # temporary HOME, removed in tearDownClass
        testenv: Path                      # working copy of tests/testenv
        direnvrc: str                      # path to the direnvrc being tested
        direnvrc_command: str              # e.g. "use nix" / "use flake" (subclass)
        out1: subprocess.CompletedProcess  # first (uncached) run
        out2: subprocess.CompletedProcess  # second (cached) run
        renewed_message: str               # expected stderr marker, fresh shell
        cached_message: str                # expected stderr marker, cached shell

        @classmethod
        def setUpClass(cls) -> None:
            cls.env = os.environ.copy()
            cls.dir = TemporaryDirectory()
            # Redirect HOME so direnv's allow-list and caches are isolated.
            cls.env["HOME"] = str(cls.dir.name)
            cls.testenv = Path(cls.dir.name).joinpath("testenv")
            shutil.copytree(TEST_ROOT.joinpath("testenv"), cls.testenv)
            cls.direnvrc = str(TEST_ROOT.parent.joinpath("direnvrc"))
            with open(cls.testenv.joinpath(".envrc"), "w") as f:
                f.write(f"source {cls.direnvrc}\n{cls.direnvrc_command}")
            run(["direnv", "allow"], cwd=str(cls.testenv), env=cls.env, check=True)
            # GC first so the initial run cannot reuse a previous store path.
            run(["nix-collect-garbage"], check=True)
            cls.out1 = run(
                ["direnv", "exec", str(cls.testenv), "hello"],
                env=cls.env,
                stderr=subprocess.PIPE,
                text=True,
            )
            sys.stderr.write(cls.out1.stderr)
            # GC again: the second run must survive it via the cache/GC roots.
            run(["nix-collect-garbage"], check=True)
            cls.out2 = run(
                ["direnv", "exec", str(cls.testenv), "hello"],
                env=cls.env,
                stderr=subprocess.PIPE,
                text=True,
            )
            sys.stderr.write(cls.out2.stderr)

        @classmethod
        def tearDownClass(cls) -> None:
            cls.dir.cleanup()

        def test_fresh_shell_message(self) -> None:
            self.assertIn(self.renewed_message, self.out1.stderr)

        def test_fresh_shell_shellHook(self) -> None:
            self.assertIn("Executing shellHook.", self.out1.stderr)

        def test_fresh_shell_returncode(self) -> None:
            self.assertEqual(self.out1.returncode, 0)

        def test_cached_shell_message(self) -> None:
            self.assertIn(self.cached_message, self.out2.stderr)

        def test_cached_shell_shellHook(self) -> None:
            # A cached shell must not re-run the shellHook.
            self.assertNotIn("Executing shellHook.", self.out2.stderr)

        def test_cached_shell_returncode(self) -> None:
            self.assertEqual(self.out2.returncode, 0)
class NixShellTest(TestBaseNamespace.TestBase):
    # Exercises classic `use nix` (nix-shell based) environments.
    direnvrc_command = "use nix"
    renewed_message = "renewed cache and derivation link"
    cached_message = "using cached derivation"
@unittest.skipUnless(support_flakes(), "requires flakes")
class FlakeTest(TestBaseNamespace.TestBase):
    # Exercises `use flake`; skipped entirely when nix lacks flake support.
    direnvrc_command = "use flake"
    renewed_message = "renewed cache"
    cached_message = "using cached dev shell"

    def test_gcroot_symlink_created_and_valid(self) -> None:
        # Flake inputs must be pinned as GC roots under .direnv/flake-inputs
        # so nix-collect-garbage cannot delete them between runs.
        inputs = list(self.testenv.joinpath(".direnv/flake-inputs").iterdir())
        # should only contain our flake-utils flake
        if len(inputs) != 3:
            # dump archive info before failing to aid debugging
            subprocess.run(["nix", "flake", "archive", "--json"], cwd=self.testenv)
            print(inputs)
        self.assertEqual(len(inputs), 3)
        for symlink in inputs:
            self.assertTrue(symlink.is_dir())
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
from tempfile import TemporaryDirectory
import os
import sys
import subprocess
from pathlib import Path
import shutil
import unittest
from typing import List
TEST_ROOT = Path(__file__).resolve().parent
def run(cmd: List[str], **kwargs) -> subprocess.CompletedProcess:
    """Echo *cmd* shell-style, then execute it with subprocess.run."""
    print(f"$ {' '.join(cmd)}")
    return subprocess.run(cmd, **kwargs)
def support_flakes() -> bool:
    """Return True when the installed nix supports flakes.

    Evaluates ``builtins ? getFlake`` with ``nix-instantiate --json``; the
    result is used to skip the flake tests on older nix installations.
    """
    cmd = [
        "nix-instantiate",
        "--json",
        "--eval",
        "--expr",
        "builtins ? getFlake",
    ]
    proc = subprocess.run(cmd, text=True, capture_output=True, check=True)
    # Bug fix: nix prints the JSON value followed by a newline, so comparing
    # the raw stdout against "true" was always False and the flake tests were
    # unconditionally skipped.  Strip whitespace before comparing.
    return proc.stdout.strip() == "true"
class TestBaseNamespace:
    """Nested so test discovery doesn't run the base class tests directly."""

    class TestBase(unittest.TestCase):
        # Shared fixture: copies tests/testenv into a throw-away HOME, writes
        # an .envrc sourcing the direnvrc under test, then runs `hello`
        # through `direnv exec` twice — each run preceded by a nix GC, so the
        # first run must rebuild and the second must hit the direnv cache.
        # Subclasses provide direnvrc_command and the expected log markers.
        env: dict                          # environment for direnv, HOME redirected
        dir: TemporaryDirectory            # temporary HOME, removed in tearDownClass
        testenv: Path                      # working copy of tests/testenv
        direnvrc: str                      # path to the direnvrc being tested
        direnvrc_command: str              # e.g. "use nix" / "use flake" (subclass)
        out1: subprocess.CompletedProcess  # first (uncached) run
        out2: subprocess.CompletedProcess  # second (cached) run
        renewed_message: str               # expected stderr marker, fresh shell
        cached_message: str                # expected stderr marker, cached shell

        @classmethod
        def setUpClass(cls) -> None:
            cls.env = os.environ.copy()
            cls.dir = TemporaryDirectory()
            # Redirect HOME so direnv's allow-list and caches are isolated.
            cls.env["HOME"] = str(cls.dir.name)
            cls.testenv = Path(cls.dir.name).joinpath("testenv")
            shutil.copytree(TEST_ROOT.joinpath("testenv"), cls.testenv)
            cls.direnvrc = str(TEST_ROOT.parent.joinpath("direnvrc"))
            with open(cls.testenv.joinpath(".envrc"), "w") as f:
                f.write(f"source {cls.direnvrc}\n{cls.direnvrc_command}")
            run(["direnv", "allow"], cwd=str(cls.testenv), env=cls.env, check=True)
            # GC first so the initial run cannot reuse a previous store path.
            run(["nix-collect-garbage"], check=True)
            cls.out1 = run(
                ["direnv", "exec", str(cls.testenv), "hello"],
                env=cls.env,
                stderr=subprocess.PIPE,
                text=True,
            )
            sys.stderr.write(cls.out1.stderr)
            # GC again: the second run must survive it via the cache/GC roots.
            run(["nix-collect-garbage"], check=True)
            cls.out2 = run(
                ["direnv", "exec", str(cls.testenv), "hello"],
                env=cls.env,
                stderr=subprocess.PIPE,
                text=True,
            )
            sys.stderr.write(cls.out2.stderr)

        @classmethod
        def tearDownClass(cls) -> None:
            cls.dir.cleanup()

        def test_fresh_shell_message(self) -> None:
            self.assertIn(self.renewed_message, self.out1.stderr)

        def test_fresh_shell_shellHook(self) -> None:
            self.assertIn("Executing shellHook.", self.out1.stderr)

        def test_fresh_shell_returncode(self) -> None:
            self.assertEqual(self.out1.returncode, 0)

        def test_cached_shell_message(self) -> None:
            self.assertIn(self.cached_message, self.out2.stderr)

        def test_cached_shell_shellHook(self) -> None:
            # A cached shell must not re-run the shellHook.
            self.assertNotIn("Executing shellHook.", self.out2.stderr)

        def test_cached_shell_returncode(self) -> None:
            self.assertEqual(self.out2.returncode, 0)
class NixShellTest(TestBaseNamespace.TestBase):
    # Exercises classic `use nix` (nix-shell based) environments.
    direnvrc_command = "use nix"
    renewed_message = "renewed cache and derivation link"
    cached_message = "using cached derivation"
@unittest.skipUnless(support_flakes(), "requires flakes")
class FlakeTest(TestBaseNamespace.TestBase):
    # Exercises `use flake`; skipped entirely when nix lacks flake support.
    direnvrc_command = "use flake"
    renewed_message = "renewed cache"
    cached_message = "using cached dev shell"

    def test_gcroot_symlink_created_and_valid(self) -> None:
        # Flake inputs must be pinned as GC roots under .direnv/flake-inputs
        # so nix-collect-garbage cannot delete them between runs.
        inputs = list(self.testenv.joinpath(".direnv/flake-inputs").iterdir())
        # should only contain our flake-utils flake
        if len(inputs) != 3:
            # dump archive info before failing to aid debugging
            subprocess.run(["nix", "flake", "archive", "--json"], cwd=self.testenv)
            print(inputs)
        self.assertEqual(len(inputs), 3)
        for symlink in inputs:
            self.assertTrue(symlink.is_dir())
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
inventory: host_list
version_added: "2.4"
short_description: Parses a 'host list' string
description:
- Parses a host list string as a comma separated values of hosts
- This plugin only applies to inventory strings that are not paths and contain a comma.
'''
EXAMPLES = r'''
# define 2 hosts in command line
# ansible -i '10.10.2.6, 10.10.2.4' -m ping all
# DNS resolvable names
# ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
# just use localhost
# ansible-playbook -i 'localhost,' play.yml -c local
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
    """Inventory plugin parsing an inline comma-separated host list string."""

    NAME = 'host_list'

    def verify_file(self, host_list):
        # Accept the "file" only when it is NOT an existing path and contains
        # a comma — i.e. it is an inline host list, not a real inventory file.
        valid = False
        b_path = to_bytes(host_list, errors='surrogate_or_strict')
        if not os.path.exists(b_path) and ',' in host_list:
            valid = True
        return valid

    def parse(self, inventory, loader, host_list, cache=True):
        '''Split the host list on commas and add each host to "ungrouped".'''
        super(InventoryModule, self).parse(inventory, loader, host_list)
        try:
            for h in host_list.split(','):
                h = h.strip()
                if h:
                    try:
                        # Separate an optional port; ranges like host[1:3]
                        # are rejected in inline lists.
                        (host, port) = parse_address(h, allow_ranges=False)
                    except AnsibleError as e:
                        # Not a parseable address — keep the raw string as
                        # the host name with no port.
                        self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
                        host = h
                        port = None
                    # avoid re-adding hosts listed twice
                    if host not in self.inventory.hosts:
                        self.inventory.add_host(host, group='ungrouped', port=port)
        except Exception as e:
            raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
inventory: host_list
version_added: "2.4"
short_description: Parses a 'host list' string
description:
- Parses a host list string as a comma separated values of hosts
- This plugin only applies to inventory strings that are not paths and contain a comma.
'''
EXAMPLES = r'''
# define 2 hosts in command line
# ansible -i '10.10.2.6, 10.10.2.4' -m ping all
# DNS resolvable names
# ansible -i 'host1.example.com, host2' -m user -a 'name=me state=absent' all
# just use localhost
# ansible-playbook -i 'localhost,' play.yml -c local
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.inventory import BaseInventoryPlugin
class InventoryModule(BaseInventoryPlugin):
NAME = 'host_list'
def verify_file(self, host_list):
valid = False
b_path = to_bytes(host_list, errors='surrogate_or_strict')
if not os.path.exists(b_path) and ',' in host_list:
valid = True
return valid
def parse(self, inventory, loader, host_list, cache=True):
''' parses the inventory file '''
super(InventoryModule, self).parse(inventory, loader, host_list)
try:
for h in host_list.split(','):
h = h.strip()
if h:
try:
(host, port) = parse_address(h, allow_ranges=False)
except AnsibleError as e:
self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
host = h
port = None
if host not in self.inventory.hosts:
self.inventory.add_host(host, group='ungrouped', port=port)
except Exception as e:
raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e)) | 0.610918 | 0.15961 |
import pytest
from topgg import types
d: dict = {
"defAvatar": "6debd47ed13483642cf09e832ed0bc1b",
"invite": "",
"website": "https://top.gg",
"support": "KYZsaFb",
"github": "https://github.com/top-gg/Luca",
"longdesc": "Luca only works in the **Discord Bot List** server. \nPrepend commands with the prefix `-` or "
"`@Luca#1375`. \n**Please refrain from using these commands in non testing channels.**\n- `botinfo "
"@bot` Shows bot info, title redirects to site listing.\n- `bots @user`* Shows all bots of that user, "
"includes bots in the queue.\n- `owner / -owners @bot`* Shows all owners of that bot.\n- `prefix "
"@bot`* Shows the prefix of that bot.\n* Mobile friendly version exists. Just add `noembed` to the "
"end of the command.\n",
"shortdesc": "Luca is a bot for managing and informing members of the server",
"prefix": "- or @Luca#1375",
"lib": None,
"clientid": "264811613708746752",
"avatar": "7edcc4c6fbb0b23762455ca139f0e1c9",
"id": "264811613708746752",
"discriminator": "1375",
"username": "Luca",
"date": "2017-04-26T18:08:17.125Z",
"server_count": 2,
"guilds": ["417723229721853963", "264445053596991498"],
"shards": [],
"monthlyPoints": 19,
"points": 397,
"certifiedBot": False,
"owners": ["129908908096487424"],
"tags": ["Moderation", "Role Management", "Logging"],
"donatebotguildid": "",
}
query_dict = {"qwe": "1", "rty": "2", "uio": "3"}
vote_data_dict = {
"type": "test",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
"user": "1",
}
bot_vote_dict = {
"bot": "2",
"user": "3",
"type": "test",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
}
server_vote_dict = {
"guild": "4",
"user": "5",
"type": "upvote",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
}
user_data_dict = {
"discriminator": "0001",
"avatar": "a_1241439d430def25c100dd28add2d42f",
"id": "140862798832861184",
"username": "Xetera",
"defAvatar": "322c936a8c8be1b803cd94861bdfa868",
"admin": True,
"webMod": True,
"mod": True,
"certifiedDev": False,
"supporter": False,
"social": {},
}
bot_stats_dict = {"shards": [1, 5, 8]}
@pytest.fixture
def data_dict() -> types.DataDict:
return types.DataDict(**d)
@pytest.fixture
def bot_data() -> types.BotData:
return types.BotData(**d)
@pytest.fixture
def user_data() -> types.UserData:
return types.UserData(**user_data_dict)
@pytest.fixture
def widget_options() -> types.WidgetOptions:
return types.WidgetOptions(id=int(d["id"]))
@pytest.fixture
def vote_data() -> types.VoteDataDict:
return types.VoteDataDict(**vote_data_dict)
@pytest.fixture
def bot_vote_data() -> types.BotVoteData:
return types.BotVoteData(**bot_vote_dict)
@pytest.fixture
def server_vote_data() -> types.ServerVoteData:
return types.ServerVoteData(**server_vote_dict)
@pytest.fixture
def bot_stats_data() -> types.BotStatsData:
return types.BotStatsData(**bot_stats_dict)
def test_data_dict_fields(data_dict: types.DataDict) -> None:
for attr in data_dict:
if "id" in attr.lower():
assert isinstance(data_dict[attr], int) or data_dict[attr] is None
assert data_dict.get(attr) == data_dict[attr] == getattr(data_dict, attr)
def test_bot_data_fields(bot_data: types.BotData) -> None:
bot_data.github = "I'm a GitHub link!"
bot_data.support = "Support has arrived!"
for attr in bot_data:
if "id" in attr.lower():
assert isinstance(bot_data[attr], int) or bot_data[attr] is None
elif attr in ("owners", "guilds"):
for item in bot_data[attr]:
assert isinstance(item, int)
assert bot_data.get(attr) == bot_data[attr] == getattr(bot_data, attr)
def test_widget_options_fields(widget_options: types.WidgetOptions) -> None:
assert widget_options["colors"] == widget_options["colours"]
widget_options.colours = {"background": 0}
widget_options["colours"]["text"] = 255
assert widget_options.colours == widget_options["colors"]
for attr in widget_options:
if "id" in attr.lower():
assert isinstance(widget_options[attr], int) or widget_options[attr] is None
assert (
widget_options.get(attr)
== widget_options[attr]
== widget_options[attr]
== getattr(widget_options, attr)
)
def test_vote_data_fields(vote_data: types.VoteDataDict) -> None:
assert isinstance(vote_data.query, dict)
vote_data.type = "upvote"
for attr in vote_data:
assert getattr(vote_data, attr) == vote_data.get(attr) == vote_data[attr]
def test_bot_vote_data_fields(bot_vote_data: types.BotVoteData) -> None:
assert isinstance(bot_vote_data.query, dict)
bot_vote_data.type = "upvote"
assert isinstance(bot_vote_data["bot"], int)
for attr in bot_vote_data:
assert (
getattr(bot_vote_data, attr)
== bot_vote_data.get(attr)
== bot_vote_data[attr]
)
def test_server_vote_data_fields(server_vote_data: types.BotVoteData) -> None:
assert isinstance(server_vote_data.query, dict)
server_vote_data.type = "upvote"
assert isinstance(server_vote_data["guild"], int)
for attr in server_vote_data:
assert (
getattr(server_vote_data, attr)
== server_vote_data.get(attr)
== server_vote_data[attr]
)
def test_bot_stats_data_attrs(bot_stats_data: types.BotStatsData) -> None:
for count in ("server_count", "shard_count"):
assert isinstance(bot_stats_data[count], int) or bot_stats_data[count] is None
assert isinstance(bot_stats_data.shards, list)
if bot_stats_data.shards:
for shard in bot_stats_data.shards:
assert isinstance(shard, int)
def test_user_data_attrs(user_data: types.UserData) -> None:
assert isinstance(user_data.social, types.SocialData)
for attr in user_data:
if "id" in attr.lower():
assert isinstance(user_data[attr], int) or user_data[attr] is None
assert user_data[attr] == getattr(user_data, attr) == user_data.get(attr) | tests/test_type.py | import pytest
from topgg import types
d: dict = {
"defAvatar": "6debd47ed13483642cf09e832ed0bc1b",
"invite": "",
"website": "https://top.gg",
"support": "KYZsaFb",
"github": "https://github.com/top-gg/Luca",
"longdesc": "Luca only works in the **Discord Bot List** server. \nPrepend commands with the prefix `-` or "
"`@Luca#1375`. \n**Please refrain from using these commands in non testing channels.**\n- `botinfo "
"@bot` Shows bot info, title redirects to site listing.\n- `bots @user`* Shows all bots of that user, "
"includes bots in the queue.\n- `owner / -owners @bot`* Shows all owners of that bot.\n- `prefix "
"@bot`* Shows the prefix of that bot.\n* Mobile friendly version exists. Just add `noembed` to the "
"end of the command.\n",
"shortdesc": "Luca is a bot for managing and informing members of the server",
"prefix": "- or @Luca#1375",
"lib": None,
"clientid": "264811613708746752",
"avatar": "7edcc4c6fbb0b23762455ca139f0e1c9",
"id": "264811613708746752",
"discriminator": "1375",
"username": "Luca",
"date": "2017-04-26T18:08:17.125Z",
"server_count": 2,
"guilds": ["417723229721853963", "264445053596991498"],
"shards": [],
"monthlyPoints": 19,
"points": 397,
"certifiedBot": False,
"owners": ["129908908096487424"],
"tags": ["Moderation", "Role Management", "Logging"],
"donatebotguildid": "",
}
query_dict = {"qwe": "1", "rty": "2", "uio": "3"}
vote_data_dict = {
"type": "test",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
"user": "1",
}
bot_vote_dict = {
"bot": "2",
"user": "3",
"type": "test",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
}
server_vote_dict = {
"guild": "4",
"user": "5",
"type": "upvote",
"query": "?" + "&".join(f"{k}={v}" for k, v in query_dict.items()),
}
user_data_dict = {
"discriminator": "0001",
"avatar": "a_1241439d430def25c100dd28add2d42f",
"id": "140862798832861184",
"username": "Xetera",
"defAvatar": "322c936a8c8be1b803cd94861bdfa868",
"admin": True,
"webMod": True,
"mod": True,
"certifiedDev": False,
"supporter": False,
"social": {},
}
bot_stats_dict = {"shards": [1, 5, 8]}
@pytest.fixture
def data_dict() -> types.DataDict:
return types.DataDict(**d)
@pytest.fixture
def bot_data() -> types.BotData:
return types.BotData(**d)
@pytest.fixture
def user_data() -> types.UserData:
return types.UserData(**user_data_dict)
@pytest.fixture
def widget_options() -> types.WidgetOptions:
return types.WidgetOptions(id=int(d["id"]))
@pytest.fixture
def vote_data() -> types.VoteDataDict:
return types.VoteDataDict(**vote_data_dict)
@pytest.fixture
def bot_vote_data() -> types.BotVoteData:
return types.BotVoteData(**bot_vote_dict)
@pytest.fixture
def server_vote_data() -> types.ServerVoteData:
return types.ServerVoteData(**server_vote_dict)
@pytest.fixture
def bot_stats_data() -> types.BotStatsData:
return types.BotStatsData(**bot_stats_dict)
def test_data_dict_fields(data_dict: types.DataDict) -> None:
for attr in data_dict:
if "id" in attr.lower():
assert isinstance(data_dict[attr], int) or data_dict[attr] is None
assert data_dict.get(attr) == data_dict[attr] == getattr(data_dict, attr)
def test_bot_data_fields(bot_data: types.BotData) -> None:
bot_data.github = "I'm a GitHub link!"
bot_data.support = "Support has arrived!"
for attr in bot_data:
if "id" in attr.lower():
assert isinstance(bot_data[attr], int) or bot_data[attr] is None
elif attr in ("owners", "guilds"):
for item in bot_data[attr]:
assert isinstance(item, int)
assert bot_data.get(attr) == bot_data[attr] == getattr(bot_data, attr)
def test_widget_options_fields(widget_options: types.WidgetOptions) -> None:
assert widget_options["colors"] == widget_options["colours"]
widget_options.colours = {"background": 0}
widget_options["colours"]["text"] = 255
assert widget_options.colours == widget_options["colors"]
for attr in widget_options:
if "id" in attr.lower():
assert isinstance(widget_options[attr], int) or widget_options[attr] is None
assert (
widget_options.get(attr)
== widget_options[attr]
== widget_options[attr]
== getattr(widget_options, attr)
)
def test_vote_data_fields(vote_data: types.VoteDataDict) -> None:
assert isinstance(vote_data.query, dict)
vote_data.type = "upvote"
for attr in vote_data:
assert getattr(vote_data, attr) == vote_data.get(attr) == vote_data[attr]
def test_bot_vote_data_fields(bot_vote_data: types.BotVoteData) -> None:
assert isinstance(bot_vote_data.query, dict)
bot_vote_data.type = "upvote"
assert isinstance(bot_vote_data["bot"], int)
for attr in bot_vote_data:
assert (
getattr(bot_vote_data, attr)
== bot_vote_data.get(attr)
== bot_vote_data[attr]
)
def test_server_vote_data_fields(server_vote_data: types.BotVoteData) -> None:
assert isinstance(server_vote_data.query, dict)
server_vote_data.type = "upvote"
assert isinstance(server_vote_data["guild"], int)
for attr in server_vote_data:
assert (
getattr(server_vote_data, attr)
== server_vote_data.get(attr)
== server_vote_data[attr]
)
def test_bot_stats_data_attrs(bot_stats_data: types.BotStatsData) -> None:
for count in ("server_count", "shard_count"):
assert isinstance(bot_stats_data[count], int) or bot_stats_data[count] is None
assert isinstance(bot_stats_data.shards, list)
if bot_stats_data.shards:
for shard in bot_stats_data.shards:
assert isinstance(shard, int)
def test_user_data_attrs(user_data: types.UserData) -> None:
assert isinstance(user_data.social, types.SocialData)
for attr in user_data:
if "id" in attr.lower():
assert isinstance(user_data[attr], int) or user_data[attr] is None
assert user_data[attr] == getattr(user_data, attr) == user_data.get(attr) | 0.553023 | 0.5119 |
from homeassistant.helpers.storage import Store
from homeassistant.core import callback
from .const import DOMAIN
ENTITY_MAP_STORAGE_KEY = '{}-entity-map'.format(DOMAIN)
ENTITY_MAP_STORAGE_VERSION = 1
ENTITY_MAP_SAVE_DELAY = 10
class EntityMapStorage:
"""
Holds a cache of entity structure data from a paired HomeKit device.
HomeKit has a cacheable entity map that describes how an IP or BLE
endpoint is structured. This object holds the latest copy of that data.
An endpoint is made of accessories, services and characteristics. It is
safe to cache this data until the c# discovery data changes.
Caching this data means we can add HomeKit devices to HA immediately at
start even if discovery hasn't seen them yet or they are out of range. It
is also important for BLE devices - accessing the entity structure is
very slow for these devices.
"""
def __init__(self, hass):
"""Create a new entity map store."""
self.hass = hass
self.store = Store(
hass,
ENTITY_MAP_STORAGE_VERSION,
ENTITY_MAP_STORAGE_KEY
)
self.storage_data = {}
async def async_initialize(self):
"""Get the pairing cache data."""
raw_storage = await self.store.async_load()
if not raw_storage:
# There is no cached data about HomeKit devices yet
return
self.storage_data = raw_storage.get('pairings', {})
def get_map(self, homekit_id):
"""Get a pairing cache item."""
return self.storage_data.get(homekit_id)
def async_create_or_update_map(self, homekit_id, config_num, accessories):
"""Create a new pairing cache."""
data = {
'config_num': config_num,
'accessories': accessories,
}
self.storage_data[homekit_id] = data
self._async_schedule_save()
return data
def async_delete_map(self, homekit_id):
"""Delete pairing cache."""
if homekit_id not in self.storage_data:
return
self.storage_data.pop(homekit_id)
self._async_schedule_save()
@callback
def _async_schedule_save(self):
"""Schedule saving the entity map cache."""
self.store.async_delay_save(self._data_to_save, ENTITY_MAP_SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of entity map to store in a file."""
return {
'pairings': self.storage_data,
} | homeassistant/components/homekit_controller/storage.py |
from homeassistant.helpers.storage import Store
from homeassistant.core import callback
from .const import DOMAIN
ENTITY_MAP_STORAGE_KEY = '{}-entity-map'.format(DOMAIN)
ENTITY_MAP_STORAGE_VERSION = 1
ENTITY_MAP_SAVE_DELAY = 10
class EntityMapStorage:
"""
Holds a cache of entity structure data from a paired HomeKit device.
HomeKit has a cacheable entity map that describes how an IP or BLE
endpoint is structured. This object holds the latest copy of that data.
An endpoint is made of accessories, services and characteristics. It is
safe to cache this data until the c# discovery data changes.
Caching this data means we can add HomeKit devices to HA immediately at
start even if discovery hasn't seen them yet or they are out of range. It
is also important for BLE devices - accessing the entity structure is
very slow for these devices.
"""
def __init__(self, hass):
"""Create a new entity map store."""
self.hass = hass
self.store = Store(
hass,
ENTITY_MAP_STORAGE_VERSION,
ENTITY_MAP_STORAGE_KEY
)
self.storage_data = {}
async def async_initialize(self):
"""Get the pairing cache data."""
raw_storage = await self.store.async_load()
if not raw_storage:
# There is no cached data about HomeKit devices yet
return
self.storage_data = raw_storage.get('pairings', {})
def get_map(self, homekit_id):
"""Get a pairing cache item."""
return self.storage_data.get(homekit_id)
def async_create_or_update_map(self, homekit_id, config_num, accessories):
"""Create a new pairing cache."""
data = {
'config_num': config_num,
'accessories': accessories,
}
self.storage_data[homekit_id] = data
self._async_schedule_save()
return data
def async_delete_map(self, homekit_id):
"""Delete pairing cache."""
if homekit_id not in self.storage_data:
return
self.storage_data.pop(homekit_id)
self._async_schedule_save()
@callback
def _async_schedule_save(self):
"""Schedule saving the entity map cache."""
self.store.async_delay_save(self._data_to_save, ENTITY_MAP_SAVE_DELAY)
@callback
def _data_to_save(self):
"""Return data of entity map to store in a file."""
return {
'pairings': self.storage_data,
} | 0.829837 | 0.260337 |
import logging
import numpy as np
__all__ = ["function_1d", "integrated_time", "AutocorrError"]
logger = logging.getLogger(__name__)
def next_pow_two(n):
"""Returns the next power of two greater than or equal to `n`"""
i = 1
while i < n:
i = i << 1
return i
def function_1d(x):
"""Estimate the normalized autocorrelation function of a 1-D series
Args:
x: The series as a 1-D numpy array.
Returns:
array: The autocorrelation function of the time series.
"""
x = np.atleast_1d(x)
if len(x.shape) != 1:
raise ValueError("invalid dimensions for 1D autocorrelation function")
n = next_pow_two(len(x))
# Compute the FFT and then (from that) the auto-correlation function
f = np.fft.fft(x - np.mean(x), n=2 * n)
acf = np.fft.ifft(f * np.conjugate(f))[: len(x)].real
acf /= acf[0]
return acf
def auto_window(taus, c):
m = np.arange(len(taus)) < c * taus
if np.any(m):
return np.argmin(m)
return len(taus) - 1
def integrated_time(x, c=5, tol=50, quiet=False):
"""Estimate the integrated autocorrelation time of a time series.
This estimate uses the iterative procedure described on page 16 of
`Sokal's notes <https://www.semanticscholar.org/paper/Monte-Carlo-Methods-in-Statistical-Mechanics%3A-and-Sokal/0bfe9e3db30605fe2d4d26e1a288a5e2997e7225>`_ to
determine a reasonable window size.
Args:
x: The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for
every other axis.
c (Optional[float]): The step size for the window search. (default:
``5``)
tol (Optional[float]): The minimum number of autocorrelation times
needed to trust the estimate. (default: ``50``)
quiet (Optional[bool]): This argument controls the behavior when the
chain is too short. If ``True``, give a warning instead of raising
an :class:`AutocorrError`. (default: ``False``)
Returns:
float or array: An estimate of the integrated autocorrelation time of
the time series ``x`` computed along the axis ``axis``.
Raises
AutocorrError: If the autocorrelation time can't be reliably estimated
from the chain and ``quiet`` is ``False``. This normally means
that the chain is too short.
"""
x = np.atleast_1d(x)
if len(x.shape) == 1:
x = x[:, np.newaxis, np.newaxis]
if len(x.shape) == 2:
x = x[:, :, np.newaxis]
if len(x.shape) != 3:
raise ValueError("invalid dimensions")
n_t, n_w, n_d = x.shape
tau_est = np.empty(n_d)
windows = np.empty(n_d, dtype=int)
# Loop over parameters
for d in range(n_d):
f = np.zeros(n_t)
for k in range(n_w):
f += function_1d(x[:, k, d])
f /= n_w
taus = 2.0 * np.cumsum(f) - 1.0
windows[d] = auto_window(taus, c)
tau_est[d] = taus[windows[d]]
# Check convergence
flag = tol * tau_est > n_t
# Warn or raise in the case of non-convergence
if np.any(flag):
msg = (
"The chain is shorter than {0} times the integrated "
"autocorrelation time for {1} parameter(s). Use this estimate "
"with caution and run a longer chain!\n"
).format(tol, np.sum(flag))
msg += "N/{0} = {1:.0f};\ntau: {2}".format(tol, n_t / tol, tau_est)
if not quiet:
raise AutocorrError(tau_est, msg)
logger.warning(msg)
return tau_est
class AutocorrError(Exception):
"""Raised if the chain is too short to estimate an autocorrelation time.
The current estimate of the autocorrelation time can be accessed via the
``tau`` attribute of this exception.
"""
def __init__(self, tau, *args, **kwargs):
self.tau = tau
super(AutocorrError, self).__init__(*args, **kwargs) | refnx/_lib/emcee/autocorr.py |
import logging
import numpy as np
__all__ = ["function_1d", "integrated_time", "AutocorrError"]
logger = logging.getLogger(__name__)
def next_pow_two(n):
"""Returns the next power of two greater than or equal to `n`"""
i = 1
while i < n:
i = i << 1
return i
def function_1d(x):
"""Estimate the normalized autocorrelation function of a 1-D series
Args:
x: The series as a 1-D numpy array.
Returns:
array: The autocorrelation function of the time series.
"""
x = np.atleast_1d(x)
if len(x.shape) != 1:
raise ValueError("invalid dimensions for 1D autocorrelation function")
n = next_pow_two(len(x))
# Compute the FFT and then (from that) the auto-correlation function
f = np.fft.fft(x - np.mean(x), n=2 * n)
acf = np.fft.ifft(f * np.conjugate(f))[: len(x)].real
acf /= acf[0]
return acf
def auto_window(taus, c):
m = np.arange(len(taus)) < c * taus
if np.any(m):
return np.argmin(m)
return len(taus) - 1
def integrated_time(x, c=5, tol=50, quiet=False):
"""Estimate the integrated autocorrelation time of a time series.
This estimate uses the iterative procedure described on page 16 of
`Sokal's notes <https://www.semanticscholar.org/paper/Monte-Carlo-Methods-in-Statistical-Mechanics%3A-and-Sokal/0bfe9e3db30605fe2d4d26e1a288a5e2997e7225>`_ to
determine a reasonable window size.
Args:
x: The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for
every other axis.
c (Optional[float]): The step size for the window search. (default:
``5``)
tol (Optional[float]): The minimum number of autocorrelation times
needed to trust the estimate. (default: ``50``)
quiet (Optional[bool]): This argument controls the behavior when the
chain is too short. If ``True``, give a warning instead of raising
an :class:`AutocorrError`. (default: ``False``)
Returns:
float or array: An estimate of the integrated autocorrelation time of
the time series ``x`` computed along the axis ``axis``.
Raises
AutocorrError: If the autocorrelation time can't be reliably estimated
from the chain and ``quiet`` is ``False``. This normally means
that the chain is too short.
"""
x = np.atleast_1d(x)
if len(x.shape) == 1:
x = x[:, np.newaxis, np.newaxis]
if len(x.shape) == 2:
x = x[:, :, np.newaxis]
if len(x.shape) != 3:
raise ValueError("invalid dimensions")
n_t, n_w, n_d = x.shape
tau_est = np.empty(n_d)
windows = np.empty(n_d, dtype=int)
# Loop over parameters
for d in range(n_d):
f = np.zeros(n_t)
for k in range(n_w):
f += function_1d(x[:, k, d])
f /= n_w
taus = 2.0 * np.cumsum(f) - 1.0
windows[d] = auto_window(taus, c)
tau_est[d] = taus[windows[d]]
# Check convergence
flag = tol * tau_est > n_t
# Warn or raise in the case of non-convergence
if np.any(flag):
msg = (
"The chain is shorter than {0} times the integrated "
"autocorrelation time for {1} parameter(s). Use this estimate "
"with caution and run a longer chain!\n"
).format(tol, np.sum(flag))
msg += "N/{0} = {1:.0f};\ntau: {2}".format(tol, n_t / tol, tau_est)
if not quiet:
raise AutocorrError(tau_est, msg)
logger.warning(msg)
return tau_est
class AutocorrError(Exception):
"""Raised if the chain is too short to estimate an autocorrelation time.
The current estimate of the autocorrelation time can be accessed via the
``tau`` attribute of this exception.
"""
def __init__(self, tau, *args, **kwargs):
self.tau = tau
super(AutocorrError, self).__init__(*args, **kwargs) | 0.901187 | 0.607372 |
import unidecode
import torch
from torch.autograd import Variable
from collections import Counter
import observations
import os
import pickle
import sys
sys.path.append("../")
from model import *
def read_file(filename):
file = unidecode.unidecode(open(filename).read())
return file, len(file)
class Dictionary(object):
def __init__(self):
self.char2idx = {}
self.idx2char = []
self.counter = Counter()
def add_word(self, char):
self.counter[char] += 1
def prep_dict(self):
for char in self.counter:
if char not in self.char2idx:
self.idx2char.append(char)
self.char2idx[char] = len(self.idx2char) - 1
def __len__(self):
return len(self.idx2char)
class Corpus(object):
def __init__(self, string):
self.dictionary = Dictionary()
for c in string:
self.dictionary.add_word(c)
self.dictionary.prep_dict()
def char_tensor(corpus, string):
tensor = torch.zeros(len(string)).long()
for i in range(len(string)):
tensor[i] = corpus.dictionary.char2idx[string[i]]
return tensor.cuda()
def repackage_hidden4(h):
"""Wraps hidden states in new Tensors,
to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if torch.__version__ == '0.4.0':
return repackage_hidden4(h)
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, batch_size, args):
"""The output should have size [L x batch_size], where L could be a long sequence length"""
# Work out how cleanly we can divide the dataset into batch_size parts (i.e. continuous seqs).
nbatch = data.size(0) // batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * batch_size)
# Evenly divide the data across the batch_size batches.
data = data.view(batch_size, -1).t().contiguous()
if args.cuda:
data = data.cuda()
return data
def get_batch(source, i, seq_len, evaluation=False):
"""Variable `source` has dimension (L, N)"""
seq_len = min(seq_len, source.size(0) - 1 - i)
data = Variable(source[i:i + seq_len], volatile=evaluation)
target = Variable(source[i + 1:i + 1 + seq_len]) # CAUTION: This is un-flattened!
return data, target
def save(model, args):
save_filename = args.name + ".pt"
torch.save(model, save_filename)
print('Saved as %s' % save_filename) | TrellisNet/char_PTB/utils.py | import unidecode
import torch
from torch.autograd import Variable
from collections import Counter
import observations
import os
import pickle
import sys
sys.path.append("../")
from model import *
def read_file(filename):
file = unidecode.unidecode(open(filename).read())
return file, len(file)
class Dictionary(object):
def __init__(self):
self.char2idx = {}
self.idx2char = []
self.counter = Counter()
def add_word(self, char):
self.counter[char] += 1
def prep_dict(self):
for char in self.counter:
if char not in self.char2idx:
self.idx2char.append(char)
self.char2idx[char] = len(self.idx2char) - 1
def __len__(self):
return len(self.idx2char)
class Corpus(object):
def __init__(self, string):
self.dictionary = Dictionary()
for c in string:
self.dictionary.add_word(c)
self.dictionary.prep_dict()
def char_tensor(corpus, string):
tensor = torch.zeros(len(string)).long()
for i in range(len(string)):
tensor[i] = corpus.dictionary.char2idx[string[i]]
return tensor.cuda()
def repackage_hidden4(h):
"""Wraps hidden states in new Tensors,
to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if torch.__version__ == '0.4.0':
return repackage_hidden4(h)
if type(h) == Variable:
return Variable(h.data)
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, batch_size, args):
"""The output should have size [L x batch_size], where L could be a long sequence length"""
# Work out how cleanly we can divide the dataset into batch_size parts (i.e. continuous seqs).
nbatch = data.size(0) // batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * batch_size)
# Evenly divide the data across the batch_size batches.
data = data.view(batch_size, -1).t().contiguous()
if args.cuda:
data = data.cuda()
return data
def get_batch(source, i, seq_len, evaluation=False):
"""Variable `source` has dimension (L, N)"""
seq_len = min(seq_len, source.size(0) - 1 - i)
data = Variable(source[i:i + seq_len], volatile=evaluation)
target = Variable(source[i + 1:i + 1 + seq_len]) # CAUTION: This is un-flattened!
return data, target
def save(model, args):
save_filename = args.name + ".pt"
torch.save(model, save_filename)
print('Saved as %s' % save_filename) | 0.611498 | 0.266894 |
import sys
import os
import IceRayCpp
print( '********************' )
#print( IceRayCpp.__dict__ )
print('********************' )
print('********************' )
def print_coord(P_cord):
sys.stdout.write( '(' + str( P_cord[0] ) + ', ' + str( P_cord[1] ) + ', ' + str( P_cord[2] ) + ')' )
def print_matrix(P_matrix):
sys.stdout.write( '( ' )
sys.stdout.write( ' ( ' + str( P_matrix.element( 0, 0 ) ) + ', ' + str( P_matrix.element( 0, 1 ) ) + ', ' + str( P_matrix.element( 0, 2 ) ) + ' ), \n' )
sys.stdout.write( ' ( ' + str( P_matrix.element( 1, 0 ) ) + ', ' + str( P_matrix.element( 1, 1 ) ) + ', ' + str( P_matrix.element( 1, 2 ) ) + ' ), \n' )
sys.stdout.write( ' ( ' + str( P_matrix.element( 2, 0 ) ) + ', ' + str( P_matrix.element( 2, 1 ) ) + ', ' + str( P_matrix.element( 2, 2 ) ) + ' )\n' )
sys.stdout.write( ' )' )
def print_affine(P_affine):
sys.stdout.write( '(\n ' )
print_coord(P_affine.coord())
print
print_matrix(P_affine.matrix())
sys.stdout.write( ') ' )
affine1 = IceRayCpp.MathTypeAffine3D()
affine1.coord( )
affine1.coord( IceRayCpp.MathTypeCoord3D().load(10,10,10) )
print_coord( affine1.coord( ) )
affine1.matrix( IceRayCpp.MathTypeMatrix3D( ) )
print_affine(affine1)
affine1.row( 0, IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ) ); print; print_coord( affine1.row(0) )
affine1.row( 1, IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ) ); print; print_coord( affine1.row(1) )
affine1.row( 2, IceRayCpp.MathTypeCoord3D().load( 7, 8, 9 ) ); print; print_coord( affine1.row(2) )
print
affine1.column( 0, IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ) ); print_coord( affine1.column(0) );
affine1.column( 1, IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ) ); print_coord( affine1.column(1) );
affine1.column( 2, IceRayCpp.MathTypeCoord3D().load( 7, 8, 9 ) ); print_coord( affine1.column(2) );
print
print_affine(affine1)
print
print( '********************')
affine1.load( IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ), IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ), IceRayCpp.MathTypeCoord3D().load( 7, 8, -9 ), IceRayCpp.MathTypeCoord3D().load( 11, 12, -13 ) )
print
print( '********************')
print_affine(affine1)
print( '********************')
affine2 = IceRayCpp.MathTypeAffine3D()
print( '********************')
print_affine(affine2)
print( '********************')
x1 = IceRayCpp.MathTypeCoord3D(); x1 .load( 1, -1, 1 )
x2 = IceRayCpp.MathTypeCoord3D(); x2 .load( 0, 0, 0 )
x3 = IceRayCpp.MathTypeCoord3D(); x3 .load( 0, 0, 1 )
print( '******** AAAAAAAAA ********' )
affine2 = IceRayCpp.MathAffine3D_lookAt( x1, x2, x3 )
print( '********************' )
print_affine(affine2) | example/test/library/math/affine/test.py | import sys
import os
import IceRayCpp
# Banner output marking the start of the manual test run.
print( '********************' )
#print( IceRayCpp.__dict__ )
print('********************' )
print('********************' )
def print_coord(P_cord):
    """Write the first three components of *P_cord* to stdout as "(x, y, z)"
    with no trailing newline."""
    components = [str(P_cord[index]) for index in range(3)]
    sys.stdout.write('(' + ', '.join(components) + ')')
def print_matrix(P_matrix):
    """Write a 3x3 matrix to stdout, one parenthesised row per line
    (no trailing newline). element(row, col) is assumed to return a scalar."""
    sys.stdout.write( '( ' )
    sys.stdout.write( '  ( ' + str( P_matrix.element( 0, 0 ) ) + ', ' + str( P_matrix.element( 0, 1 ) ) + ', ' + str( P_matrix.element( 0, 2 ) ) + ' ), \n' )
    sys.stdout.write( '  ( ' + str( P_matrix.element( 1, 0 ) ) + ', ' + str( P_matrix.element( 1, 1 ) ) + ', ' + str( P_matrix.element( 1, 2 ) ) + ' ), \n' )
    # Last row has no trailing comma so the closing paren lines up beneath it.
    sys.stdout.write( '  ( ' + str( P_matrix.element( 2, 0 ) ) + ', ' + str( P_matrix.element( 2, 1 ) ) + ', ' + str( P_matrix.element( 2, 2 ) ) + ' )\n' )
    sys.stdout.write( '  )' )
def print_affine(P_affine):
    """Write an affine transform to stdout: its translation coordinate followed
    by its 3x3 matrix, wrapped in parentheses (no trailing newline)."""
    sys.stdout.write( '(\n  ' )
    print_coord(P_affine.coord())
    # BUG FIX: a bare `print` expression is a no-op under Python 3 (it only
    # printed a newline under Python 2); call it so the newline is emitted.
    print()
    print_matrix(P_affine.matrix())
    sys.stdout.write( ') ' )
# Exercise MathTypeAffine3D: coord/matrix accessors, row/column setters,
# load(), and the lookAt factory. Output is inspected by eye / golden log.
# NOTE(review): the bare `print` expressions below only emit a newline under
# Python 2; under Python 3 they are no-ops — confirm the intended interpreter.
affine1 = IceRayCpp.MathTypeAffine3D()
affine1.coord( )
affine1.coord( IceRayCpp.MathTypeCoord3D().load(10,10,10) )
print_coord( affine1.coord( ) )
affine1.matrix( IceRayCpp.MathTypeMatrix3D( ) )
print_affine(affine1)
affine1.row( 0, IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ) ); print; print_coord( affine1.row(0) )
affine1.row( 1, IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ) ); print; print_coord( affine1.row(1) )
affine1.row( 2, IceRayCpp.MathTypeCoord3D().load( 7, 8, 9 ) ); print; print_coord( affine1.row(2) )
print
affine1.column( 0, IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ) ); print_coord( affine1.column(0) );
affine1.column( 1, IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ) ); print_coord( affine1.column(1) );
affine1.column( 2, IceRayCpp.MathTypeCoord3D().load( 7, 8, 9 ) ); print_coord( affine1.column(2) );
print
print_affine(affine1)
print
print( '********************')
# presumably load(row0, row1, row2, translation) — confirm against the C++ binding.
affine1.load( IceRayCpp.MathTypeCoord3D().load( 1, 2, 3 ), IceRayCpp.MathTypeCoord3D().load( 4, 5, 6 ), IceRayCpp.MathTypeCoord3D().load( 7, 8, -9 ), IceRayCpp.MathTypeCoord3D().load( 11, 12, -13 ) )
print
print( '********************')
print_affine(affine1)
print( '********************')
affine2 = IceRayCpp.MathTypeAffine3D()
print( '********************')
print_affine(affine2)
print( '********************')
# NOTE(review): looks like eye / target / up-vector inputs for lookAt — confirm.
x1 = IceRayCpp.MathTypeCoord3D(); x1 .load( 1, -1, 1 )
x2 = IceRayCpp.MathTypeCoord3D(); x2 .load( 0, 0, 0 )
x3 = IceRayCpp.MathTypeCoord3D(); x3 .load( 0, 0, 1 )
print( '******** AAAAAAAAA ********' )
affine2 = IceRayCpp.MathAffine3D_lookAt( x1, x2, x3 )
print( '********************' )
print_affine(affine2)
import datetime
import json
from components import auth
from components import auth_testing
from testing_utils import testing
from legacy import api_common
from legacy import swarmbucket_api
from proto import service_config_pb2
from test import test_util
from test.test_util import future
import config
import model
import sequence
import swarming
import user
class SwarmbucketApiTest(testing.EndpointsTestCase):
api_service_cls = swarmbucket_api.SwarmbucketApi
maxDiff = None
def setUp(self):
super(SwarmbucketApiTest, self).setUp()
self.patch(
'components.utils.utcnow',
autospec=True,
return_value=datetime.datetime(2015, 11, 30)
)
self.patch(
'google.appengine.api.app_identity.get_default_version_hostname',
return_value='cr-buildbucket.appspot.com'
)
self.patch('creation._should_be_canary', side_effect=lambda p: p > 50)
auth_testing.reset_local_state()
auth.bootstrap_group('all', [auth.Anonymous])
user.clear_request_cache()
chromium_cfg = test_util.parse_bucket_cfg(
'''
name: "luci.chromium.try"
acls {
role: SCHEDULER
group: "all"
}
swarming {
hostname: "swarming.example.com"
builders {
name: "linux"
swarming_host: "swarming.example.com"
category: "Chromium"
build_numbers: YES
recipe {
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
name: "presubmit"
properties: "foo:bar"
properties_j: "baz:1"
}
dimensions: "foo:bar"
dimensions: "baz:baz"
auto_builder_dimension: YES
# Override builder cache without timeout to make tests
# simpler.
caches {
path: "builder"
name: "builder_cache_name"
}
}
builders {
name: "windows"
category: "Chromium"
swarming_host: "swarming.example.com"
recipe {
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
name: "presubmit"
}
# Override builder cache without timeout to make tests
# simpler.
caches {
path: "builder"
name: "builder_cache_name"
}
}
}
'''
)
config.put_bucket('chromium', 'deadbeef', chromium_cfg)
v8_cfg = test_util.parse_bucket_cfg(
'''
name: "luci.v8.try"
acls {
role: READER
group: "all"
}
'''
)
config.put_bucket('v8', 'deadbeef', v8_cfg)
props_def = {
'extra_args': [
'cook',
'-recipe',
'${recipe}',
'-properties',
'${properties_json}',
'-logdog-project',
'${project}',
],
'cipd_input': {
'packages': [
{
'package_name': 'infra/test/bar/${os_ver}',
'path': '.',
'version': 'latest',
},
{
'package_name': 'infra/test/foo/${platform}',
'path': 'third_party',
'version': 'stable',
},
],
},
}
self.task_template = {
'name': 'bb-${build_id}-${project}-${builder}',
'task_slices': [{
'properties': props_def,
'wait_for_capacity': False,
}],
}
self.patch(
'swarming._get_task_template',
autospec=True,
return_value=('rev', self.task_template),
)
self.settings = service_config_pb2.SettingsCfg(
swarming=dict(
milo_hostname='milo.example.com',
luci_runner_package=dict(
package_name='infra/tools/luci_runner',
version='luci-runner-version',
),
kitchen_package=dict(
package_name='infra/tools/kitchen',
version='kitchen-version',
),
user_packages=[
dict(
package_name='infra/tools/git',
version='git-version',
),
],
),
logdog=dict(hostname='logdog.example.com'),
)
self.patch(
'config.get_settings_async',
autospec=True,
return_value=future(self.settings),
)
def test_get_builders(self):
    """get_builders lists only visible buckets/builders with resolved properties."""
    # A bucket the anonymous caller cannot read must be absent from the response.
    secret_cfg = 'name: "secret"'
    config.put_bucket(
        'secret', 'deadbeef', test_util.parse_bucket_cfg(secret_cfg)
    )
    resp = self.call_api('get_builders').json_body
    self.assertEqual(
        test_util.ununicode(resp),
        {
            'buckets': [{
                'name':
                    'luci.chromium.try',
                'swarming_hostname':
                    'swarming.example.com',
                'builders': [
                    {
                        'name':
                            'linux',
                        'category':
                            'Chromium',
                        # `properties` and `properties_j` from setUp's config are merged.
                        'properties_json':
                            json.dumps({'foo': 'bar', 'baz': 1}),
                        'swarming_hostname':
                            'swarming.example.com',
                        # auto_builder_dimension adds builder:linux to the configured dims.
                        'swarming_dimensions': [
                            'baz:baz', 'builder:linux', 'foo:bar'
                        ],
                    },
                    {
                        'name': 'windows',
                        'category': 'Chromium',
                        'properties_json': json.dumps({}),
                        'swarming_hostname': 'swarming.example.com',
                    },
                ],
            }],
        },
    )
def test_get_builders_with_bucket_filtering(self):
# Add a second bucket with a different name.
other_bucket = '''
name: "luci.other.try"
acls {
role: SCHEDULER
group: "all"
}
swarming {
hostname: "swarming.example.com"
builders {
name: "a"
swarming_host: "swarming.example.com"
}
}
'''
config.put_bucket(
'other', 'deadbeef', test_util.parse_bucket_cfg(other_bucket)
)
req = {
'bucket': ['luci.chromium.try'],
}
resp = self.call_api('get_builders', req).json_body
self.assertEqual(
test_util.ununicode(resp),
{
'buckets': [{
'name':
'luci.chromium.try',
'swarming_hostname':
'swarming.example.com',
'builders': [
{
'name':
'linux',
'category':
'Chromium',
'properties_json':
json.dumps({'foo': 'bar', 'baz': 1}),
'swarming_hostname':
'swarming.example.com',
'swarming_dimensions': [
'baz:baz',
'builder:linux',
'foo:bar',
],
},
{
'name': 'windows',
'category': 'Chromium',
'properties_json': json.dumps({}),
'swarming_hostname': 'swarming.example.com',
},
],
}],
},
)
def test_get_builders_bad_request(self):
    """A malformed bucket name must be rejected with HTTP 400."""
    malformed_request = {'bucket': ['luci..x']}
    self.call_api('get_builders', malformed_request, status=400)
def test_get_builders_with_bucket_filtering_limit(self):
    """Requesting more buckets than the allowed maximum must fail with HTTP 400."""
    oversized_request = {'bucket': ['luci.chromium.try'] * 200}
    self.call_api('get_builders', oversized_request, status=400)
def test_get_task_def(self):
self.patch(
'tokens.generate_build_token',
autospec=True,
return_value='beeff00d',
)
req = {
'build_request': {
'bucket': 'luci.chromium.try',
'parameters_json': json.dumps({model.BUILDER_PARAMETER: 'linux'}),
},
}
resp = self.call_api('get_task_def', req).json_body
actual_task_def = json.loads(resp['task_definition'])
props_def = {
'env': [{'key': 'BUILDBUCKET_EXPERIMENTAL', 'value': 'FALSE'}],
'extra_args': [
'cook',
'-recipe',
'presubmit',
'-properties',
api_common.properties_to_json({
'recipe': 'presubmit',
'buildbucket': {
'hostname': 'cr-buildbucket.appspot.com',
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.try',
'created_by': 'anonymous:anonymous',
'created_ts': 1448841600000000,
'id': '1',
'tags': ['builder:linux'],
},
},
'$recipe_engine/buildbucket': {
'hostname': 'cr-buildbucket.appspot.com',
'build': {
'id': '1',
'builder': {
'project': 'chromium',
'bucket': 'try',
'builder': 'linux',
},
'number': 1,
'createdBy': 'anonymous:anonymous',
'createTime': '2015-11-30T00:00:00Z',
'schedulingTimeout': '21600s',
'executionTimeout': '10800s',
'exe': {
'cipdPackage': 'infra/recipe_bundle',
'cipdVersion': 'refs/heads/master',
},
'input': {},
'infra': {
'buildbucket': {},
'swarming': {
'hostname':
'swarming.example.com',
'priority':
30,
'taskDimensions': [
{
'key': 'baz',
'value': 'baz',
'expiration': '0s',
},
{
'key': 'builder',
'value': 'linux',
'expiration': '0s',
},
{
'key': 'foo',
'value': 'bar',
'expiration': '0s',
},
],
'caches': [{
'path': 'builder',
'name': 'builder_cache_name',
'waitForWarmCache': '0s',
}],
},
'logdog': {
'hostname':
'logdog.example.com',
'project':
'chromium',
'prefix': (
'buildbucket/cr-buildbucket.appspot.com/1'
),
},
},
},
},
'$recipe_engine/runtime': {
'is_experimental': False,
'is_luci': True,
},
'foo': 'bar',
'baz': 1,
'buildername': 'linux',
'buildnumber': 1,
}),
'-logdog-project',
'chromium',
],
'execution_timeout_secs':
'10800',
'cipd_input': {
'packages': [
{
'package_name': 'infra/tools/luci_runner',
'path': '.',
'version': 'luci-runner-version',
},
{
'package_name': 'infra/tools/kitchen',
'path': '.',
'version': 'kitchen-version',
},
{
'package_name': 'infra/recipe_bundle',
'path': 'kitchen-checkout',
'version': 'refs/heads/master',
},
{
'package_name': 'infra/tools/git',
'path': swarming.USER_PACKAGE_DIR,
'version': 'git-version',
},
],
},
'dimensions': [
{'key': 'baz', 'value': 'baz'},
{'key': 'builder', 'value': 'linux'},
{'key': 'foo', 'value': 'bar'},
],
'caches': [{
'path': 'cache/builder',
'name': 'builder_cache_name',
}],
}
expected_task_def = {
'name':
'bb-1-chromium-linux',
'tags': [
'buildbucket_bucket:chromium/try',
'buildbucket_build_id:1',
'buildbucket_hostname:cr-buildbucket.appspot.com',
'buildbucket_template_canary:0',
'buildbucket_template_revision:rev',
'builder:linux',
'recipe_name:presubmit',
'recipe_package:infra/recipe_bundle',
],
'priority':
'30',
'pool_task_template':
'CANARY_NEVER',
'task_slices': [{
'expiration_secs': '21600',
'properties': props_def,
'wait_for_capacity': False,
}],
}
self.assertEqual(actual_task_def, expected_task_def)
self.assertEqual(resp['swarming_host'], 'swarming.example.com')
def test_get_task_def_bad_request(self):
    """A malformed bucket name in the build request must yield HTTP 400."""
    parameters = json.dumps({model.BUILDER_PARAMETER: 'linux'})
    bad_request = {
        'build_request': {
            'bucket': ')))',
            'parameters_json': parameters,
        },
    }
    self.call_api('get_task_def', bad_request, status=400)
def test_get_task_def_builder_not_found(self):
    """Requesting a task definition for an unknown builder yields HTTP 404."""
    parameters = json.dumps({model.BUILDER_PARAMETER: 'not-existing-builder'})
    request_body = {
        'build_request': {
            'bucket': 'luci.chromium.try',
            'parameters_json': parameters,
        },
    }
    self.call_api('get_task_def', request_body, status=404)
def test_get_task_def_forbidden(self):
    """A caller without access to the bucket gets HTTP 403."""
    request_body = {
        'build_id': '8982540789124571952',
        'build_request': {
            'bucket': 'secret.bucket',
            'parameters_json': json.dumps({model.BUILDER_PARAMETER: 'linux'}),
        },
    }
    self.call_api('get_task_def', request_body, status=403)
def test_set_next_build_number(self):
    """set_next_build_number enforces permission and forward-only updates."""
    # Seed the sequence at 10 so permission/validation effects are observable.
    seq = sequence.NumberSequence(id='chromium/try/linux', next_number=10)
    seq.put()
    req = {
        'bucket': 'luci.chromium.try',
        'builder': 'linux',
        'next_number': 20,
    }
    # Without permission the call is forbidden and the sequence is untouched.
    self.call_api('set_next_build_number', req, status=403)
    self.assertEqual(seq.key.get().next_number, 10)
    self.patch('user.can_set_next_number_async', return_value=future(True))
    # With permission, moving the number forward succeeds.
    self.call_api('set_next_build_number', req)
    self.assertEqual(seq.key.get().next_number, 20)
    # Moving the number backwards is rejected and leaves the sequence alone...
    req['next_number'] = 10
    self.call_api('set_next_build_number', req, status=400)
    self.assertEqual(seq.key.get().next_number, 20)
    # ...as is a builder that does not exist.
    req['builder'] = 'does not exist'
    self.call_api('set_next_build_number', req, status=400)
import datetime
import json
from components import auth
from components import auth_testing
from testing_utils import testing
from legacy import api_common
from legacy import swarmbucket_api
from proto import service_config_pb2
from test import test_util
from test.test_util import future
import config
import model
import sequence
import swarming
import user
class SwarmbucketApiTest(testing.EndpointsTestCase):
api_service_cls = swarmbucket_api.SwarmbucketApi
maxDiff = None
def setUp(self):
super(SwarmbucketApiTest, self).setUp()
self.patch(
'components.utils.utcnow',
autospec=True,
return_value=datetime.datetime(2015, 11, 30)
)
self.patch(
'google.appengine.api.app_identity.get_default_version_hostname',
return_value='cr-buildbucket.appspot.com'
)
self.patch('creation._should_be_canary', side_effect=lambda p: p > 50)
auth_testing.reset_local_state()
auth.bootstrap_group('all', [auth.Anonymous])
user.clear_request_cache()
chromium_cfg = test_util.parse_bucket_cfg(
'''
name: "luci.chromium.try"
acls {
role: SCHEDULER
group: "all"
}
swarming {
hostname: "swarming.example.com"
builders {
name: "linux"
swarming_host: "swarming.example.com"
category: "Chromium"
build_numbers: YES
recipe {
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
name: "presubmit"
properties: "foo:bar"
properties_j: "baz:1"
}
dimensions: "foo:bar"
dimensions: "baz:baz"
auto_builder_dimension: YES
# Override builder cache without timeout to make tests
# simpler.
caches {
path: "builder"
name: "builder_cache_name"
}
}
builders {
name: "windows"
category: "Chromium"
swarming_host: "swarming.example.com"
recipe {
cipd_package: "infra/recipe_bundle"
cipd_version: "refs/heads/master"
name: "presubmit"
}
# Override builder cache without timeout to make tests
# simpler.
caches {
path: "builder"
name: "builder_cache_name"
}
}
}
'''
)
config.put_bucket('chromium', 'deadbeef', chromium_cfg)
v8_cfg = test_util.parse_bucket_cfg(
'''
name: "luci.v8.try"
acls {
role: READER
group: "all"
}
'''
)
config.put_bucket('v8', 'deadbeef', v8_cfg)
props_def = {
'extra_args': [
'cook',
'-recipe',
'${recipe}',
'-properties',
'${properties_json}',
'-logdog-project',
'${project}',
],
'cipd_input': {
'packages': [
{
'package_name': 'infra/test/bar/${os_ver}',
'path': '.',
'version': 'latest',
},
{
'package_name': 'infra/test/foo/${platform}',
'path': 'third_party',
'version': 'stable',
},
],
},
}
self.task_template = {
'name': 'bb-${build_id}-${project}-${builder}',
'task_slices': [{
'properties': props_def,
'wait_for_capacity': False,
}],
}
self.patch(
'swarming._get_task_template',
autospec=True,
return_value=('rev', self.task_template),
)
self.settings = service_config_pb2.SettingsCfg(
swarming=dict(
milo_hostname='milo.example.com',
luci_runner_package=dict(
package_name='infra/tools/luci_runner',
version='luci-runner-version',
),
kitchen_package=dict(
package_name='infra/tools/kitchen',
version='kitchen-version',
),
user_packages=[
dict(
package_name='infra/tools/git',
version='git-version',
),
],
),
logdog=dict(hostname='logdog.example.com'),
)
self.patch(
'config.get_settings_async',
autospec=True,
return_value=future(self.settings),
)
def test_get_builders(self):
secret_cfg = 'name: "secret"'
config.put_bucket(
'secret', 'deadbeef', test_util.parse_bucket_cfg(secret_cfg)
)
resp = self.call_api('get_builders').json_body
self.assertEqual(
test_util.ununicode(resp),
{
'buckets': [{
'name':
'luci.chromium.try',
'swarming_hostname':
'swarming.example.com',
'builders': [
{
'name':
'linux',
'category':
'Chromium',
'properties_json':
json.dumps({'foo': 'bar', 'baz': 1}),
'swarming_hostname':
'swarming.example.com',
'swarming_dimensions': [
'baz:baz', 'builder:linux', 'foo:bar'
],
},
{
'name': 'windows',
'category': 'Chromium',
'properties_json': json.dumps({}),
'swarming_hostname': 'swarming.example.com',
},
],
}],
},
)
def test_get_builders_with_bucket_filtering(self):
# Add a second bucket with a different name.
other_bucket = '''
name: "luci.other.try"
acls {
role: SCHEDULER
group: "all"
}
swarming {
hostname: "swarming.example.com"
builders {
name: "a"
swarming_host: "swarming.example.com"
}
}
'''
config.put_bucket(
'other', 'deadbeef', test_util.parse_bucket_cfg(other_bucket)
)
req = {
'bucket': ['luci.chromium.try'],
}
resp = self.call_api('get_builders', req).json_body
self.assertEqual(
test_util.ununicode(resp),
{
'buckets': [{
'name':
'luci.chromium.try',
'swarming_hostname':
'swarming.example.com',
'builders': [
{
'name':
'linux',
'category':
'Chromium',
'properties_json':
json.dumps({'foo': 'bar', 'baz': 1}),
'swarming_hostname':
'swarming.example.com',
'swarming_dimensions': [
'baz:baz',
'builder:linux',
'foo:bar',
],
},
{
'name': 'windows',
'category': 'Chromium',
'properties_json': json.dumps({}),
'swarming_hostname': 'swarming.example.com',
},
],
}],
},
)
def test_get_builders_bad_request(self):
req = {
'bucket': ['luci..x'],
}
self.call_api('get_builders', req, status=400)
def test_get_builders_with_bucket_filtering_limit(self):
req = {
'bucket': ['luci.chromium.try'] * 200,
}
self.call_api('get_builders', req, status=400)
def test_get_task_def(self):
self.patch(
'tokens.generate_build_token',
autospec=True,
return_value='beeff00d',
)
req = {
'build_request': {
'bucket': 'luci.chromium.try',
'parameters_json': json.dumps({model.BUILDER_PARAMETER: 'linux'}),
},
}
resp = self.call_api('get_task_def', req).json_body
actual_task_def = json.loads(resp['task_definition'])
props_def = {
'env': [{'key': 'BUILDBUCKET_EXPERIMENTAL', 'value': 'FALSE'}],
'extra_args': [
'cook',
'-recipe',
'presubmit',
'-properties',
api_common.properties_to_json({
'recipe': 'presubmit',
'buildbucket': {
'hostname': 'cr-buildbucket.appspot.com',
'build': {
'project': 'chromium',
'bucket': 'luci.chromium.try',
'created_by': 'anonymous:anonymous',
'created_ts': 1448841600000000,
'id': '1',
'tags': ['builder:linux'],
},
},
'$recipe_engine/buildbucket': {
'hostname': 'cr-buildbucket.appspot.com',
'build': {
'id': '1',
'builder': {
'project': 'chromium',
'bucket': 'try',
'builder': 'linux',
},
'number': 1,
'createdBy': 'anonymous:anonymous',
'createTime': '2015-11-30T00:00:00Z',
'schedulingTimeout': '21600s',
'executionTimeout': '10800s',
'exe': {
'cipdPackage': 'infra/recipe_bundle',
'cipdVersion': 'refs/heads/master',
},
'input': {},
'infra': {
'buildbucket': {},
'swarming': {
'hostname':
'swarming.example.com',
'priority':
30,
'taskDimensions': [
{
'key': 'baz',
'value': 'baz',
'expiration': '0s',
},
{
'key': 'builder',
'value': 'linux',
'expiration': '0s',
},
{
'key': 'foo',
'value': 'bar',
'expiration': '0s',
},
],
'caches': [{
'path': 'builder',
'name': 'builder_cache_name',
'waitForWarmCache': '0s',
}],
},
'logdog': {
'hostname':
'logdog.example.com',
'project':
'chromium',
'prefix': (
'buildbucket/cr-buildbucket.appspot.com/1'
),
},
},
},
},
'$recipe_engine/runtime': {
'is_experimental': False,
'is_luci': True,
},
'foo': 'bar',
'baz': 1,
'buildername': 'linux',
'buildnumber': 1,
}),
'-logdog-project',
'chromium',
],
'execution_timeout_secs':
'10800',
'cipd_input': {
'packages': [
{
'package_name': 'infra/tools/luci_runner',
'path': '.',
'version': 'luci-runner-version',
},
{
'package_name': 'infra/tools/kitchen',
'path': '.',
'version': 'kitchen-version',
},
{
'package_name': 'infra/recipe_bundle',
'path': 'kitchen-checkout',
'version': 'refs/heads/master',
},
{
'package_name': 'infra/tools/git',
'path': swarming.USER_PACKAGE_DIR,
'version': 'git-version',
},
],
},
'dimensions': [
{'key': 'baz', 'value': 'baz'},
{'key': 'builder', 'value': 'linux'},
{'key': 'foo', 'value': 'bar'},
],
'caches': [{
'path': 'cache/builder',
'name': 'builder_cache_name',
}],
}
expected_task_def = {
'name':
'bb-1-chromium-linux',
'tags': [
'buildbucket_bucket:chromium/try',
'buildbucket_build_id:1',
'buildbucket_hostname:cr-buildbucket.appspot.com',
'buildbucket_template_canary:0',
'buildbucket_template_revision:rev',
'builder:linux',
'recipe_name:presubmit',
'recipe_package:infra/recipe_bundle',
],
'priority':
'30',
'pool_task_template':
'CANARY_NEVER',
'task_slices': [{
'expiration_secs': '21600',
'properties': props_def,
'wait_for_capacity': False,
}],
}
self.assertEqual(actual_task_def, expected_task_def)
self.assertEqual(resp['swarming_host'], 'swarming.example.com')
def test_get_task_def_bad_request(self):
req = {
'build_request': {
'bucket': ')))',
'parameters_json': json.dumps({
model.BUILDER_PARAMETER: 'linux',
}),
},
}
self.call_api('get_task_def', req, status=400)
def test_get_task_def_builder_not_found(self):
req = {
'build_request': {
'bucket':
'luci.chromium.try',
'parameters_json':
json.dumps({
model.BUILDER_PARAMETER: 'not-existing-builder',
}),
},
}
self.call_api('get_task_def', req, status=404)
def test_get_task_def_forbidden(self):
req = {
'build_id': '8982540789124571952',
'build_request': {
'bucket': 'secret.bucket',
'parameters_json': json.dumps({
model.BUILDER_PARAMETER: 'linux',
}),
},
}
self.call_api('get_task_def', req, status=403)
def test_set_next_build_number(self):
seq = sequence.NumberSequence(id='chromium/try/linux', next_number=10)
seq.put()
req = {
'bucket': 'luci.chromium.try',
'builder': 'linux',
'next_number': 20,
}
self.call_api('set_next_build_number', req, status=403)
self.assertEqual(seq.key.get().next_number, 10)
self.patch('user.can_set_next_number_async', return_value=future(True))
self.call_api('set_next_build_number', req)
self.assertEqual(seq.key.get().next_number, 20)
req['next_number'] = 10
self.call_api('set_next_build_number', req, status=400)
self.assertEqual(seq.key.get().next_number, 20)
req['builder'] = 'does not exist'
self.call_api('set_next_build_number', req, status=400) | 0.556882 | 0.100304 |
import numpy as np
from ..computation import Graph, Transformer, Constant
from .statistics import ArgMin, ArgMax
from .util import apply_to_axis
__all__ = [
'HasDuplicate',
'HasDuplicateMin',
'HasDuplicateMax',
'NumberUniqueValues',
'SumReoccurringDataPoints',
'SumReoccurringValues',
]
class NumberUniqueValues(Transformer):
    """Number of unique values along an axis.

    With ``rel=True`` (default) the count is divided by the number of non-NaN
    values, yielding a fraction in (0, 1]. NaN positions in the input stay NaN
    in the output.
    """

    def __init__(self, *parents, rel=True, axis=None, **kwargs):
        super(NumberUniqueValues, self).__init__(*parents, **kwargs)
        self.rel = rel
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
        ]

    def apply(self, x):
        def calculator1d(a):
            # Result keeps the input's shape; positions that were NaN stay NaN.
            nnan = ~np.isnan(a)
            result = np.full(a.shape, fill_value=np.nan)
            a = a[nnan]
            # FIX: `a` already has NaNs removed — the original re-filtered with
            # `a[~np.isnan(a)]`, a redundant second pass.
            n = a.size
            if n > 0:
                unique = np.unique(a).size
                if self.rel:
                    unique = unique / n
                # Broadcast the scalar over all non-NaN slots.
                result[nnan] = unique
            # FIX: the original else-branch wrote NaN into `result[nnan]`,
            # but when n == 0 the mask is empty and result is already all-NaN.
            return result

        def calculator(a):
            return np.apply_along_axis(calculator1d, -1, a)

        return apply_to_axis(calculator, x, axis=self.axis)
class HasDuplicate(Transformer):
    """Boolean transformer: does the series contain any repeated value?"""

    def __init__(self, *parents, axis=None, **kwargs):
        super(HasDuplicate, self).__init__(*parents, **kwargs)
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
        ]

    def graph(self, x):
        # A duplicate exists exactly when the fraction of unique values < 1.
        fraction_unique = NumberUniqueValues(x, rel=True, axis=self.axis)
        return Graph(fraction_unique < Constant(1))
class HasDuplicateMin(Transformer):
    """Boolean transformer: does the minimum value occur more than once?"""

    def __init__(self, *parents, axis=None, **kwargs):
        super(HasDuplicateMin, self).__init__(*parents, **kwargs)
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
        ]

    def graph(self, x):
        # If the first and last positions of the minimum differ, it is repeated.
        first_min = ArgMin(x, first=True, axis=self.axis)
        last_min = ArgMin(x, first=False, axis=self.axis)
        return Graph(first_min < last_min)
class HasDuplicateMax(Transformer):
    """Boolean transformer: does the maximum value occur more than once?"""

    def __init__(self, *parents, axis=None, **kwargs):
        super(HasDuplicateMax, self).__init__(*parents, **kwargs)
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
        ]

    def graph(self, x):
        # If the first and last positions of the maximum differ, it is repeated.
        first_max = ArgMax(x, first=True, axis=self.axis)
        last_max = ArgMax(x, first=False, axis=self.axis)
        return Graph(first_max < last_max)
class SumReoccurringValues(Transformer):
    """Sum of all values that occur more than once, each counted once."""

    def __init__(self, *parents, axis=None, **kwargs):
        super(SumReoccurringValues, self).__init__(*parents, **kwargs)
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
            lambda x: np.issubdtype(x.dtype, np.float64),
        ]

    def apply(self, x):
        def calculator1d(a):
            # NaN positions stay NaN in the result; compute over the rest.
            nnan = ~np.isnan(a)
            result = np.full(a.shape, fill_value=np.nan)
            a = a[nnan]
            unique, counts = np.unique(a, return_counts=True)
            # A value occurring >= 2 times contributes exactly once. This single
            # boolean selection replaces the original's two in-place masking
            # passes (counts[counts < 2] = 0; counts[counts > 1] = 1).
            result[nnan] = np.sum(unique[counts > 1], keepdims=True)
            return result

        def calculator(a):
            return np.apply_along_axis(calculator1d, -1, a)

        return apply_to_axis(calculator, x, axis=self.axis)
class SumReoccurringDataPoints(Transformer):
    """Sum of all data points whose value occurs more than once
    (each occurrence contributes, i.e. count * value per repeated value)."""

    def __init__(self, *parents, axis=None, **kwargs):
        super(SumReoccurringDataPoints, self).__init__(*parents, **kwargs)
        self.axis = axis
        self.preconditions = [
            lambda *collections: len(collections) == 1,
            lambda x: np.issubdtype(x.dtype, np.float64),
        ]

    def apply(self, x):
        def calculator1d(a):
            # NaN positions stay NaN in the result; compute over the rest.
            nnan = ~np.isnan(a)
            result = np.full(a.shape, fill_value=np.nan)
            a = a[nnan]
            unique, counts = np.unique(a, return_counts=True)
            # Values occurring >= 2 times contribute count * value; values seen
            # once contribute nothing. This boolean selection replaces the
            # original in-place zeroing (counts[counts < 2] = 0).
            reoccurring = counts > 1
            result[nnan] = np.sum(
                counts[reoccurring] * unique[reoccurring], keepdims=True
            )
            return result

        def calculator(a):
            return np.apply_along_axis(calculator1d, -1, a)

        return apply_to_axis(calculator, x, axis=self.axis)
from ..computation import Graph, Transformer, Constant
from .statistics import ArgMin, ArgMax
from .util import apply_to_axis
__all__ = [
'HasDuplicate',
'HasDuplicateMin',
'HasDuplicateMax',
'NumberUniqueValues',
'SumReoccurringDataPoints',
'SumReoccurringValues',
]
class NumberUniqueValues(Transformer):
def __init__(self, *parents, rel=True, axis=None, **kwargs):
super(NumberUniqueValues, self).__init__(*parents, **kwargs)
self.rel = rel
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
]
def apply(self, x):
def calculator1d(a):
nnan = ~np.isnan(a)
result = np.full(a.shape, fill_value=np.nan)
a = a[nnan]
n = a[~np.isnan(a)].size
if n > 0:
unique = np.unique(a).size
if self.rel:
unique = unique / n
result[nnan] = np.array([unique])
return result
else:
result[nnan] = np.array([np.nan])
return result
def calculator(a):
return np.apply_along_axis(calculator1d, -1, a)
return apply_to_axis(calculator, x, axis=self.axis)
class HasDuplicate(Transformer):
def __init__(self, *parents, axis=None, **kwargs):
super(HasDuplicate, self).__init__(*parents, **kwargs)
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
]
def graph(self, x):
return Graph(NumberUniqueValues(x, rel=True, axis=self.axis) < Constant(1))
class HasDuplicateMin(Transformer):
def __init__(self, *parents, axis=None, **kwargs):
super(HasDuplicateMin, self).__init__(*parents, **kwargs)
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
]
def graph(self, x):
return Graph(
ArgMin(x, first=True, axis=self.axis) < ArgMin(x, first=False, axis=self.axis)
)
class HasDuplicateMax(Transformer):
def __init__(self, *parents, axis=None, **kwargs):
super(HasDuplicateMax, self).__init__(*parents, **kwargs)
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
]
def graph(self, x):
return Graph(
ArgMax(x, first=True, axis=self.axis) < ArgMax(x, first=False, axis=self.axis)
)
class SumReoccurringValues(Transformer):
def __init__(self, *parents, axis=None, **kwargs):
super(SumReoccurringValues, self).__init__(*parents, **kwargs)
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
lambda x: np.issubdtype(x.dtype, np.float64),
]
def apply(self, x):
def calculator1d(a):
nnan = ~np.isnan(a)
result = np.full(a.shape, fill_value=np.nan)
a = a[nnan]
unique, counts = np.unique(a, return_counts=True)
counts[counts < 2] = 0
counts[counts > 1] = 1
result[nnan] = np.sum(counts * unique, keepdims=True)
return result
def calculator(a):
return np.apply_along_axis(calculator1d, -1, a)
return apply_to_axis(calculator, x, axis=self.axis)
class SumReoccurringDataPoints(Transformer):
def __init__(self, *parents, axis=None, **kwargs):
super(SumReoccurringDataPoints, self).__init__(*parents, **kwargs)
self.axis = axis
self.preconditions = [
lambda *collections: len(collections) == 1,
lambda x: np.issubdtype(x.dtype, np.float64),
]
def apply(self, x):
def calculator1d(a):
nnan = ~np.isnan(a)
result = np.full(a.shape, fill_value=np.nan)
a = a[nnan]
unique, counts = np.unique(a, return_counts=True)
counts[counts < 2] = 0
result[nnan] = np.sum(counts * unique, keepdims=True)
return result
def calculator(a):
return np.apply_along_axis(calculator1d, -1, a)
return apply_to_axis(calculator, x, axis=self.axis) | 0.763396 | 0.42471 |
import pytest
from freezegun import freeze_time
from rest_framework import status
from rest_framework.reverse import reverse
from datahub.core.test_utils import (
format_date_or_datetime,
get_attr_or_none,
str_or_none,
)
from datahub.dataset.core.test import BaseDatasetViewTest
from datahub.investment.project.test.factories import (
ActiveInvestmentProjectFactory,
AssignPMInvestmentProjectFactory,
FDIInvestmentProjectFactory,
InvestmentProjectFactory,
InvestmentProjectTeamMemberFactory,
VerifyWinInvestmentProjectFactory,
WonInvestmentProjectFactory,
)
def get_expected_data_from_project(project):
    """Return the row the investment-projects dataset endpoint should emit
    for *project* (keys mirror the dataset view's annotated queryset)."""
    return {
        'actual_land_date': format_date_or_datetime(project.actual_land_date),
        'actual_uk_region_names': (
            [region.name for region in project.actual_uk_regions.order_by('name')]
        ) if project.actual_uk_regions.exists() else None,
        'address_1': project.address_1,
        'address_2': project.address_2,
        'address_town': project.address_town,
        'address_postcode': project.address_postcode,
        'anonymous_description': project.anonymous_description,
        'associated_non_fdi_r_and_d_project_id': str_or_none(
            project.associated_non_fdi_r_and_d_project_id,
        ),
        'average_salary__name': get_attr_or_none(project, 'average_salary.name'),
        'business_activity_names': (
            [activity.name for activity in project.business_activities.order_by('name')]
        ) if project.business_activities.exists() else None,
        'client_relationship_manager_id': str_or_none(project.client_relationship_manager_id),
        'client_requirements': project.client_requirements,
        # NOTE(review): unlike the other to-many fields, this one (and
        # team_member_ids below) falls back to [None] rather than None when
        # empty — presumably matching the view's SQL aggregation; confirm
        # before unifying.
        'competing_countries': (
            [country.name for country in project.competitor_countries.order_by('name')]
            if project.competitor_countries.exists() else [None]
        ),
        'created_by_id': str_or_none(project.created_by_id),
        'created_on': format_date_or_datetime(project.created_on),
        'delivery_partner_names': (
            [partner.name for partner in project.delivery_partners.order_by('name')]
        ) if project.delivery_partners.exists() else None,
        'description': project.description,
        'estimated_land_date': format_date_or_datetime(project.estimated_land_date),
        'export_revenue': project.export_revenue,
        'fdi_type__name': get_attr_or_none(project, 'fdi_type.name'),
        'fdi_value__name': get_attr_or_none(project, 'fdi_value.name'),
        'foreign_equity_investment': (
            float(project.foreign_equity_investment)
            if project.foreign_equity_investment
            else None
        ),
        'government_assistance': project.government_assistance,
        'gross_value_added': project.gross_value_added,
        'gva_multiplier__multiplier': (
            float(get_attr_or_none(project, 'gva_multiplier.multiplier'))
            if get_attr_or_none(project, 'gva_multiplier.multiplier')
            else None
        ),
        'id': str(project.pk),
        'investment_type__name': get_attr_or_none(project, 'investment_type.name'),
        'investor_company_id': str_or_none(project.investor_company_id),
        'investor_company_sector': get_attr_or_none(
            project,
            'investor_company.sector.name',
        ),
        'investor_type__name': get_attr_or_none(project, 'investor_type.name'),
        'level_of_involvement_name': get_attr_or_none(project, 'level_of_involvement.name'),
        'likelihood_to_land__name': get_attr_or_none(project, 'likelihood_to_land.name'),
        'modified_by_id': str_or_none(project.modified_by_id),
        'modified_on': format_date_or_datetime(project.modified_on),
        'name': project.name,
        'new_tech_to_uk': project.new_tech_to_uk,
        'non_fdi_r_and_d_budget': project.non_fdi_r_and_d_budget,
        'number_new_jobs': project.number_new_jobs,
        'number_safeguarded_jobs': project.number_safeguarded_jobs,
        'other_business_activity': project.other_business_activity,
        'project_arrived_in_triage_on': format_date_or_datetime(
            project.project_arrived_in_triage_on),
        'project_assurance_adviser_id': str_or_none(project.project_assurance_adviser_id),
        'project_manager_id': str_or_none(project.project_manager_id),
        # `project_reference` in the dataset maps to the model's project_code.
        'project_reference': project.project_code,
        'proposal_deadline': format_date_or_datetime(project.proposal_deadline),
        'r_and_d_budget': project.r_and_d_budget,
        'referral_source_activity__name': get_attr_or_none(
            project,
            'referral_source_activity.name',
        ),
        'referral_source_activity_marketing__name': get_attr_or_none(
            project,
            'referral_source_activity_marketing.name',
        ),
        'referral_source_activity_website__name': get_attr_or_none(
            project,
            'referral_source_activity_website.name',
        ),
        'sector_name': get_attr_or_none(project, 'sector.name'),
        'specific_programme__name': get_attr_or_none(project, 'specific_programme.name'),
        'stage__name': get_attr_or_none(project, 'stage.name'),
        'status': project.status,
        'strategic_driver_names': (
            [driver.name for driver in project.strategic_drivers.order_by('name')]
        ) if project.strategic_drivers.exists() else None,
        'team_member_ids': (
            [
                str(team_member.adviser_id)
                for team_member in project.team_members.order_by('id')
            ]
        ) if project.team_members.exists() else [None],
        'total_investment': float(project.total_investment) if project.total_investment else None,
        'uk_company_id': str_or_none(project.uk_company_id),
        'uk_company_sector': get_attr_or_none(project, 'uk_company.sector.name'),
        'uk_region_location_names': (
            [region.name for region in project.uk_region_locations.order_by('name')]
        ) if project.uk_region_locations.exists() else None,
    }
@pytest.mark.django_db
class TestInvestmentProjectsDatasetViewSet(BaseDatasetViewTest):
"""
Tests for InvestmentProjectsDatasetView
"""
view_url = reverse('api-v4:dataset:investment-projects-dataset')
factory = InvestmentProjectFactory
@pytest.mark.parametrize(
'project_factory',
(
InvestmentProjectFactory,
FDIInvestmentProjectFactory,
AssignPMInvestmentProjectFactory,
ActiveInvestmentProjectFactory,
VerifyWinInvestmentProjectFactory,
WonInvestmentProjectFactory,
),
)
def test_success(self, data_flow_api_client, project_factory):
"""Test that endpoint returns with expected data for a single project"""
project = project_factory()
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 1
result = response_results[0]
expected_result = get_expected_data_from_project(project)
assert result == expected_result
def test_with_team_members(self, data_flow_api_client):
"""Test that endpoint returns with expected data for a single project with team members"""
project = InvestmentProjectTeamMemberFactory().investment_project
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 1
result = response_results[0]
expected_result = get_expected_data_from_project(project)
assert result == expected_result
def test_with_multiple_projects(self, data_flow_api_client):
"""Test that endpoint returns correct number of record in expected response"""
with freeze_time('2019-01-01 12:30:00'):
project_1 = InvestmentProjectFactory()
with freeze_time('2019-01-03 12:00:00'):
project_2 = InvestmentProjectFactory()
with freeze_time('2019-01-01 12:00:00'):
project_3 = InvestmentProjectFactory()
project_4 = InvestmentProjectFactory()
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 4
expected_project_list = sorted([project_3, project_4],
key=lambda item: item.pk) + [project_1, project_2]
for index, project in enumerate(expected_project_list):
assert str(project.id) == response_results[index]['id'] | datahub/dataset/investment_project/test/test_views.py | import pytest
from freezegun import freeze_time
from rest_framework import status
from rest_framework.reverse import reverse
from datahub.core.test_utils import (
format_date_or_datetime,
get_attr_or_none,
str_or_none,
)
from datahub.dataset.core.test import BaseDatasetViewTest
from datahub.investment.project.test.factories import (
ActiveInvestmentProjectFactory,
AssignPMInvestmentProjectFactory,
FDIInvestmentProjectFactory,
InvestmentProjectFactory,
InvestmentProjectTeamMemberFactory,
VerifyWinInvestmentProjectFactory,
WonInvestmentProjectFactory,
)
def get_expected_data_from_project(project):
    """Return the dict the dataset endpoint is expected to emit for *project*.

    Mirrors the serialisation rules of the investment-projects dataset view:
    ids are stringified (or ``None``), dates/datetimes are formatted via
    ``format_date_or_datetime``, nullable related names are read defensively
    with ``get_attr_or_none``, and to-many relations are rendered as sorted
    lists. Note the empty-relation convention is not uniform: most to-many
    fields collapse to ``None``, but ``competing_countries`` and
    ``team_member_ids`` collapse to ``[None]`` — presumably matching the SQL
    LEFT JOIN output of the view; confirm against the view implementation.
    """
    return {
        'actual_land_date': format_date_or_datetime(project.actual_land_date),
        'actual_uk_region_names': (
            [region.name for region in project.actual_uk_regions.order_by('name')]
        ) if project.actual_uk_regions.exists() else None,
        'address_1': project.address_1,
        'address_2': project.address_2,
        'address_town': project.address_town,
        'address_postcode': project.address_postcode,
        'anonymous_description': project.anonymous_description,
        'associated_non_fdi_r_and_d_project_id': str_or_none(
            project.associated_non_fdi_r_and_d_project_id,
        ),
        'average_salary__name': get_attr_or_none(project, 'average_salary.name'),
        'business_activity_names': (
            [activity.name for activity in project.business_activities.order_by('name')]
        ) if project.business_activities.exists() else None,
        'client_relationship_manager_id': str_or_none(project.client_relationship_manager_id),
        'client_requirements': project.client_requirements,
        # NOTE: empty relation yields [None] here, unlike the None used above.
        'competing_countries': (
            [country.name for country in project.competitor_countries.order_by('name')]
            if project.competitor_countries.exists() else [None]
        ),
        'created_by_id': str_or_none(project.created_by_id),
        'created_on': format_date_or_datetime(project.created_on),
        'delivery_partner_names': (
            [partner.name for partner in project.delivery_partners.order_by('name')]
        ) if project.delivery_partners.exists() else None,
        'description': project.description,
        'estimated_land_date': format_date_or_datetime(project.estimated_land_date),
        'export_revenue': project.export_revenue,
        'fdi_type__name': get_attr_or_none(project, 'fdi_type.name'),
        'fdi_value__name': get_attr_or_none(project, 'fdi_value.name'),
        # Decimal fields are serialised as floats (or None when falsy).
        'foreign_equity_investment': (
            float(project.foreign_equity_investment)
            if project.foreign_equity_investment
            else None
        ),
        'government_assistance': project.government_assistance,
        'gross_value_added': project.gross_value_added,
        'gva_multiplier__multiplier': (
            float(get_attr_or_none(project, 'gva_multiplier.multiplier'))
            if get_attr_or_none(project, 'gva_multiplier.multiplier')
            else None
        ),
        'id': str(project.pk),
        'investment_type__name': get_attr_or_none(project, 'investment_type.name'),
        'investor_company_id': str_or_none(project.investor_company_id),
        'investor_company_sector': get_attr_or_none(
            project,
            'investor_company.sector.name',
        ),
        'investor_type__name': get_attr_or_none(project, 'investor_type.name'),
        'level_of_involvement_name': get_attr_or_none(project, 'level_of_involvement.name'),
        'likelihood_to_land__name': get_attr_or_none(project, 'likelihood_to_land.name'),
        'modified_by_id': str_or_none(project.modified_by_id),
        'modified_on': format_date_or_datetime(project.modified_on),
        'name': project.name,
        'new_tech_to_uk': project.new_tech_to_uk,
        'non_fdi_r_and_d_budget': project.non_fdi_r_and_d_budget,
        'number_new_jobs': project.number_new_jobs,
        'number_safeguarded_jobs': project.number_safeguarded_jobs,
        'other_business_activity': project.other_business_activity,
        'project_arrived_in_triage_on': format_date_or_datetime(
            project.project_arrived_in_triage_on),
        'project_assurance_adviser_id': str_or_none(project.project_assurance_adviser_id),
        'project_manager_id': str_or_none(project.project_manager_id),
        # The dataset column is named differently from the model attribute.
        'project_reference': project.project_code,
        'proposal_deadline': format_date_or_datetime(project.proposal_deadline),
        'r_and_d_budget': project.r_and_d_budget,
        'referral_source_activity__name': get_attr_or_none(
            project,
            'referral_source_activity.name',
        ),
        'referral_source_activity_marketing__name': get_attr_or_none(
            project,
            'referral_source_activity_marketing.name',
        ),
        'referral_source_activity_website__name': get_attr_or_none(
            project,
            'referral_source_activity_website.name',
        ),
        'sector_name': get_attr_or_none(project, 'sector.name'),
        'specific_programme__name': get_attr_or_none(project, 'specific_programme.name'),
        'stage__name': get_attr_or_none(project, 'stage.name'),
        'status': project.status,
        'strategic_driver_names': (
            [driver.name for driver in project.strategic_drivers.order_by('name')]
        ) if project.strategic_drivers.exists() else None,
        # NOTE: empty relation yields [None] here as well (see competing_countries).
        'team_member_ids': (
            [
                str(team_member.adviser_id)
                for team_member in project.team_members.order_by('id')
            ]
        ) if project.team_members.exists() else [None],
        'total_investment': float(project.total_investment) if project.total_investment else None,
        'uk_company_id': str_or_none(project.uk_company_id),
        'uk_company_sector': get_attr_or_none(project, 'uk_company.sector.name'),
        'uk_region_location_names': (
            [region.name for region in project.uk_region_locations.order_by('name')]
        ) if project.uk_region_locations.exists() else None,
    }
@pytest.mark.django_db
class TestInvestmentProjectsDatasetViewSet(BaseDatasetViewTest):
"""
Tests for InvestmentProjectsDatasetView
"""
view_url = reverse('api-v4:dataset:investment-projects-dataset')
factory = InvestmentProjectFactory
@pytest.mark.parametrize(
'project_factory',
(
InvestmentProjectFactory,
FDIInvestmentProjectFactory,
AssignPMInvestmentProjectFactory,
ActiveInvestmentProjectFactory,
VerifyWinInvestmentProjectFactory,
WonInvestmentProjectFactory,
),
)
def test_success(self, data_flow_api_client, project_factory):
"""Test that endpoint returns with expected data for a single project"""
project = project_factory()
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 1
result = response_results[0]
expected_result = get_expected_data_from_project(project)
assert result == expected_result
def test_with_team_members(self, data_flow_api_client):
"""Test that endpoint returns with expected data for a single project with team members"""
project = InvestmentProjectTeamMemberFactory().investment_project
response = data_flow_api_client.get(self.view_url)
assert response.status_code == status.HTTP_200_OK
response_results = response.json()['results']
assert len(response_results) == 1
result = response_results[0]
expected_result = get_expected_data_from_project(project)
assert result == expected_result
    def test_with_multiple_projects(self, data_flow_api_client):
        """Test that endpoint returns correct number of record in expected response"""
        # One project at 12:30 on Jan 1, one on Jan 3, and two sharing the
        # earliest timestamp (12:00 on Jan 1) to exercise tie-breaking.
        with freeze_time('2019-01-01 12:30:00'):
            project_1 = InvestmentProjectFactory()
        with freeze_time('2019-01-03 12:00:00'):
            project_2 = InvestmentProjectFactory()
        with freeze_time('2019-01-01 12:00:00'):
            project_3 = InvestmentProjectFactory()
            project_4 = InvestmentProjectFactory()
        response = data_flow_api_client.get(self.view_url)
        assert response.status_code == status.HTTP_200_OK
        response_results = response.json()['results']
        assert len(response_results) == 4
        # Expected order: created_on ascending, with the created_on tie between
        # project_3 and project_4 broken by pk — presumably matching the view's
        # ORDER BY; confirm against the dataset view's queryset.
        expected_project_list = sorted([project_3, project_4],
                                       key=lambda item: item.pk) + [project_1, project_2]
        for index, project in enumerate(expected_project_list):
assert str(project.id) == response_results[index]['id'] | 0.590071 | 0.181844 |
import numpy as np
_dtmf_frequencies = [697, 770, 852, 941, 1209, 1336, 1477, 1633]
_codes = {
697: {1209: '1', 1336: '2', 1477: '3', 1633: 'A'},
770: {1209: '4', 1336: '5', 1477: '6', 1633: 'B'},
852: {1209: '7', 1336: '8', 1477: '9', 1633: 'C'},
941: {1209: '*', 1336: '0', 1477: '#', 1633: 'D'}
}
# Empirically selected band size. Bands around dtmf frequencies never overlap
_freq_band = 25
# Input: signal in time domain, sample rate of signal
# Output: list of frequencies and list of corresponding amplitudes
def freq_amp(signal, sample_rate):
    """Compute the single-sided spectrum of a time-domain signal.

    Returns a (frequencies, amplitudes) pair covering only the first half of
    the FFT bins; for real input the second half mirrors the first with
    negative frequencies and carries no extra information.
    """
    count = len(signal)
    half = count // 2
    spectrum = np.fft.fft(signal)
    # Scale |FFT| by 2/N so bin magnitudes approximate real signal amplitudes.
    magnitudes = 2 / count * np.abs(spectrum)
    bins = np.fft.fftfreq(count) * sample_rate
    return bins[:half], magnitudes[:half]
def sort_freq_amp(freq, amp):
    """Reorder both arrays so that amplitudes are descending.

    Negating ``amp`` before argsort keeps the sort stable for ties
    (equal amplitudes retain their original relative order).
    """
    permutation = np.argsort(-amp)
    return freq[permutation], amp[permutation]
# Input: signal in time domain, sample rate of signal
# Output: detected dtmf signal
# Signals are detected if no significant amplitude in any other frequency range is measured
# Detection has been tested on samples of 50ms in duration
def detect_dtmf(samples, sample_rate):
    """Detect a single DTMF keypress in a block of time-domain samples.

    Returns the key character ('0'-'9', 'A'-'D', '*', '#') when exactly one
    low-group and one high-group DTMF tone dominate the spectrum, otherwise
    falls through (the final return is below this block). Detection is
    strict: any significant energy outside the DTMF bands rejects the frame.
    """
    # Hann window reduces spectral leakage before the FFT.
    hanning_window = np.hanning(len(samples))
    samples = np.multiply(hanning_window, samples)
    freq, amp = freq_amp(samples, sample_rate)
    max_amp = max(amp)
    responses = dict()  # accumulated energy per matched DTMF frequency
    other = 0  # energy in significant bins outside every DTMF band
    for i in range(len(freq)):
        # Ignore bins well below the strongest peak (noise floor).
        if amp[i] < max_amp/3:
            continue
        for dtms_freq in _dtmf_frequencies:
            if abs(dtms_freq - freq[i]) <= _freq_band:
                responses[dtms_freq] = responses.get(dtms_freq, 0) + amp[i]
                break
        # for/else: bin matched no DTMF band, so count it as foreign energy.
        else:
            other += amp[i]
    # Accept only a clean two-tone hit with zero out-of-band energy.
    if len(responses) == 2 and other == 0:
        detected = sorted(list(responses.keys()))
        f1 = detected[0]
        f2 = detected[1]
        # f1 must be a row (low-group) tone and f2 a column (high-group) tone;
        # two tones from the same group fail this lookup.
        if f1 in _codes.keys() and f2 in _codes[f1].keys():
            code = _codes[detected[0]][detected[1]]
            return code
return None | Dtmf.py | import numpy as np
_dtmf_frequencies = [697, 770, 852, 941, 1209, 1336, 1477, 1633]
_codes = {
697: {1209: '1', 1336: '2', 1477: '3', 1633: 'A'},
770: {1209: '4', 1336: '5', 1477: '6', 1633: 'B'},
852: {1209: '7', 1336: '8', 1477: '9', 1633: 'C'},
941: {1209: '*', 1336: '0', 1477: '#', 1633: 'D'}
}
# Empirically selected band size. Bands around dtmf frequencies never overlap
_freq_band = 25
# Input: signal in time domain, sample rate of signal
# Output: list of frequencies and list of corresponding amplitudes
def freq_amp(signal, sample_rate):
n_samples = len(signal)
np_fft = np.fft.fft(signal)
amplitudes = 2 / n_samples * np.abs(np_fft)
frequencies = np.fft.fftfreq(n_samples) * sample_rate
# Only half of the frequencies are provided, because the other half are mirrored negative
return frequencies[:len(frequencies) // 2], amplitudes[:len(np_fft) // 2]
def sort_freq_amp(freq, amp):
p = np.argsort(-amp)
return freq[p], amp[p]
# Input: signal in time domain, sample rate of signal
# Output: detected dtmf signal
# Signals are detected if no significant amplitude in any other frequency range is measured
# Detection has been tested on samples of 50ms in duration
def detect_dtmf(samples, sample_rate):
# Hanning window applied
hanning_window = np.hanning(len(samples))
samples = np.multiply(hanning_window, samples)
freq, amp = freq_amp(samples, sample_rate)
max_amp = max(amp)
responses = dict()
other = 0
for i in range(len(freq)):
if amp[i] < max_amp/3:
continue
for dtms_freq in _dtmf_frequencies:
if abs(dtms_freq - freq[i]) <= _freq_band:
responses[dtms_freq] = responses.get(dtms_freq, 0) + amp[i]
break
else:
other += amp[i]
if len(responses) == 2 and other == 0:
detected = sorted(list(responses.keys()))
f1 = detected[0]
f2 = detected[1]
if f1 in _codes.keys() and f2 in _codes[f1].keys():
code = _codes[detected[0]][detected[1]]
return code
return None | 0.591487 | 0.630145 |
import logging
import time
import pytest
import zenko_e2e.conf as conf
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from ..fixtures import *
from .. import util
_log = logging.getLogger('cosmos') # pylint: disable=invalid-name
MD5_HASHES = {
"file1": "b781c1f5179214f6d7f3f957a989a5b9", # 1KB
"file2": "e1961ee5d47897950cc57b7c98fd43d2", # 1MB
"file3": "7e96ca8de916f4a259ef7b4fcdb49d91", # 10MB
"file4": "aa06df7da67c9362335bc2068b3d49c9", # 100MB
}
@pytest.fixture
def kube():
    """Kubernetes API client built from the in-cluster service account."""
    cluster_config = config.load_incluster_config()
    return client.ApiClient(cluster_config)
@pytest.fixture
def kube_batch(kube):
    """BatchV1 API wrapper bound to the shared kube client."""
    batch_api = client.BatchV1Api(kube)
    return batch_api
@pytest.fixture
def kube_corev1(kube):
    """CoreV1 API wrapper bound to the shared kube client."""
    core_api = client.CoreV1Api(kube)
    return core_api
def enable_ingest(kube, location):
    """Patch the Cosmos custom resource to trigger an rclone ingestion run.

    :param kube: a kubernetes ApiClient instance
    :param location: name of the ``cosmoses`` custom object to patch
    :returns: the patched custom object as returned by the API

    Fix: this was declared as a ``@pytest.fixture`` but is only ever called
    directly (see wait_for_job); modern pytest raises when a fixture function
    is invoked as a plain function, so it is now an ordinary helper.
    """
    api_instance = client.CustomObjectsApi(kube)
    body = {"spec": {"rclone": {"triggerIngestion": True}}}
    return api_instance.patch_namespaced_custom_object(
        'zenko.io',
        'v1alpha1',
        conf.K8S_NAMESPACE,
        'cosmoses',
        location,
        body
    )
def get_job(kube_batch, location):
    """Return the name of the first job whose name contains *location*.

    :param kube_batch: a BatchV1Api instance
    :param location: substring identifying the Cosmos location
    :returns: the matching job name, or ``""`` when no job matches

    Fix: dropped the incorrect ``@pytest.fixture`` decorator — this helper is
    called directly (with two positional arguments) from wait_for_job, which
    is not how fixtures may be used.
    """
    jobs = kube_batch.list_namespaced_job(conf.K8S_NAMESPACE)
    for job in jobs.items:
        if location in job.metadata.name:
            return job.metadata.name
    return ""
def compare_versions(objkey, aws_target_bucket, zenko_bucket):
    """Check that the ingested object matches the source object and version.

    Returns True when the zenko-side metadata records the same version id as
    the source object AND the content hashes of that version agree.

    Fix: dropped the incorrect ``@pytest.fixture`` decorator — the tests call
    this directly with three arguments, which fixtures do not support.
    """
    src_obj = aws_target_bucket.Object(objkey)
    dst_obj = zenko_bucket.Object(objkey)
    if src_obj.version_id != dst_obj.metadata['version-id']:
        return False
    src_hash = util.get_object_hash(aws_target_bucket, objkey)
    # NOTE(review): this put creates a new (empty) version on the zenko side
    # before hashing the previously-ingested version — presumably to exercise
    # version-specific reads; confirm it is intentional.
    zenko_bucket.put_object(Key=objkey)
    dst_hash = util.get_object_hash(
        zenko_bucket, objkey, versionid=dst_obj.version_id)
    return src_hash == dst_hash
def wait_for_job(kube_batch, location, timeout=180):
    """Trigger ingestion and poll until the job for *location* completes.

    Polls roughly once per second until the job reports success or *timeout*
    seconds elapse. Returns the last observed job status object, or ``None``
    if the job status was never successfully read.

    Timeout has been increased to 180 because of setup time but really
    shouldn't be increased any further. Please investigate possible
    regressions or test refactor before increasing the timeout any further.

    Fixes: dropped the incorrect ``@pytest.fixture`` decorator (the tests
    call this directly, and fixtures cannot take a ``timeout=`` default), and
    initialised ``state`` so the final ``return`` cannot raise
    UnboundLocalError when every poll attempt raises.
    """
    state = None
    _timestamp = time.time()
    while time.time() - _timestamp < timeout:
        try:
            enable_ingest(kube(), location)
            job = get_job(kube_batch, location)
            state = kube_batch.read_namespaced_job_status(
                job, conf.K8S_NAMESPACE)
            if state.status.succeeded:
                _log.debug("Finished with completed status")
                break
        except IndexError:
            # When the job hasn't yet been created, there is an index error
            pass
        except ApiException as err:
            _log.error("Exception when calling job status %s", err)
        _log.info("Waiting for job completion")
        time.sleep(1)
    else:
        _log.error('Initial ingestion did not complete in time')
    return state
@pytest.mark.skip(reason="ZENKO-3359 Cosmos: failing to create OOB resources")
def test_cosmos_nfs_ingest(nfs_loc, nfs_loc_bucket, kube_batch):
    """Ingest pre-seeded NFS objects and verify their content hashes."""
    util.mark_test('SOFS-NFS OOB INGESTION')
    job_state = wait_for_job(kube_batch, nfs_loc)
    assert job_state.status.succeeded
    # Every pre-seeded file must hash to its known MD5 after ingestion.
    for name, expected_md5 in MD5_HASHES.items():
        _log.debug("Checking object %s with hash %s", name, expected_md5)
        assert util.get_object_hash(nfs_loc_bucket, name) == expected_md5
# Fails because ingestion AWS location not resumed (paused by default).
@pytest.mark.skip(reason="ZENKO-3644 Introduce a backbeat client to resume ingestion for the AWS location")
def test_cosmos_aws_ingest(aws_target_bucket, zenko_bucket, kube_batch, testfile, objkey):  # noqa pylint: disable=dangerous-default-value,too-many-arguments
    """Seed an object on the AWS backend, ingest it, and verify content + version."""
    util.mark_test('AWS OOB INGESTION')
    # Write the fixture object directly to the AWS-side bucket so that
    # ingestion has something to pick up out-of-band.
    aws_target_bucket.put_object(
        Body=testfile,
        Key=objkey,
    )
    # aws_loc_bucket comes from the wildcard fixture import; presumably it
    # rebinds the bucket to the AWS location with ingestion enabled — confirm.
    zenko_bucket = aws_loc_bucket(zenko_bucket, ingest=True)
    # Wait for initial ingestion
    job = wait_for_job(kube_batch, conf.AWS_BACKEND)
    assert job.status.succeeded
    # Validate ingestion
    assert util.check_object(
        objkey, testfile, zenko_bucket, aws_target_bucket)
    # Validate versioning
    assert compare_versions(objkey, aws_target_bucket, zenko_bucket)
# Fails because ingestion CEPH location not resumed (paused by default).
@pytest.mark.skip(reason="ZENKO-3644 Introduce a backbeat client to resume ingestion for the CEPH location")
def test_cosmos_ceph_ingest(ceph_target_bucket, zenko_bucket, kube_batch, testfile, objkey):  # noqa pylint: disable=dangerous-default-value,too-many-arguments
    """Seed an object on the CEPH backend, ingest it, and verify content + version."""
    util.mark_test('CEPH OOB INGESTION')
    # Seed the CEPH-side bucket so out-of-band ingestion has data to pick up.
    ceph_target_bucket.put_object(
        Body=testfile,
        Key=objkey,
    )
    # ceph_loc_bucket comes from the wildcard fixture import; presumably it
    # rebinds the bucket to the CEPH location with ingestion enabled — confirm.
    zenko_bucket = ceph_loc_bucket(zenko_bucket, ingest=True)
    job = wait_for_job(kube_batch, conf.CEPH_BACKEND)
    assert job.status.succeeded
    assert util.check_object(
        objkey, testfile, zenko_bucket, ceph_target_bucket)
assert compare_versions(objkey, ceph_target_bucket, zenko_bucket) | tests/zenko_tests/python_tests/zenko_e2e/cosmos/test_cosmos_deployment.py | import logging
import time
import pytest
import zenko_e2e.conf as conf
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from ..fixtures import *
from .. import util
_log = logging.getLogger('cosmos') # pylint: disable=invalid-name
MD5_HASHES = {
"file1": "b781c1f5179214f6d7f3f957a989a5b9", # 1KB
"file2": "e1961ee5d47897950cc57b7c98fd43d2", # 1MB
"file3": "7e96ca8de916f4a259ef7b4fcdb49d91", # 10MB
"file4": "aa06df7da67c9362335bc2068b3d49c9", # 100MB
}
@pytest.fixture
def kube():
return client.ApiClient(config.load_incluster_config())
@pytest.fixture
def kube_batch(kube):
return client.BatchV1Api(kube)
@pytest.fixture
def kube_corev1(kube):
return client.CoreV1Api(kube)
@pytest.fixture
def enable_ingest(kube, location):
api_instance = client.CustomObjectsApi(kube)
body = {"spec": {"rclone": {"triggerIngestion": True}}}
return api_instance.patch_namespaced_custom_object(
'zenko.io',
'v1alpha1',
conf.K8S_NAMESPACE,
'cosmoses',
location,
body
)
@pytest.fixture
def get_job(kube_batch, location):
jobs = kube_batch.list_namespaced_job(conf.K8S_NAMESPACE)
for job in jobs.items:
if location in job.metadata.name:
return job.metadata.name
return ""
@pytest.fixture
def compare_versions(objkey, aws_target_bucket, zenko_bucket):
src_obj = aws_target_bucket.Object(objkey)
dst_obj = zenko_bucket.Object(objkey)
if src_obj.version_id != dst_obj.metadata['version-id']:
return False
src_hash = util.get_object_hash(aws_target_bucket, objkey)
zenko_bucket.put_object(Key=objkey)
dst_hash = util.get_object_hash(
zenko_bucket, objkey, versionid=dst_obj.version_id)
if src_hash != dst_hash:
return False
return True
# Timeout has been increased to 180 because of setup time but really shouldn't
# be increased any further. Please investigate possible regressions or test
# refactor before increasing the timeout any further.
@pytest.fixture
def wait_for_job(kube_batch, location, timeout=180):
_timestamp = time.time()
while time.time() - _timestamp < timeout:
try:
enable_ingest(kube(), location)
job = get_job(kube_batch, location)
state = kube_batch.read_namespaced_job_status(
job, conf.K8S_NAMESPACE)
if state.status.succeeded:
_log.debug("Finished with completed status")
break
except IndexError:
# When the job hasn't yet been created, there is an index error
pass
except ApiException as err:
_log.error("Exception when calling job status %s", err)
_log.info("Waiting for job completion")
time.sleep(1)
else:
_log.error('Initial ingestion did not complete in time')
return state
@pytest.mark.skip(reason="ZENKO-3359 Cosmos: failing to create OOB resources")
def test_cosmos_nfs_ingest(nfs_loc, nfs_loc_bucket, kube_batch):
util.mark_test('SOFS-NFS OOB INGESTION')
job = wait_for_job(kube_batch, nfs_loc)
assert job.status.succeeded
for (key, md5) in MD5_HASHES.items():
_log.debug("Checking object %s with hash %s", key, md5)
assert util.get_object_hash(nfs_loc_bucket, key) == md5
# Fails because ingestion AWS location not resumed (paused by default).
@pytest.mark.skip(reason="ZENKO-3644 Introduce a backbeat client to resume ingestion for the AWS location")
def test_cosmos_aws_ingest(aws_target_bucket, zenko_bucket, kube_batch, testfile, objkey): # noqa pylint: disable=dangerous-default-value,too-many-arguments
util.mark_test('AWS OOB INGESTION')
aws_target_bucket.put_object(
Body=testfile,
Key=objkey,
)
zenko_bucket = aws_loc_bucket(zenko_bucket, ingest=True)
# Wait for initial ingestion
job = wait_for_job(kube_batch, conf.AWS_BACKEND)
assert job.status.succeeded
# Validate ingestion
assert util.check_object(
objkey, testfile, zenko_bucket, aws_target_bucket)
# Validate versioning
assert compare_versions(objkey, aws_target_bucket, zenko_bucket)
# Fails because ingestion CEPH location not resumed (paused by default).
@pytest.mark.skip(reason="ZENKO-3644 Introduce a backbeat client to resume ingestion for the CEPH location")
def test_cosmos_ceph_ingest(ceph_target_bucket, zenko_bucket, kube_batch, testfile, objkey): # noqa pylint: disable=dangerous-default-value,too-many-arguments
util.mark_test('CEPH OOB INGESTION')
ceph_target_bucket.put_object(
Body=testfile,
Key=objkey,
)
zenko_bucket = ceph_loc_bucket(zenko_bucket, ingest=True)
job = wait_for_job(kube_batch, conf.CEPH_BACKEND)
assert job.status.succeeded
assert util.check_object(
objkey, testfile, zenko_bucket, ceph_target_bucket)
assert compare_versions(objkey, ceph_target_bucket, zenko_bucket) | 0.396535 | 0.191592 |
def soma(numeros):
    """Return the sum of the numbers.

    Returns None when the list is empty to signal that the problem
    has no solution. (Fix: the stub returned a hard-coded 22.)
    """
    if not numeros:
        return None
    return sum(numeros)
def em_posicoes_impares(numeros):
    """Return the numbers sitting at odd positions.

    For example, for [20, 30, 40, 50] returns [30, 50], the numbers at
    positions 1 and 3. (Fix: the stub returned an empty list.)
    """
    return numeros[1::2]
def primeiro_e_ultimo(numeros):
    """Return [first, last] of the list.

    Returns None when the list has fewer than two numbers, signalling that
    the problem has no solution. (Fix: the stub returned [67, 12].)
    """
    if len(numeros) < 2:
        return None
    return [numeros[0], numeros[-1]]
def conta_ocorrencias(numeros, numero):
    """Count how many times *numero* appears in *numeros*.

    (Fix: the stub returned a hard-coded 77.)
    """
    return numeros.count(numero)
def posicao_do_maior(numeros):
    """Return the position of the first occurrence of the largest number.

    Returns None when the list is empty. (Fix: the stub returned 12.)
    """
    if not numeros:
        return None
    # list.index returns the first occurrence, as required.
    return numeros.index(max(numeros))
def maior(numeros):
    """Return the largest number in the list.

    Returns None for an empty list to signal that the problem has no
    solution. (Fix: the stub returned -7.)
    """
    if not numeros:
        return None
    return max(numeros)
def qtd_acima_limite(numeros, limite):
    """Count how many numbers in the list are greater than *limite*.

    (Fix: the stub returned a hard-coded 23.)
    """
    return sum(1 for n in numeros if n > limite)
def media(numeros):
    """Return the arithmetic mean of the numbers.

    Returns None for an empty list to signal that the problem has no
    solution. (Fix: the stub returned 9.34.)
    """
    if not numeros:
        return None
    return sum(numeros) / len(numeros)
def qtd_no_intervalo(numeros, lim_inf, lim_sup):
    """Count how many numbers fall inside the closed interval [lim_inf, lim_sup].

    For example, for numbers [8, 23, 10, 9, 15] with bounds 8 and 16
    returns 4. (Fix: the stub returned 1.)
    """
    return sum(1 for n in numeros if lim_inf <= n <= lim_sup)
def multiplica_por_fator(numeros, fator):
    """Multiply each number in the list by *fator*, in place; returns nothing.

    For example, numbers [8, 12, 3] with factor 2 become [16, 24, 6].
    (Fix: the stub did nothing.)
    """
    for i, n in enumerate(numeros):
        numeros[i] = n * fator
def multiplicado_por_fator(numeros, fator):
    """Return a copy of the numbers multiplied by *fator*.

    For example, numbers [8, 12, 3] with factor 2 yield a NEW list
    [16, 24, 6] WITHOUT changing *numeros*. (Fix: the stub returned [].)
    """
    return [n * fator for n in numeros]
def n_primeiros(numeros, n):
    """Return a copy of the first *n* numbers of the list.

    Assumes n >= 0. When n exceeds the list length, returns a copy of all
    the numbers. (Fix: the stub returned [].)
    """
    # Slicing already copies and clamps n to the list length.
    return numeros[:n]
def copia(numeros):
    """Return a copy of the numbers without using the copy method.

    (Fix: the stub returned [].)
    """
    return numeros[:]
def no_intervalo(numeros, lim_inf, lim_sup):
    """Return the numbers inside the closed interval [lim_inf, lim_sup].

    For example, for [8, 2, 3, 12, 9] with bounds 3 and 8 returns [8, 3],
    preserving the original order. (Fix: the stub returned [].)
    """
    return [n for n in numeros if lim_inf <= n <= lim_sup]
def una(numeros1, numeros2):
    """Return a new list containing all numbers of both lists.

    (Fix: the stub returned [3].)
    """
    return numeros1 + numeros2
def pares(numeros):
    """Return the even numbers present in *numeros*.

    (Fix: the stub returned [].)
    """
    return [n for n in numeros if n % 2 == 0]
def duplica(numeros):
    """Duplicate each occurrence of the numbers in the list.

    For example, for [3, 12, 4] returns [3, 3, 12, 12, 4, 4].
    (Fix: the stub returned [].)
    """
    return [n for n in numeros for _ in range(2)]
def possui_par(numeros):
    """Return True when the list contains at least one even number.

    (Fix: the stub always returned False.)
    """
    return any(n % 2 == 0 for n in numeros)
def primeira_posicao_de_numero(numeros, numero):
    """Return the first position of *numero* in *numeros*.

    For example, for [7, 12, 8, 2, 12] and 12 returns 1. Returns None
    when the number does not appear. (Fix: the stub returned 3.)
    """
    try:
        return numeros.index(numero)
    except ValueError:
        return None
def posicoes_de_numero(numeros, numero):
    """Return all positions of *numero* in *numeros*.

    For example, for [12, 3, 9, 12, 6] and 12 returns [0, 3].
    (Fix: the stub returned [].)
    """
    return [i for i, n in enumerate(numeros) if n == numero]
def sem_repeticoes(numeros):
    """Return True when the list contains no repeated numbers.

    (Fix: the stub always returned False.)
    """
    # A set keeps one copy of each value; equal sizes mean no duplicates.
    return len(set(numeros)) == len(numeros)
def remove_ocorrencias(numeros, numero):
    """Return a new list without the occurrences of *numero*.

    For example, for [1, 4, 8, 4] and 4 returns [1, 8].
    (Fix: the stub returned [].)
    """
    return [n for n in numeros if n != numero]
def substitui_ocorrencias(numeros, numero, substituto):
    """Replace every occurrence of *numero* with *substituto*, in place.

    Mutates *numeros* and returns nothing. (Fix: the stub did nothing.)
    """
    for i, n in enumerate(numeros):
        if n == numero:
            numeros[i] = substituto
def substitui_primeira_ocorrencia(numeros, numero, substituto):
    """Replace the first occurrence of *numero* with *substituto*, in place.

    Mutates *numeros* and returns nothing. (Fix: the stub did nothing.)
    """
    for i, n in enumerate(numeros):
        if n == numero:
            numeros[i] = substituto
            return
def substitui_ultima_ocorrencia(numeros, numero, substituto):
    """Replace the last occurrence of *numero* with *substituto*, in place.

    Mutates *numeros* and returns nothing. Walks the list back to front so
    the first hit is the last occurrence. (Fix: the stub did nothing.)
    """
    for i in range(len(numeros) - 1, -1, -1):
        if numeros[i] == numero:
            numeros[i] = substituto
            return
def inverte(numeros):
    """Return a new list with the numbers in reversed positions.

    For example: for [3, 7, 1, 2] returns the new list [2, 1, 7, 3].
    (Fix: the stub returned [].)
    """
    return numeros[::-1]
def soma_pos_pares_pos_impares(numeros):
    """Return [sum at even positions, sum at odd positions].

    Even positions are 0, 2, 4, ...; odd positions are 1, 3, 5, ....
    Returns None when the list has fewer than two numbers, signalling that
    the problem has no solution. (Fix: the stub returned [55, 22].)
    """
    if len(numeros) < 2:
        return None
    return [sum(numeros[0::2]), sum(numeros[1::2])]
def das_posicoes(numeros, posicoes):
    """Return the numbers found at each position in *posicoes*.

    Assumes that *posicoes*, when non-empty, always contains valid
    positions. (Fix: the stub returned [].)
    """
    return [numeros[p] for p in posicoes]
def parte(numeros, pos, qtd):
    """Return a slice of the list starting at *pos* with up to *qtd* numbers.

    If *pos* is at or beyond the end of the list, returns an empty list.
    If fewer than *qtd* numbers exist from *pos* onwards, returns all of
    them.

    Example with [6, 3, 4, 1, 2]:
      parte(..., 1, 2) -> [3, 4]
      parte(..., 4, 3) -> [2]
      parte(..., 9, 2) -> []

    (Fix: the stub returned [].)
    """
    # Slicing clamps both ends, covering every documented case.
    return numeros[pos:pos + qtd]
def soma(numeros):
"""Calcula a soma dos números.
Se a lista estiver vazia então retorna None para indicar
que o problema não tem solução.
"""
return 22
def em_posicoes_impares(numeros):
"""Obtém os números que estão em posições ímpares.
Por exemplo, para a lista [20, 30, 40, 50] retorna [30, 50] pois são os
números que etão nas posições 1 e 3.
"""
return []
def primeiro_e_ultimo(numeros):
"""Obtém o primeiro e o último número da lista.
Caso não haja pelo menos dois números retorna None para indicar que
o problema não tem solução.
"""
return [67, 12]
def conta_ocorrencias(numeros, numero):
"""Conta quantas vezes o numero aparece na lista numeros.
"""
return 77
def posicao_do_maior(numeros):
"""Encontra a posição da primeira ocorrência do maior número da lista.
Se a lista está vazia então retorna None.
"""
return 12
def maior(numeros):
"""Encontra o maior número na lista.
Se a lista estaá vazia (não possui números) então retorna None para
indicar que o problema não tem solução.
"""
return -7
def qtd_acima_limite(numeros, limite):
"""Conta quantos números na lista são maiores que um número limite.
"""
return 23
def media(numeros):
"""Calcula a média aritmética dos números na lista.
Se a lista está vazia então retorna None para indicar que o problema não
tem solução.
"""
return 9.34
def qtd_no_intervalo(numeros, lim_inf, lim_sup):
"""Conta quantos números estão dentro do intervalo [lim_inf, lim_sup]
Por exemplo, para numeros [8, 23, 10, 9, 15] e limite inferior 8 e
limite superior 16 retorna 4.
"""
return 1
def multiplica_por_fator(numeros, fator):
"""Multiplica cada número da lista por um fator. O método não retorna nenhum dado.
Por exemplo, para numeros [8, 12, 3] e fator 2 a lista deve ser
alterada para [16, 24, 6].
"""
pass
def multiplicado_por_fator(numeros, fator):
"""Obtém uma cópia dos números da lista multiplicados por um fator.
Por exemplo, para numeros [8, 12, 3] e fator 2 o algoritmo deve retornar
uma nova lista com os números [16, 24, 6] SEM ALTERAR a lista numeros.
"""
return []
def n_primeiros(numeros, n):
"""Obtém uma cópia dos n primeiros números da lista.
Considera que n sempre é maior ou igual a zero.
Se n for maior que a quantidade de números na lista então obtém uma
cópia de todos os números.
"""
return []
def copia(numeros):
"""Obtém uma cópia dos números. Não pode usar método copy.
"""
return []
def no_intervalo(numeros, lim_inf, lim_sup):
"""Obtém os números que estão dentro do intervalo.
Por exemplo, se a lista for formada pelos números
[8, 2, 3, 12, 9] e o intervalo for de 3 a 8 deve
retornar [8, 3] pois são os únicos números maiores ou
iguais a 3 e menores ou iguais a 8.
"""
return []
def una(numeros1, numeros2):
"""Obtém uma nova lista que contém todos os números das duas listas.
"""
return [3]
def pares(numeros):
"""Obtém os números pares presentes na lista numeros.
"""
return []
def duplica(numeros):
"""Duplica a ocorrência dos números presentes na lista.
Por exemplo, para a lista [3, 12, 4] retorna [3, 3, 12, 12, 4, 4]
"""
return []
def possui_par(numeros):
"""Verifica se a lista possui pelo menos um número par.
"""
return False
def primeira_posicao_de_numero(numeros, numero):
"""Obtém a primeira posição de um número na lista numeros.
Por exemplo, para numeros [7, 12, 8, 2, 12] e numero 12 retorna 1. Se o
número não aparece na lista deve retornar None.
"""
return 3
def posicoes_de_numero(numeros, numero):
"""Obtém as posições de um número na lista de números.
Por exemplo, para numeros [12, 3, 9, 12, 6] e numero 12 retorna [0, 3]
"""
return []
def sem_repeticoes(numeros):
    """Return True when the list has no repeated numbers."""
    # A set drops duplicates, so equal sizes mean every value is unique.
    return len(set(numeros)) == len(numeros)
def remove_ocorrencias(numeros, numero):
    """Return a new list without any occurrence of numero.

    For numeros [1, 4, 8, 4] and numero 4 returns [1, 8].
    """
    return [valor for valor in numeros if valor != numero]
def substitui_ocorrencias(numeros, numero, substituto):
    """Replace every occurrence of numero by substituto, in place.

    Mutates numeros and returns nothing.
    """
    for posicao, valor in enumerate(numeros):
        if valor == numero:
            numeros[posicao] = substituto
def substitui_primeira_ocorrencia(numeros, numero, substituto):
    """Replace the first occurrence of numero by substituto, in place.

    Mutates numeros and returns nothing; does nothing when absent.
    """
    for posicao, valor in enumerate(numeros):
        if valor == numero:
            numeros[posicao] = substituto
            return  # only the first match is replaced
def substitui_ultima_ocorrencia(numeros, numero, substituto):
    """Replace the last occurrence of numero by substituto, in place.

    Mutates numeros and returns nothing; does nothing when absent.
    """
    # Walk backwards so the first match found is the last occurrence.
    for posicao in range(len(numeros) - 1, -1, -1):
        if numeros[posicao] == numero:
            numeros[posicao] = substituto
            return
def inverte(numeros):
    """Return a new list with the numbers in reversed order.

    For [3, 7, 1, 2] returns [2, 1, 7, 3]; numeros is left untouched.
    """
    return numeros[::-1]
def soma_pos_pares_pos_impares(numeros):
    """Return [sum at even positions, sum at odd positions].

    Positions 0, 2, 4, ... feed the first sum and positions 1, 3, 5,
    ... the second. Returns None when the list has fewer than two
    numbers, signalling that the problem has no solution.
    """
    if len(numeros) < 2:
        return None
    return [sum(numeros[0::2]), sum(numeros[1::2])]
def das_posicoes(numeros, posicoes):
    """Return the numbers located at the given positions, in that order.

    Assumes posicoes, when not empty, only holds valid positions.
    """
    return [numeros[posicao] for posicao in posicoes]
def parte(numeros, pos, qtd):
    """Return the slice of qtd numbers starting at position pos.

    If pos is beyond the end of the list, returns []. If fewer than
    qtd numbers exist from pos onward, returns all of them.
    Examples with [6, 3, 4, 1, 2]: qtd=2, pos=1 -> [3, 4];
    qtd=3, pos=4 -> [2]; qtd=2, pos=9 -> [].
    """
    # A slice naturally clamps both the start and the length.
    return numeros[pos:pos + qtd]
return [] | 0.690246 | 0.706102 |
import random
import numpy as np
import torch
"""Firstly, we define a helper function to generate\sample training ordinal triplets:
Step 1:
given rated item i, randomly choose item j and check whether rating of j is lower than i,
if not randomly sample another item.
each row of the sampled data in the following form:
[userId itemId_i itemId_j]
for each user u, he/she prefers item i over item j.
"""
def sample_triplet(X, batch_size):
    """Sample ``batch_size`` ordinal triplets [user, item_i, item_j].

    For every sampled user u, item i is strictly preferred (rated
    higher) over item j, i.e. X[u, i] > X[u, j].

    Args:
        X: scipy sparse user-item rating matrix.
        batch_size: number of triplets to draw.

    Returns:
        (batch_size, 3) integer ndarray of [u, i, j] rows.
    """
    # np.int was removed from NumPy (deprecated in 1.20); use a real dtype.
    sampled_data = np.zeros((batch_size, 3), dtype=np.int64)
    count = 0
    while count < batch_size:
        u = random.randint(0, X.shape[0] - 1)
        u_row = X.getrow(u)
        _, u_nz = u_row.nonzero()
        if len(u_nz) == 0:
            # A user with no ratings cannot yield a triplet (the original
            # crashed on min() of an empty selection here).
            continue
        min_rating = u_row[:, u_nz].todense().min()
        i = u_nz[random.randint(0, len(u_nz) - 1)]
        ratingi = u_row[0, i]
        # Only items rated above the user's minimum have a valid "worse" j.
        if ratingi > min_rating:
            j = u_nz[random.randint(0, len(u_nz) - 1)]
            while u_row[0, j] >= ratingi:
                j = u_nz[random.randint(0, len(u_nz) - 1)]
            sampled_data[count, :] = [u, i, j]
            count += 1
    print("Done sampling")
    return sampled_data
def coe(X, k, lamda=0.05, n_epochs=150, learning_rate=0.001, batch_size=1000, init_params=None):
    """Fit a Collaborative Ordinal Embedding model with Adam.

    Each epoch samples ``batch_size`` ordinal triplets (u, i, j) where
    user u rated item i above item j, and pulls u's embedding closer to
    i than to j while L2-regularizing the involved rows.

    Args:
        X: scipy sparse user-item rating matrix.
        k: embedding dimension.
        lamda: L2 regularization weight.
        n_epochs, learning_rate, batch_size: optimization settings.
        init_params: optional dict with numpy arrays 'U' and 'V' used to
            warm-start the factors.

    Returns:
        dict with numpy arrays 'U' (num_users x k) and 'V' (num_items x k).
    """
    # Fall back to CPU when CUDA is absent (the original hard-coded "cuda").
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Guard against init_params=None: the declared default used to crash
    # on the very first subscript.
    init_params = init_params or {}

    # Initial user factors
    if init_params.get('U') is None:
        U = torch.randn(X.shape[0], k, requires_grad=True, device=device)
    else:
        # requires_grad must be re-enabled on warm-start tensors; the
        # original handed grad-less leaves to the optimizer.
        U = torch.from_numpy(init_params['U']).to(device).requires_grad_(True)

    # Initial item factors
    if init_params.get('V') is None:
        V = torch.randn(X.shape[1], k, requires_grad=True, device=device)
    else:
        V = torch.from_numpy(init_params['V']).to(device).requires_grad_(True)

    optimizer = torch.optim.Adam([U, V], lr=learning_rate)

    for epoch in range(n_epochs):
        sampled_batch = sample_triplet(X, batch_size)
        regU = U[sampled_batch[:, 0], :]
        regI = V[sampled_batch[:, 1], :]
        regJ = V[sampled_batch[:, 2], :]
        # Regularize each involved row only once, even if sampled repeatedly.
        regU_unq = U[np.unique(sampled_batch[:, 0]), :]
        regI_unq = V[np.unique(sampled_batch[:, 1:]), :]

        # Embedding distances: the preferred item i should end up closer
        # to u than j, so sigmoid(Scorej - Scorei) should approach 1.
        Scorei = torch.norm(regU - regI, dim=1)
        Scorej = torch.norm(regU - regJ, dim=1)
        loss = lamda * (regU_unq.norm().pow(2) + regI_unq.norm().pow(2)) - torch.log(
            torch.sigmoid(Scorej - Scorei)).sum()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch:', epoch, 'loss:', loss.item())

    res = {'U': U.detach().cpu().numpy(), 'V': V.detach().cpu().numpy()}
    return res
return res | cornac/models/coe/coe.py |
import random
import numpy as np
import torch
"""Firstly, we define a helper function to generate\sample training ordinal triplets:
Step 1:
given rated item i, randomly choose item j and check whether rating of j is lower than i,
if not randomly sample another item.
each row of the sampled data in the following form:
[userId itemId_i itemId_j]
for each user u, he/she prefers item i over item j.
"""
def sample_triplet(X, batch_size):
    """Sample ``batch_size`` ordinal triplets [user, item_i, item_j].

    For every sampled user u, item i is strictly preferred (rated
    higher) over item j, i.e. X[u, i] > X[u, j].

    Args:
        X: scipy sparse user-item rating matrix.
        batch_size: number of triplets to draw.

    Returns:
        (batch_size, 3) integer ndarray of [u, i, j] rows.
    """
    # np.int was removed from NumPy (deprecated in 1.20); use a real dtype.
    sampled_data = np.zeros((batch_size, 3), dtype=np.int64)
    count = 0
    while count < batch_size:
        u = random.randint(0, X.shape[0] - 1)
        u_row = X.getrow(u)
        _, u_nz = u_row.nonzero()
        if len(u_nz) == 0:
            # A user with no ratings cannot yield a triplet (the original
            # crashed on min() of an empty selection here).
            continue
        min_rating = u_row[:, u_nz].todense().min()
        i = u_nz[random.randint(0, len(u_nz) - 1)]
        ratingi = u_row[0, i]
        # Only items rated above the user's minimum have a valid "worse" j.
        if ratingi > min_rating:
            j = u_nz[random.randint(0, len(u_nz) - 1)]
            while u_row[0, j] >= ratingi:
                j = u_nz[random.randint(0, len(u_nz) - 1)]
            sampled_data[count, :] = [u, i, j]
            count += 1
    print("Done sampling")
    return sampled_data
def coe(X, k, lamda=0.05, n_epochs=150, learning_rate=0.001, batch_size=1000, init_params=None):
    """Fit a Collaborative Ordinal Embedding model with Adam.

    Each epoch samples ``batch_size`` ordinal triplets (u, i, j) where
    user u rated item i above item j, and pulls u's embedding closer to
    i than to j while L2-regularizing the involved rows.

    Args:
        X: scipy sparse user-item rating matrix.
        k: embedding dimension.
        lamda: L2 regularization weight.
        n_epochs, learning_rate, batch_size: optimization settings.
        init_params: optional dict with numpy arrays 'U' and 'V' used to
            warm-start the factors.

    Returns:
        dict with numpy arrays 'U' (num_users x k) and 'V' (num_items x k).
    """
    # Fall back to CPU when CUDA is absent (the original hard-coded "cuda").
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Guard against init_params=None: the declared default used to crash
    # on the very first subscript.
    init_params = init_params or {}

    # Initial user factors
    if init_params.get('U') is None:
        U = torch.randn(X.shape[0], k, requires_grad=True, device=device)
    else:
        # requires_grad must be re-enabled on warm-start tensors; the
        # original handed grad-less leaves to the optimizer.
        U = torch.from_numpy(init_params['U']).to(device).requires_grad_(True)

    # Initial item factors
    if init_params.get('V') is None:
        V = torch.randn(X.shape[1], k, requires_grad=True, device=device)
    else:
        V = torch.from_numpy(init_params['V']).to(device).requires_grad_(True)

    optimizer = torch.optim.Adam([U, V], lr=learning_rate)

    for epoch in range(n_epochs):
        sampled_batch = sample_triplet(X, batch_size)
        regU = U[sampled_batch[:, 0], :]
        regI = V[sampled_batch[:, 1], :]
        regJ = V[sampled_batch[:, 2], :]
        # Regularize each involved row only once, even if sampled repeatedly.
        regU_unq = U[np.unique(sampled_batch[:, 0]), :]
        regI_unq = V[np.unique(sampled_batch[:, 1:]), :]

        # Embedding distances: the preferred item i should end up closer
        # to u than j, so sigmoid(Scorej - Scorei) should approach 1.
        Scorei = torch.norm(regU - regI, dim=1)
        Scorej = torch.norm(regU - regJ, dim=1)
        loss = lamda * (regU_unq.norm().pow(2) + regI_unq.norm().pow(2)) - torch.log(
            torch.sigmoid(Scorej - Scorei)).sum()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch:', epoch, 'loss:', loss.item())

    res = {'U': U.detach().cpu().numpy(), 'V': V.detach().cpu().numpy()}
    return res
return res | 0.712132 | 0.521837 |
from __future__ import unicode_literals
from appointments.unicsv import UnicodeCSVWriter
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.generic.base import TemplateView
from django_tables2 import RequestConfig
from .forms import AppointmentFilterForm
from .tables import ApptTable
class AppointmentMixin(object):
    """Guard appointment views behind the view permission and attach the
    filter form, plus the appointments it selects, to the view instance."""

    @method_decorator(permission_required('appointments.view_appointment'))
    def dispatch(self, request, *args, **kwargs):
        filter_form = AppointmentFilterForm(request.GET)
        self.form = filter_form
        self.items = filter_form.get_items()
        return super(AppointmentMixin, self).dispatch(request, *args, **kwargs)
class AppointmentList(AppointmentMixin, TemplateView):
    """Render a filterable, paginated table of appointments."""
    template_name = 'appointments/appointment_list.html'
    table_template_name = 'django_tables2/bootstrap-tables.html'
    items_per_page = 10

    def get_table(self):
        """Build the table and wire up request-driven pagination/sorting."""
        table = ApptTable(self.items, template=self.table_template_name)
        config = RequestConfig(self.request,
                               paginate={'per_page': self.items_per_page})
        config.configure(table)
        return table

    def get_context_data(self, *args, **kwargs):
        """Expose the filter form and the configured table to the template."""
        context = {
            'form': self.form,
            'table': self.get_table(),
        }
        return context
class CSVAppointmentList(AppointmentMixin, View):
    """Stream the filtered appointment table as a CSV download."""
    # Base name (without extension) of the downloaded file.
    filename = 'appointments'

    def get_table(self):
        """Build the unpaginated table for export."""
        table = ApptTable(self.items)
        RequestConfig(self.request).configure(table)
        return table

    def get(self, request, *args, **kwargs):
        """Return the CSV attachment, or bounce invalid filters back."""
        if not self.form.is_valid():
            # Redirect to the HTML list, preserving the query string so the
            # user can see and correct the filter errors.
            url = reverse('appointment_list')
            if request.GET:
                url = '{0}?{1}'.format(url, request.GET.urlencode())
            return HttpResponseRedirect(url)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = \
            'attachment; filename=%s.csv' % self.filename
        UnicodeCSVWriter(response).writerows(self.get_data())
        return response

    def get_data(self):
        """Return the export rows: a header row followed by cell rows."""
        table = self.get_table()
        header = [name.title() for name in table.columns.names()]
        return [header] + [[cell for cell in row] for row in table.rows]
return rows | appointments/views.py | from __future__ import unicode_literals
from appointments.unicsv import UnicodeCSVWriter
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.generic.base import TemplateView
from django_tables2 import RequestConfig
from .forms import AppointmentFilterForm
from .tables import ApptTable
class AppointmentMixin(object):
    """Guard appointment views behind the view permission and attach the
    filter form, plus the appointments it selects, to the view instance."""

    @method_decorator(permission_required('appointments.view_appointment'))
    def dispatch(self, request, *args, **kwargs):
        filter_form = AppointmentFilterForm(request.GET)
        self.form = filter_form
        self.items = filter_form.get_items()
        return super(AppointmentMixin, self).dispatch(request, *args, **kwargs)
class AppointmentList(AppointmentMixin, TemplateView):
    """Render a filterable, paginated table of appointments."""
    template_name = 'appointments/appointment_list.html'
    table_template_name = 'django_tables2/bootstrap-tables.html'
    items_per_page = 10

    def get_table(self):
        """Build the table and wire up request-driven pagination/sorting."""
        table = ApptTable(self.items, template=self.table_template_name)
        config = RequestConfig(self.request,
                               paginate={'per_page': self.items_per_page})
        config.configure(table)
        return table

    def get_context_data(self, *args, **kwargs):
        """Expose the filter form and the configured table to the template."""
        context = {
            'form': self.form,
            'table': self.get_table(),
        }
        return context
class CSVAppointmentList(AppointmentMixin, View):
    """Stream the filtered appointment table as a CSV download."""
    # Base name (without extension) of the downloaded file.
    filename = 'appointments'

    def get_table(self):
        """Build the unpaginated table for export."""
        table = ApptTable(self.items)
        RequestConfig(self.request).configure(table)
        return table

    def get(self, request, *args, **kwargs):
        """Return the CSV attachment, or bounce invalid filters back."""
        if not self.form.is_valid():
            # Redirect to the HTML list, preserving the query string so the
            # user can see and correct the filter errors.
            url = reverse('appointment_list')
            if request.GET:
                url = '{0}?{1}'.format(url, request.GET.urlencode())
            return HttpResponseRedirect(url)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = \
            'attachment; filename=%s.csv' % self.filename
        UnicodeCSVWriter(response).writerows(self.get_data())
        return response

    def get_data(self):
        """Return the export rows: a header row followed by cell rows."""
        table = self.get_table()
        header = [name.title() for name in table.columns.names()]
        return [header] + [[cell for cell in row] for row in table.rows]
return rows | 0.649801 | 0.092278 |
import xlrd
import xlwt
import json
import http.client
import random
import hashlib
from urllib import parse
from time import sleep
from xlutils.copy import copy
def translate_baidu(orginal_text, orginal_lang, goal_lang):
    """Translate text via the Baidu Translate HTTP API.

    Args:
        orginal_text: text to translate.
        orginal_lang: source language code (e.g. 'en'); see code table below.
        goal_lang: target language code (e.g. 'zh').

    Returns:
        A list of translated segments on success, or the error-message
        string on failure (kept for backward compatibility with callers
        that write the result straight into a spreadsheet cell).
    """
    appid = 'xxxxx'  # your Baidu API app id
    secretKey = 'xxxxx'  # your Baidu API secret key
    text_translated = []
    httpClient = None
    salt = random.randint(32768, 65536)
    # Sign = MD5(appid + query + salt + secret), per the Baidu API spec.
    sign = hashlib.md5(
        (appid + orginal_text + str(salt) + secretKey).encode('utf-8')
    ).hexdigest()
    myurl = ('/api/trans/vip/translate?appid=' + appid
             + '&q=' + parse.quote(orginal_text)
             + '&from=' + orginal_lang + '&to=' + goal_lang
             + '&salt=' + str(salt) + '&sign=' + sign)
    try:
        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
        httpClient.request('GET', myurl)
        response = httpClient.getresponse()
        # The API answers UTF-8 JSON. Decoding with 'unicode_escape' (as
        # the original did) mangles non-ASCII text; json.loads already
        # resolves any \uXXXX escapes inside the payload.
        dict_respond = json.loads(response.read().decode('utf-8'))
        for item in dict_respond['trans_result']:
            text_translated.append(item['dst'])
    except Exception as e:
        print('错误:' + str(e))
        text_translated = str(e)
    finally:
        if httpClient:
            httpClient.close()
    return text_translated
# Batch-translate a spreadsheet: column 1 holds the source text, row 1 the
# language codes; every remaining cell (i, j) receives the translation of
# row i's text into the language named at the top of column j.
readbook = xlrd.open_workbook('translate_original.xls')  # source workbook
sheet = readbook.sheet_by_index(1)
book2 = copy(readbook)  # writable copy of the original workbook
writesheet = book2.get_sheet(1)
for j in range(2, sheet.ncols):
    for i in range(2, sheet.nrows):
        row_list = str(sheet.cell(i, 1).value)
        dd = translate_baidu(row_list, sheet.cell(1, 1).value, sheet.cell(1, j).value)
        writesheet.write(i, j, dd)
        sleep(1)  # throttle requests (Baidu free tier is rate limited)
book2.save('translated.xls')
'''
支持语言和翻译代码
zh 中文
en 英语
yue 粤语
wyw 文言文
jp 日语
kor 韩语
fra 法语
spa 西班牙语
th 泰语
ara 阿拉伯语
ru 俄语
pt 葡萄牙语
de 德语
it 意大利语
el 希腊语
nl 荷兰语
pl 波兰语
bul 保加利亚语
est 爱沙尼亚语
dan 丹麦语
fin 芬兰语
cs 捷克语
rom 罗马尼亚语
slo 斯洛文尼亚语
swe 瑞典语
hu 匈牙利语
cht 繁体中文
vie 越南语
''' | auto_translate_baidu.py |
import xlrd
import xlwt
import json
import http.client
import random
import hashlib
from urllib import parse
from time import sleep
from xlutils.copy import copy
def translate_baidu(orginal_text, orginal_lang, goal_lang):
    """Translate text via the Baidu Translate HTTP API.

    Args:
        orginal_text: text to translate.
        orginal_lang: source language code (e.g. 'en'); see code table below.
        goal_lang: target language code (e.g. 'zh').

    Returns:
        A list of translated segments on success, or the error-message
        string on failure (kept for backward compatibility with callers
        that write the result straight into a spreadsheet cell).
    """
    appid = 'xxxxx'  # your Baidu API app id
    secretKey = 'xxxxx'  # your Baidu API secret key
    text_translated = []
    httpClient = None
    salt = random.randint(32768, 65536)
    # Sign = MD5(appid + query + salt + secret), per the Baidu API spec.
    sign = hashlib.md5(
        (appid + orginal_text + str(salt) + secretKey).encode('utf-8')
    ).hexdigest()
    myurl = ('/api/trans/vip/translate?appid=' + appid
             + '&q=' + parse.quote(orginal_text)
             + '&from=' + orginal_lang + '&to=' + goal_lang
             + '&salt=' + str(salt) + '&sign=' + sign)
    try:
        httpClient = http.client.HTTPConnection('api.fanyi.baidu.com')
        httpClient.request('GET', myurl)
        response = httpClient.getresponse()
        # The API answers UTF-8 JSON. Decoding with 'unicode_escape' (as
        # the original did) mangles non-ASCII text; json.loads already
        # resolves any \uXXXX escapes inside the payload.
        dict_respond = json.loads(response.read().decode('utf-8'))
        for item in dict_respond['trans_result']:
            text_translated.append(item['dst'])
    except Exception as e:
        print('错误:' + str(e))
        text_translated = str(e)
    finally:
        if httpClient:
            httpClient.close()
    return text_translated
# Batch-translate a spreadsheet: column 1 holds the source text, row 1 the
# language codes; every remaining cell (i, j) receives the translation of
# row i's text into the language named at the top of column j.
readbook = xlrd.open_workbook('translate_original.xls')  # source workbook
sheet = readbook.sheet_by_index(1)
book2 = copy(readbook)  # writable copy of the original workbook
writesheet = book2.get_sheet(1)
for j in range(2, sheet.ncols):
    for i in range(2, sheet.nrows):
        row_list = str(sheet.cell(i, 1).value)
        dd = translate_baidu(row_list, sheet.cell(1, 1).value, sheet.cell(1, j).value)
        writesheet.write(i, j, dd)
        sleep(1)  # throttle requests (Baidu free tier is rate limited)
book2.save('translated.xls')
'''
支持语言和翻译代码
zh 中文
en 英语
yue 粤语
wyw 文言文
jp 日语
kor 韩语
fra 法语
spa 西班牙语
th 泰语
ara 阿拉伯语
ru 俄语
pt 葡萄牙语
de 德语
it 意大利语
el 希腊语
nl 荷兰语
pl 波兰语
bul 保加利亚语
est 爱沙尼亚语
dan 丹麦语
fin 芬兰语
cs 捷克语
rom 罗马尼亚语
slo 斯洛文尼亚语
swe 瑞典语
hu 匈牙利语
cht 繁体中文
vie 越南语
''' | 0.115361 | 0.056444 |
import uuid
from unittest import TestCase
from metadata.generated.schema.api.data.createDatabase import CreateDatabaseRequest
from metadata.generated.schema.api.data.createMlModel import CreateMlModelRequest
from metadata.generated.schema.api.data.createTable import CreateTableRequest
from metadata.generated.schema.api.services.createDatabaseService import (
CreateDatabaseServiceRequest,
)
from metadata.generated.schema.api.teams.createUser import CreateUserRequest
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.data.mlmodel import (
FeatureSource,
FeatureSourceDataType,
FeatureType,
MlFeature,
MlHyperParameter,
MlModel,
)
from metadata.generated.schema.entity.data.table import Column, DataType, Table
from metadata.generated.schema.entity.services.databaseService import (
DatabaseConnection,
DatabaseService,
DatabaseServiceType,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
class OMetaModelTest(TestCase):
    """
    Run this integration test with the local API available
    Install the ingestion package before running the tests
    """

    # NOTE(review): the statements below run at class-definition (import)
    # time, so merely importing this module requires a live OpenMetadata
    # server at localhost:8585.
    server_config = MetadataServerConfig(api_endpoint="http://localhost:8585/api")
    metadata = OpenMetadata(server_config)

    assert metadata.health_check()

    # Shared fixtures reused by the test methods below.
    user = metadata.create_or_update(
        data=CreateUserRequest(name="random-user", email="<EMAIL>"),
    )
    owner = EntityReference(id=user.id, type="user")

    entity = MlModel(
        id=uuid.uuid4(),
        name="test-model",
        algorithm="algo",
        fullyQualifiedName="test-model",
    )

    create = CreateMlModelRequest(name="test-model", algorithm="algo")

    def test_create(self):
        """
        We can create a Model and we receive it back as Entity
        """
        res = self.metadata.create_or_update(data=self.create)

        self.assertEqual(res.name, self.entity.name)
        self.assertEqual(res.algorithm, self.entity.algorithm)
        self.assertEqual(res.owner, None)

    def test_update(self):
        """
        Updating it properly changes its properties
        """
        res_create = self.metadata.create_or_update(data=self.create)

        updated = self.create.dict(exclude_unset=True)
        updated["owner"] = self.owner
        updated_entity = CreateMlModelRequest(**updated)

        res = self.metadata.create_or_update(data=updated_entity)

        # Same ID, updated algorithm
        self.assertEqual(res.algorithm, updated_entity.algorithm)
        self.assertEqual(res_create.id, res.id)
        self.assertEqual(res.owner.id, self.user.id)

        # Getting without owner field does not return it by default
        res_none = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        self.assertIsNone(res_none.owner)

        # We can request specific fields to be added
        res_owner = self.metadata.get_by_name(
            entity=MlModel,
            fqdn=self.entity.fullyQualifiedName,
            fields=["owner", "followers"],
        )
        self.assertEqual(res_owner.owner.id, self.user.id)

    def test_get_name(self):
        """
        We can fetch a model by name and get it back as Entity
        """
        self.metadata.create_or_update(data=self.create)

        res = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        self.assertEqual(res.name, self.entity.name)

    def test_get_id(self):
        """
        We can fetch a model by ID and get it back as Entity
        """
        self.metadata.create_or_update(data=self.create)

        # First pick up by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res = self.metadata.get_by_id(entity=MlModel, entity_id=res_name.id)

        self.assertEqual(res_name.id, res.id)

    def test_list(self):
        """
        We can list all our models
        """
        self.metadata.create_or_update(data=self.create)

        res = self.metadata.list_entities(entity=MlModel)

        # Fetch our test model. We have already inserted it, so we should find it
        data = next(
            iter(ent for ent in res.entities if ent.name == self.entity.name), None
        )
        assert data

    def test_delete(self):
        """
        We can delete a model by ID
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res_id = self.metadata.get_by_id(
            entity=MlModel, entity_id=str(res_name.id.__root__)
        )

        # Delete
        self.metadata.delete(entity=MlModel, entity_id=str(res_id.id.__root__))

        # Then we should not find it
        res = self.metadata.list_entities(entity=MlModel)
        assert not next(
            iter(
                ent
                for ent in res.entities
                if ent.fullyQualifiedName == self.entity.fullyQualifiedName
            ),
            None,
        )

    def test_mlmodel_properties(self):
        """
        Check that we can create models with MLFeatures and MLHyperParams
        We can add lineage information
        """
        # Build a throwaway service -> database -> tables hierarchy so the
        # model's feature sources have real tables to point at.
        service = CreateDatabaseServiceRequest(
            name="test-service-table-ml",
            serviceType=DatabaseServiceType.MySQL,
            databaseConnection=DatabaseConnection(hostPort="localhost:8000"),
        )
        service_entity = self.metadata.create_or_update(data=service)

        create_db = CreateDatabaseRequest(
            name="test-db-ml",
            service=EntityReference(id=service_entity.id, type="databaseService"),
        )
        create_db_entity = self.metadata.create_or_update(data=create_db)

        create_table1 = CreateTableRequest(
            name="test-ml",
            database=create_db_entity.id,
            columns=[Column(name="education", dataType=DataType.STRING)],
        )
        table1_entity = self.metadata.create_or_update(data=create_table1)

        create_table2 = CreateTableRequest(
            name="another_test-ml",
            database=create_db_entity.id,
            columns=[Column(name="age", dataType=DataType.INT)],
        )
        table2_entity = self.metadata.create_or_update(data=create_table2)

        model = CreateMlModelRequest(
            name="test-model-lineage",
            algorithm="algo",
            mlFeatures=[
                MlFeature(
                    name="age",
                    dataType=FeatureType.numerical,
                    featureSources=[
                        FeatureSource(
                            name="age",
                            dataType=FeatureSourceDataType.integer,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table2_entity.fullyQualifiedName
                            ),
                        )
                    ],
                ),
                MlFeature(
                    name="persona",
                    dataType=FeatureType.categorical,
                    featureSources=[
                        FeatureSource(
                            name="age",
                            dataType=FeatureSourceDataType.integer,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table2_entity.fullyQualifiedName
                            ),
                        ),
                        FeatureSource(
                            name="education",
                            dataType=FeatureSourceDataType.string,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table1_entity.fullyQualifiedName
                            ),
                        ),
                        FeatureSource(
                            name="city", dataType=FeatureSourceDataType.string
                        ),
                    ],
                    featureAlgorithm="PCA",
                ),
            ],
            mlHyperParameters=[
                MlHyperParameter(name="regularisation", value="0.5"),
                MlHyperParameter(name="random", value="hello"),
            ],
            target="myTarget",
        )

        res = self.metadata.create_or_update(data=model)
        self.assertIsNotNone(res.mlFeatures)
        self.assertIsNotNone(res.mlHyperParameters)

        # Lineage nodes must be exactly the two source tables.
        lineage = self.metadata.add_mlmodel_lineage(model=res)
        nodes = {node["id"] for node in lineage["nodes"]}
        assert nodes == {str(table1_entity.id.__root__), str(table2_entity.id.__root__)}

        # Clean up the fixtures created above.
        self.metadata.delete(entity=Table, entity_id=table1_entity.id)
        self.metadata.delete(entity=Table, entity_id=table2_entity.id)
        self.metadata.delete(entity=Database, entity_id=create_db_entity.id)
        self.metadata.delete(entity=DatabaseService, entity_id=service_entity.id)

    def test_list_versions(self):
        """
        test list MLmodel entity versions
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        res = self.metadata.get_list_entity_versions(
            entity=MlModel, entity_id=res_name.id.__root__
        )
        assert res

    def test_get_entity_version(self):
        """
        test get MLModel entity version
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        res = self.metadata.get_entity_version(
            entity=MlModel, entity_id=res_name.id.__root__, version=0.1
        )

        # check we get the correct version requested and the correct entity ID
        assert res.version.__root__ == 0.1
        assert res.id == res_name.id

    def test_get_entity_ref(self):
        """
        test get EntityReference
        """
        res = self.metadata.create_or_update(data=self.create)
        entity_ref = self.metadata.get_entity_reference(
            entity=MlModel, fqdn=res.fullyQualifiedName
        )
assert res.id == entity_ref.id | ingestion/tests/integration/ometa/test_ometa_model_api.py | import uuid
from unittest import TestCase
from metadata.generated.schema.api.data.createDatabase import CreateDatabaseRequest
from metadata.generated.schema.api.data.createMlModel import CreateMlModelRequest
from metadata.generated.schema.api.data.createTable import CreateTableRequest
from metadata.generated.schema.api.services.createDatabaseService import (
CreateDatabaseServiceRequest,
)
from metadata.generated.schema.api.teams.createUser import CreateUserRequest
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.data.mlmodel import (
FeatureSource,
FeatureSourceDataType,
FeatureType,
MlFeature,
MlHyperParameter,
MlModel,
)
from metadata.generated.schema.entity.data.table import Column, DataType, Table
from metadata.generated.schema.entity.services.databaseService import (
DatabaseConnection,
DatabaseService,
DatabaseServiceType,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
class OMetaModelTest(TestCase):
    """
    Run this integration test with the local API available
    Install the ingestion package before running the tests
    """

    # NOTE(review): the statements below run at class-definition (import)
    # time, so merely importing this module requires a live OpenMetadata
    # server at localhost:8585.
    server_config = MetadataServerConfig(api_endpoint="http://localhost:8585/api")
    metadata = OpenMetadata(server_config)

    assert metadata.health_check()

    # Shared fixtures reused by the test methods below.
    user = metadata.create_or_update(
        data=CreateUserRequest(name="random-user", email="<EMAIL>"),
    )
    owner = EntityReference(id=user.id, type="user")

    entity = MlModel(
        id=uuid.uuid4(),
        name="test-model",
        algorithm="algo",
        fullyQualifiedName="test-model",
    )

    create = CreateMlModelRequest(name="test-model", algorithm="algo")

    def test_create(self):
        """
        We can create a Model and we receive it back as Entity
        """
        res = self.metadata.create_or_update(data=self.create)

        self.assertEqual(res.name, self.entity.name)
        self.assertEqual(res.algorithm, self.entity.algorithm)
        self.assertEqual(res.owner, None)

    def test_update(self):
        """
        Updating it properly changes its properties
        """
        res_create = self.metadata.create_or_update(data=self.create)

        updated = self.create.dict(exclude_unset=True)
        updated["owner"] = self.owner
        updated_entity = CreateMlModelRequest(**updated)

        res = self.metadata.create_or_update(data=updated_entity)

        # Same ID, updated algorithm
        self.assertEqual(res.algorithm, updated_entity.algorithm)
        self.assertEqual(res_create.id, res.id)
        self.assertEqual(res.owner.id, self.user.id)

        # Getting without owner field does not return it by default
        res_none = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        self.assertIsNone(res_none.owner)

        # We can request specific fields to be added
        res_owner = self.metadata.get_by_name(
            entity=MlModel,
            fqdn=self.entity.fullyQualifiedName,
            fields=["owner", "followers"],
        )
        self.assertEqual(res_owner.owner.id, self.user.id)

    def test_get_name(self):
        """
        We can fetch a model by name and get it back as Entity
        """
        self.metadata.create_or_update(data=self.create)

        res = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        self.assertEqual(res.name, self.entity.name)

    def test_get_id(self):
        """
        We can fetch a model by ID and get it back as Entity
        """
        self.metadata.create_or_update(data=self.create)

        # First pick up by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res = self.metadata.get_by_id(entity=MlModel, entity_id=res_name.id)

        self.assertEqual(res_name.id, res.id)

    def test_list(self):
        """
        We can list all our models
        """
        self.metadata.create_or_update(data=self.create)

        res = self.metadata.list_entities(entity=MlModel)

        # Fetch our test model. We have already inserted it, so we should find it
        data = next(
            iter(ent for ent in res.entities if ent.name == self.entity.name), None
        )
        assert data

    def test_delete(self):
        """
        We can delete a model by ID
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        # Then fetch by ID
        res_id = self.metadata.get_by_id(
            entity=MlModel, entity_id=str(res_name.id.__root__)
        )

        # Delete
        self.metadata.delete(entity=MlModel, entity_id=str(res_id.id.__root__))

        # Then we should not find it
        res = self.metadata.list_entities(entity=MlModel)
        assert not next(
            iter(
                ent
                for ent in res.entities
                if ent.fullyQualifiedName == self.entity.fullyQualifiedName
            ),
            None,
        )

    def test_mlmodel_properties(self):
        """
        Check that we can create models with MLFeatures and MLHyperParams
        We can add lineage information
        """
        # Build a throwaway service -> database -> tables hierarchy so the
        # model's feature sources have real tables to point at.
        service = CreateDatabaseServiceRequest(
            name="test-service-table-ml",
            serviceType=DatabaseServiceType.MySQL,
            databaseConnection=DatabaseConnection(hostPort="localhost:8000"),
        )
        service_entity = self.metadata.create_or_update(data=service)

        create_db = CreateDatabaseRequest(
            name="test-db-ml",
            service=EntityReference(id=service_entity.id, type="databaseService"),
        )
        create_db_entity = self.metadata.create_or_update(data=create_db)

        create_table1 = CreateTableRequest(
            name="test-ml",
            database=create_db_entity.id,
            columns=[Column(name="education", dataType=DataType.STRING)],
        )
        table1_entity = self.metadata.create_or_update(data=create_table1)

        create_table2 = CreateTableRequest(
            name="another_test-ml",
            database=create_db_entity.id,
            columns=[Column(name="age", dataType=DataType.INT)],
        )
        table2_entity = self.metadata.create_or_update(data=create_table2)

        model = CreateMlModelRequest(
            name="test-model-lineage",
            algorithm="algo",
            mlFeatures=[
                MlFeature(
                    name="age",
                    dataType=FeatureType.numerical,
                    featureSources=[
                        FeatureSource(
                            name="age",
                            dataType=FeatureSourceDataType.integer,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table2_entity.fullyQualifiedName
                            ),
                        )
                    ],
                ),
                MlFeature(
                    name="persona",
                    dataType=FeatureType.categorical,
                    featureSources=[
                        FeatureSource(
                            name="age",
                            dataType=FeatureSourceDataType.integer,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table2_entity.fullyQualifiedName
                            ),
                        ),
                        FeatureSource(
                            name="education",
                            dataType=FeatureSourceDataType.string,
                            dataSource=self.metadata.get_entity_reference(
                                entity=Table, fqdn=table1_entity.fullyQualifiedName
                            ),
                        ),
                        FeatureSource(
                            name="city", dataType=FeatureSourceDataType.string
                        ),
                    ],
                    featureAlgorithm="PCA",
                ),
            ],
            mlHyperParameters=[
                MlHyperParameter(name="regularisation", value="0.5"),
                MlHyperParameter(name="random", value="hello"),
            ],
            target="myTarget",
        )

        res = self.metadata.create_or_update(data=model)
        self.assertIsNotNone(res.mlFeatures)
        self.assertIsNotNone(res.mlHyperParameters)

        # Lineage nodes must be exactly the two source tables.
        lineage = self.metadata.add_mlmodel_lineage(model=res)
        nodes = {node["id"] for node in lineage["nodes"]}
        assert nodes == {str(table1_entity.id.__root__), str(table2_entity.id.__root__)}

        # Clean up the fixtures created above.
        self.metadata.delete(entity=Table, entity_id=table1_entity.id)
        self.metadata.delete(entity=Table, entity_id=table2_entity.id)
        self.metadata.delete(entity=Database, entity_id=create_db_entity.id)
        self.metadata.delete(entity=DatabaseService, entity_id=service_entity.id)

    def test_list_versions(self):
        """
        test list MLmodel entity versions
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        res = self.metadata.get_list_entity_versions(
            entity=MlModel, entity_id=res_name.id.__root__
        )
        assert res

    def test_get_entity_version(self):
        """
        test get MLModel entity version
        """
        self.metadata.create_or_update(data=self.create)

        # Find by name
        res_name = self.metadata.get_by_name(
            entity=MlModel, fqdn=self.entity.fullyQualifiedName
        )
        res = self.metadata.get_entity_version(
            entity=MlModel, entity_id=res_name.id.__root__, version=0.1
        )

        # check we get the correct version requested and the correct entity ID
        assert res.version.__root__ == 0.1
        assert res.id == res_name.id

    def test_get_entity_ref(self):
        """
        test get EntityReference
        """
        res = self.metadata.create_or_update(data=self.create)
        entity_ref = self.metadata.get_entity_reference(
            entity=MlModel, fqdn=res.fullyQualifiedName
        )
assert res.id == entity_ref.id | 0.580828 | 0.203925 |
from logging import getLogger
from pyds8k.messages import INVALID_TYPE
from pyds8k import PYDS8K_DEFAULT_LOGGER
from pyds8k.base import Resource, Manager
from .mixins import RootResourceMixin
from pyds8k.exceptions import OperationNotAllowed, \
URLNotSpecifiedError, \
FieldReadOnly
from ....utils import get_resource_class_by_name
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
class Base(RootResourceMixin, Resource):
# If there is a field named "id" in response data,
# the id_field can't be set to value other than "id"
id_field = 'id'
url_field = 'link'
base_url = '/api/v1'
create_method = 'posta'
# Required only in writable resources, fileds are from _template
# Resource id is exclude.
readonly_fileds = ()
# Not like related_resource, related_resources_list is not set during
# loading, its keys use lazy-loading to get details.
related_resources_collection = ()
def _add_details(self, info, force=False):
super(Base, self)._add_details(info, force=force)
self._start_updating()
self._set_related_resources_collection()
self._stop_updating()
def _set_related_resources_collection(self):
for key in self.related_resources_collection:
res = self.representation.get(key)
# If the related resources(should be a list) are not in info,
# will empty them and wait for lazy-loading.
if not isinstance(res, list):
self.representation[key] = ''
try:
delattr(self, key)
except AttributeError:
pass
# If the related resources(should be a list) are in info, set it.
else:
re_class, re_manager = self._get_resource_class_by_name(key)
res_list = [re_class(self.client,
manager=re_manager(self.client),
info=r)
for r in res]
setattr(self, key, res_list)
def __setattr__(self, key, value):
if key in self.readonly_fileds and not self.is_updating():
raise FieldReadOnly(key)
super(Base, self).__setattr__(key, value)
try:
if key in self.related_resources_collection:
ids = [getattr(item, item.id_field) for item in value]
self.representation[key] = ids
if not self.is_updating():
self._set_modified_info_dict(key, ids)
except AttributeError:
pass
def __getattr__(self, key):
if key in self.related_resources_collection:
try:
return getattr(self, 'get_{}'.format(key))()
except Exception as e:
logger.debug(
"Can not get {} from {}, reason is: {}".format(
key, self, type(e)
)
)
raise AttributeError(key)
return super(Base, self).__getattr__(key)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self._get_id())
def _get_resource_class_by_name(self, resource_type):
prefix = '{}.{}'.format(self.client.service_type,
self.client.service_version
)
return get_resource_class_by_name(resource_type, prefix)
def _verify_type(self, new_type, valid_type_list):
if new_type and not (new_type in valid_type_list):
raise ValueError(
INVALID_TYPE.format(', '.join(valid_type_list))
)
class SingletonBase(Base):
# A singleton resource has no id field by default
id_field = '*'
class BaseManager(Manager):
resource_class = Base
response_key = 'data'
resource_type = ''
def _post(self, url='', body=None):
post_body = None
if not body:
if self.managed_object is not None:
post_body = self.managed_object._get_modified_info_dict()
# repre = self.managed_object.representation
# post_body = {key: value
# for key, value in repre.iteritems()
# if key not in self.managed_object.readonly_fileds
# }
else:
raise URLNotSpecifiedError()
else:
post_body = body
return super(BaseManager, self)._post(url=url, body=post_body)
# DS8K will use PUT in PATCH way, and don't use PATCH.
def _put(self, url='', body=None):
put_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
# use modified info here
put_body = body if body else \
self.managed_object._get_modified_info_dict()
else:
raise URLNotSpecifiedError()
else:
self.url = url
put_body = body
resp, body = self.client.put(self.url,
body=self._get_request_data(put_body)
)
data = self._get_data(body, method='PUT', response=resp)
return resp, data
def _patch(self, url='', body=None):
return self._put(url=url, body=body)
def get(self, resource_id='', url='', obj_class=None, **kwargs):
raise OperationNotAllowed('get', self.resource_class.__name__)
def list(self, url='', obj_class=None, body=None, **kwargs):
raise OperationNotAllowed('list', self.resource_class.__name__)
def post(self, url='', body=None):
raise OperationNotAllowed('post', self.resource_class.__name__)
def posta(self, url='', body=None):
raise OperationNotAllowed('posta', self.resource_class.__name__)
def put(self, url='', body=None):
raise OperationNotAllowed('put', self.resource_class.__name__)
def patch(self, url='', body=None):
raise OperationNotAllowed('patch', self.resource_class.__name__)
def delete(self, url=''):
raise OperationNotAllowed('delete', self.resource_class.__name__)
class ReadOnlyManager(BaseManager):
def get(self, resource_id='', url='', obj_class=None, **kwargs):
return self._get(resource_id=resource_id,
url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs)
class SingletonBaseManager(BaseManager):
def get(self, url='', obj_class=None, **kwargs):
return self._get(url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs) | pyds8k/resources/ds8k/v1/common/base.py | from logging import getLogger
from pyds8k.messages import INVALID_TYPE
from pyds8k import PYDS8K_DEFAULT_LOGGER
from pyds8k.base import Resource, Manager
from .mixins import RootResourceMixin
from pyds8k.exceptions import OperationNotAllowed, \
URLNotSpecifiedError, \
FieldReadOnly
from ....utils import get_resource_class_by_name
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
class Base(RootResourceMixin, Resource):
# If there is a field named "id" in response data,
# the id_field can't be set to value other than "id"
id_field = 'id'
url_field = 'link'
base_url = '/api/v1'
create_method = 'posta'
# Required only in writable resources, fileds are from _template
# Resource id is exclude.
readonly_fileds = ()
# Not like related_resource, related_resources_list is not set during
# loading, its keys use lazy-loading to get details.
related_resources_collection = ()
def _add_details(self, info, force=False):
super(Base, self)._add_details(info, force=force)
self._start_updating()
self._set_related_resources_collection()
self._stop_updating()
def _set_related_resources_collection(self):
for key in self.related_resources_collection:
res = self.representation.get(key)
# If the related resources(should be a list) are not in info,
# will empty them and wait for lazy-loading.
if not isinstance(res, list):
self.representation[key] = ''
try:
delattr(self, key)
except AttributeError:
pass
# If the related resources(should be a list) are in info, set it.
else:
re_class, re_manager = self._get_resource_class_by_name(key)
res_list = [re_class(self.client,
manager=re_manager(self.client),
info=r)
for r in res]
setattr(self, key, res_list)
def __setattr__(self, key, value):
if key in self.readonly_fileds and not self.is_updating():
raise FieldReadOnly(key)
super(Base, self).__setattr__(key, value)
try:
if key in self.related_resources_collection:
ids = [getattr(item, item.id_field) for item in value]
self.representation[key] = ids
if not self.is_updating():
self._set_modified_info_dict(key, ids)
except AttributeError:
pass
def __getattr__(self, key):
if key in self.related_resources_collection:
try:
return getattr(self, 'get_{}'.format(key))()
except Exception as e:
logger.debug(
"Can not get {} from {}, reason is: {}".format(
key, self, type(e)
)
)
raise AttributeError(key)
return super(Base, self).__getattr__(key)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self._get_id())
def _get_resource_class_by_name(self, resource_type):
prefix = '{}.{}'.format(self.client.service_type,
self.client.service_version
)
return get_resource_class_by_name(resource_type, prefix)
def _verify_type(self, new_type, valid_type_list):
if new_type and not (new_type in valid_type_list):
raise ValueError(
INVALID_TYPE.format(', '.join(valid_type_list))
)
class SingletonBase(Base):
# A singleton resource has no id field by default
id_field = '*'
class BaseManager(Manager):
resource_class = Base
response_key = 'data'
resource_type = ''
def _post(self, url='', body=None):
post_body = None
if not body:
if self.managed_object is not None:
post_body = self.managed_object._get_modified_info_dict()
# repre = self.managed_object.representation
# post_body = {key: value
# for key, value in repre.iteritems()
# if key not in self.managed_object.readonly_fileds
# }
else:
raise URLNotSpecifiedError()
else:
post_body = body
return super(BaseManager, self)._post(url=url, body=post_body)
# DS8K will use PUT in PATCH way, and don't use PATCH.
def _put(self, url='', body=None):
put_body = None
if not url:
if self.managed_object is not None:
self.url = self.managed_object.url
# use modified info here
put_body = body if body else \
self.managed_object._get_modified_info_dict()
else:
raise URLNotSpecifiedError()
else:
self.url = url
put_body = body
resp, body = self.client.put(self.url,
body=self._get_request_data(put_body)
)
data = self._get_data(body, method='PUT', response=resp)
return resp, data
def _patch(self, url='', body=None):
return self._put(url=url, body=body)
def get(self, resource_id='', url='', obj_class=None, **kwargs):
raise OperationNotAllowed('get', self.resource_class.__name__)
def list(self, url='', obj_class=None, body=None, **kwargs):
raise OperationNotAllowed('list', self.resource_class.__name__)
def post(self, url='', body=None):
raise OperationNotAllowed('post', self.resource_class.__name__)
def posta(self, url='', body=None):
raise OperationNotAllowed('posta', self.resource_class.__name__)
def put(self, url='', body=None):
raise OperationNotAllowed('put', self.resource_class.__name__)
def patch(self, url='', body=None):
raise OperationNotAllowed('patch', self.resource_class.__name__)
def delete(self, url=''):
raise OperationNotAllowed('delete', self.resource_class.__name__)
class ReadOnlyManager(BaseManager):
def get(self, resource_id='', url='', obj_class=None, **kwargs):
return self._get(resource_id=resource_id,
url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs)
class SingletonBaseManager(BaseManager):
def get(self, url='', obj_class=None, **kwargs):
return self._get(url=url, obj_class=obj_class, **kwargs)
def list(self, url='', obj_class=None, body=None, **kwargs):
return self._list(url=url, obj_class=obj_class, body=body, **kwargs) | 0.466603 | 0.067577 |
import pytest
from awx.main.access import (
OAuth2ApplicationAccess,
OAuth2TokenAccess,
)
from awx.main.models.oauth import (
OAuth2Application as Application,
OAuth2AccessToken as AccessToken,
)
from awx.api.versioning import reverse
@pytest.mark.django_db
class TestOAuthApplication:
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_read_change_delete(
self, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
access = OAuth2ApplicationAccess(user_list[user_for_access])
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
assert access.can_read(app) is can_access
assert access.can_change(app, {}) is can_access
assert access.can_delete(app) is can_access
def test_superuser_can_always_create(self, admin, org_admin, org_member, alice):
access = OAuth2ApplicationAccess(admin)
for user in [admin, org_admin, org_member, alice]:
assert access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
def test_normal_user_cannot_create(self, admin, org_admin, org_member, alice):
for access_user in [org_member, alice]:
access = OAuth2ApplicationAccess(access_user)
for user in [admin, org_admin, org_member, alice]:
assert not access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
def test_org_admin_can_create_in_org(self, admin, org_admin, org_member, alice):
access = OAuth2ApplicationAccess(org_admin)
for user in [admin, alice]:
assert not access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
for user in [org_admin, org_member]:
assert access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
@pytest.mark.skip(reason="Needs Update - CA")
@pytest.mark.django_db
class TestOAuthToken:
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_read_change_delete(
self, post, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
access = OAuth2TokenAccess(user_list[user_for_access])
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
response = post(
reverse('api:o_auth2_application_token_list', kwargs={'pk': app.pk}),
{'scope': 'read'}, admin, expect=201
)
token = AccessToken.objects.get(token=response.data['token'])
assert access.can_read(token) is can_access # TODO: fix this test
assert access.can_change(token, {}) is can_access
assert access.can_delete(token) is can_access
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_create(
self, post, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
post(
reverse('api:o_auth2_application_token_list', kwargs={'pk': app.pk}),
{'scope': 'read'}, user_list[user_for_access], expect=201 if can_access else 403
) | awx/main/tests/functional/test_rbac_oauth.py | import pytest
from awx.main.access import (
OAuth2ApplicationAccess,
OAuth2TokenAccess,
)
from awx.main.models.oauth import (
OAuth2Application as Application,
OAuth2AccessToken as AccessToken,
)
from awx.api.versioning import reverse
@pytest.mark.django_db
class TestOAuthApplication:
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_read_change_delete(
self, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
access = OAuth2ApplicationAccess(user_list[user_for_access])
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
assert access.can_read(app) is can_access
assert access.can_change(app, {}) is can_access
assert access.can_delete(app) is can_access
def test_superuser_can_always_create(self, admin, org_admin, org_member, alice):
access = OAuth2ApplicationAccess(admin)
for user in [admin, org_admin, org_member, alice]:
assert access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
def test_normal_user_cannot_create(self, admin, org_admin, org_member, alice):
for access_user in [org_member, alice]:
access = OAuth2ApplicationAccess(access_user)
for user in [admin, org_admin, org_member, alice]:
assert not access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
def test_org_admin_can_create_in_org(self, admin, org_admin, org_member, alice):
access = OAuth2ApplicationAccess(org_admin)
for user in [admin, alice]:
assert not access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
for user in [org_admin, org_member]:
assert access.can_add({
'name': 'test app', 'user': user.pk, 'client_type': 'confidential',
'authorization_grant_type': 'password'
})
@pytest.mark.skip(reason="Needs Update - CA")
@pytest.mark.django_db
class TestOAuthToken:
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_read_change_delete(
self, post, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
access = OAuth2TokenAccess(user_list[user_for_access])
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
response = post(
reverse('api:o_auth2_application_token_list', kwargs={'pk': app.pk}),
{'scope': 'read'}, admin, expect=201
)
token = AccessToken.objects.get(token=response.data['token'])
assert access.can_read(token) is can_access # TODO: fix this test
assert access.can_change(token, {}) is can_access
assert access.can_delete(token) is can_access
@pytest.mark.parametrize("user_for_access, can_access_list", [
(0, [True, True, True, True]),
(1, [False, True, True, False]),
(2, [False, False, True, False]),
(3, [False, False, False, True]),
])
def test_can_create(
self, post, admin, org_admin, org_member, alice, user_for_access, can_access_list
):
user_list = [admin, org_admin, org_member, alice]
for user, can_access in zip(user_list, can_access_list):
app = Application.objects.create(
name='test app for {}'.format(user.username), user=user,
client_type='confidential', authorization_grant_type='password'
)
post(
reverse('api:o_auth2_application_token_list', kwargs={'pk': app.pk}),
{'scope': 'read'}, user_list[user_for_access], expect=201 if can_access else 403
) | 0.265404 | 0.23975 |
import telegram
import schedule
import time
from bs4 import BeautifulSoup
import requests
import random
import json
#MAIN
chat_ID = CHAT_ID_SIIA
bot = telegram.Bot(token='BOT_TOKEN_SIIA')
#USA Debt
debt_URL = "https://www.pgpf.org/national-debt-clock"
#HeaderUserAgentSpoof
user_agent_list = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3198.0 Safari/537.36 OPR/49.0.2711.0',
'Opera/9.80 (Linux armv7l) Presto/2.12.407 Version/12.51 , D50u-D1-UHD/V1.5.16-UHD (Vizio, D50u-D1, Wireless)',
]
def ProxyGrab():
req = requests.get("https://proxy11.com/api/proxy.json?key=PROXY11_TOKEN_SIIA&limit=1&port=80")
dataproxy = req.json()
proxy = '{' + "'http': '" + dataproxy['data'][0]["ip"] + ":" + dataproxy['data'][0]["port"] + "'}"
return proxy
def HeaderUserAgentSpoof(list):
user_agent = random.choice(list)
headers = {
'User-Agent': user_agent,
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip',
'DNT' : '1',
'Connection' : 'close'
}
return headers
def USADebt():
proxyFormat = eval(ProxyGrab())
header = HeaderUserAgentSpoof(user_agent_list)
s = requests.session()
req = s.get(debt_URL, headers = header, proxies = proxyFormat)
soup = BeautifulSoup(req.content, 'html.parser')
debt = soup.find("div", class_ = "debt-gross")
bot.sendMessage(chat_ID, text = "В данный момент национальный долг США составляет: \n" + debt.text)
s.cookies.clear()
if __name__ == '__main__':
schedule.every().day.at("20:00").do(USADebt)
while True:
schedule.run_pending()
time.sleep(1) | bot.py | import telegram
import schedule
import time
from bs4 import BeautifulSoup
import requests
import random
import json
#MAIN
chat_ID = CHAT_ID_SIIA
bot = telegram.Bot(token='BOT_TOKEN_SIIA')
#USA Debt
debt_URL = "https://www.pgpf.org/national-debt-clock"
#HeaderUserAgentSpoof
user_agent_list = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3198.0 Safari/537.36 OPR/49.0.2711.0',
'Opera/9.80 (Linux armv7l) Presto/2.12.407 Version/12.51 , D50u-D1-UHD/V1.5.16-UHD (Vizio, D50u-D1, Wireless)',
]
def ProxyGrab():
req = requests.get("https://proxy11.com/api/proxy.json?key=PROXY11_TOKEN_SIIA&limit=1&port=80")
dataproxy = req.json()
proxy = '{' + "'http': '" + dataproxy['data'][0]["ip"] + ":" + dataproxy['data'][0]["port"] + "'}"
return proxy
def HeaderUserAgentSpoof(list):
user_agent = random.choice(list)
headers = {
'User-Agent': user_agent,
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip',
'DNT' : '1',
'Connection' : 'close'
}
return headers
def USADebt():
proxyFormat = eval(ProxyGrab())
header = HeaderUserAgentSpoof(user_agent_list)
s = requests.session()
req = s.get(debt_URL, headers = header, proxies = proxyFormat)
soup = BeautifulSoup(req.content, 'html.parser')
debt = soup.find("div", class_ = "debt-gross")
bot.sendMessage(chat_ID, text = "В данный момент национальный долг США составляет: \n" + debt.text)
s.cookies.clear()
if __name__ == '__main__':
schedule.every().day.at("20:00").do(USADebt)
while True:
schedule.run_pending()
time.sleep(1) | 0.251648 | 0.066995 |
from propara.data.proglobal_dataset_reader import ProGlobalDatasetReader
from allennlp.common.testing import AllenNlpTestCase
class TestDataReader(AllenNlpTestCase):
def test_read_from_file(self):
sc_reader = ProGlobalDatasetReader()
dataset = sc_reader.read('tests/fixtures/proglobal_toy_data.tsv')
instances = dataset
assert len(instances) == 20
# read first instance
fields = instances[0].fields
assert fields["tokens_list"].sequence_length() == fields["positions_list"].sequence_length()
tokens_list_fields = fields["tokens_list"].field_list
field0 = tokens_list_fields[0]
field0_tokens = [t.text for t in field0.tokens[0:10]]
correct_field0_tokens = ["when", "water", "freeze", "it", "become", "10", "%", "bigger", ",", "or"]
assert field0_tokens == correct_field0_tokens
before_loc_start_field = fields["before_loc_start"].sequence_index
before_loc_end_field = fields["before_loc_end"].sequence_index
assert before_loc_start_field == 0
assert before_loc_end_field == 0
after_loc_start_fields = fields["after_loc_start_list"].field_list
after_loc_end_fields = fields["after_loc_end_list"].field_list
after_loc_start_fields0 = after_loc_start_fields[0].sequence_index
after_loc_end_fields0 = after_loc_end_fields[0].sequence_index
assert after_loc_start_fields0 == 0
assert after_loc_end_fields0 == 0
before_category = fields["before_category"].sequence_index
assert before_category == 1
after_category_fields = fields["after_category_list"].field_list
after_category_fields0 = after_category_fields[0].sequence_index
assert after_category_fields0 == 1
before_category_mask = fields["before_category_mask"].sequence_index
assert before_category_mask == 0
after_category_mask_fields = fields["after_category_mask_list"].field_list
after_category_mask_fields0 = after_category_mask_fields[0].sequence_index
assert after_category_mask_fields0 == 0 | tests/data/proglobal_dataset_reader_test.py | from propara.data.proglobal_dataset_reader import ProGlobalDatasetReader
from allennlp.common.testing import AllenNlpTestCase
class TestDataReader(AllenNlpTestCase):
def test_read_from_file(self):
sc_reader = ProGlobalDatasetReader()
dataset = sc_reader.read('tests/fixtures/proglobal_toy_data.tsv')
instances = dataset
assert len(instances) == 20
# read first instance
fields = instances[0].fields
assert fields["tokens_list"].sequence_length() == fields["positions_list"].sequence_length()
tokens_list_fields = fields["tokens_list"].field_list
field0 = tokens_list_fields[0]
field0_tokens = [t.text for t in field0.tokens[0:10]]
correct_field0_tokens = ["when", "water", "freeze", "it", "become", "10", "%", "bigger", ",", "or"]
assert field0_tokens == correct_field0_tokens
before_loc_start_field = fields["before_loc_start"].sequence_index
before_loc_end_field = fields["before_loc_end"].sequence_index
assert before_loc_start_field == 0
assert before_loc_end_field == 0
after_loc_start_fields = fields["after_loc_start_list"].field_list
after_loc_end_fields = fields["after_loc_end_list"].field_list
after_loc_start_fields0 = after_loc_start_fields[0].sequence_index
after_loc_end_fields0 = after_loc_end_fields[0].sequence_index
assert after_loc_start_fields0 == 0
assert after_loc_end_fields0 == 0
before_category = fields["before_category"].sequence_index
assert before_category == 1
after_category_fields = fields["after_category_list"].field_list
after_category_fields0 = after_category_fields[0].sequence_index
assert after_category_fields0 == 1
before_category_mask = fields["before_category_mask"].sequence_index
assert before_category_mask == 0
after_category_mask_fields = fields["after_category_mask_list"].field_list
after_category_mask_fields0 = after_category_mask_fields[0].sequence_index
assert after_category_mask_fields0 == 0 | 0.579519 | 0.458106 |
import mxnet as mx
import numpy as np
import os
import time
import pickle
import logging
import models
import dataloader
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet import autograd
from mxnet.gluon import nn
from config import DefaultConfig
from utils.visualize import Visualizer
def get_logger(opt):
logging.basicConfig(format='%(asctime)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = opt.log_file_path
fh = logging.FileHandler(log_file_path)
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('initialize logger')
return logger
def convert_model_gpu(model):
# model.initialize()
model.collect_params().reset_ctx(mx.gpu())
def convert_model_cpu(model):
model.collect_params().reset_ctx(mx.cpu())
def train(model_train, train_dataloader, val_dataloader, logger, opt):
# visualization
vis = Visualizer(opt.env)
# preload
if opt.preload:
model_train.load_parameters(opt.load_file_path)
# set train mode
model_train.collect_params().setattr('grad_req', 'write')
# model_train.collect_train_params().setattr('grad_req', 'write')
trainer = gluon.Trainer(model_train.collect_params(),
'sgd',
{'learning_rate': opt.lr,
'wd': opt.wd,
'momentum': opt.momentum,
'clip_gradient': 5})
# lr decay
lr_decay = float(opt.lr_decay)
# train_loss
train_loss = gluon.loss.SoftmaxCrossEntropyLoss()
logger.info('Starting training from Epoch {}'.format(opt.start_epoch+1))
best_acc = 0
for epoch in range(opt.start_epoch, opt.max_epoch):
start_time = time.time()
loss_his = []
for i, (data, label) in enumerate(train_dataloader):
data = data.as_in_context(opt.ctx)
label = label.astype('float32').as_in_context(opt.ctx)
with autograd.record():
output = model_train(data)
loss = train_loss(output, label)
autograd.backward(loss)
trainer.step(opt.batch_size)
loss_ = loss.sum().asscalar()
if loss_ < 1e5:
loss_his.append(loss_)
if loss_ < 1e5 and (i+1) % opt.log_interval == 0:
logger.info('[Epoch {}] [Batch {}]: train_loss: {:.5f}'.format(epoch+1, i+1, float(loss_/opt.batch_size)))
vis.plot('train_loss', float(loss_/opt.batch_size))
# epoch finish
logger.info('[Epoch {} finishes]: total {} batches, use {:.3f} seconds, speed: {:.3f} s/batch'.format(
epoch+1, i+1, time.time()-start_time, float((time.time()-start_time)/(i+1))))
vis.plot('train_epoch_loss', float(sum(loss_his)/len(loss_his)/opt.batch_size))
# validate
if not (epoch+1) % opt.val_interval:
val_acc, val_loss = validate(model_train, val_dataloader, opt)
current_acc = val_acc
vis.plot('val_acc', val_acc)
# TODO
vis.plot('val_loss', val_loss)
logger.info('[Epoch {}] Validation: predict accuracy {:.2f}'.format(epoch+1, current_acc))
else:
current_acc = 0
# save params
if current_acc > best_acc:
best_acc = current_acc
model_train.save_parameters(opt.save_path+'epoch{}_acc_{:.2f}.params'.format(epoch+1, current_acc))
logger.info('[Epoch {}] acc: {}, save parameters!!!'.format(epoch+1, current_acc))
else:
# learning rate decay
new_lr = trainer.learning_rate * lr_decay
trainer.set_learning_rate(new_lr)
logger.info('[Epoch {}]: set learing rate to {}'.format(epoch+1, new_lr))
def validate(model, val_dataloader, opt):
total_num = 0
correct_num = 0
val_loss = gluon.loss.SoftmaxCrossEntropyLoss()
val_loss_his = []
for i, (data, label) in enumerate(val_dataloader):
output = model(data.as_in_context(opt.ctx))
output = output.as_in_context(mx.cpu())
loss = val_loss(output, label)
val_loss_his.append(loss.sum().asscalar())
pred = output.argmax(axis=1).astype('int').asnumpy()
label = label.astype('int').asnumpy()
total_num += label.shape[0]
correct_num += (label == pred).sum()
# print('total correct num: ', total_num)
val_acc = 100 * float(correct_num) / float(total_num)
val_mean_loss = float(sum(val_loss_his)/len(val_loss_his)/opt.batch_size)
return val_acc, val_mean_loss
if __name__ == '__main__':
opt = DefaultConfig()
opt.parse({'model': 'VGG16',
'env': 'VGG16',
'lr': 0.001,
'train_dir': '/home/qinliang/dataset/stanford_dog_dataset/cut_images_train',
'valid_dir': '/home/qinliang/dataset/stanford_dog_dataset/cut_images_val',
'save_path': './cut_image_checkpoints/',
'lr_decay': 0.5,
'preload': True,
'start_epoch': 0,
'max_epoch': 50,
'batch_size': 32,
'wd': 15e-4,
'load_file_path': '/home/qinliang/Desktop/kaggle/dog_recognition_gluon/checkpoints/epoch16_acc_99.31.params',
'log_file_path': './log/VGG16_cut_image.log'})
logger = get_logger(opt)
model_train = getattr(models, opt.model)()
model_train.initialize()
convert_model_gpu(model_train)
model_train.hybridize()
train_dataloader, val_dataloader = dataloader.DogDataLoader(opt)
train(model_train, train_dataloader, val_dataloader, logger, opt) | dog_recognition/main.py | import mxnet as mx
import numpy as np
import os
import time
import pickle
import logging
import models
import dataloader
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet import autograd
from mxnet.gluon import nn
from config import DefaultConfig
from utils.visualize import Visualizer
def get_logger(opt):
logging.basicConfig(format='%(asctime)s - %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = opt.log_file_path
fh = logging.FileHandler(log_file_path)
formatter = logging.Formatter('%(asctime)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.info('initialize logger')
return logger
def convert_model_gpu(model):
# model.initialize()
model.collect_params().reset_ctx(mx.gpu())
def convert_model_cpu(model):
model.collect_params().reset_ctx(mx.cpu())
def train(model_train, train_dataloader, val_dataloader, logger, opt):
# visualization
vis = Visualizer(opt.env)
# preload
if opt.preload:
model_train.load_parameters(opt.load_file_path)
# set train mode
model_train.collect_params().setattr('grad_req', 'write')
# model_train.collect_train_params().setattr('grad_req', 'write')
trainer = gluon.Trainer(model_train.collect_params(),
'sgd',
{'learning_rate': opt.lr,
'wd': opt.wd,
'momentum': opt.momentum,
'clip_gradient': 5})
# lr decay
lr_decay = float(opt.lr_decay)
# train_loss
train_loss = gluon.loss.SoftmaxCrossEntropyLoss()
logger.info('Starting training from Epoch {}'.format(opt.start_epoch+1))
best_acc = 0
for epoch in range(opt.start_epoch, opt.max_epoch):
start_time = time.time()
loss_his = []
for i, (data, label) in enumerate(train_dataloader):
data = data.as_in_context(opt.ctx)
label = label.astype('float32').as_in_context(opt.ctx)
with autograd.record():
output = model_train(data)
loss = train_loss(output, label)
autograd.backward(loss)
trainer.step(opt.batch_size)
loss_ = loss.sum().asscalar()
if loss_ < 1e5:
loss_his.append(loss_)
if loss_ < 1e5 and (i+1) % opt.log_interval == 0:
logger.info('[Epoch {}] [Batch {}]: train_loss: {:.5f}'.format(epoch+1, i+1, float(loss_/opt.batch_size)))
vis.plot('train_loss', float(loss_/opt.batch_size))
# epoch finish
logger.info('[Epoch {} finishes]: total {} batches, use {:.3f} seconds, speed: {:.3f} s/batch'.format(
epoch+1, i+1, time.time()-start_time, float((time.time()-start_time)/(i+1))))
vis.plot('train_epoch_loss', float(sum(loss_his)/len(loss_his)/opt.batch_size))
# validate
if not (epoch+1) % opt.val_interval:
val_acc, val_loss = validate(model_train, val_dataloader, opt)
current_acc = val_acc
vis.plot('val_acc', val_acc)
# TODO
vis.plot('val_loss', val_loss)
logger.info('[Epoch {}] Validation: predict accuracy {:.2f}'.format(epoch+1, current_acc))
else:
current_acc = 0
# save params
if current_acc > best_acc:
best_acc = current_acc
model_train.save_parameters(opt.save_path+'epoch{}_acc_{:.2f}.params'.format(epoch+1, current_acc))
logger.info('[Epoch {}] acc: {}, save parameters!!!'.format(epoch+1, current_acc))
else:
# learning rate decay
new_lr = trainer.learning_rate * lr_decay
trainer.set_learning_rate(new_lr)
logger.info('[Epoch {}]: set learing rate to {}'.format(epoch+1, new_lr))
def validate(model, val_dataloader, opt):
total_num = 0
correct_num = 0
val_loss = gluon.loss.SoftmaxCrossEntropyLoss()
val_loss_his = []
for i, (data, label) in enumerate(val_dataloader):
output = model(data.as_in_context(opt.ctx))
output = output.as_in_context(mx.cpu())
loss = val_loss(output, label)
val_loss_his.append(loss.sum().asscalar())
pred = output.argmax(axis=1).astype('int').asnumpy()
label = label.astype('int').asnumpy()
total_num += label.shape[0]
correct_num += (label == pred).sum()
# print('total correct num: ', total_num)
val_acc = 100 * float(correct_num) / float(total_num)
val_mean_loss = float(sum(val_loss_his)/len(val_loss_his)/opt.batch_size)
return val_acc, val_mean_loss
if __name__ == '__main__':
opt = DefaultConfig()
opt.parse({'model': 'VGG16',
'env': 'VGG16',
'lr': 0.001,
'train_dir': '/home/qinliang/dataset/stanford_dog_dataset/cut_images_train',
'valid_dir': '/home/qinliang/dataset/stanford_dog_dataset/cut_images_val',
'save_path': './cut_image_checkpoints/',
'lr_decay': 0.5,
'preload': True,
'start_epoch': 0,
'max_epoch': 50,
'batch_size': 32,
'wd': 15e-4,
'load_file_path': '/home/qinliang/Desktop/kaggle/dog_recognition_gluon/checkpoints/epoch16_acc_99.31.params',
'log_file_path': './log/VGG16_cut_image.log'})
logger = get_logger(opt)
model_train = getattr(models, opt.model)()
model_train.initialize()
convert_model_gpu(model_train)
model_train.hybridize()
train_dataloader, val_dataloader = dataloader.DogDataLoader(opt)
train(model_train, train_dataloader, val_dataloader, logger, opt) | 0.444565 | 0.174164 |
from purpledefrag.app.controllers.maps import (
RandomMapController, MapInfoController
)
from purpledefrag.app.controllers.misc import (
HelpController, TimeController, MeController,
TomController, ReminderController, HadesController
)
from purpledefrag.app.controllers.records import (
LoginController, WhoController, FinishLineController,
RankingsController, RegistrationController,
LogoutController, BestTimeController, SpeedRankingsController,
SpeedAwardController, WorldRecordController,
LoginReminderController, SetPasswordController
)
import purpledefrag.app.g as g
routes = g.routes
#routes.addRule("reminder", ReminderController)
routes.addRule("entrance", LoginReminderController)
#routes.addRule("entrance", HadesController)
routes.addRule("login", LoginController)
routes.addRule("logout", LogoutController)
routes.addRule("register", RegistrationController)
routes.addRule("whoami", WhoController)
routes.addRule("random", RandomMapController)
routes.addRule("findmap", RandomMapController)
routes.addRule("h", HelpController)
routes.addRule("time", TimeController)
routes.addRule("me", MeController)
routes.addRule("mapinfo", MapInfoController)
routes.addRule("clienttimerstop", FinishLineController)
routes.addRule("clientspeedaward", SpeedAwardController)
routes.addRule("top", RankingsController)
routes.addRule("topspeed", SpeedRankingsController)
routes.addRule("mytop", BestTimeController)
routes.addRule("mypr", BestTimeController)
routes.addRule("pr", RankingsController)
routes.addRule("hip", TomController)
routes.addRule("wr", WorldRecordController)
routes.addRule("mdd", WorldRecordController)
routes.addRule("setpass", SetPasswordController)
routes.addRule("trouve", RandomMapController)
'''routes.addRule("newmaps", newmaps)
routes.addRule("request", maprequest)
routes.addRule("coolmap", upvote)
routes.addRule("crapmap", downvote)
routes.addRule("lastmap", lastmap)
def tom(request):
from purpledefrag import BunnyResponse
return BunnyResponse("^6Hippeh is piece full. Bunny luf hippeh.")
routes.addRule("tom", tom)
def me(request):
from purple import ChatResponse
return ChatResponse("^2^ --- that guy likes to talk in third person.")
routes.addRule("me", me)''' | data/train/python/4ab75c43343e3d5919ba318ad54b0297f9f70fa5routes.py | from purpledefrag.app.controllers.maps import (
RandomMapController, MapInfoController
)
from purpledefrag.app.controllers.misc import (
HelpController, TimeController, MeController,
TomController, ReminderController, HadesController
)
from purpledefrag.app.controllers.records import (
LoginController, WhoController, FinishLineController,
RankingsController, RegistrationController,
LogoutController, BestTimeController, SpeedRankingsController,
SpeedAwardController, WorldRecordController,
LoginReminderController, SetPasswordController
)
import purpledefrag.app.g as g
routes = g.routes
#routes.addRule("reminder", ReminderController)
routes.addRule("entrance", LoginReminderController)
#routes.addRule("entrance", HadesController)
routes.addRule("login", LoginController)
routes.addRule("logout", LogoutController)
routes.addRule("register", RegistrationController)
routes.addRule("whoami", WhoController)
routes.addRule("random", RandomMapController)
routes.addRule("findmap", RandomMapController)
routes.addRule("h", HelpController)
routes.addRule("time", TimeController)
routes.addRule("me", MeController)
routes.addRule("mapinfo", MapInfoController)
routes.addRule("clienttimerstop", FinishLineController)
routes.addRule("clientspeedaward", SpeedAwardController)
routes.addRule("top", RankingsController)
routes.addRule("topspeed", SpeedRankingsController)
routes.addRule("mytop", BestTimeController)
routes.addRule("mypr", BestTimeController)
routes.addRule("pr", RankingsController)
routes.addRule("hip", TomController)
routes.addRule("wr", WorldRecordController)
routes.addRule("mdd", WorldRecordController)
routes.addRule("setpass", SetPasswordController)
routes.addRule("trouve", RandomMapController)
'''routes.addRule("newmaps", newmaps)
routes.addRule("request", maprequest)
routes.addRule("coolmap", upvote)
routes.addRule("crapmap", downvote)
routes.addRule("lastmap", lastmap)
def tom(request):
from purpledefrag import BunnyResponse
return BunnyResponse("^6Hippeh is piece full. Bunny luf hippeh.")
routes.addRule("tom", tom)
def me(request):
from purple import ChatResponse
return ChatResponse("^2^ --- that guy likes to talk in third person.")
routes.addRule("me", me)''' | 0.277865 | 0.042167 |
import os
import platform
import re
import sys
import textwrap
import traceback
from urllib.parse import urlencode
import click
import filelock
import portalocker
import renku.cli.utils.color as color
from renku.core.commands.echo import ERROR
from renku.core.errors import MigrationRequired, ParameterError, ProjectNotSupported, RenkuException, UsageError
from renku.service.config import SENTRY_ENABLED, SENTRY_SAMPLERATE
_BUG = click.style("Ahhhhhhhh! You have found a bug. 🐞\n\n", fg=color.RED, bold=True)
HAS_SENTRY = SENTRY_ENABLED
if SENTRY_ENABLED:
try:
from importlib.metadata import PackageNotFoundError, distribution
except ImportError:
from importlib_metadata import PackageNotFoundError, distribution
try:
distribution("sentry-sdk")
except PackageNotFoundError:
HAS_SENTRY = False
class RenkuExceptionsHandler(click.Group):
"""Handles all RenkuExceptions."""
def main(self, *args, **kwargs):
"""Catch and print all Renku exceptions."""
try:
return super().main(*args, **kwargs)
except RenkuException as e:
click.echo(ERROR + str(e), err=True)
if e.__cause__ is not None:
click.echo(f"\n{traceback.format_exc()}")
exit_code = 1
if isinstance(e, (ParameterError, UsageError)):
exit_code = 2
elif isinstance(e, MigrationRequired):
exit_code = 3
elif isinstance(e, ProjectNotSupported):
exit_code = 4
sys.exit(exit_code)
class IssueFromTraceback(RenkuExceptionsHandler):
"""Create an issue with formatted exception."""
REPO_URL = "https://github.com/SwissDataScienceCenter/renku-python"
ISSUE_SUFFIX = "/issues/new"
def __init__(self, *args, **kwargs):
"""Initialize a Sentry client."""
super().__init__(*args, **kwargs)
if HAS_SENTRY:
import sentry_sdk
sentry_sdk.init(
dsn=os.getenv("SENTRY_DSN"), environment=os.getenv("SENTRY_ENV"), traces_sample_rate=SENTRY_SAMPLERATE
)
def main(self, *args, **kwargs):
"""Catch all exceptions."""
try:
result = super().main(*args, **kwargs)
return result
except (filelock.Timeout, portalocker.LockException, portalocker.AlreadyLocked):
click.echo(
(
click.style("Unable to acquire lock.\n", fg=color.RED) + "Hint: Please wait for another renku "
"process to finish and then try again."
)
)
except Exception:
if HAS_SENTRY:
self._handle_sentry()
if not (sys.stdin.isatty() and sys.stdout.isatty()):
raise
self._handle_github()
def _handle_sentry(self):
"""Handle exceptions using Sentry."""
from sentry_sdk import capture_exception, configure_scope
from sentry_sdk.utils import capture_internal_exceptions
with configure_scope() as scope:
with capture_internal_exceptions():
from renku.core.commands.git import get_git_home
from renku.core.metadata.repository import Repository
user = Repository(get_git_home()).get_user()
scope.user = {"name": user.name, "email": user.email}
event_id = capture_exception()
click.echo(_BUG + "Recorded in Sentry with ID: {0}\n".format(event_id), err=True)
raise
def _handle_github(self):
"""Handle exception and submit it as GitHub issue."""
value = click.prompt(
_BUG
+ click.style('1. Open an issue by typing "open";\n', fg=color.GREEN)
+ click.style("2. Print human-readable information by typing " '"print";\n', fg=color.YELLOW)
+ click.style(
"3. See the full traceback without submitting details " '(default: "ignore").\n\n', fg=color.RED
)
+ "Please select an action by typing its name",
type=click.Choice(["open", "print", "ignore"]),
default="ignore",
)
getattr(self, "_process_" + value)()
def _format_issue_title(self):
"""Return formatted title."""
return textwrap.shorten("cli: renku " + " ".join(sys.argv[1:]), width=50)
def _format_issue_body(self, limit=-5):
"""Return formatted body."""
from renku import __version__
re_paths = r"(" + r"|".join([path or os.getcwd() for path in sys.path]) + r")"
tb = re.sub(re_paths, "[...]", traceback.format_exc(limit=limit))
return (
"## Describe the bug\nA clear and concise description.\n\n"
"## Details\n"
"*Please verify and redact the details.*\n\n"
"**Renku version:** " + __version__ + "\n"
"**OS:** " + platform.system() + " (" + platform.version() + ")\n"
"**Python:** " + platform.python_version() + "\n\n"
"### Traceback\n\n```\n" + tb + "```\n\n"
"## Additional context\nAdd any other context about the problem."
)
def _format_issue_url(self):
"""Format full issue URL."""
query = urlencode({"title": self._format_issue_title(), "body": self._format_issue_body()})
return self.REPO_URL + self.ISSUE_SUFFIX + "?" + query
def _process_open(self):
"""Open link in a browser."""
click.launch(self._format_issue_url())
if not click.confirm("Did it work?", default=True):
click.echo()
self._process_print()
click.secho("\nOpen the line manually and copy the text above\n", fg=color.YELLOW)
click.secho(" " + self.REPO_URL + self.ISSUE_SUFFIX + "\n", bold=True)
def _process_print(self):
"""Print link in a console."""
click.echo(self._format_issue_body(limit=None))
def _process_ignore(self):
"""Print original exception in a console."""
raise | renku/cli/exception_handler.py | import os
import platform
import re
import sys
import textwrap
import traceback
from urllib.parse import urlencode
import click
import filelock
import portalocker
import renku.cli.utils.color as color
from renku.core.commands.echo import ERROR
from renku.core.errors import MigrationRequired, ParameterError, ProjectNotSupported, RenkuException, UsageError
from renku.service.config import SENTRY_ENABLED, SENTRY_SAMPLERATE
_BUG = click.style("Ahhhhhhhh! You have found a bug. 🐞\n\n", fg=color.RED, bold=True)
HAS_SENTRY = SENTRY_ENABLED
if SENTRY_ENABLED:
try:
from importlib.metadata import PackageNotFoundError, distribution
except ImportError:
from importlib_metadata import PackageNotFoundError, distribution
try:
distribution("sentry-sdk")
except PackageNotFoundError:
HAS_SENTRY = False
class RenkuExceptionsHandler(click.Group):
"""Handles all RenkuExceptions."""
def main(self, *args, **kwargs):
"""Catch and print all Renku exceptions."""
try:
return super().main(*args, **kwargs)
except RenkuException as e:
click.echo(ERROR + str(e), err=True)
if e.__cause__ is not None:
click.echo(f"\n{traceback.format_exc()}")
exit_code = 1
if isinstance(e, (ParameterError, UsageError)):
exit_code = 2
elif isinstance(e, MigrationRequired):
exit_code = 3
elif isinstance(e, ProjectNotSupported):
exit_code = 4
sys.exit(exit_code)
class IssueFromTraceback(RenkuExceptionsHandler):
"""Create an issue with formatted exception."""
REPO_URL = "https://github.com/SwissDataScienceCenter/renku-python"
ISSUE_SUFFIX = "/issues/new"
def __init__(self, *args, **kwargs):
"""Initialize a Sentry client."""
super().__init__(*args, **kwargs)
if HAS_SENTRY:
import sentry_sdk
sentry_sdk.init(
dsn=os.getenv("SENTRY_DSN"), environment=os.getenv("SENTRY_ENV"), traces_sample_rate=SENTRY_SAMPLERATE
)
def main(self, *args, **kwargs):
"""Catch all exceptions."""
try:
result = super().main(*args, **kwargs)
return result
except (filelock.Timeout, portalocker.LockException, portalocker.AlreadyLocked):
click.echo(
(
click.style("Unable to acquire lock.\n", fg=color.RED) + "Hint: Please wait for another renku "
"process to finish and then try again."
)
)
except Exception:
if HAS_SENTRY:
self._handle_sentry()
if not (sys.stdin.isatty() and sys.stdout.isatty()):
raise
self._handle_github()
def _handle_sentry(self):
"""Handle exceptions using Sentry."""
from sentry_sdk import capture_exception, configure_scope
from sentry_sdk.utils import capture_internal_exceptions
with configure_scope() as scope:
with capture_internal_exceptions():
from renku.core.commands.git import get_git_home
from renku.core.metadata.repository import Repository
user = Repository(get_git_home()).get_user()
scope.user = {"name": user.name, "email": user.email}
event_id = capture_exception()
click.echo(_BUG + "Recorded in Sentry with ID: {0}\n".format(event_id), err=True)
raise
def _handle_github(self):
"""Handle exception and submit it as GitHub issue."""
value = click.prompt(
_BUG
+ click.style('1. Open an issue by typing "open";\n', fg=color.GREEN)
+ click.style("2. Print human-readable information by typing " '"print";\n', fg=color.YELLOW)
+ click.style(
"3. See the full traceback without submitting details " '(default: "ignore").\n\n', fg=color.RED
)
+ "Please select an action by typing its name",
type=click.Choice(["open", "print", "ignore"]),
default="ignore",
)
getattr(self, "_process_" + value)()
def _format_issue_title(self):
"""Return formatted title."""
return textwrap.shorten("cli: renku " + " ".join(sys.argv[1:]), width=50)
def _format_issue_body(self, limit=-5):
"""Return formatted body."""
from renku import __version__
re_paths = r"(" + r"|".join([path or os.getcwd() for path in sys.path]) + r")"
tb = re.sub(re_paths, "[...]", traceback.format_exc(limit=limit))
return (
"## Describe the bug\nA clear and concise description.\n\n"
"## Details\n"
"*Please verify and redact the details.*\n\n"
"**Renku version:** " + __version__ + "\n"
"**OS:** " + platform.system() + " (" + platform.version() + ")\n"
"**Python:** " + platform.python_version() + "\n\n"
"### Traceback\n\n```\n" + tb + "```\n\n"
"## Additional context\nAdd any other context about the problem."
)
def _format_issue_url(self):
"""Format full issue URL."""
query = urlencode({"title": self._format_issue_title(), "body": self._format_issue_body()})
return self.REPO_URL + self.ISSUE_SUFFIX + "?" + query
def _process_open(self):
"""Open link in a browser."""
click.launch(self._format_issue_url())
if not click.confirm("Did it work?", default=True):
click.echo()
self._process_print()
click.secho("\nOpen the line manually and copy the text above\n", fg=color.YELLOW)
click.secho(" " + self.REPO_URL + self.ISSUE_SUFFIX + "\n", bold=True)
def _process_print(self):
"""Print link in a console."""
click.echo(self._format_issue_body(limit=None))
def _process_ignore(self):
"""Print original exception in a console."""
raise | 0.406744 | 0.103567 |
import uuid
from aitools.logic.core import Constant, Variable, Expression, LogicWrapper
from aitools.logic.language import Language
def _do_test_symbol_representation(*, function, symbol_class, name, next_id, result):
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = next_id
symbol = symbol_class(name=name, language=language)
assert function(symbol) == result
def test_constant_str__no_name():
_do_test_symbol_representation(function=str, symbol_class=Constant, name=None, next_id=33, result="o33")
def test_constant_str__with_name():
_do_test_symbol_representation(function=str, symbol_class=Constant, name="foo", next_id=33, result="foo33")
def test_variable_str__no_name():
_do_test_symbol_representation(function=str, symbol_class=Variable, name=None, next_id=33, result="?v33")
def test_variable_str__with_name():
_do_test_symbol_representation(function=str, symbol_class=Variable, name="foo", next_id=33, result="?foo33")
def test_constant_repr__no_name():
_do_test_symbol_representation(
function=repr, symbol_class=Constant, name=None, next_id=33,
result="Constant(name=None, id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_constant_repr__with_name():
_do_test_symbol_representation(
function=repr, symbol_class=Constant, name="foo", next_id=33,
result="Constant(name='foo', id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_variable_repr__no_name():
_do_test_symbol_representation(
function=repr, symbol_class=Variable, name=None, next_id=33,
result="Variable(name=None, id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_variable_repr__with_name():
_do_test_symbol_representation(
function=repr, symbol_class=Variable, name="foo", next_id=33,
result="Variable(name='foo', id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_expression_str():
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = 33
expr = Expression(
Constant(name='a', language=language), Constant(name='b', language=language),
Expression(Constant(name='c', language=language))
)
assert str(expr) == '(a33, b34, (c35))'
def test_expression_repr():
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = 33
expr = Expression(
Constant(name='a', language=language), Constant(name='b', language=language),
Expression(Constant(name='c', language=language))
)
assert repr(expr) == "Expression(" \
"(Constant(name='a', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33)), " \
"Constant(name='b', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=34)), " \
"Expression((Constant(name='c', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=35)),))))"
def test_logic_wrapper_str():
wrapper = LogicWrapper("foo")
assert str(wrapper) == "{foo}"
def test_logic_wrapper_repr():
wrapper = LogicWrapper("foo")
assert repr(wrapper) == "LogicWrapper('foo')" | pytests/logic/test_strings.py | import uuid
from aitools.logic.core import Constant, Variable, Expression, LogicWrapper
from aitools.logic.language import Language
def _do_test_symbol_representation(*, function, symbol_class, name, next_id, result):
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = next_id
symbol = symbol_class(name=name, language=language)
assert function(symbol) == result
def test_constant_str__no_name():
_do_test_symbol_representation(function=str, symbol_class=Constant, name=None, next_id=33, result="o33")
def test_constant_str__with_name():
_do_test_symbol_representation(function=str, symbol_class=Constant, name="foo", next_id=33, result="foo33")
def test_variable_str__no_name():
_do_test_symbol_representation(function=str, symbol_class=Variable, name=None, next_id=33, result="?v33")
def test_variable_str__with_name():
_do_test_symbol_representation(function=str, symbol_class=Variable, name="foo", next_id=33, result="?foo33")
def test_constant_repr__no_name():
_do_test_symbol_representation(
function=repr, symbol_class=Constant, name=None, next_id=33,
result="Constant(name=None, id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_constant_repr__with_name():
_do_test_symbol_representation(
function=repr, symbol_class=Constant, name="foo", next_id=33,
result="Constant(name='foo', id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_variable_repr__no_name():
_do_test_symbol_representation(
function=repr, symbol_class=Variable, name=None, next_id=33,
result="Variable(name=None, id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_variable_repr__with_name():
_do_test_symbol_representation(
function=repr, symbol_class=Variable, name="foo", next_id=33,
result="Variable(name='foo', id=Identifier(language=Language(language_id=UUID("
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33))")
def test_expression_str():
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = 33
expr = Expression(
Constant(name='a', language=language), Constant(name='b', language=language),
Expression(Constant(name='c', language=language))
)
assert str(expr) == '(a33, b34, (c35))'
def test_expression_repr():
language = Language()
language._id = uuid.UUID(int=0)
language._next_id = 33
expr = Expression(
Constant(name='a', language=language), Constant(name='b', language=language),
Expression(Constant(name='c', language=language))
)
assert repr(expr) == "Expression(" \
"(Constant(name='a', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=33)), " \
"Constant(name='b', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=34)), " \
"Expression((Constant(name='c', id=Identifier(language=Language(language_id=UUID(" \
"'00000000-0000-0000-0000-000000000000'), next_id=0), sequential_id=35)),))))"
def test_logic_wrapper_str():
wrapper = LogicWrapper("foo")
assert str(wrapper) == "{foo}"
def test_logic_wrapper_repr():
wrapper = LogicWrapper("foo")
assert repr(wrapper) == "LogicWrapper('foo')" | 0.601945 | 0.143397 |
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from arcanelab.ouroboros.executors import Workflow
from arcanelab.ouroboros.models import NodeSpec, TransitionSpec
from arcanelab.ouroboros.support import CallableReference
from arcanelab.ouroboros import exceptions
from .support import ValidationErrorWrappingTestCase
from .models import Task, Area
class WorkflowInstanceTestCase(ValidationErrorWrappingTestCase):
    """Integration tests driving a sample Task through a multi-course workflow spec.

    The spec installed by :meth:`_base_install_workflow_spec` has a main course
    plus parallel 'control' and 'invoice' branches; 'control' splits again into
    'approval' and 'audit'. Data fixtures come from :meth:`_install_users_and_data`.
    """

    def _base_install_workflow_spec(self):
        """
        Installs a dummy workflow, having all the possible nodes in a
        main course, being ok.
        """
        # Dict-literal workflow description. Node types exercised: ENTER, INPUT,
        # STEP, SPLIT, MULTIPLEXER, EXIT, CANCEL and JOINED. Callables
        # (joiners, conditions, landing handlers) are referenced by dotted path
        # into the sample app's support module.
        spec = {'model': 'sample.Task', 'code': 'wfspec', 'name': 'Workflow Spec',
                'create_permission': 'sample.create_task',
                'cancel_permission': 'sample.cancel_task',
                'courses': [{
                    # Main course: created -> reviewed -> assigned -> started ->
                    # completed, then a SPLIT into control/invoice and a
                    # MULTIPLEXER on the task's service type.
                    'code': '', 'name': 'Main',
                    'nodes': [{
                        'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
                        'description': 'Origin',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'created', 'name': 'Created',
                        'description': 'The task was just created at this point. Yet to review',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'reviewed', 'name': 'Reviewed',
                        'description': 'The task was just reviewed at this point. Yet to be assigned',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'assigned', 'name': 'Assigned',
                        'description': 'The task was just assigned at this point. Yet to be started',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'started', 'name': 'Started',
                        'description': 'The task was just started at this point. Yet to be completed',
                    }, {
                        'type': NodeSpec.STEP, 'code': 'completed', 'name': 'Completed',
                        'description': 'The task was completed at this point. Will start post-complete tasks',
                    }, {
                        'type': NodeSpec.SPLIT, 'code': 'invoice-control', 'name': 'Split Invoice/Control',
                        'description': 'Invoicing and Task Control parallel branches',
                        'branches': ['control', 'invoice'], 'joiner': 'sample.support.invoice_control_joiner'
                    }, {
                        'type': NodeSpec.MULTIPLEXER, 'code': 'service-type', 'name': 'Service Type'
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'pending-delivery', 'name': 'Pending Delivery',
                        'description': 'The product is about to be delivered',
                        'landing_handler': 'sample.support.on_pending_delivery'
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'pending-pick', 'name': 'Pending Customer Pick',
                        'description': 'The product is about to be picked',
                    }, {
                        'type': NodeSpec.STEP, 'code': 'notify', 'name': 'Notify',
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'finished', 'name': 'Finished', 'exit_value': 105
                    }, {
                        'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
                    }],
                    'transitions': [{
                        'origin': 'origin', 'destination': 'created', 'name': 'Enter Created',
                    }, {
                        'origin': 'created', 'destination': 'reviewed', 'name': 'Review',
                        'permission': 'sample.review_task', 'action_name': 'review'
                    }, {
                        'origin': 'reviewed', 'destination': 'assigned', 'name': 'Assign',
                        'permission': 'sample.create_task', 'action_name': 'assign'
                    }, {
                        'origin': 'assigned', 'destination': 'started', 'name': 'Start',
                        'permission': 'sample.start_task', 'action_name': 'start'
                    }, {
                        'origin': 'started', 'destination': 'completed', 'name': 'Complete',
                        'permission': 'sample.complete_task', 'action_name': 'complete'
                    }, {
                        'origin': 'completed', 'destination': 'invoice-control', 'name': 'Start I/C Split',
                    }, {
                        # Split outcome transitions: rejection loops back to 'started'.
                        'origin': 'invoice-control', 'destination': 'started', 'name': 'On Reject',
                        'action_name': 'on-reject'
                    }, {
                        'origin': 'invoice-control', 'destination': 'service-type', 'name': 'On Accept',
                        'action_name': 'on-accept'
                    }, {
                        # Multiplexer branches are tried in ascending 'priority' order.
                        'origin': 'service-type', 'destination': 'pending-delivery', 'name': 'Is Deliverable?',
                        'priority': 1, 'condition': 'sample.support.is_deliverable'
                    }, {
                        'origin': 'service-type', 'destination': 'pending-pick', 'name': 'Is Non-Deliverable?',
                        'priority': 2, 'condition': 'sample.support.is_non_deliverable'
                    }, {
                        'origin': 'service-type', 'destination': 'notify', 'name': 'Is Service?',
                        'priority': 3, 'condition': 'sample.support.is_service'
                    }, {
                        'origin': 'pending-delivery', 'destination': 'notify', 'name': 'Deliver',
                        'action_name': 'deliver', 'permission': 'sample.deliver_task'
                    }, {
                        'origin': 'pending-pick', 'destination': 'notify', 'name': 'Pick-Attend',
                        'action_name': 'pick-attend', 'permission': 'sample.pick_attend_task'
                    }, {
                        'origin': 'notify', 'destination': 'finished', 'name': 'Finish'
                    }]
                }, {
                    # Control branch: splits again into approval + audit.
                    'code': 'control', 'name': 'Control',
                    'nodes': [{
                        'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
                    }, {
                        'type': NodeSpec.SPLIT, 'code': 'approve-audit', 'name': 'Split Audit/Approve',
                        'description': 'Audit and Approval parallel branches',
                        'branches': ['approval', 'audit'], 'joiner': 'sample.support.approve_audit_joiner'
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'was-rejected', 'name': 'Was Rejected', 'exit_value': 100,
                    }, {
                        # NOTE(review): display name 'Was Rejected' is duplicated here;
                        # presumably this should read 'Was Satisfied' — confirm no
                        # assertion elsewhere depends on the current value before fixing.
                        'type': NodeSpec.EXIT, 'code': 'was-satisfied', 'name': 'Was Rejected', 'exit_value': 101,
                    }, {
                        'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
                    }, {
                        'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
                    }],
                    'transitions': [{
                        'origin': 'origin', 'destination': 'approve-audit', 'name': 'Enter A/E'
                    }, {
                        'origin': 'approve-audit', 'destination': 'was-rejected', 'name': 'Rejected',
                        'action_name': 'rejected'
                    }, {
                        'origin': 'approve-audit', 'destination': 'was-satisfied', 'name': 'Satisfied',
                        'action_name': 'satisfied'
                    }]
                }, {
                    # Approval branch of the control split.
                    'code': 'approval', 'name': 'Approval',
                    'nodes': [{
                        'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'pending-approval', 'name': 'Pending Approval',
                        'description': 'The task is about to be approved or rejected',
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'approved', 'name': 'Approved', 'exit_value': 101,
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'rejected', 'name': 'Rejected', 'exit_value': 102,
                    }, {
                        'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
                    }, {
                        'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
                    }],
                    'transitions': [{
                        'origin': 'origin', 'destination': 'pending-approval', 'name': 'Enter P/A'
                    }, {
                        'origin': 'pending-approval', 'destination': 'approved', 'name': 'Approve',
                        'action_name': 'approve', 'permission': 'sample.accept_task'
                    }, {
                        'origin': 'pending-approval', 'destination': 'rejected', 'name': 'Reject',
                        'action_name': 'reject', 'permission': 'sample.reject_task'
                    }]
                }, {
                    # Audit branch of the control split.
                    'code': 'audit', 'name': 'Audit',
                    'nodes': [{
                        'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'pending-audit', 'name': 'Pending Audit',
                        'description': 'The task is about to be audited',
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'audited', 'name': 'Audited', 'exit_value': 103,
                    }, {
                        'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
                    }, {
                        'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
                    }],
                    'transitions': [{
                        'origin': 'origin', 'destination': 'pending-audit', 'name': 'Enter Audit'
                    }, {
                        'origin': 'pending-audit', 'destination': 'audited', 'name': 'Audit',
                        'action_name': 'audit', 'permission': 'sample.audit_task'
                    }]
                }, {
                    # Invoice branch of the main invoice-control split.
                    'code': 'invoice', 'name': 'Invoice',
                    'nodes': [{
                        'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
                    }, {
                        'type': NodeSpec.INPUT, 'code': 'pending-invoice', 'name': 'Pending Invoice',
                        'description': 'The task is about to be invoiced',
                    }, {
                        'type': NodeSpec.EXIT, 'code': 'invoiced', 'name': 'Invoiced', 'exit_value': 104,
                    }, {
                        'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
                    }, {
                        'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
                    }],
                    'transitions': [{
                        'origin': 'origin', 'destination': 'pending-invoice', 'name': 'Enter Invoice'
                    }, {
                        'origin': 'pending-invoice', 'destination': 'invoiced', 'name': 'Invoice',
                        'action_name': 'invoice', 'permission': 'sample.invoice_task'
                    }]
                }]}
        return Workflow.Spec.install(spec)
def _install_users_and_data(self, service_type):
User = get_user_model()
users = [
User.objects.create_user('foo', '<EMAIL>', 'foo1'),
User.objects.create_user('bar', '<EMAIL>', 'bar1'),
User.objects.create_user('baz', '<EMAIL>', 'baz1'),
User.objects.create_user('bat', '<EMAIL>', 'bat1'),
User.objects.create_user('boo', '<EMAIL>', 'boo1'),
User.objects.create_user('poo', '<EMAIL>', 'poo1'),
User.objects.create_user('god', '<EMAIL>', 'god1'),
]
area = Area.objects.create(head=users[6])
task = Task.objects.create(area=area, service_type=service_type, title='Sample',
content='Lorem ipsum dolor sit amet', performer=users[0], reviewer=users[1],
accountant=users[2], auditor=users[3], dispatcher=users[4], attendant=users[5])
return users, task
def test_base_workflow(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
def test_user_not_able_to_create_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
with self.assertRaises(exceptions.WorkflowCreateDenied):
Workflow.create(users[1], workflow, task)
def test_user_not_able_to_execute_action_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowActionDenied):
instance.execute(users[2], 'review')
def test_execute_invalid_action_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseNodeTransitionDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complit') # funny enough for a typo
def test_execute_invalid_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseInstanceDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete', 'wtf')
def test_execute_invalid_nested_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseInstanceDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[3], 'audit', 'control.clorch') # this one should also fail!
def test_execute_adequately_split_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
actions = instance.get_workflow_available_actions(users[2])
target = {
'invoice': {
'display_name': _('Invoice'),
'actions': [{
'display_name': _('Invoice'),
'action_name': 'invoice'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[2], 'invoice', 'invoice')
actions = instance.get_workflow_available_actions(users[3])
target = {
'control.audit': {
'display_name': _('Audit'),
'actions': [{
'display_name': _('Audit'),
'action_name': 'audit'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[3], 'audit', 'control.audit')
actions = instance.get_workflow_available_actions(users[1])
target = {
'control.approval': {
'display_name': _('Approval'),
'actions': [{
'display_name': _('Approve'),
'action_name': 'approve'
}, {
'display_name': _('Reject'),
'action_name': 'reject'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[1], 'approve', 'control.approval')
workflow_status = instance.get_workflow_status()
target = {'': ('ended', 105)}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
def test_rejection_and_loopback_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'reject', 'control.approval')
workflow_status = instance.get_workflow_status()
target = {'': ('waiting', 'started')}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
def test_approval_deliverable_waiting_delivery_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'approve', 'control.approval')
instance.execute(users[2], 'invoice', 'invoice')
instance.execute(users[3], 'audit', 'control.audit')
workflow_status = instance.get_workflow_status()
target = {'': ('waiting', 'pending-delivery')}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
self.assertEqual(instance.instance.document.content, 'Lorem ipsum dolor sit amet Pending Delivery')
def test_unmatched_condition_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data('crap')
with self.assertRaises(exceptions.WorkflowCourseNodeMultiplexerDidNotSatisfyAnyCondition):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'approve', 'control.approval')
instance.execute(users[2], 'invoice', 'invoice')
instance.execute(users[3], 'audit', 'control.audit')
def test_cancel_terminated_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseInstanceAlreadyTerminated):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[6])
instance.cancel(users[6])
def test_cancel_course_without_workflow_permission_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseCancelDeniedByWorkflow):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[3])
def test_cancel_course_without_course_permission_is_bad(self):
workflow = self._base_install_workflow_spec()
spec = workflow.spec
permission = spec.cancel_permission
spec.cancel_permission = ''
spec.save()
course_spec = spec.course_specs.get(code='')
course_spec.cancel_permission = permission
course_spec.save()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseCancelDeniedByCourse):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[3])
def test_start_a_started_workflow_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowInstanceNotPending):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.start(users[1])
def test_execute_existing_action_from_split_node_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
with self.assertRaises(exceptions.WorkflowCourseInstanceNotWaiting):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[0], 'on-accept') | sample/test_instances.py | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from arcanelab.ouroboros.executors import Workflow
from arcanelab.ouroboros.models import NodeSpec, TransitionSpec
from arcanelab.ouroboros.support import CallableReference
from arcanelab.ouroboros import exceptions
from .support import ValidationErrorWrappingTestCase
from .models import Task, Area
class WorkflowInstanceTestCase(ValidationErrorWrappingTestCase):
def _base_install_workflow_spec(self):
"""
Installs a dummy workflow, having all the possible nodes in a
main course, being ok.
"""
spec = {'model': 'sample.Task', 'code': 'wfspec', 'name': 'Workflow Spec',
'create_permission': 'sample.create_task',
'cancel_permission': 'sample.cancel_task',
'courses': [{
'code': '', 'name': 'Main',
'nodes': [{
'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
'description': 'Origin',
}, {
'type': NodeSpec.INPUT, 'code': 'created', 'name': 'Created',
'description': 'The task was just created at this point. Yet to review',
}, {
'type': NodeSpec.INPUT, 'code': 'reviewed', 'name': 'Reviewed',
'description': 'The task was just reviewed at this point. Yet to be assigned',
}, {
'type': NodeSpec.INPUT, 'code': 'assigned', 'name': 'Assigned',
'description': 'The task was just assigned at this point. Yet to be started',
}, {
'type': NodeSpec.INPUT, 'code': 'started', 'name': 'Started',
'description': 'The task was just started at this point. Yet to be completed',
}, {
'type': NodeSpec.STEP, 'code': 'completed', 'name': 'Completed',
'description': 'The task was completed at this point. Will start post-complete tasks',
}, {
'type': NodeSpec.SPLIT, 'code': 'invoice-control', 'name': 'Split Invoice/Control',
'description': 'Invoicing and Task Control parallel branches',
'branches': ['control', 'invoice'], 'joiner': 'sample.support.invoice_control_joiner'
}, {
'type': NodeSpec.MULTIPLEXER, 'code': 'service-type', 'name': 'Service Type'
}, {
'type': NodeSpec.INPUT, 'code': 'pending-delivery', 'name': 'Pending Delivery',
'description': 'The product is about to be delivered',
'landing_handler': 'sample.support.on_pending_delivery'
}, {
'type': NodeSpec.INPUT, 'code': 'pending-pick', 'name': 'Pending Customer Pick',
'description': 'The product is about to be picked',
}, {
'type': NodeSpec.STEP, 'code': 'notify', 'name': 'Notify',
}, {
'type': NodeSpec.EXIT, 'code': 'finished', 'name': 'Finished', 'exit_value': 105
}, {
'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
}],
'transitions': [{
'origin': 'origin', 'destination': 'created', 'name': 'Enter Created',
}, {
'origin': 'created', 'destination': 'reviewed', 'name': 'Review',
'permission': 'sample.review_task', 'action_name': 'review'
}, {
'origin': 'reviewed', 'destination': 'assigned', 'name': 'Assign',
'permission': 'sample.create_task', 'action_name': 'assign'
}, {
'origin': 'assigned', 'destination': 'started', 'name': 'Start',
'permission': 'sample.start_task', 'action_name': 'start'
}, {
'origin': 'started', 'destination': 'completed', 'name': 'Complete',
'permission': 'sample.complete_task', 'action_name': 'complete'
}, {
'origin': 'completed', 'destination': 'invoice-control', 'name': 'Start I/C Split',
}, {
'origin': 'invoice-control', 'destination': 'started', 'name': 'On Reject',
'action_name': 'on-reject'
}, {
'origin': 'invoice-control', 'destination': 'service-type', 'name': 'On Accept',
'action_name': 'on-accept'
}, {
'origin': 'service-type', 'destination': 'pending-delivery', 'name': 'Is Deliverable?',
'priority': 1, 'condition': 'sample.support.is_deliverable'
}, {
'origin': 'service-type', 'destination': 'pending-pick', 'name': 'Is Non-Deliverable?',
'priority': 2, 'condition': 'sample.support.is_non_deliverable'
}, {
'origin': 'service-type', 'destination': 'notify', 'name': 'Is Service?',
'priority': 3, 'condition': 'sample.support.is_service'
}, {
'origin': 'pending-delivery', 'destination': 'notify', 'name': 'Deliver',
'action_name': 'deliver', 'permission': 'sample.deliver_task'
}, {
'origin': 'pending-pick', 'destination': 'notify', 'name': 'Pick-Attend',
'action_name': 'pick-attend', 'permission': 'sample.pick_attend_task'
}, {
'origin': 'notify', 'destination': 'finished', 'name': 'Finish'
}]
}, {
'code': 'control', 'name': 'Control',
'nodes': [{
'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
}, {
'type': NodeSpec.SPLIT, 'code': 'approve-audit', 'name': 'Split Audit/Approve',
'description': 'Audit and Approval parallel branches',
'branches': ['approval', 'audit'], 'joiner': 'sample.support.approve_audit_joiner'
}, {
'type': NodeSpec.EXIT, 'code': 'was-rejected', 'name': 'Was Rejected', 'exit_value': 100,
}, {
'type': NodeSpec.EXIT, 'code': 'was-satisfied', 'name': 'Was Rejected', 'exit_value': 101,
}, {
'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
}, {
'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
}],
'transitions': [{
'origin': 'origin', 'destination': 'approve-audit', 'name': 'Enter A/E'
}, {
'origin': 'approve-audit', 'destination': 'was-rejected', 'name': 'Rejected',
'action_name': 'rejected'
}, {
'origin': 'approve-audit', 'destination': 'was-satisfied', 'name': 'Satisfied',
'action_name': 'satisfied'
}]
}, {
'code': 'approval', 'name': 'Approval',
'nodes': [{
'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
}, {
'type': NodeSpec.INPUT, 'code': 'pending-approval', 'name': 'Pending Approval',
'description': 'The task is about to be approved or rejected',
}, {
'type': NodeSpec.EXIT, 'code': 'approved', 'name': 'Approved', 'exit_value': 101,
}, {
'type': NodeSpec.EXIT, 'code': 'rejected', 'name': 'Rejected', 'exit_value': 102,
}, {
'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
}, {
'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
}],
'transitions': [{
'origin': 'origin', 'destination': 'pending-approval', 'name': 'Enter P/A'
}, {
'origin': 'pending-approval', 'destination': 'approved', 'name': 'Approve',
'action_name': 'approve', 'permission': 'sample.accept_task'
}, {
'origin': 'pending-approval', 'destination': 'rejected', 'name': 'Reject',
'action_name': 'reject', 'permission': 'sample.reject_task'
}]
}, {
'code': 'audit', 'name': 'Audit',
'nodes': [{
'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
}, {
'type': NodeSpec.INPUT, 'code': 'pending-audit', 'name': 'Pending Audit',
'description': 'The task is about to be audited',
}, {
'type': NodeSpec.EXIT, 'code': 'audited', 'name': 'Audited', 'exit_value': 103,
}, {
'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
}, {
'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
}],
'transitions': [{
'origin': 'origin', 'destination': 'pending-audit', 'name': 'Enter Audit'
}, {
'origin': 'pending-audit', 'destination': 'audited', 'name': 'Audit',
'action_name': 'audit', 'permission': 'sample.audit_task'
}]
}, {
'code': 'invoice', 'name': 'Invoice',
'nodes': [{
'type': NodeSpec.ENTER, 'code': 'origin', 'name': 'Origin',
}, {
'type': NodeSpec.INPUT, 'code': 'pending-invoice', 'name': 'Pending Invoice',
'description': 'The task is about to be invoiced',
}, {
'type': NodeSpec.EXIT, 'code': 'invoiced', 'name': 'Invoiced', 'exit_value': 104,
}, {
'type': NodeSpec.CANCEL, 'code': 'cancel', 'name': 'Cancel',
}, {
'type': NodeSpec.JOINED, 'code': 'joined', 'name': 'Joined',
}],
'transitions': [{
'origin': 'origin', 'destination': 'pending-invoice', 'name': 'Enter Invoice'
}, {
'origin': 'pending-invoice', 'destination': 'invoiced', 'name': 'Invoice',
'action_name': 'invoice', 'permission': 'sample.invoice_task'
}]
}]}
return Workflow.Spec.install(spec)
def _install_users_and_data(self, service_type):
User = get_user_model()
users = [
User.objects.create_user('foo', '<EMAIL>', 'foo1'),
User.objects.create_user('bar', '<EMAIL>', 'bar1'),
User.objects.create_user('baz', '<EMAIL>', 'baz1'),
User.objects.create_user('bat', '<EMAIL>', 'bat1'),
User.objects.create_user('boo', '<EMAIL>', 'boo1'),
User.objects.create_user('poo', '<EMAIL>', 'poo1'),
User.objects.create_user('god', '<EMAIL>', 'god1'),
]
area = Area.objects.create(head=users[6])
task = Task.objects.create(area=area, service_type=service_type, title='Sample',
content='Lorem ipsum dolor sit amet', performer=users[0], reviewer=users[1],
accountant=users[2], auditor=users[3], dispatcher=users[4], attendant=users[5])
return users, task
def test_base_workflow(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
def test_user_not_able_to_create_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
with self.assertRaises(exceptions.WorkflowCreateDenied):
Workflow.create(users[1], workflow, task)
def test_user_not_able_to_execute_action_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowActionDenied):
instance.execute(users[2], 'review')
def test_execute_invalid_action_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseNodeTransitionDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complit') # funny enough for a typo
def test_execute_invalid_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseInstanceDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete', 'wtf')
def test_execute_invalid_nested_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
with self.assertRaises(exceptions.WorkflowCourseInstanceDoesNotExist):
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[3], 'audit', 'control.clorch') # this one should also fail!
def test_execute_adequately_split_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
actions = instance.get_workflow_available_actions(users[2])
target = {
'invoice': {
'display_name': _('Invoice'),
'actions': [{
'display_name': _('Invoice'),
'action_name': 'invoice'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[2], 'invoice', 'invoice')
actions = instance.get_workflow_available_actions(users[3])
target = {
'control.audit': {
'display_name': _('Audit'),
'actions': [{
'display_name': _('Audit'),
'action_name': 'audit'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[3], 'audit', 'control.audit')
actions = instance.get_workflow_available_actions(users[1])
target = {
'control.approval': {
'display_name': _('Approval'),
'actions': [{
'display_name': _('Approve'),
'action_name': 'approve'
}, {
'display_name': _('Reject'),
'action_name': 'reject'
}]
},
}
self.assertTrue(actions == target, "expected %r == %r" % (actions, target))
instance.execute(users[1], 'approve', 'control.approval')
workflow_status = instance.get_workflow_status()
target = {'': ('ended', 105)}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
def test_rejection_and_loopback_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'reject', 'control.approval')
workflow_status = instance.get_workflow_status()
target = {'': ('waiting', 'started')}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
def test_approval_deliverable_waiting_delivery_is_good(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'approve', 'control.approval')
instance.execute(users[2], 'invoice', 'invoice')
instance.execute(users[3], 'audit', 'control.audit')
workflow_status = instance.get_workflow_status()
target = {'': ('waiting', 'pending-delivery')}
self.assertTrue(workflow_status == target, "expected %r == %r" % (workflow_status, target))
self.assertEqual(instance.instance.document.content, 'Lorem ipsum dolor sit amet Pending Delivery')
def test_unmatched_condition_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data('crap')
with self.assertRaises(exceptions.WorkflowCourseNodeMultiplexerDidNotSatisfyAnyCondition):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[1], 'approve', 'control.approval')
instance.execute(users[2], 'invoice', 'invoice')
instance.execute(users[3], 'audit', 'control.audit')
def test_cancel_terminated_course_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseInstanceAlreadyTerminated):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[6])
instance.cancel(users[6])
def test_cancel_course_without_workflow_permission_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseCancelDeniedByWorkflow):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[3])
def test_cancel_course_without_course_permission_is_bad(self):
workflow = self._base_install_workflow_spec()
spec = workflow.spec
permission = spec.cancel_permission
spec.cancel_permission = ''
spec.save()
course_spec = spec.course_specs.get(code='')
course_spec.cancel_permission = permission
course_spec.save()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowCourseCancelDeniedByCourse):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.cancel(users[3])
def test_start_a_started_workflow_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.DELIVERABLE)
with self.assertRaises(exceptions.WorkflowInstanceNotPending):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.start(users[1])
def test_execute_existing_action_from_split_node_is_bad(self):
workflow = self._base_install_workflow_spec()
users, task = self._install_users_and_data(Task.SERVICE)
with self.assertRaises(exceptions.WorkflowCourseInstanceNotWaiting):
instance = Workflow.create(users[6], workflow, task)
instance.start(users[1])
instance.execute(users[1], 'review')
instance.execute(users[6], 'assign')
instance.execute(users[0], 'start')
instance.execute(users[0], 'complete')
instance.execute(users[0], 'on-accept') | 0.470493 | 0.139807 |
import groupdocs_merger_cloud
from Common import Common
# Get your ClientId and ClientSecret at https://dashboard.groupdocs.cloud (free registration is required).
Common.client_id = "XXXX-XXXX-XXXX-XXXX"
Common.client_secret = "<KEY>"
Common.myStorage = "First Storage"
# Upload Sample Files
Common.UploadSampleFiles()
# Get Supported File Types
from GetSupportedFileTypes import GetSupportedFileTypes
GetSupportedFileTypes.Run()
# Get Document Info
from GetInfo import GetInfo
GetInfo.Run()
# Split To Multi-Page Documents
from DocumentOperations.SplitDocument.SplitToMultiPageDocuments import SplitToMultiPageDocuments
SplitToMultiPageDocuments.Run()
# Split To Single Pages
from DocumentOperations.SplitDocument.SplitToSinglePages import SplitToSinglePages
SplitToSinglePages.Run()
# Split To Single Pages By Range
from DocumentOperations.SplitDocument.SplitToSinglePagesByRange import SplitToSinglePagesByRange
SplitToSinglePagesByRange.Run()
# Split To Single Pages By Range With Filter
from DocumentOperations.SplitDocument.SplitToSinglePagesByRangeWithFilter import SplitToSinglePagesByRangeWithFilter
SplitToSinglePagesByRangeWithFilter.Run()
# Join Multiple Documents
from DocumentOperations.JoinMultipleDocuments import JoinMultipleDocuments
JoinMultipleDocuments.Run()
# Join multiple documents of various formats
from DocumentOperations.JoinDocumentsCrossFormat import JoinDocumentsCrossFormat
JoinDocumentsCrossFormat.Run()
# Join Pages From Various Documents
from DocumentOperations.JoinPagesFromVariousDocuments import JoinPagesFromVariousDocuments
JoinPagesFromVariousDocuments.Run()
# Preview Document
from DocumentOperations.PreviewDocument import PreviewDocument
PreviewDocument.Run()
# Import attachment into pdf document
from DocumentOperations.ImportAttachment import ImportAttachment
ImportAttachment.Run()
# Change Page Orientation
from PagesOperations.ChangePageOrientation import ChangePageOrientation
ChangePageOrientation.Run()
# Extract Pages By Numbers
from PagesOperations.ExtractPages.ExtractPagesByNumbers import ExtractPagesByNumbers
ExtractPagesByNumbers.Run()
# Extract Pages By Range
from PagesOperations.ExtractPages.ExtractPagesByRange import ExtractPagesByRange
ExtractPagesByRange.Run()
# Move Page
from PagesOperations.MovePage import MovePage
MovePage.Run()
# Remove Pages
from PagesOperations.RemovePages import RemovePages
RemovePages.Run()
# Rotate Pages
from PagesOperations.RotatePages import RotatePages
RotatePages.Run()
# Swap Pages
from PagesOperations.SwapPages import SwapPages
SwapPages.Run()
# Add Document Password
from SecurityOperations.AddDocumentPassword import AddDocumentPassword
AddDocumentPassword.Run()
# Check Document Password Protection
from SecurityOperations.CheckDocumentPasswordProtection import CheckDocumentPasswordProtection
CheckDocumentPasswordProtection.Run()
# Remove Document Password
from SecurityOperations.RemoveDocumentPassword import RemoveDocumentPassword
RemoveDocumentPassword.Run()
# Update Document Password
from SecurityOperations.UpdateDocumentPassword import UpdateDocumentPassword
UpdateDocumentPassword.Run() | Examples/RunExamples.py | import groupdocs_merger_cloud
from Common import Common
# Get your ClientId and ClientSecret at https://dashboard.groupdocs.cloud (free registration is required).
Common.client_id = "XXXX-XXXX-XXXX-XXXX"
Common.client_secret = "<KEY>"
Common.myStorage = "First Storage"
# Upload Sample Files
Common.UploadSampleFiles()
# Get Supported File Types
from GetSupportedFileTypes import GetSupportedFileTypes
GetSupportedFileTypes.Run()
# Get Document Info
from GetInfo import GetInfo
GetInfo.Run()
# Split To Multi-Page Documents
from DocumentOperations.SplitDocument.SplitToMultiPageDocuments import SplitToMultiPageDocuments
SplitToMultiPageDocuments.Run()
# Split To Single Pages
from DocumentOperations.SplitDocument.SplitToSinglePages import SplitToSinglePages
SplitToSinglePages.Run()
# Split To Single Pages By Range
from DocumentOperations.SplitDocument.SplitToSinglePagesByRange import SplitToSinglePagesByRange
SplitToSinglePagesByRange.Run()
# Split To Single Pages By Range With Filter
from DocumentOperations.SplitDocument.SplitToSinglePagesByRangeWithFilter import SplitToSinglePagesByRangeWithFilter
SplitToSinglePagesByRangeWithFilter.Run()
# Join Multiple Documents
from DocumentOperations.JoinMultipleDocuments import JoinMultipleDocuments
JoinMultipleDocuments.Run()
# Join multiple documents of various formats
from DocumentOperations.JoinDocumentsCrossFormat import JoinDocumentsCrossFormat
JoinDocumentsCrossFormat.Run()
# Join Pages From Various Documents
from DocumentOperations.JoinPagesFromVariousDocuments import JoinPagesFromVariousDocuments
JoinPagesFromVariousDocuments.Run()
# Preview Document
from DocumentOperations.PreviewDocument import PreviewDocument
PreviewDocument.Run()
# Import attachment into pdf document
from DocumentOperations.ImportAttachment import ImportAttachment
ImportAttachment.Run()
# Change Page Orientation
from PagesOperations.ChangePageOrientation import ChangePageOrientation
ChangePageOrientation.Run()
# Extract Pages By Numbers
from PagesOperations.ExtractPages.ExtractPagesByNumbers import ExtractPagesByNumbers
ExtractPagesByNumbers.Run()
# Extract Pages By Range
from PagesOperations.ExtractPages.ExtractPagesByRange import ExtractPagesByRange
ExtractPagesByRange.Run()
# Move Page
from PagesOperations.MovePage import MovePage
MovePage.Run()
# Remove Pages
from PagesOperations.RemovePages import RemovePages
RemovePages.Run()
# Rotate Pages
from PagesOperations.RotatePages import RotatePages
RotatePages.Run()
# Swap Pages
from PagesOperations.SwapPages import SwapPages
SwapPages.Run()
# Add Document Password
from SecurityOperations.AddDocumentPassword import AddDocumentPassword
AddDocumentPassword.Run()
# Check Document Password Protection
from SecurityOperations.CheckDocumentPasswordProtection import CheckDocumentPasswordProtection
CheckDocumentPasswordProtection.Run()
# Remove Document Password
from SecurityOperations.RemoveDocumentPassword import RemoveDocumentPassword
RemoveDocumentPassword.Run()
# Update Document Password
from SecurityOperations.UpdateDocumentPassword import UpdateDocumentPassword
UpdateDocumentPassword.Run() | 0.497559 | 0.089973 |
# code was heavily based on https://github.com/wtjiang98/PSGAN
# MIT License
# Copyright (c) 2020 <NAME>
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import functools
import numpy as np
from ...modules.norm import build_norm_layer
from .builder import GENERATORS
class PONO(paddle.nn.Layer):
    """Positional Normalization (PONO) layer.

    Normalizes each spatial position independently across the channel
    dimension (axis 1): subtracts the per-position channel mean and divides
    by the per-position channel standard deviation.
    """
    def __init__(self, eps=1e-5):
        super(PONO, self).__init__()
        # Small constant added to the variance for numerical stability.
        self.eps = eps

    def forward(self, x):
        """Return ``x`` normalized over its channel axis (axis 1)."""
        mu = paddle.mean(x, axis=1, keepdim=True)
        centered = x - mu
        var = paddle.mean(paddle.square(centered), axis=1, keepdim=True)
        return centered / paddle.sqrt(var + self.eps)
class ResidualBlock(paddle.nn.Layer):
    """Residual Block with instance normalization.

    Applies two conv(3x3) + InstanceNorm2D stages (with a ReLU in between)
    and adds the input back, so ``dim_in`` must equal ``dim_out`` for the
    skip connection in :meth:`forward` to be valid.

    Args:
        dim_in (int): Number of input channels.
        dim_out (int): Number of output channels.
        mode (str|None): ``'t'`` disables the learnable affine parameters of
            the instance-norm layers; ``'p'`` or ``None`` keeps the default
            (learnable) affine parameters.

    Raises:
        ValueError: If ``mode`` is not ``'t'``, ``'p'`` or ``None``.
    """
    def __init__(self, dim_in, dim_out, mode=None):
        super(ResidualBlock, self).__init__()
        if mode == 't':
            # No learnable scale/shift in the instance norms.
            weight_attr = False
            bias_attr = False
        elif mode == 'p' or (mode is None):
            # None selects Paddle's default (learnable) affine parameters.
            weight_attr = None
            bias_attr = None
        else:
            # Previously an unrecognized mode fell through and crashed later
            # with an UnboundLocalError; fail fast with a clear message.
            raise ValueError(
                "mode must be 't', 'p' or None, got %r" % (mode, ))
        self.main = nn.Sequential(
            nn.Conv2D(dim_in,
                      dim_out,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False),
            nn.InstanceNorm2D(dim_out,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr), nn.ReLU(),
            nn.Conv2D(dim_out,
                      dim_out,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False),
            nn.InstanceNorm2D(dim_out,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr))

    def forward(self, x):
        """Return ``x + main(x)`` (residual connection)."""
        return x + self.main(x)
class StyleResidualBlock(paddle.nn.Layer):
"""Residual Block with instance normalization."""
def __init__(self, dim_in, dim_out):
super(StyleResidualBlock, self).__init__()
self.block1 = nn.Sequential(
nn.Conv2D(dim_in,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), PONO())
ks = 3
pw = ks // 2
self.beta1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.gamma1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.block2 = nn.Sequential(
nn.ReLU(),
nn.Conv2D(dim_out,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), PONO())
self.beta2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.gamma2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
def forward(self, x, y):
"""forward"""
x_ = self.block1(x)
b = self.beta1(y)
g = self.gamma1(y)
x_ = (g + 1) * x_ + b
x_ = self.block2(x_)
b = self.beta2(y)
g = self.gamma2(y)
x_ = (g + 1) * x_ + b
return x + x_
class MDNet(paddle.nn.Layer):
"""MDNet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3):
super(MDNet, self).__init__()
layers = []
layers.append(
nn.Conv2D(3,
conv_dim,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(conv_dim, weight_attr=None, bias_attr=None))
layers.append(nn.ReLU())
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(
nn.Conv2D(curr_dim,
curr_dim * 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim * 2,
weight_attr=None,
bias_attr=None))
layers.append(nn.ReLU())
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
self.main = nn.Sequential(*layers)
def forward(self, x):
"""forward"""
out = self.main(x)
return out
class TNetDown(paddle.nn.Layer):
"""MDNet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3):
super(TNetDown, self).__init__()
layers = []
layers.append(
nn.Conv2D(3,
conv_dim,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(conv_dim, weight_attr=False, bias_attr=False))
layers.append(nn.ReLU())
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(
nn.Conv2D(curr_dim,
curr_dim * 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim * 2,
weight_attr=False,
bias_attr=False))
layers.append(nn.ReLU())
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(
ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
self.main = nn.Sequential(*layers)
def forward(self, x):
"""forward"""
out = self.main(x)
return out
class GetMatrix(paddle.nn.Layer):
def __init__(self, dim_in, dim_out):
super(GetMatrix, self).__init__()
self.get_gamma = nn.Conv2D(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
self.get_beta = nn.Conv2D(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
def forward(self, x):
gamma = self.get_gamma(x)
beta = self.get_beta(x)
return gamma, beta
class MANet(paddle.nn.Layer):
"""MANet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3, w=0.01):
super(MANet, self).__init__()
self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
curr_dim = conv_dim * 4
self.w = w
self.beta = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
self.gamma = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
self.simple_spade = GetMatrix(curr_dim, 1) # get the makeup matrix
self.repeat_num = repeat_num
for i in range(repeat_num):
setattr(self, "bottlenecks_" + str(i),
ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
# Up-Sampling
self.upsamplers = []
self.up_betas = []
self.up_gammas = []
self.up_acts = []
y_dim = curr_dim
for i in range(2):
layers = []
layers.append(
nn.Conv2DTranspose(curr_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim // 2,
weight_attr=False,
bias_attr=False))
setattr(self, "up_acts_" + str(i), nn.ReLU())
setattr(
self, "up_betas_" + str(i),
nn.Conv2DTranspose(y_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1))
setattr(
self, "up_gammas_" + str(i),
nn.Conv2DTranspose(y_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1))
setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
curr_dim = curr_dim // 2
self.img_reg = [
nn.Conv2D(curr_dim,
3,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False)
]
self.img_reg = nn.Sequential(*self.img_reg)
def forward(self, x, y, x_p, y_p, consistency_mask, mask_x, mask_y):
"""forward"""
# y -> ref feature
# x -> src img
x = self.encoder(x)
_, c, h, w = x.shape
_, c2, h2, w2 = y.shape
mask_x = F.interpolate(mask_x, size=(64, 64))
mask_x = mask_x.transpose((1, 0, 2, 3))
mask_x_re = mask_x.tile([1, x.shape[1], 1, 1])
mask_x_diff_re = mask_x.tile([1, x_p.shape[1], 1, 1])
mask_y = F.interpolate(mask_y, size=(64, 64))
mask_y = mask_y.transpose((1, 0, 2, 3))
mask_y_re = mask_y.tile([1, y.shape[1], 1, 1])
mask_y_diff_re = mask_y.tile([1, y_p.shape[1], 1, 1])
x_re = x.tile([3, 1, 1, 1])
y_re = y.tile([3, 1, 1, 1])
x_flat = x_re * mask_x_re
y_flat = y_re * mask_y_re
x_p = x_p.tile([3, 1, 1, 1]) * mask_x_diff_re
y_p = y_p.tile([3, 1, 1, 1]) * mask_y_diff_re
norm_x = paddle.norm(x_p, axis=1,
keepdim=True).tile([1, x_p.shape[1], 1, 1])
norm_x = paddle.where(norm_x == 0, paddle.to_tensor(1e10), norm_x)
x_p = x_p / norm_x
norm_y = paddle.norm(y_p, axis=1,
keepdim=True).tile([1, y_p.shape[1], 1, 1])
norm_y = paddle.where(norm_y == 0, paddle.to_tensor(1e10), norm_y)
y_p = y_p / norm_y
x_flat = paddle.concat([x_flat * 0.01, x_p], axis=1)
y_flat = paddle.concat([y_flat * 0.01, y_p], axis=1)
x_flat_re = x_flat.reshape([3, x_flat.shape[1], h * w])
y_flat_re = y_flat.reshape([3, y_flat.shape[1], h2 * w2])
a_ = paddle.matmul(x_flat_re, y_flat_re, transpose_x=True)
with paddle.no_grad():
a_mask = a_ != 0
a_ *= 200
a = F.softmax(a_, axis=-1)
a = a * a_mask
gamma, beta = self.simple_spade(y)
gamma = gamma.tile([3, 1, 1, 1]) * mask_y
beta = beta.tile([3, 1, 1, 1]) * mask_y
beta = beta.reshape([-1, h2 * w2, 1])
beta = paddle.matmul(a, beta)
beta = beta.transpose((0, 2, 1))
beta = beta.reshape([-1, 1, h2, w2])
gamma = gamma.reshape([-1, h2 * w2, 1])
gamma = paddle.matmul(a, gamma)
gamma = gamma.transpose((0, 2, 1))
gamma = gamma.reshape([-1, 1, h2, w2])
beta = (beta[0] + beta[1] + beta[2]).unsqueeze(0)
gamma = (gamma[0] + gamma[1] + gamma[2]).unsqueeze(0)
x = x * (1 + gamma) + beta
for i in range(self.repeat_num):
layer = getattr(self, "bottlenecks_" + str(i))
x = layer(x)
for idx in range(2):
layer = getattr(self, "up_samplers_" + str(idx))
x = layer(x)
layer = getattr(self, "up_acts_" + str(idx))
x = layer(x)
x = self.img_reg(x)
x = paddle.tanh(x)
return x, a
@GENERATORS.register()
class GeneratorPSGANAttention(paddle.nn.Layer):
def __init__(self, conv_dim=64, repeat_num=3):
super(GeneratorPSGANAttention, self).__init__()
self.ma_net = MANet(conv_dim=conv_dim, repeat_num=repeat_num)
self.md_net = MDNet(conv_dim=conv_dim, repeat_num=repeat_num)
def forward(self, x, y, x_p, y_p, consistency_mask, mask_x, mask_y):
"""forward"""
y = self.md_net(y)
out, a = self.ma_net(x, y, x_p, y_p, consistency_mask, mask_x, mask_y)
return out, a | ppgan/models/generators/makeup.py |
# code was heavily based on https://github.com/wtjiang98/PSGAN
# MIT License
# Copyright (c) 2020 <NAME>
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import functools
import numpy as np
from ...modules.norm import build_norm_layer
from .builder import GENERATORS
class PONO(paddle.nn.Layer):
def __init__(self, eps=1e-5):
super(PONO, self).__init__()
self.eps = eps
def forward(self, x):
mean = paddle.mean(x, axis=1, keepdim=True)
var = paddle.mean(paddle.square(x - mean), axis=1, keepdim=True)
tmp = (x - mean) / paddle.sqrt(var + self.eps)
return tmp
class ResidualBlock(paddle.nn.Layer):
"""Residual Block with instance normalization."""
def __init__(self, dim_in, dim_out, mode=None):
super(ResidualBlock, self).__init__()
if mode == 't':
weight_attr = False
bias_attr = False
elif mode == 'p' or (mode is None):
weight_attr = None
bias_attr = None
self.main = nn.Sequential(
nn.Conv2D(dim_in,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False),
nn.InstanceNorm2D(dim_out,
weight_attr=weight_attr,
bias_attr=bias_attr), nn.ReLU(),
nn.Conv2D(dim_out,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False),
nn.InstanceNorm2D(dim_out,
weight_attr=weight_attr,
bias_attr=bias_attr))
def forward(self, x):
"""forward"""
return x + self.main(x)
class StyleResidualBlock(paddle.nn.Layer):
"""Residual Block with instance normalization."""
def __init__(self, dim_in, dim_out):
super(StyleResidualBlock, self).__init__()
self.block1 = nn.Sequential(
nn.Conv2D(dim_in,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), PONO())
ks = 3
pw = ks // 2
self.beta1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.gamma1 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.block2 = nn.Sequential(
nn.ReLU(),
nn.Conv2D(dim_out,
dim_out,
kernel_size=3,
stride=1,
padding=1,
bias_attr=False), PONO())
self.beta2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
self.gamma2 = nn.Conv2D(dim_in, dim_out, kernel_size=ks, padding=pw)
def forward(self, x, y):
"""forward"""
x_ = self.block1(x)
b = self.beta1(y)
g = self.gamma1(y)
x_ = (g + 1) * x_ + b
x_ = self.block2(x_)
b = self.beta2(y)
g = self.gamma2(y)
x_ = (g + 1) * x_ + b
return x + x_
class MDNet(paddle.nn.Layer):
"""MDNet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3):
super(MDNet, self).__init__()
layers = []
layers.append(
nn.Conv2D(3,
conv_dim,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(conv_dim, weight_attr=None, bias_attr=None))
layers.append(nn.ReLU())
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(
nn.Conv2D(curr_dim,
curr_dim * 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim * 2,
weight_attr=None,
bias_attr=None))
layers.append(nn.ReLU())
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
self.main = nn.Sequential(*layers)
def forward(self, x):
"""forward"""
out = self.main(x)
return out
class TNetDown(paddle.nn.Layer):
"""MDNet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3):
super(TNetDown, self).__init__()
layers = []
layers.append(
nn.Conv2D(3,
conv_dim,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(conv_dim, weight_attr=False, bias_attr=False))
layers.append(nn.ReLU())
# Down-Sampling
curr_dim = conv_dim
for i in range(2):
layers.append(
nn.Conv2D(curr_dim,
curr_dim * 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim * 2,
weight_attr=False,
bias_attr=False))
layers.append(nn.ReLU())
curr_dim = curr_dim * 2
# Bottleneck
for i in range(repeat_num):
layers.append(
ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
self.main = nn.Sequential(*layers)
def forward(self, x):
"""forward"""
out = self.main(x)
return out
class GetMatrix(paddle.nn.Layer):
def __init__(self, dim_in, dim_out):
super(GetMatrix, self).__init__()
self.get_gamma = nn.Conv2D(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
self.get_beta = nn.Conv2D(dim_in,
dim_out,
kernel_size=1,
stride=1,
padding=0,
bias_attr=False)
def forward(self, x):
gamma = self.get_gamma(x)
beta = self.get_beta(x)
return gamma, beta
class MANet(paddle.nn.Layer):
"""MANet in PSGAN"""
def __init__(self, conv_dim=64, repeat_num=3, w=0.01):
super(MANet, self).__init__()
self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
curr_dim = conv_dim * 4
self.w = w
self.beta = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
self.gamma = nn.Conv2D(curr_dim, curr_dim, kernel_size=3, padding=1)
self.simple_spade = GetMatrix(curr_dim, 1) # get the makeup matrix
self.repeat_num = repeat_num
for i in range(repeat_num):
setattr(self, "bottlenecks_" + str(i),
ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
# Up-Sampling
self.upsamplers = []
self.up_betas = []
self.up_gammas = []
self.up_acts = []
y_dim = curr_dim
for i in range(2):
layers = []
layers.append(
nn.Conv2DTranspose(curr_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1,
bias_attr=False))
layers.append(
nn.InstanceNorm2D(curr_dim // 2,
weight_attr=False,
bias_attr=False))
setattr(self, "up_acts_" + str(i), nn.ReLU())
setattr(
self, "up_betas_" + str(i),
nn.Conv2DTranspose(y_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1))
setattr(
self, "up_gammas_" + str(i),
nn.Conv2DTranspose(y_dim,
curr_dim // 2,
kernel_size=4,
stride=2,
padding=1))
setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
curr_dim = curr_dim // 2
self.img_reg = [
nn.Conv2D(curr_dim,
3,
kernel_size=7,
stride=1,
padding=3,
bias_attr=False)
]
self.img_reg = nn.Sequential(*self.img_reg)
def forward(self, x, y, x_p, y_p, consistency_mask, mask_x, mask_y):
"""forward"""
# y -> ref feature
# x -> src img
x = self.encoder(x)
_, c, h, w = x.shape
_, c2, h2, w2 = y.shape
mask_x = F.interpolate(mask_x, size=(64, 64))
mask_x = mask_x.transpose((1, 0, 2, 3))
mask_x_re = mask_x.tile([1, x.shape[1], 1, 1])
mask_x_diff_re = mask_x.tile([1, x_p.shape[1], 1, 1])
mask_y = F.interpolate(mask_y, size=(64, 64))
mask_y = mask_y.transpose((1, 0, 2, 3))
mask_y_re = mask_y.tile([1, y.shape[1], 1, 1])
mask_y_diff_re = mask_y.tile([1, y_p.shape[1], 1, 1])
x_re = x.tile([3, 1, 1, 1])
y_re = y.tile([3, 1, 1, 1])
x_flat = x_re * mask_x_re
y_flat = y_re * mask_y_re
x_p = x_p.tile([3, 1, 1, 1]) * mask_x_diff_re
y_p = y_p.tile([3, 1, 1, 1]) * mask_y_diff_re
norm_x = paddle.norm(x_p, axis=1,
keepdim=True).tile([1, x_p.shape[1], 1, 1])
norm_x = paddle.where(norm_x == 0, paddle.to_tensor(1e10), norm_x)
x_p = x_p / norm_x
norm_y = paddle.norm(y_p, axis=1,
keepdim=True).tile([1, y_p.shape[1], 1, 1])
norm_y = paddle.where(norm_y == 0, paddle.to_tensor(1e10), norm_y)
y_p = y_p / norm_y
x_flat = paddle.concat([x_flat * 0.01, x_p], axis=1)
y_flat = paddle.concat([y_flat * 0.01, y_p], axis=1)
x_flat_re = x_flat.reshape([3, x_flat.shape[1], h * w])
y_flat_re = y_flat.reshape([3, y_flat.shape[1], h2 * w2])
a_ = paddle.matmul(x_flat_re, y_flat_re, transpose_x=True)
with paddle.no_grad():
a_mask = a_ != 0
a_ *= 200
a = F.softmax(a_, axis=-1)
a = a * a_mask
gamma, beta = self.simple_spade(y)
gamma = gamma.tile([3, 1, 1, 1]) * mask_y
beta = beta.tile([3, 1, 1, 1]) * mask_y
beta = beta.reshape([-1, h2 * w2, 1])
beta = paddle.matmul(a, beta)
beta = beta.transpose((0, 2, 1))
beta = beta.reshape([-1, 1, h2, w2])
gamma = gamma.reshape([-1, h2 * w2, 1])
gamma = paddle.matmul(a, gamma)
gamma = gamma.transpose((0, 2, 1))
gamma = gamma.reshape([-1, 1, h2, w2])
beta = (beta[0] + beta[1] + beta[2]).unsqueeze(0)
gamma = (gamma[0] + gamma[1] + gamma[2]).unsqueeze(0)
x = x * (1 + gamma) + beta
for i in range(self.repeat_num):
layer = getattr(self, "bottlenecks_" + str(i))
x = layer(x)
for idx in range(2):
layer = getattr(self, "up_samplers_" + str(idx))
x = layer(x)
layer = getattr(self, "up_acts_" + str(idx))
x = layer(x)
x = self.img_reg(x)
x = paddle.tanh(x)
return x, a
@GENERATORS.register()
class GeneratorPSGANAttention(paddle.nn.Layer):
def __init__(self, conv_dim=64, repeat_num=3):
super(GeneratorPSGANAttention, self).__init__()
self.ma_net = MANet(conv_dim=conv_dim, repeat_num=repeat_num)
self.md_net = MDNet(conv_dim=conv_dim, repeat_num=repeat_num)
def forward(self, x, y, x_p, y_p, consistency_mask, mask_x, mask_y):
"""forward"""
y = self.md_net(y)
out, a = self.ma_net(x, y, x_p, y_p, consistency_mask, mask_x, mask_y)
return out, a | 0.843412 | 0.297483 |
from datetime import datetime, timedelta
from discord import Member, Embed
from discord.ext.commands import Cog, BucketType, command, cooldown, has_permissions
from discord.ext.commands.errors import MissingPermissions
import os
import logging
class Bierjunge(Cog):
    # Cog implementing the student-fraternity "Bierjunge" beer-duel ritual:
    # members challenge each other, may double the stakes along BJ_LEVELS,
    # and land in the "Bierverschiss" penalty state if they fail to answer.
    #
    # State (in memory only; lost on restart):
    #   bierjungen    -- pending duels keyed by (challenger, challenged, level),
    #                    value is {"num_of_kicks": int} counting sent reminders
    #   bierverschiss -- members currently confined to the quarantine channel
    #   bierkrank     -- members who declared themselves beer-sick (exempt)

    # Escalation ladder; each "doppelt" advances one step (1, 2, 4, 8, 16 beers).
    BJ_LEVELS = ["Bierjunge", "Doktor", "Papst", "kleiner Ozean", "großer Ozean"]

    def __init__(self, bot):
        self.bot = bot
        self.log = logging.getLogger("gerfroniabot.bierjunge")
        # Channel IDs come from the environment; int() raises early if unset.
        self.QUARANTINE_CHANNEL_ID = int(os.getenv("QUARANTINE_CHANNEL_ID"))
        self.MAIN_CHANNEL_ID = int(os.getenv("MAIN_CHANNEL_ID"))
        self.bierjungen = {}
        self.bierverschiss = []
        self.bierkrank = []

    @Cog.listener()
    async def on_ready(self):
        """Report this cog as loaded to the bot's startup bookkeeping."""
        if not self.bot.ready:
            self.bot.cogs_ready.ready_up("bierjunge")
        self.log.info("Bierjunge cog ready")

    @command(name="bierjunge", aliases=["bj"], brief="Hänge einem Mitglied einen Bierjungen an")
    @cooldown(1, 5*60, BucketType.user)
    async def declare_bierjunge(self, ctx, member: Member):
        """
        Erwähne ein Mitglied, um ihm einen Bierjungen anzuhängen. Dein Gegner hat 10 Bierminuten
        Zeit, um die Forderung mit dem Befehl `hängt` anzunehmen oder mit `doppelt` zu verdoppeln, andernfalls
        fährt er in den Bierverschiss. Du kannst niemandem einen Bierjungen anhängen, wenn du oder dein Gegner
        schon in ein anderes Bierduell verwickelt sind. Wenn du dich mit `bierkrank` für bierkrank oder
        bierimpotent erklärt hast, oder wenn du dich im Bierverschiss befindest, kannst du ebenfalls nicht an
        Bierduellen teilnehmen.
        """
        # Eligibility guards: bots, penalized members, sick members and
        # members already involved in a duel cannot take part.
        if member.bot:
            await ctx.send(f":beer: Du kannst Bots keinen Bierjungen anhängen. Sie können sich nicht wehren.")
            return
        if ctx.author in self.bierverschiss:
            await ctx.send(f":beer: Du kannst niemanden zum Bierjungen herausfordern, da du im Bierverschiss bist.")
            return
        if ctx.author in self.bierkrank:
            await ctx.send(f":cup_with_straw: Du kannst keine Bierduelle eingehen, da du dich bierkrank gemeldet hast.")
            return
        if member in self.bierkrank:
            await ctx.send(f":cup_with_straw: Du kannst {member.display_name} keinen Bierjungen anhängen, da er bierkrank gemeldet ist.")
            return
        if member in self.bierverschiss:
            await ctx.send(f":beer: Du kannst {member.display_name} nicht zum Bierjungen herausfordern, da er im Bierverschiss ist.")
            return
        # Either party being listed in any pending duel blocks a new one.
        for party_a, party_b, bj_level in self.bierjungen.keys():
            if party_a == member or party_b == member:
                await ctx.send(f":beer: Du kannst {member.display_name} keinen Bierjungen anhängen, da er bereits in einen Bierskandal involviert ist.")
                return
            elif party_a == ctx.author or party_b == ctx.author:
                await ctx.send(f":beer: Du kannst {member.display_name} keinen Bierjungen anhängen, da du selbst bereits in einen Bierskandal involviert bist.")
                return
        await ctx.send(f":beer: {ctx.author.display_name} hat {member.mention} einen Bierjungen angehängt! {member.display_name} muss innerhalb"
                       f" von 5 Bierminuten mit {os.getenv('PREFIX')}hängt antworten, sonst wird er zum ersten Mal getreten.")
        # Reminder job fires every 3 minutes for up to 10 minutes; send_kick
        # escalates and finally sends the target into the Bierverschiss.
        # NOTE(review): the message promises 5 Bierminuten while the docstring
        # says 10 and the job window is 10 minutes — confirm intended timing.
        self.bot.scheduler.add_job(
            self.send_kick,
            trigger='interval',
            args=[ctx, ctx.author, member, 0],
            minutes=3,
            end_date=datetime.now()+timedelta(minutes=10)
        )
        self.bierjungen[ctx.author, member, 0] = {"num_of_kicks": 0}

    @command(name="hängt", aliases=["ht"], brief="Nimm einen geforderten Bierjungen an")
    async def bierjunge_haengt(self, ctx):
        """
        Wenn du mit `bierjunge` zu einem Bierjungen aufgefordert wurdest oder dein Gegner mit `doppelt` deine
        Forderung verdoppelt hat, kannst du mit `hängt` das Duell annehmen.
        """
        # The invoker must be the challenged party (second tuple element).
        # The scheduled reminder job is not cancelled here; send_kick becomes
        # a no-op once the dict entry is gone.
        for party_a, party_b, bj_level in self.bierjungen.keys():
            if party_b == ctx.author:
                await ctx.send(f":beer: Du hast die Forderung \"{self.BJ_LEVELS[bj_level]}\" von {party_a.mention} angenommen. Prost!")
                self.bierjungen.pop((party_a, party_b, bj_level))
                return
        await ctx.send(f":beer: Du wurdest zu keinem Bierjungen herausgefordert.")

    @command(name="doppelt", aliases=["dp"], brief="Verdoppele einen geforderten Bierjungen")
    async def haengt_doppelt(self, ctx):
        """
        Wenn du mit `bierjunge` zu einem Bierduell herausgefordert wurdest, kannst du statt mit `hängt` das
        Duell anzunehmen auch mittels `doppelt` die Biermenge verdoppeln. Die Verdopplungsstufen entsprechen
        dem Teutonenkomment: Nach dem Bierjungen kommt der Doktor (2 Bier), dann der Papst (4 Bier), der
        kleine Ozean (8 Bier) und der große Ozean (16 Bier). Nach dem großen Ozean ist keine weitere Verdopplung
        möglich. Wenn du eine Forderung verdoppelst, wird dein Gegner der Geforderte und kann wiederum mit
        `hängt` annehmen oder mit `doppelt` verdoppeln.
        """
        for party_a, party_b, bj_level in self.bierjungen.keys():
            if party_b == ctx.author:
                # Index 4 ("großer Ozean") is the top of BJ_LEVELS.
                if bj_level > 3:
                    await ctx.send(f":beer: Du kannst nicht mehr verdoppeln, da es sich bereits um einen großen Ozean handelt."
                                   f" Nimm ihn mit {os.getenv('PREFIX')}hängt an oder du fährst in den Bierverschiss.")
                    return
                await ctx.send(f":beer: {ctx.author.display_name} hat die Forderung von {party_a.mention} verdoppelt."
                               f" Der {self.BJ_LEVELS[bj_level]} ist jetzt ein {self.BJ_LEVELS[bj_level+1]}."
                               f" {party_a.mention}, du bist jetzt der Geforderte und musst innerhalb von"
                               f" 5 Bierminuten mit {os.getenv('PREFIX')}hängt antworten."
                               f" Ansonsten fährst du in den Bierverschiss.")
                # Roles swap and the level increases under the new key.
                self.bierjungen.pop((party_a, party_b, bj_level))
                self.bierjungen[party_b, party_a, bj_level + 1] = {"num_of_kicks": 0}
                # BUGFIX: the job must carry bj_level + 1 to match the new
                # dict key; the stale bj_level made send_kick look up an
                # entry that no longer exists, so reminders never fired.
                self.bot.scheduler.add_job(
                    self.send_kick,
                    trigger='interval',
                    args=[ctx, party_b, party_a, bj_level + 1],
                    minutes=3,
                    end_date=datetime.now()+timedelta(minutes=10)
                )
                return
        await ctx.send(f":beer: Gegen dich besteht keine Forderung, die du verdoppeln könntest.")

    @command("bierverschiss", aliases=["bv"], brief="Schicke ein Mitglied in den Bierverschiss")
    @has_permissions(manage_guild=True)
    async def send_to_bierverschiss(self, ctx, member: Member):
        """
        Mit diesem Befehl kannst du ein Mitglied in den Bierverschiss schicken. Dieser Befehl erfordert
        Server-Verwaltungsrechte. Wenn du ein Mitglied in den Bierverschiss schickst, werden alle ausstehenden
        Bierduelle des Mitglieds für beendet erklärt.
        """
        if member in self.bierverschiss:
            await ctx.send(f":beer: {member.display_name} ist bereits im Bierverschiss.")
            return
        await ctx.send(f":beer: {ctx.author.display_name} hat {member.mention} in den Bierverschiss geschickt.")
        # Move the member to the quarantine voice channel immediately.
        await member.edit(voice_channel=self.bot.guild.get_channel(self.QUARANTINE_CHANNEL_ID))
        self.bierverschiss.append(member)
        await self.remove_all_bierjungen(ctx, member)

    @send_to_bierverschiss.error
    async def send_to_bierverschiss_error(self, ctx, exc):
        """Translate missing-permission failures into a user-facing message."""
        if isinstance(exc, MissingPermissions):
            await ctx.send("Du musst Verwaltungsrechte für den Server haben, um diesen Befehl benutzen zu können.")
        else:
            raise exc

    @command("bierehrlich", aliases=["be"], brief="Hole ein Mitglied aus dem Bierverschiss")
    async def get_from_bierverschiss(self, ctx, member: Member):
        """
        Mit diesem Befehl kannst du ein anderes Mitglied aus dem Bierverschiss auspauken. Du kannst dich
        nicht selbst aus dem Bierverschiss auspauken.
        """
        if ctx.author == member:
            await ctx.send(f":beer: Du kannst dich nicht selbst aus dem Bierverschiss auspauken!")
            return
        if member not in self.bierverschiss:
            await ctx.send(f":beer: {member.display_name} ist nicht im Bierverschiss.")
            return
        await ctx.send(f":beer: Wer ist bierehrlich? {member.mention}! Was ist {member.display_name}? Bierehrlich!")
        self.bierverschiss.remove(member)

    @command("listebv", aliases=["lsbv"], brief="Zeige alle Mitglieder im Bierverschiss")
    async def list_bierverschiss(self, ctx):
        """
        Dieser Befehl zeigt eine Bierschissertafel an, auf der alle Mitglieder verzeichnet sind,
        die sich momentan im Bierverschiss befinden.
        """
        if not self.bierverschiss:
            await ctx.send(":beer: Es befindet sich derzeit niemand im Bierverschiss.")
            return
        embed = Embed(title="Bierschissertafel", description="Die folgenden Mitglieder sind im Bierverschiss.")
        embed.add_field(name="Name", value=", ".join(m.display_name for m in self.bierverschiss))
        await ctx.send(embed=embed)

    @command("bierkrank", aliases=["bk"], brief="Erkläre dich selbst für bierkrank oder bierimpotent")
    async def make_bierkrank(self, ctx):
        """
        Wenn du kein Bier zuhause hast oder aus anderen Gründen keines trinken kannst oder willst,
        kannst du dich für bierkrank erklären. Du kannst dann nicht zu Bierduellen herausgefordert werden.
        Wenn du dich für bierkrank erklärst, werden alle ausstehenden Duelle sofort beendet.
        """
        if ctx.author in self.bierkrank:
            await ctx.send(f":cup_with_straw: Du bist bereits bierkrank gemeldet.")
            return
        await ctx.send(f":cup_with_straw: {ctx.author.display_name} hat sich für bierkrank erklärt.")
        self.bierkrank.append(ctx.author)
        # Declaring sickness also lifts an active Bierverschiss and cancels
        # every pending duel involving the author.
        if ctx.author in self.bierverschiss:
            self.bierverschiss.remove(ctx.author)
            await ctx.send(f":beer: {ctx.author.display_name} wurde automatisch aus dem Bierverschiss entfernt.")
        await self.remove_all_bierjungen(ctx, ctx.author)

    @command("biergesund", aliases=["bg"], brief="Erkläre deine Bierkrankheit für beendet")
    async def make_biergesund(self, ctx):
        """
        Wenn du wieder trinken willst und für Bierduelle bereitstehst, kannst du dich mit diesem Befehl
        selbst aus der Liste der Bierkranken austragen.
        """
        if ctx.author in self.bierkrank:
            await ctx.send(f":cup_with_straw: {ctx.author.display_name} hat sich aus der Liste der Bierkranken ausgetragen.")
            self.bierkrank.remove(ctx.author)
        else:
            await ctx.send(f":cup_with_straw: Du bist nicht bierkrank gemeldet.")

    @command("listebk", aliases=["lsbk"], brief="Zeige alle bierkranken Mitglieder")
    async def list_bierkrank(self, ctx):
        """
        Dieser Befehl zeigt eine Bierkrankentafel an, auf der alle Mitglieder verzeichnet sind,
        die momentan bierkrank gemeldet sind.
        """
        if not self.bierkrank:
            # BUGFIX: this message previously was a copy-paste of the
            # Bierverschiss text; it must talk about the sick list.
            await ctx.send(":cup_with_straw: Es ist derzeit niemand bierkrank gemeldet.")
            return
        embed = Embed(title="Bierkrankentafel", description="Die folgenden Mitglieder sind bierkrank gemeldet.")
        embed.add_field(name="Name", value=", ".join(m.display_name for m in self.bierkrank))
        await ctx.send(embed=embed)

    async def send_kick(self, ctx, party_a, party_b, bj_level):
        """Escalate one reminder step for a pending duel.

        Scheduled by APScheduler; fires up to three times. The first two
        firings warn the challenged party, the third sends them into the
        Bierverschiss and removes the duel.
        """
        # BUGFIX: the job keeps firing even after the duel was accepted or
        # cancelled; a plain [] lookup raised KeyError then (the original
        # `if bj is None` guard was dead code). .get() makes late firings
        # a harmless no-op.
        bj = self.bierjungen.get((party_a, party_b, bj_level))
        if bj is None:
            return
        num_of_kicks = bj["num_of_kicks"]
        if num_of_kicks == 0:
            await ctx.send(f":beer: {party_b.mention}, ich trete dich zum ersten Mal!"
                           f" Antworte mit {os.getenv('PREFIX')}hängt oder du landest bald im Bierverschiss!")
            self.bierjungen[party_a, party_b, bj_level]["num_of_kicks"] = 1
        elif num_of_kicks == 1:
            await ctx.send(f":beer: {party_b.mention}, ich trete dich zum **zweiten** Mal!"
                           f" Antworte mit {os.getenv('PREFIX')}hängt oder du landest sehr bald im Bierverschiss!")
            self.bierjungen[party_a, party_b, bj_level]["num_of_kicks"] = 2
        elif num_of_kicks == 2:
            await ctx.send(f":beer: {party_b.mention} fährt hiermit wegen versäumter Annahme der Forderung \"{self.BJ_LEVELS[bj_level]}\""
                           f" von {party_a.mention} in den ersten Bierverschiss.")
            self.bierjungen.pop((party_a, party_b, bj_level))
            self.bierverschiss.append(party_b)

    async def remove_all_bierjungen(self, ctx, member):
        """Cancel every pending duel that *member* is a party to."""
        # BUGFIX: snapshot matching keys before mutating — popping while
        # iterating a dict raises RuntimeError, and the original returned
        # after the first match despite the name promising "all".
        affected = [key for key in self.bierjungen if member in key[:2]]
        for party_a, party_b, bj_level in affected:
            await ctx.send(f":beer: Der {self.BJ_LEVELS[bj_level]} zwischen {party_a.display_name} und {party_b.display_name} wurde abgebrochen.")
            self.bierjungen.pop((party_a, party_b, bj_level), None)

    @Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        """Force members in the Bierverschiss back into the quarantine channel."""
        if member in self.bierverschiss and after.channel is not None and after.channel.id != self.QUARANTINE_CHANNEL_ID:
            await member.move_to(self.bot.guild.get_channel(self.QUARANTINE_CHANNEL_ID))
            await self.bot.guild.get_channel(self.MAIN_CHANNEL_ID).send(f":poop: **{member.display_name} hat versucht, aus dem Bierverschiss auszubrechen!**")
def setup(bot):
    # Standard discord.py extension entry point: instantiate and register the cog.
    cog = Bierjunge(bot)
    bot.add_cog(cog)
from discord import Member, Embed
from discord.ext.commands import Cog, BucketType, command, cooldown, has_permissions
from discord.ext.commands.errors import MissingPermissions
import os
import logging
class Bierjunge(Cog):
BJ_LEVELS = ["Bierjunge", "Doktor", "Papst", "kleiner Ozean", "großer Ozean"]
def __init__(self, bot):
    self.bot = bot
    self.log = logging.getLogger("gerfroniabot.bierjunge")
    # Channel IDs come from the environment; int(None) raises if they are unset.
    self.QUARANTINE_CHANNEL_ID = int(os.getenv("QUARANTINE_CHANNEL_ID"))
    self.MAIN_CHANNEL_ID = int(os.getenv("MAIN_CHANNEL_ID"))
    # Pending duels keyed by (challenger, challenged, level) -> {"num_of_kicks": int}.
    self.bierjungen = {}
    # Members currently in the Bierverschiss / declared beer-sick (in-memory only).
    self.bierverschiss = []
    self.bierkrank = []
@Cog.listener()
async def on_ready(self):
    """Mark the bierjunge cog as ready during initial startup."""
    bot = self.bot
    if not bot.ready:
        bot.cogs_ready.ready_up("bierjunge")
    self.log.info("Bierjunge cog ready")
@command(name="bierjunge", aliases=["bj"], brief="Hänge einem Mitglied einen Bierjungen an")
@cooldown(1, 5*60, BucketType.user)
async def declare_bierjunge(self, ctx, member: Member):
    """
    Erwähne ein Mitglied, um ihm einen Bierjungen anzuhängen. Dein Gegner hat 10 Bierminuten
    Zeit, um die Forderung mit dem Befehl `hängt` anzunehmen oder mit `doppelt` zu verdoppeln, andernfalls
    fährt er in den Bierverschiss. Du kannst niemandem einen Bierjungen anhängen, wenn du oder dein Gegner
    schon in ein anderes Bierduell verwickelt sind. Wenn du dich mit `bierkrank` für bierkrank oder
    bierimpotent erklärt hast, oder wenn du dich im Bierverschiss befindest, kannst du ebenfalls nicht an
    Bierduellen teilnehmen.
    """
    # Eligibility guards: bots, penalized members, sick members and members
    # already involved in a duel cannot take part.
    if member.bot:
        await ctx.send(f":beer: Du kannst Bots keinen Bierjungen anhängen. Sie können sich nicht wehren.")
        return
    if ctx.author in self.bierverschiss:
        await ctx.send(f":beer: Du kannst niemanden zum Bierjungen herausfordern, da du im Bierverschiss bist.")
        return
    if ctx.author in self.bierkrank:
        await ctx.send(f":cup_with_straw: Du kannst keine Bierduelle eingehen, da du dich bierkrank gemeldet hast.")
        return
    if member in self.bierkrank:
        await ctx.send(f":cup_with_straw: Du kannst {member.display_name} keinen Bierjungen anhängen, da er bierkrank gemeldet ist.")
        return
    if member in self.bierverschiss:
        await ctx.send(f":beer: Du kannst {member.display_name} nicht zum Bierjungen herausfordern, da er im Bierverschiss ist.")
        return
    # Either party appearing in any pending duel blocks a new challenge.
    for party_a, party_b, bj_level in self.bierjungen.keys():
        if party_a == member or party_b == member:
            await ctx.send(f":beer: Du kannst {member.display_name} keinen Bierjungen anhängen, da er bereits in einen Bierskandal involviert ist.")
            return
        elif party_a == ctx.author or party_b == ctx.author:
            await ctx.send(f":beer: Du kannst {member.display_name} keinen Bierjungen anhängen, da du selbst bereits in einen Bierskandal involviert bist.")
            return
    await ctx.send(f":beer: {ctx.author.display_name} hat {member.mention} einen Bierjungen angehängt! {member.display_name} muss innerhalb"
                   f" von 5 Bierminuten mit {os.getenv('PREFIX')}hängt antworten, sonst wird er zum ersten Mal getreten.")
    # Reminder job fires every 3 minutes until +10 minutes; send_kick
    # escalates and eventually sends the target into the Bierverschiss.
    # NOTE(review): the message says 5 Bierminuten, the docstring says 10,
    # and the job window is 10 minutes — confirm the intended timing.
    self.bot.scheduler.add_job(
        self.send_kick,
        trigger='interval',
        args=[ctx, ctx.author, member, 0],
        minutes=3,
        end_date=datetime.now()+timedelta(minutes=10)
    )
    self.bierjungen[ctx.author, member, 0] = {"num_of_kicks": 0}
@command(name="hängt", aliases=["ht"], brief="Nimm einen geforderten Bierjungen an")
async def bierjunge_haengt(self, ctx):
    """
    Wenn du mit `bierjunge` zu einem Bierjungen aufgefordert wurdest oder dein Gegner mit `doppelt` deine
    Forderung verdoppelt hat, kannst du mit `hängt` das Duell annehmen.
    """
    # Find the first pending duel in which the invoker is the challenged party.
    match = next((key for key in self.bierjungen if key[1] == ctx.author), None)
    if match is None:
        await ctx.send(f":beer: Du wurdest zu keinem Bierjungen herausgefordert.")
        return
    party_a, _party_b, bj_level = match
    await ctx.send(f":beer: Du hast die Forderung \"{self.BJ_LEVELS[bj_level]}\" von {party_a.mention} angenommen. Prost!")
    self.bierjungen.pop(match)
@command(name="doppelt", aliases=["dp"], brief="Verdoppele einen geforderten Bierjungen")
async def haengt_doppelt(self, ctx):
    """
    Wenn du mit `bierjunge` zu einem Bierduell herausgefordert wurdest, kannst du statt mit `hängt` das
    Duell anzunehmen auch mittels `doppelt` die Biermenge verdoppeln. Die Verdopplungsstufen entsprechen
    dem Teutonenkomment: Nach dem Bierjungen kommt der Doktor (2 Bier), dann der Papst (4 Bier), der
    kleine Ozean (8 Bier) und der große Ozean (16 Bier). Nach dem großen Ozean ist keine weitere Verdopplung
    möglich. Wenn du eine Forderung verdoppelst, wird dein Gegner der Geforderte und kann wiederum mit
    `hängt` annehmen oder mit `doppelt` verdoppeln.
    """
    # NOTE: the German docstring doubles as the discord.py help text and is kept verbatim.
    # Double a pending challenge: the challenged member raises the stakes and
    # the roles swap, so the original challenger now has to answer.
    for party_a, party_b, bj_level in self.bierjungen.keys():
        if party_b == ctx.author:  # only the currently challenged member may double
            if bj_level > 3:
                # Level 4 ("großer Ozean") is the cap — no further doubling.
                await ctx.send(f":beer: Du kannst nicht mehr verdoppeln, da es sich bereits um einen großen Ozean handelt."
                               f" Nimm ihn mit {os.getenv('PREFIX')}hängt an oder du fährst in den Bierverschiss.")
                return
            await ctx.send(f":beer: {ctx.author.display_name} hat die Forderung von {party_a.mention} verdoppelt."
                           f" Der {self.BJ_LEVELS[bj_level]} ist jetzt ein {self.BJ_LEVELS[bj_level+1]}."
                           f" {party_a.mention}, du bist jetzt der Geforderte und musst innerhalb von"
                           f" 5 Bierminuten mit {os.getenv('PREFIX')}hängt antworten."
                           f" Ansonsten fährst du in den Bierverschiss.")
            # Swap roles and bump the escalation level. Popping is safe because
            # we return right after mutating the dict.
            self.bierjungen.pop((party_a, party_b, bj_level))
            self.bierjungen[party_b, party_a, bj_level + 1] = {"num_of_kicks": 0}
            # FIX: the reminder job must reference the NEW duel key. The old
            # code scheduled send_kick with the pre-doubling bj_level, so the
            # job looked up a key that no longer existed and the doubled duel
            # never produced kicks.
            self.bot.scheduler.add_job(
                self.send_kick,
                trigger='interval',
                args=[ctx, party_b, party_a, bj_level + 1],
                minutes=3,
                end_date=datetime.now() + timedelta(minutes=10)
            )
            return
    await ctx.send(f":beer: Gegen dich besteht keine Forderung, die du verdoppeln könntest.")
@command("bierverschiss", aliases=["bv"], brief="Schicke ein Mitglied in den Bierverschiss")
@has_permissions(manage_guild=True)
async def send_to_bierverschiss(self, ctx, member: Member):
    """
    Mit diesem Befehl kannst du ein Mitglied in den Bierverschiss schicken. Dieser Befehl erfordert
    Server-Verwaltungsrechte. Wenn du ein Mitglied in den Bierverschiss schickst, werden alle ausstehenden
    Bierduelle des Mitglieds für beendet erklärt.
    """
    # NOTE: the German docstring doubles as the discord.py help text and is kept verbatim.
    # Admin-only command: put `member` into quarantine ("Bierverschiss").
    if member in self.bierverschiss:
        await ctx.send(f":beer: {member.display_name} ist bereits im Bierverschiss.")
        return
    await ctx.send(f":beer: {ctx.author.display_name} hat {member.mention} in den Bierverschiss geschickt.")
    # Physically move the member into the quarantine voice channel.
    await member.edit(voice_channel=self.bot.guild.get_channel(self.QUARANTINE_CHANNEL_ID))
    self.bierverschiss.append(member)
    # Quarantined members cannot duel: cancel their pending duels.
    await self.remove_all_bierjungen(ctx, member)
@send_to_bierverschiss.error
async def send_to_bierverschiss_error(self, ctx, exc):
    """Error hook for the bierverschiss command: explain missing permissions, re-raise everything else."""
    if not isinstance(exc, MissingPermissions):
        raise exc
    await ctx.send("Du musst Verwaltungsrechte für den Server haben, um diesen Befehl benutzen zu können.")
@command("bierehrlich", aliases=["be"], brief="Hole ein Mitglied aus dem Bierverschiss")
async def get_from_bierverschiss(self, ctx, member: Member):
    """
    Mit diesem Befehl kannst du ein anderes Mitglied aus dem Bierverschiss auspauken. Du kannst dich
    nicht selbst aus dem Bierverschiss auspauken.
    """
    # NOTE: the German docstring doubles as the discord.py help text and is kept verbatim.
    # Release `member` from quarantine; self-release is explicitly forbidden.
    if ctx.author == member:
        await ctx.send(f":beer: Du kannst dich nicht selbst aus dem Bierverschiss auspauken!")
        return
    if member not in self.bierverschiss:
        await ctx.send(f":beer: {member.display_name} ist nicht im Bierverschiss.")
        return
    await ctx.send(f":beer: Wer ist bierehrlich? {member.mention}! Was ist {member.display_name}? Bierehrlich!")
    self.bierverschiss.remove(member)
@command("listebv", aliases=["lsbv"], brief="Zeige alle Mitglieder im Bierverschiss")
async def list_bierverschiss(self, ctx):
    """
    Dieser Befehl zeigt eine Bierschissertafel an, auf der alle Mitglieder verzeichnet sind,
    die sich momentan im Bierverschiss befinden.
    """
    # NOTE: the German docstring doubles as the discord.py help text and is kept verbatim.
    if not self.bierverschiss:
        await ctx.send(":beer: Es befindet sich derzeit niemand im Bierverschiss.")
        return
    # Single embed with one comma-separated field of display names.
    embed = Embed(title="Bierschissertafel", description="Die folgenden Mitglieder sind im Bierverschiss.")
    embed.add_field(name="Name", value=", ".join(m.display_name for m in self.bierverschiss))
    await ctx.send(embed=embed)
@command("bierkrank", aliases=["bk"], brief="Erkläre dich selbst für bierkrank oder bierimpotent")
async def make_bierkrank(self, ctx):
    """
    Wenn du kein Bier zuhause hast oder aus anderen Gründen keines trinken kannst oder willst,
    kannst du dich für bierkrank erklären. Du kannst dann nicht zu Bierduellen herausgefordert werden.
    Wenn du dich für bierkrank erklärst, werden alle ausstehenden Duelle sofort beendet.
    """
    # NOTE: the German docstring doubles as the discord.py help text and is kept verbatim.
    if ctx.author in self.bierkrank:
        await ctx.send(f":cup_with_straw: Du bist bereits bierkrank gemeldet.")
        return
    await ctx.send(f":cup_with_straw: {ctx.author.display_name} hat sich für bierkrank erklärt.")
    self.bierkrank.append(ctx.author)
    # Reporting sick supersedes quarantine: leave the Bierverschiss automatically.
    if ctx.author in self.bierverschiss:
        self.bierverschiss.remove(ctx.author)
        await ctx.send(f":beer: {ctx.author.display_name} wurde automatisch aus dem Bierverschiss entfernt.")
    # Sick members cannot duel, so cancel all of their pending duels.
    await self.remove_all_bierjungen(ctx, ctx.author)
@command("biergesund", aliases=["bg"], brief="Erkläre deine Bierkrankheit für beendet")
async def make_biergesund(self, ctx):
    """
    Wenn du wieder trinken willst und für Bierduelle bereitstehst, kannst du dich mit diesem Befehl
    selbst aus der Liste der Bierkranken austragen.
    """
    # Guard clause: nothing to do when the author is not on the sick list.
    if ctx.author not in self.bierkrank:
        await ctx.send(f":cup_with_straw: Du bist nicht bierkrank gemeldet.")
        return
    # Announce first, then deregister (same order as before).
    await ctx.send(f":cup_with_straw: {ctx.author.display_name} hat sich aus der Liste der Bierkranken ausgetragen.")
    self.bierkrank.remove(ctx.author)
@command("listebk", aliases=["lsbk"], brief="Zeige alle bierkranken Mitglieder")
async def list_bierkrank(self, ctx):
    """
    Dieser Befehl zeigt eine Bierkrankentafel an, auf der alle Mitglieder verzeichnet sind,
    die momentan bierkrank gemeldet sind.
    """
    # FIX: the empty-list message was copy-pasted from the Bierverschiss
    # listing and wrongly claimed nobody was "im Bierverschiss".
    if not self.bierkrank:
        await ctx.send(":cup_with_straw: Es ist derzeit niemand bierkrank gemeldet.")
        return
    embed = Embed(title="Bierkrankentafel", description="Die folgenden Mitglieder sind bierkrank gemeldet.")
    embed.add_field(name="Name", value=", ".join(m.display_name for m in self.bierkrank))
    await ctx.send(embed=embed)
async def send_kick(self, ctx, party_a, party_b, bj_level):
    """Scheduled reminder ("kick") for a still-unanswered duel.

    Runs as an APScheduler interval job. On the third kick the challenged
    member is sent into the Bierverschiss and the duel is closed.
    """
    # FIX: the duel may already have been popped (accepted, doubled or
    # cancelled) while this job is still firing. Direct indexing raised
    # KeyError and the old `is None` guard never triggered; .get() makes the
    # stale job a clean no-op.
    bj = self.bierjungen.get((party_a, party_b, bj_level))
    if bj is None:
        return
    num_of_kicks = bj["num_of_kicks"]
    if num_of_kicks == 0:
        await ctx.send(f":beer: {party_b.mention}, ich trete dich zum ersten Mal!"
                       f" Antworte mit {os.getenv('PREFIX')}hängt oder du landest bald im Bierverschiss!")
        self.bierjungen[party_a, party_b, bj_level]["num_of_kicks"] = 1
    elif num_of_kicks == 1:
        await ctx.send(f":beer: {party_b.mention}, ich trete dich zum **zweiten** Mal!"
                       f" Antworte mit {os.getenv('PREFIX')}hängt oder du landest sehr bald im Bierverschiss!")
        self.bierjungen[party_a, party_b, bj_level]["num_of_kicks"] = 2
    elif num_of_kicks == 2:
        # Third strike: close the duel and quarantine the challenged member.
        await ctx.send(f":beer: {party_b.mention} fährt hiermit wegen versäumter Annahme der Forderung \"{self.BJ_LEVELS[bj_level]}\""
                       f" von {party_a.mention} in den ersten Bierverschiss.")
        self.bierjungen.pop((party_a, party_b, bj_level))
        self.bierverschiss.append(party_b)
async def remove_all_bierjungen(self, ctx, member):
    """Cancel every pending duel that `member` participates in, announcing each cancellation."""
    # FIX: the old loop returned after the FIRST match, so only one of
    # potentially several duels was cancelled — despite the function's name.
    # Iterating a snapshot (list of keys) also makes popping during the loop
    # safe instead of risking a "dict changed size" RuntimeError.
    for party_a, party_b, bj_level in list(self.bierjungen):
        if party_a == member or party_b == member:
            await ctx.send(f":beer: Der {self.BJ_LEVELS[bj_level]} zwischen {party_a.display_name} und {party_b.display_name} wurde abgebrochen.")
            self.bierjungen.pop((party_a, party_b, bj_level))
@Cog.listener()
async def on_voice_state_update(self, member, before, after):
    # Enforce quarantine: members in the Bierverschiss may only sit in the
    # quarantine voice channel. Any attempt to join another channel moves
    # them back and publicly shames them in the main text channel.
    if member in self.bierverschiss and after.channel is not None and after.channel.id != self.QUARANTINE_CHANNEL_ID:
        await member.move_to(self.bot.guild.get_channel(self.QUARANTINE_CHANNEL_ID))
        await self.bot.guild.get_channel(self.MAIN_CHANNEL_ID).send(f":poop: **{member.display_name} hat versucht, aus dem Bierverschiss auszubrechen!**")
def setup(bot):
    """discord.py extension hook: instantiate and register the Bierjunge cog."""
    cog = Bierjunge(bot)
    bot.add_cog(cog)
import logging
from typing import List, Optional, Tuple, Iterator
from volatility.framework import interfaces, renderers, exceptions, symbols
from volatility.framework.configuration import requirements
from volatility.framework.interfaces import configuration
from volatility.framework.renderers import format_hints
from volatility.framework.symbols import intermed
from volatility.framework.symbols.windows import extensions
from volatility.framework.symbols.windows import versions
# Module-level logger (volatility convention: "vollog").
vollog = logging.getLogger(__name__)
class BigPools(interfaces.plugins.PluginInterface):
    """List big page pools."""

    # Minimum volatility framework version this plugin requires.
    _required_framework_version = (2, 0, 0)
    # Version of this plugin itself.
    _version = (1, 0, 0)

    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        # Since we're calling the plugin, make sure we have the plugin's requirements
        return [
            requirements.TranslationLayerRequirement(name = 'primary',
                                                     description = 'Memory layer for the kernel',
                                                     architectures = ["Intel32", "Intel64"]),
            requirements.SymbolTableRequirement(name = "nt_symbols", description = "Windows kernel symbols"),
            requirements.StringRequirement(name = 'tags',
                                           description = "Comma separated list of pool tags to filter pools returned",
                                           optional = True,
                                           default = None)
        ]

    @classmethod
    def list_big_pools(cls,
                       context: interfaces.context.ContextInterface,
                       layer_name: str,
                       symbol_table: str,
                       tags: Optional[list] = None):
        """Returns the big page pool objects from the kernel PoolBigPageTable array.

        Args:
            context: The context to retrieve required elements (layers, symbol tables) from
            layer_name: The name of the layer on which to operate
            symbol_table: The name of the table containing the kernel symbols
            tags: An optional list of pool tags to filter big page pool tags by

        Yields:
            A big page pool object
        """
        # Resolve the kernel module at its virtual load offset.
        kvo = context.layers[layer_name].config['kernel_virtual_offset']
        ntkrnlmp = context.module(symbol_table, layer_name = layer_name, offset = kvo)
        # PoolBigPageTable holds a pointer to the tracker array; read it as a
        # 64-bit integer, then read the table's element count.
        big_page_table_offset = ntkrnlmp.get_symbol("PoolBigPageTable").address
        big_page_table = ntkrnlmp.object(object_type = "unsigned long long", offset = big_page_table_offset)
        big_page_table_size_offset = ntkrnlmp.get_symbol("PoolBigPageTableSize").address
        big_page_table_size = ntkrnlmp.object(object_type = "unsigned long", offset = big_page_table_size_offset)
        try:
            big_page_table_type = ntkrnlmp.get_type("_POOL_TRACKER_BIG_PAGES")
        except exceptions.SymbolError:
            # We have to manually load a symbol table
            # (the kernel symbols apparently lack _POOL_TRACKER_BIG_PAGES here);
            # choose the bundled JSON by Windows generation and bitness.
            is_vista_or_later = versions.is_vista_or_later(context, symbol_table)
            is_win10 = versions.is_win10(context, symbol_table)
            if is_win10:
                big_pools_json_filename = "bigpools-win10"
            elif is_vista_or_later:
                big_pools_json_filename = "bigpools-vista"
            else:
                big_pools_json_filename = "bigpools"
            if symbols.symbol_table_is_64bit(context, symbol_table):
                big_pools_json_filename += "-x64"
            else:
                big_pools_json_filename += "-x86"
            new_table_name = intermed.IntermediateSymbolTable.create(
                context = context,
                config_path = configuration.path_join(context.symbol_space[symbol_table].config_path, "bigpools"),
                sub_path = "windows",
                filename = big_pools_json_filename,
                table_mapping = {'nt_symbols': symbol_table},
                class_types = {'_POOL_TRACKER_BIG_PAGES': extensions.pool.POOL_TRACKER_BIG_PAGES})
            module = context.module(new_table_name, layer_name, offset = 0)
            big_page_table_type = module.get_type("_POOL_TRACKER_BIG_PAGES")
        # Overlay the tracker array at the address the table pointer refers to.
        big_pools = ntkrnlmp.object(object_type = "array",
                                    offset = big_page_table,
                                    subtype = big_page_table_type,
                                    count = big_page_table_size,
                                    absolute = True)
        for big_pool in big_pools:
            if big_pool.is_valid():
                # Apply the optional pool-tag filter.
                if tags is None or big_pool.get_key() in tags:
                    yield big_pool

    def _generator(self) -> Iterator[Tuple[int, Tuple[int, str]]]:  # , str, int]]]:
        # Split the comma-separated --tags option, if provided.
        if self.config.get("tags"):
            tags = [tag for tag in self.config["tags"].split(',')]
        else:
            tags = None
        for big_pool in self.list_big_pools(context = self.context,
                                            layer_name = self.config["primary"],
                                            symbol_table = self.config["nt_symbols"],
                                            tags = tags):
            num_bytes = big_pool.get_number_of_bytes()
            # Only wrap in Hex when the size could actually be read.
            if not isinstance(num_bytes, interfaces.renderers.BaseAbsentValue):
                num_bytes = format_hints.Hex(num_bytes)
            yield (0, (format_hints.Hex(big_pool.Va), big_pool.get_key(), big_pool.get_pool_type(), num_bytes))

    def run(self):
        # Column layout must match the 4-tuples produced by _generator.
        return renderers.TreeGrid([
            ('Allocation', format_hints.Hex),
            ('Tag', str),
            ('PoolType', str),
            ('NumberOfBytes', format_hints.Hex),
        ], self._generator())
import logging
from typing import List, Optional, Tuple, Iterator
from volatility.framework import interfaces, renderers, exceptions, symbols
from volatility.framework.configuration import requirements
from volatility.framework.interfaces import configuration
from volatility.framework.renderers import format_hints
from volatility.framework.symbols import intermed
from volatility.framework.symbols.windows import extensions
from volatility.framework.symbols.windows import versions
# Module-level logger (volatility convention: "vollog").
vollog = logging.getLogger(__name__)
class BigPools(interfaces.plugins.PluginInterface):
    """List big page pools."""

    # Minimum volatility framework version this plugin requires.
    _required_framework_version = (2, 0, 0)
    # Version of this plugin itself.
    _version = (1, 0, 0)

    @classmethod
    def get_requirements(cls) -> List[interfaces.configuration.RequirementInterface]:
        # Since we're calling the plugin, make sure we have the plugin's requirements
        return [
            requirements.TranslationLayerRequirement(name = 'primary',
                                                     description = 'Memory layer for the kernel',
                                                     architectures = ["Intel32", "Intel64"]),
            requirements.SymbolTableRequirement(name = "nt_symbols", description = "Windows kernel symbols"),
            requirements.StringRequirement(name = 'tags',
                                           description = "Comma separated list of pool tags to filter pools returned",
                                           optional = True,
                                           default = None)
        ]

    @classmethod
    def list_big_pools(cls,
                       context: interfaces.context.ContextInterface,
                       layer_name: str,
                       symbol_table: str,
                       tags: Optional[list] = None):
        """Returns the big page pool objects from the kernel PoolBigPageTable array.

        Args:
            context: The context to retrieve required elements (layers, symbol tables) from
            layer_name: The name of the layer on which to operate
            symbol_table: The name of the table containing the kernel symbols
            tags: An optional list of pool tags to filter big page pool tags by

        Yields:
            A big page pool object
        """
        # Resolve the kernel module at its virtual load offset.
        kvo = context.layers[layer_name].config['kernel_virtual_offset']
        ntkrnlmp = context.module(symbol_table, layer_name = layer_name, offset = kvo)
        # PoolBigPageTable holds a pointer to the tracker array; read it as a
        # 64-bit integer, then read the table's element count.
        big_page_table_offset = ntkrnlmp.get_symbol("PoolBigPageTable").address
        big_page_table = ntkrnlmp.object(object_type = "unsigned long long", offset = big_page_table_offset)
        big_page_table_size_offset = ntkrnlmp.get_symbol("PoolBigPageTableSize").address
        big_page_table_size = ntkrnlmp.object(object_type = "unsigned long", offset = big_page_table_size_offset)
        try:
            big_page_table_type = ntkrnlmp.get_type("_POOL_TRACKER_BIG_PAGES")
        except exceptions.SymbolError:
            # We have to manually load a symbol table
            # (the kernel symbols apparently lack _POOL_TRACKER_BIG_PAGES);
            # choose the bundled JSON by Windows generation and bitness.
            is_vista_or_later = versions.is_vista_or_later(context, symbol_table)
            is_win10 = versions.is_win10(context, symbol_table)
            if is_win10:
                big_pools_json_filename = "bigpools-win10"
            elif is_vista_or_later:
                big_pools_json_filename = "bigpools-vista"
            else:
                big_pools_json_filename = "bigpools"
            if symbols.symbol_table_is_64bit(context, symbol_table):
                big_pools_json_filename += "-x64"
            else:
                big_pools_json_filename += "-x86"
            new_table_name = intermed.IntermediateSymbolTable.create(
                context = context,
                config_path = configuration.path_join(context.symbol_space[symbol_table].config_path, "bigpools"),
                sub_path = "windows",
                filename = big_pools_json_filename,
                table_mapping = {'nt_symbols': symbol_table},
                class_types = {'_POOL_TRACKER_BIG_PAGES': extensions.pool.POOL_TRACKER_BIG_PAGES})
            module = context.module(new_table_name, layer_name, offset = 0)
            big_page_table_type = module.get_type("_POOL_TRACKER_BIG_PAGES")
        # Overlay the tracker array at the address the table pointer refers to.
        big_pools = ntkrnlmp.object(object_type = "array",
                                    offset = big_page_table,
                                    subtype = big_page_table_type,
                                    count = big_page_table_size,
                                    absolute = True)
        for big_pool in big_pools:
            if big_pool.is_valid():
                # Apply the optional pool-tag filter.
                if tags is None or big_pool.get_key() in tags:
                    yield big_pool

    def _generator(self) -> Iterator[Tuple[int, Tuple]]:
        # FIX: annotation cleaned up — each yielded row is a 4-tuple
        # (Allocation, Tag, PoolType, NumberOfBytes); the old annotation
        # claimed a 2-tuple and carried a stray trailing comment.
        # FIX: str.split already returns a list, so the wrapping list
        # comprehension was redundant.
        if self.config.get("tags"):
            tags = self.config["tags"].split(',')
        else:
            tags = None
        for big_pool in self.list_big_pools(context = self.context,
                                            layer_name = self.config["primary"],
                                            symbol_table = self.config["nt_symbols"],
                                            tags = tags):
            num_bytes = big_pool.get_number_of_bytes()
            # Only wrap in Hex when the size could actually be read.
            if not isinstance(num_bytes, interfaces.renderers.BaseAbsentValue):
                num_bytes = format_hints.Hex(num_bytes)
            yield (0, (format_hints.Hex(big_pool.Va), big_pool.get_key(), big_pool.get_pool_type(), num_bytes))

    def run(self):
        # Column layout must match the 4-tuples produced by _generator.
        return renderers.TreeGrid([
            ('Allocation', format_hints.Hex),
            ('Tag', str),
            ('PoolType', str),
            ('NumberOfBytes', format_hints.Hex),
        ], self._generator())
import csv
import json
import pandas as pd
def multi_index_to_single_index(df):
    """Flatten a column MultiIndex by concatenating each column tuple's parts
    (stringifying the second level) and promote the row index to columns."""
    df.columns = [
        ''.join(str(part) if pos == 1 else part for pos, part in enumerate(col))
        for col in df.columns
    ]
    return df.reset_index()
# Load the raw OECD TL3 mortality table.
df = pd.read_csv('REGION_DEMOGR_death_tl3.csv')

# First remove geos with names that we don't have mappings to dcid for.
# FIX: json.loads(open(...).read()) leaked the file handle; json.load inside
# a with-block closes it deterministically.
with open('../name2dcid.json') as name_map_file:
    name2dcid = dict(json.load(name_map_file))
df = df[df['Region'].isin(name2dcid.keys())]

# Second, replace the names with dcids.
df.replace({'Region': name2dcid}, inplace=True)

# Pre-quote the year so that writing with csv.QUOTE_NONE below still emits a
# quoted (string-typed) date column.
df['Year'] = '"' + df['Year'].astype(str) + '"'

# Pivot to one row per (REG_ID, Region, Year) with one (VAR, SEX) column per
# measure, then flatten the resulting column MultiIndex.
temp = df[['REG_ID', 'Region', 'VAR', 'SEX', 'Year', 'Value']]
temp_multi_index = temp.pivot_table(values='Value',
                                    index=['REG_ID', 'Region', 'Year'],
                                    columns=['VAR', 'SEX'])
df_cleaned = multi_index_to_single_index(temp_multi_index)

# Map flattened OECD variable codes (e.g. D_Y0_4T = deaths, ages 0-4, total)
# to Data Commons StatisticalVariable ids.
VAR_to_statsvars = {
    'D_TT': 'Count_MortalityEvent',
    'D_Y0_4T': 'Count_MortalityEvent_Upto4Years',
    'D_Y5_9T': 'Count_MortalityEvent_5To9Years',
    'D_Y10_14T': 'Count_MortalityEvent_10To14Years',
    'D_Y15_19T': 'Count_MortalityEvent_15To19Years',
    'D_Y20_24T': 'Count_MortalityEvent_20To24Years',
    'D_Y25_29T': 'Count_MortalityEvent_25To29Years',
    'D_Y30_34T': 'Count_MortalityEvent_30To34Years',
    'D_Y35_39T': 'Count_MortalityEvent_35To39Years',
    'D_Y40_44T': 'Count_MortalityEvent_40To44Years',
    'D_Y45_49T': 'Count_MortalityEvent_45To49Years',
    'D_Y50_54T': 'Count_MortalityEvent_50To54Years',
    'D_Y55_59T': 'Count_MortalityEvent_55To59Years',
    'D_Y60_64T': 'Count_MortalityEvent_60To64Years',
    'D_Y65_69T': 'Count_MortalityEvent_65To69Years',
    'D_Y70_74T': 'Count_MortalityEvent_70To74Years',
    'D_Y75_79T': 'Count_MortalityEvent_75To79Years',
    'D_Y80_MAXT': 'Count_MortalityEvent_80OrMoreYears',
    'D_Y0_14T': 'Count_MortalityEvent_Upto14Years',
    'D_Y15_64T': 'Count_MortalityEvent_15To64Years',
    'D_Y65_MAXT': 'Count_MortalityEvent_65OrMoreYears',
    'D_TM': 'Count_MortalityEvent_Male',
    'D_Y0_4M': 'Count_MortalityEvent_Upto4Years_Male',
    'D_Y5_9M': 'Count_MortalityEvent_5To9Years_Male',
    'D_Y10_14M': 'Count_MortalityEvent_10To14Years_Male',
    'D_Y15_19M': 'Count_MortalityEvent_15To19Years_Male',
    'D_Y20_24M': 'Count_MortalityEvent_20To24Years_Male',
    'D_Y25_29M': 'Count_MortalityEvent_25To29Years_Male',
    'D_Y30_34M': 'Count_MortalityEvent_30To34Years_Male',
    'D_Y35_39M': 'Count_MortalityEvent_35To39Years_Male',
    'D_Y40_44M': 'Count_MortalityEvent_40To44Years_Male',
    'D_Y45_49M': 'Count_MortalityEvent_45To49Years_Male',
    'D_Y50_54M': 'Count_MortalityEvent_50To54Years_Male',
    'D_Y55_59M': 'Count_MortalityEvent_55To59Years_Male',
    'D_Y60_64M': 'Count_MortalityEvent_60To64Years_Male',
    'D_Y65_69M': 'Count_MortalityEvent_65To69Years_Male',
    'D_Y70_74M': 'Count_MortalityEvent_70To74Years_Male',
    'D_Y75_79M': 'Count_MortalityEvent_75To79Years_Male',
    'D_Y80_MAXM': 'Count_MortalityEvent_80OrMoreYears_Male',
    'D_Y0_14M': 'Count_MortalityEvent_Upto14Years_Male',
    'D_Y15_64M': 'Count_MortalityEvent_15To64Years_Male',
    'D_Y65_MAXM': 'Count_MortalityEvent_65OrMoreYears_Male',
    'D_TF': 'Count_MortalityEvent_Female',
    'D_Y0_4F': 'Count_MortalityEvent_Upto4Years_Female',
    'D_Y5_9F': 'Count_MortalityEvent_5To9Years_Female',
    'D_Y10_14F': 'Count_MortalityEvent_10To14Years_Female',
    'D_Y15_19F': 'Count_MortalityEvent_15To19Years_Female',
    'D_Y20_24F': 'Count_MortalityEvent_20To24Years_Female',
    'D_Y25_29F': 'Count_MortalityEvent_25To29Years_Female',
    'D_Y30_34F': 'Count_MortalityEvent_30To34Years_Female',
    'D_Y35_39F': 'Count_MortalityEvent_35To39Years_Female',
    'D_Y40_44F': 'Count_MortalityEvent_40To44Years_Female',
    'D_Y45_49F': 'Count_MortalityEvent_45To49Years_Female',
    'D_Y50_54F': 'Count_MortalityEvent_50To54Years_Female',
    'D_Y55_59F': 'Count_MortalityEvent_55To59Years_Female',
    'D_Y60_64F': 'Count_MortalityEvent_60To64Years_Female',
    'D_Y65_69F': 'Count_MortalityEvent_65To69Years_Female',
    'D_Y70_74F': 'Count_MortalityEvent_70To74Years_Female',
    'D_Y75_79F': 'Count_MortalityEvent_75To79Years_Female',
    'D_Y80_MAXF': 'Count_MortalityEvent_80OrMoreYears_Female',
    'D_Y0_14F': 'Count_MortalityEvent_Upto14Years_Female',
    'D_Y15_64F': 'Count_MortalityEvent_15To64Years_Female',
    'D_Y65_MAXF': 'Count_MortalityEvent_65OrMoreYears_Female',
}

df_cleaned.rename(columns=VAR_to_statsvars, inplace=True)
df_cleaned.to_csv('OECD_deaths_cleaned.csv',
                  index=False,
                  quoting=csv.QUOTE_NONE)

# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:OECD_deaths_cleaned->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:OECDRegionalStatistics
observationAbout: C:OECD_deaths_cleaned->Region
observationDate: C:OECD_deaths_cleaned->Year
observationPeriod: "P1Y"
value: C:OECD_deaths_cleaned->{stat_var}
"""

# Skip the three index columns (REG_ID, Region, Year); the rest are stat vars.
stat_vars = df_cleaned.columns[3:]
with open('OECD_deaths.tmcf', 'w', newline='') as f_out:
    # FIX: idiomatic enumerate instead of range(len(...)).
    for index, stat_var in enumerate(stat_vars, start=1):
        f_out.write(
            TEMPLATE_MCF_TEMPLATE.format_map({
                'index': index,
                'stat_var': stat_var
            }))
import csv
import json
import pandas as pd
def multi_index_to_single_index(df):
    """Collapse a column MultiIndex into flat string labels (second level
    stringified) and move the row index back into ordinary columns."""
    def _flatten(col_tuple):
        head, second, *rest = col_tuple
        return ''.join([head, str(second), *rest])
    df.columns = [_flatten(col) for col in df.columns]
    return df.reset_index()
# Load the raw OECD TL3 mortality table.
df = pd.read_csv('REGION_DEMOGR_death_tl3.csv')

# First remove geos with names that we don't have mappings to dcid for.
# FIX: json.loads(open(...).read()) leaked the file handle; json.load inside
# a with-block closes it deterministically.
with open('../name2dcid.json') as name_map_file:
    name2dcid = dict(json.load(name_map_file))
df = df[df['Region'].isin(name2dcid.keys())]

# Second, replace the names with dcids.
df.replace({'Region': name2dcid}, inplace=True)

# Pre-quote the year so that writing with csv.QUOTE_NONE below still emits a
# quoted (string-typed) date column.
df['Year'] = '"' + df['Year'].astype(str) + '"'

# Pivot to one row per (REG_ID, Region, Year) with one (VAR, SEX) column per
# measure, then flatten the resulting column MultiIndex.
temp = df[['REG_ID', 'Region', 'VAR', 'SEX', 'Year', 'Value']]
temp_multi_index = temp.pivot_table(values='Value',
                                    index=['REG_ID', 'Region', 'Year'],
                                    columns=['VAR', 'SEX'])
df_cleaned = multi_index_to_single_index(temp_multi_index)

# Map flattened OECD variable codes (e.g. D_Y0_4T = deaths, ages 0-4, total)
# to Data Commons StatisticalVariable ids.
VAR_to_statsvars = {
    'D_TT': 'Count_MortalityEvent',
    'D_Y0_4T': 'Count_MortalityEvent_Upto4Years',
    'D_Y5_9T': 'Count_MortalityEvent_5To9Years',
    'D_Y10_14T': 'Count_MortalityEvent_10To14Years',
    'D_Y15_19T': 'Count_MortalityEvent_15To19Years',
    'D_Y20_24T': 'Count_MortalityEvent_20To24Years',
    'D_Y25_29T': 'Count_MortalityEvent_25To29Years',
    'D_Y30_34T': 'Count_MortalityEvent_30To34Years',
    'D_Y35_39T': 'Count_MortalityEvent_35To39Years',
    'D_Y40_44T': 'Count_MortalityEvent_40To44Years',
    'D_Y45_49T': 'Count_MortalityEvent_45To49Years',
    'D_Y50_54T': 'Count_MortalityEvent_50To54Years',
    'D_Y55_59T': 'Count_MortalityEvent_55To59Years',
    'D_Y60_64T': 'Count_MortalityEvent_60To64Years',
    'D_Y65_69T': 'Count_MortalityEvent_65To69Years',
    'D_Y70_74T': 'Count_MortalityEvent_70To74Years',
    'D_Y75_79T': 'Count_MortalityEvent_75To79Years',
    'D_Y80_MAXT': 'Count_MortalityEvent_80OrMoreYears',
    'D_Y0_14T': 'Count_MortalityEvent_Upto14Years',
    'D_Y15_64T': 'Count_MortalityEvent_15To64Years',
    'D_Y65_MAXT': 'Count_MortalityEvent_65OrMoreYears',
    'D_TM': 'Count_MortalityEvent_Male',
    'D_Y0_4M': 'Count_MortalityEvent_Upto4Years_Male',
    'D_Y5_9M': 'Count_MortalityEvent_5To9Years_Male',
    'D_Y10_14M': 'Count_MortalityEvent_10To14Years_Male',
    'D_Y15_19M': 'Count_MortalityEvent_15To19Years_Male',
    'D_Y20_24M': 'Count_MortalityEvent_20To24Years_Male',
    'D_Y25_29M': 'Count_MortalityEvent_25To29Years_Male',
    'D_Y30_34M': 'Count_MortalityEvent_30To34Years_Male',
    'D_Y35_39M': 'Count_MortalityEvent_35To39Years_Male',
    'D_Y40_44M': 'Count_MortalityEvent_40To44Years_Male',
    'D_Y45_49M': 'Count_MortalityEvent_45To49Years_Male',
    'D_Y50_54M': 'Count_MortalityEvent_50To54Years_Male',
    'D_Y55_59M': 'Count_MortalityEvent_55To59Years_Male',
    'D_Y60_64M': 'Count_MortalityEvent_60To64Years_Male',
    'D_Y65_69M': 'Count_MortalityEvent_65To69Years_Male',
    'D_Y70_74M': 'Count_MortalityEvent_70To74Years_Male',
    'D_Y75_79M': 'Count_MortalityEvent_75To79Years_Male',
    'D_Y80_MAXM': 'Count_MortalityEvent_80OrMoreYears_Male',
    'D_Y0_14M': 'Count_MortalityEvent_Upto14Years_Male',
    'D_Y15_64M': 'Count_MortalityEvent_15To64Years_Male',
    'D_Y65_MAXM': 'Count_MortalityEvent_65OrMoreYears_Male',
    'D_TF': 'Count_MortalityEvent_Female',
    'D_Y0_4F': 'Count_MortalityEvent_Upto4Years_Female',
    'D_Y5_9F': 'Count_MortalityEvent_5To9Years_Female',
    'D_Y10_14F': 'Count_MortalityEvent_10To14Years_Female',
    'D_Y15_19F': 'Count_MortalityEvent_15To19Years_Female',
    'D_Y20_24F': 'Count_MortalityEvent_20To24Years_Female',
    'D_Y25_29F': 'Count_MortalityEvent_25To29Years_Female',
    'D_Y30_34F': 'Count_MortalityEvent_30To34Years_Female',
    'D_Y35_39F': 'Count_MortalityEvent_35To39Years_Female',
    'D_Y40_44F': 'Count_MortalityEvent_40To44Years_Female',
    'D_Y45_49F': 'Count_MortalityEvent_45To49Years_Female',
    'D_Y50_54F': 'Count_MortalityEvent_50To54Years_Female',
    'D_Y55_59F': 'Count_MortalityEvent_55To59Years_Female',
    'D_Y60_64F': 'Count_MortalityEvent_60To64Years_Female',
    'D_Y65_69F': 'Count_MortalityEvent_65To69Years_Female',
    'D_Y70_74F': 'Count_MortalityEvent_70To74Years_Female',
    'D_Y75_79F': 'Count_MortalityEvent_75To79Years_Female',
    'D_Y80_MAXF': 'Count_MortalityEvent_80OrMoreYears_Female',
    'D_Y0_14F': 'Count_MortalityEvent_Upto14Years_Female',
    'D_Y15_64F': 'Count_MortalityEvent_15To64Years_Female',
    'D_Y65_MAXF': 'Count_MortalityEvent_65OrMoreYears_Female',
}

df_cleaned.rename(columns=VAR_to_statsvars, inplace=True)
df_cleaned.to_csv('OECD_deaths_cleaned.csv',
                  index=False,
                  quoting=csv.QUOTE_NONE)

# Automate Template MCF generation since there are many Statistical Variables.
TEMPLATE_MCF_TEMPLATE = """
Node: E:OECD_deaths_cleaned->E{index}
typeOf: dcs:StatVarObservation
variableMeasured: dcs:{stat_var}
measurementMethod: dcs:OECDRegionalStatistics
observationAbout: C:OECD_deaths_cleaned->Region
observationDate: C:OECD_deaths_cleaned->Year
observationPeriod: "P1Y"
value: C:OECD_deaths_cleaned->{stat_var}
"""

# Skip the three index columns (REG_ID, Region, Year); the rest are stat vars.
stat_vars = df_cleaned.columns[3:]
with open('OECD_deaths.tmcf', 'w', newline='') as f_out:
    # FIX: idiomatic enumerate instead of range(len(...)).
    for index, stat_var in enumerate(stat_vars, start=1):
        f_out.write(
            TEMPLATE_MCF_TEMPLATE.format_map({
                'index': index,
                'stat_var': stat_var
            }))
import yaml
import os, subprocess, pathlib, time, string, random, requests, signal, click
from webbrowser import open_new
from auth.auth import renewAccessToken, getScope, app
from data import api, view, chart
from subprocess import Popen, PIPE
@click.command(hidden=True)
@click.option('--login', '-l', is_flag=True, help="Login into Coinbase account")
@click.option('--switch', '-s', is_flag=True, help="Switch wallets")
@click.option('--wallet', '-w', is_flag=True, help="For viewing wallet information")
@click.option('--refresh', '-r', is_flag=True, help="Force refresh access token")
@click.option('--graph', '-g', is_flag=True, help="For displaying the crypto. asset's price graph")
@click.option('--version', '-v', is_flag=True, help="version number")
def start(login, switch, wallet, graph, refresh, version):
    """CLI entry point: dispatch exactly one flag.

    Every action except --version first runs init() to ensure a valid
    Coinbase OAuth session before doing any work.
    """
    if version:
        output("v1.0.7", "bright_white")
    elif login:
        init()
    elif switch:
        init()
        switchWallet()
    elif wallet:
        init()
        userWallet()
    elif graph:
        init()
        coinGraph()
    elif refresh:
        init()
        tokenRefresh()
    else:
        # No flag given: show the ASCII banner and a usage hint.
        welcomeText()
        click.echo()
        # FIX: corrected the "commmands" typo in the user-facing hint.
        click.echo("Enter morax --help to get a list of commands")
def init():
    """Ensure a usable session: log in when state/timestamp is missing or the
    token has expired (2h window), otherwise silently refresh the token."""
    config = loadConfig()
    # FIX: compare against None with `is` (PEP 8) and flatten the nested
    # if/else into guard clauses.
    if config.get('LOGIN_STATE') is None or config.get('TIME') is None:
        login()
        return
    if time.time() - float(config.get('TIME')) > 7200:
        output("⚠️ Access token expired", "yellow")
        output("Redirecting you to login page to renew it", "yellow")
        time.sleep(2)
        login()
        return
    refreshToken()
def userWallet():
    """Show wallet information for a coin the user selects; requires a login."""
    if not verifyLogin():
        output("Please login first 🥺", "bright_white")
        return
    view.selectCoin(api.getCoin())
def coinGraph():
    """Render the price chart for a coin the user selects; requires a login."""
    if verifyLogin():
        # FIX: the original called api.getCoin() twice and discarded the
        # first result in an unused local; fetch once and reuse.
        coin = api.getCoin()
        chart.getChartData(coin)
    else:
        output("Please login first 🥺", "bright_white")
def tokenRefresh():
    """Force-renew the access token, falling back to a fresh login on failure."""
    if not verifyLogin():
        output("Please login first 🥺", "bright_white")
        return
    try:
        refreshToken()
    except Exception:
        # FIX: corrected grammar of the failure message ("renewed" -> "renew");
        # the unused `as err` binding is dropped.
        output("Failed to renew access token, please login again", 'red')
        login()
    else:
        # Success message moved out of the try-body so an output() hiccup is
        # not mistaken for a token failure.
        output("Successfully renewed access token 👏", 'green')
def switchWallet():
    """Re-run the OAuth flow so the user can pick a different wallet."""
    if not verifyLogin():
        output("Please login first 🥺", "bright_white")
        return
    output("I'll need you to authorize me to switch wallets 😁", 'yellow')
    time.sleep(1)
    login()
def verifyLogin():
    """Return True when both OAuth tokens are present in the config file."""
    config = loadConfig()
    # FIX: return the boolean expression directly instead of an if/else that
    # returns True/False, and use `is not None` per PEP 8.
    return (config.get("ACCESS_TOKEN") is not None
            and config.get("REFRESH_TOKEN") is not None)
def output(inp, color):
    """Echo *inp* bold in *color*, padded with a blank line above and below."""
    click.echo()
    styled = click.style(inp, fg=color, bold=True)
    click.echo(styled)
    click.echo()
def login():
    """Run the Coinbase OAuth flow: open the consent page in a browser and
    block on a local Flask server (port 6660) waiting for the callback."""
    config = loadConfig()
    #Kill any process running at PORT 6660
    removeProcess()
    output("In order to continue, you must login to your Coinbase account 💳", 'bright_white')
    output("I'm taking you to the login page right now", 'bright_white')
    time.sleep(2)
    # Build the authorization URL from the configured client id, redirect
    # URL and requested scopes.
    # NOTE(review): the constant `code=302` query parameter looks odd —
    # confirm whether Coinbase actually requires it.
    AUTH_URI = ('https://www.coinbase.com/oauth/'
                + 'authorize?response_type=code&client_id=' + config.get('CLIENTID') + '&redirect_uri='
                + config.get('REDIRECT_URL') + '&scope=' + getScope() + '&code=' + '302')
    open_new(AUTH_URI)
    #start the flask server for OAuth
    # Blocks here until the callback server shuts down.
    app.run(port=6660)
def refreshToken():
    """Renew the access token if the current one is still inside its 2h window."""
    config = loadConfig()
    # FIX: guard against a missing TIME entry — float(None) raised TypeError.
    last = config.get('TIME')
    if last is not None and time.time() - float(last) <= 7200:
        renewAccessToken()
def removeProcess():
    """Kill whatever currently listens on port 6660 (relies on `lsof`, so
    macOS/Linux only)."""
    port = 6660
    # FIX: the loop variable used to shadow the Popen handle (`process`);
    # also dropped the redundant str() around .decode().
    proc = Popen(["lsof", "-i", ":{0}".format(port)], stdout=PIPE, stderr=PIPE)
    stdout, _ = proc.communicate()
    # First row is lsof's header; each remaining row's second column is a PID.
    for line in stdout.decode("utf-8").split("\n")[1:]:
        fields = [x for x in line.split(" ") if x != '']
        if len(fields) <= 1:
            continue
        os.kill(int(fields[1]), signal.SIGKILL)
def welcomeText():
    """Print the ASCII-art banner shipped in data/assets/ascii.txt."""
    # FIX: resolve the asset path directly instead of chdir-ing back and
    # forth (which mutated global process state), and close the file — the
    # original opened it without ever closing it.
    asset = pathlib.Path(__file__).parent / "data" / "assets" / "ascii.txt"
    with open(asset, "r") as banner:
        for line in banner:
            click.secho(line.rstrip("\n"), fg="bright_white")
def loadConfig():
    """Parse and return config.yaml located next to this module."""
    # FIX: open the file via its absolute path instead of temporarily
    # chdir-ing, which mutated the process-wide working directory.
    config_path = pathlib.Path(__file__).parent / "config.yaml"
    with open(config_path, "r") as f:
        return yaml.safe_load(f)
def genState():
    """Return a random 16-character alphanumeric OAuth `state` token."""
    # FIX: OAuth state must be unpredictable — use the cryptographically
    # secure `secrets` module instead of `random`. Local import keeps the
    # module's top-level imports untouched.
    import secrets
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(16))
# Script entry point: dispatch to the click command.
if __name__ == "__main__":
    start()
import os, subprocess, pathlib, time, string, random, requests, signal, click
from webbrowser import open_new
from auth.auth import renewAccessToken, getScope, app
from data import api, view, chart
from subprocess import Popen, PIPE
@click.command(hidden=True)
@click.option('--login', '-l', is_flag=True, help="Login into Coinbase account")
@click.option('--switch', '-s', is_flag=True, help="Switch wallets")
@click.option('--wallet', '-w', is_flag=True, help="For viewing wallet information")
@click.option('--refresh', '-r', is_flag=True, help="Force refresh access token")
@click.option('--graph', '-g', is_flag=True, help="For displaying the crypto. asset's price graph")
@click.option('--version', '-v', is_flag=True, help="version number")
def start(login, switch, wallet, graph, refresh, version):
    """CLI entry point: dispatch exactly one flag.

    Every action except --version first runs init() to ensure a valid
    Coinbase OAuth session before doing any work.
    """
    if version:
        output("v1.0.7", "bright_white")
    elif login:
        init()
    elif switch:
        init()
        switchWallet()
    elif wallet:
        init()
        userWallet()
    elif graph:
        init()
        coinGraph()
    elif refresh:
        init()
        tokenRefresh()
    else:
        # No flag given: show the ASCII banner and a usage hint.
        welcomeText()
        click.echo()
        # FIX: corrected the "commmands" typo in the user-facing hint.
        click.echo("Enter morax --help to get a list of commands")
def init():
    """Ensure a usable session: log in when state/timestamp is missing or the
    token has expired (2h window), otherwise silently refresh the token."""
    config = loadConfig()
    # FIX: compare against None with `is` (PEP 8) and flatten the nested
    # if/else into guard clauses.
    if config.get('LOGIN_STATE') is None or config.get('TIME') is None:
        login()
        return
    if time.time() - float(config.get('TIME')) > 7200:
        output("⚠️ Access token expired", "yellow")
        output("Redirecting you to login page to renew it", "yellow")
        time.sleep(2)
        login()
        return
    refreshToken()
def userWallet():
    """Show wallet details for a coin the user picks (requires login)."""
    if not verifyLogin():
        output("Please login first 🥺", "bright_white")
        return
    view.selectCoin(api.getCoin())
def coinGraph():
    """Render the price chart for a coin the user picks (requires login)."""
    if verifyLogin():
        # Fetch the coin selection once and reuse it; the original called
        # api.getCoin() twice, discarding the first (unused) result.
        coin = api.getCoin()
        chart.getChartData(coin)
    else:
        output("Please login first 🥺", "bright_white")
def tokenRefresh():
    """Force-renew the access token, falling back to a full login on failure."""
    if verifyLogin():
        try:
            refreshToken()
            output("Successfully renewed access token 👏", 'green')
        except Exception:
            # Any failure (network, revoked refresh token, ...) is recovered
            # by sending the user through the OAuth flow again.
            # Grammar fix in user-facing message: "renewed" -> "renew".
            output("Failed to renew access token, please login again", 'red')
            login()
    else:
        output("Please login first 🥺", "bright_white")
def switchWallet():
    """Re-run the OAuth flow so the user can authorize a different wallet."""
    if not verifyLogin():
        output("Please login first 🥺", "bright_white")
        return
    output("I'll need you to authorize me to switch wallets 😁", 'yellow')
    time.sleep(1)
    login()
def verifyLogin():
    """Return True when both OAuth tokens are present in the config."""
    config = loadConfig()
    # Return the boolean expression directly instead of the original
    # if/else -> True/False boilerplate.
    return (config.get("ACCESS_TOKEN") is not None
            and config.get("REFRESH_TOKEN") is not None)
def output(inp, color):
    """Echo *inp* in bold *color*, padded with a blank line on each side."""
    click.echo()
    styled = click.style(inp, fg=color, bold=True)
    click.echo(styled)
    click.echo()
def login():
    """Run the Coinbase OAuth login flow in the user's browser."""
    config = loadConfig()
    # Free the callback port before the local Flask server needs it.
    removeProcess()
    output("In order to continue, you must login to your Coinbase account 💳", 'bright_white')
    output("I'm taking you to the login page right now", 'bright_white')
    time.sleep(2)
    auth_uri = (
        'https://www.coinbase.com/oauth/'
        + 'authorize?response_type=code&client_id=' + config.get('CLIENTID')
        + '&redirect_uri=' + config.get('REDIRECT_URL')
        + '&scope=' + getScope() + '&code=' + '302'
    )
    open_new(auth_uri)
    # Blocks until the local OAuth callback server shuts down.
    app.run(port=6660)
def refreshToken():
    """Exchange the refresh token for a new access token if not yet expired."""
    config = loadConfig()
    # Renewal only works while the current token is younger than two hours.
    token_age = time.time() - float(config.get('TIME'))
    if token_age <= 7200:
        renewAccessToken()
def removeProcess():
    """Kill any process currently bound to the OAuth callback port (6660)."""
    port = 6660
    # lsof output: one header line, then one line per open descriptor;
    # whitespace-split field 1 is the PID.
    lsof = Popen(["lsof", "-i", ":{0}".format(port)], stdout=PIPE, stderr=PIPE)
    stdout, _ = lsof.communicate()
    for line in stdout.decode("utf-8").split("\n")[1:]:
        fields = [x for x in line.split(" ") if x != '']
        if len(fields) <= 1:
            continue
        try:
            os.kill(int(fields[1]), signal.SIGKILL)
        except (ValueError, ProcessLookupError):
            # Malformed line, or the process exited between lsof and kill.
            continue
def welcomeText():
    """Print the ASCII-art banner shipped in data/assets/ascii.txt."""
    # Resolve the banner relative to this module instead of chdir-ing into
    # the assets directory and back; the original also leaked the file
    # handle by never closing it.
    assets = pathlib.Path(__file__).parent.absolute() / "data" / "assets"
    with open(assets / "ascii.txt", 'r') as banner:
        for line in banner:
            click.secho(line.rstrip("\n"), fg="bright_white")
def loadConfig():
    """Load and return the package-level ``config.yaml`` as a dict.

    The file lives next to this module, so it is resolved relative to
    ``__file__`` instead of the caller-dependent working directory.
    """
    # Resolving the path directly replaces the original chdir/chdir-back
    # dance, which was not exception-safe and mutated process-wide state.
    config_path = pathlib.Path(__file__).parent.absolute() / "config.yaml"
    with open(config_path, "r") as f:
        return yaml.safe_load(f)
def genState():
    """Return a 16-character alphanumeric OAuth ``state`` token.

    Uses ``random.SystemRandom`` (OS CSPRNG) instead of the predictable
    default Mersenne-Twister generator, since this value serves as a
    CSRF-protection secret in the OAuth flow.
    """
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(16))
if __name__ == "__main__":
start() | 0.10263 | 0.071689 |
from tkinter import *
w = Tk()
w.geometry("500x500")
w.title("Calculatorax")
w.configure(bg="#03befc")


# Functions (keypad)
def _append_digit(button):
    """Append *button*'s digit to the end of the entry box's current text."""
    current = txt1.get()
    txt1.delete(0, END)
    txt1.insert(0, current + button["text"])


# One named handler per key is kept because the Button definitions below
# reference these names via ``command=``; the bodies are deduplicated
# through _append_digit (the original repeated the same four lines x10).
def calc1():
    _append_digit(btn1)


def calc2():
    _append_digit(btn2)


def calc3():
    _append_digit(btn3)


def calc4():
    _append_digit(btn4)


def calc5():
    _append_digit(btn5)


def calc6():
    _append_digit(btn6)


def calc7():
    _append_digit(btn7)


def calc8():
    _append_digit(btn8)


def calc9():
    _append_digit(btn9)


def calc0():
    _append_digit(btn0)
# Functions (operators)
# Pending-operation code: 0 = none, 1 = add, 2 = subtract, 3 = multiply,
# 4 = divide.  The first operand is stashed on the operator function itself
# (e.g. ``add.b``) until ``equals`` combines it with the second operand.
x = 0


def add():
    """Remember the first operand and arm addition."""
    global x
    # NOTE(review): eval() on user-typed text is dangerous in general; kept
    # because the calculator relies on it to parse numeric input.
    add.b = eval(txt1.get())
    txt1.delete(0, END)
    # Bug fix: was ``x = x + 1`` — pressing two operators in a row
    # accumulated the codes and dispatched the wrong operation in equals().
    x = 1


def subtract():
    """Remember the first operand and arm subtraction."""
    global x
    subtract.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 2


def get():
    # Unused helper kept for interface compatibility.
    b = txt1.get()


def equals():
    """Apply the pending operation to the stored and current operands."""
    global x
    if x == 1:
        result = eval(txt1.get()) + add.b
        cls()
        txt1.insert(0, result)
    elif x == 2:
        result = subtract.b - eval(txt1.get())
        cls()
        txt1.insert(0, result)
    elif x == 3:
        result = multiply.b * eval(txt1.get())
        cls()
        txt1.insert(0, result)
    elif x == 4:
        result = divide.b / eval(txt1.get())
        cls()
        txt1.insert(0, result)


def cls():
    """Clear the entry box and reset the pending operation."""
    global x
    x = 0
    txt1.delete(0, END)


def multiply():
    """Remember the first operand and arm multiplication."""
    global x
    multiply.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 3


def divide():
    """Remember the first operand and arm division."""
    global x
    divide.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 4
# --- Widgets --------------------------------------------------------------
# Title banner.
lbl1 = Label(w, text="Calculatorax", font=("Times New Roman", 35), fg="#232226", bg="#fc9d03")

# Single entry box shared by all handlers.
txt1 = Entry(w, width=80, font=30)

# Digit keys.
btn1 = Button(w, text="1", font=("Unispace", 25), command=calc1, bg="#c3c6d9")
btn2 = Button(w, text="2", font=("Unispace", 25), command=calc2, bg="#c3c6d9")
btn3 = Button(w, text="3", font=("Unispace", 25), command=calc3, bg="#c3c6d9")
btn4 = Button(w, text="4", font=("Unispace", 25), command=calc4, bg="#c3c6d9")
btn5 = Button(w, text="5", font=("Unispace", 25), command=calc5, bg="#c3c6d9")
btn6 = Button(w, text="6", font=("Unispace", 25), command=calc6, bg="#c3c6d9")
btn7 = Button(w, text="7", font=("Unispace", 25), command=calc7, bg="#c3c6d9")
btn8 = Button(w, text="8", font=("Unispace", 25), command=calc8, bg="#c3c6d9")
btn9 = Button(w, text="9", font=("Unispace", 25), command=calc9, bg="#c3c6d9")
btn0 = Button(w, text="0", font=("Unispace", 25), command=calc0, bg="#c3c6d9")

# Operator / action keys.
btn_addition = Button(w, text="+", font=("Unispace", 26), command=add, bg="#3954ed")
btn_equals = Button(w, text="Calculate", font=("Unispace", 24), command=equals, bg="#e876e6")
btn_clear = Button(w, text="Clear", font=("Unispace", 24), command=cls, bg="#e876e6")
btn_subtract = Button(w, text="-", font=("Unispace", 26), command=subtract, bg="#3954ed")
btn_multiplication = Button(w, text="x", font=("Unispace", 26), command=multiply, bg="#3954ed")
btn_division = Button(w, text="÷", font=("Unispace", 26), command=divide, bg="#3954ed")

# --- Layout (absolute placement) ------------------------------------------
lbl1.place(x=120, y=0)
txt1.place(x=7, y=50, height=35)
btn1.place(x=50, y=100)
btn2.place(x=120, y=100)
btn3.place(x=190, y=100)
btn4.place(x=50, y=200)
btn5.place(x=120, y=200)
btn6.place(x=190, y=200)
btn7.place(x=50, y=300)
btn8.place(x=120, y=300)
btn9.place(x=190, y=300)
btn0.place(x=120, y=400)
btn_addition.place(x=290, y=100)
btn_equals.place(x=260, y=420)
btn_clear.place(x=290, y=350)
btn_subtract.place(x=360, y=100)
btn_multiplication.place(x=290, y=200)
btn_division.place(x=360, y=200)
w.mainloop() | gui_calculator.py | from tkinter import *
w = Tk()
w.geometry("500x500")
w.title("Calculatorax")
w.configure(bg="#03befc")


# Functions (keypad)
def _append_digit(button):
    """Append *button*'s digit to the end of the entry box's current text."""
    current = txt1.get()
    txt1.delete(0, END)
    txt1.insert(0, current + button["text"])


# One named handler per key is kept because the Button definitions below
# reference these names via ``command=``; the bodies are deduplicated
# through _append_digit (the original repeated the same four lines x10).
def calc1():
    _append_digit(btn1)


def calc2():
    _append_digit(btn2)


def calc3():
    _append_digit(btn3)


def calc4():
    _append_digit(btn4)


def calc5():
    _append_digit(btn5)


def calc6():
    _append_digit(btn6)


def calc7():
    _append_digit(btn7)


def calc8():
    _append_digit(btn8)


def calc9():
    _append_digit(btn9)


def calc0():
    _append_digit(btn0)
# Functions (operators)
# Pending-operation code: 0 = none, 1 = add, 2 = subtract, 3 = multiply,
# 4 = divide.  The first operand is stashed on the operator function itself
# (e.g. ``add.b``) until ``equals`` combines it with the second operand.
x = 0


def add():
    """Remember the first operand and arm addition."""
    global x
    # NOTE(review): eval() on user-typed text is dangerous in general; kept
    # because the calculator relies on it to parse numeric input.
    add.b = eval(txt1.get())
    txt1.delete(0, END)
    # Bug fix: was ``x = x + 1`` — pressing two operators in a row
    # accumulated the codes and dispatched the wrong operation in equals().
    x = 1


def subtract():
    """Remember the first operand and arm subtraction."""
    global x
    subtract.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 2


def get():
    # Unused helper kept for interface compatibility.
    b = txt1.get()


def equals():
    """Apply the pending operation to the stored and current operands."""
    global x
    if x == 1:
        result = eval(txt1.get()) + add.b
        cls()
        txt1.insert(0, result)
    elif x == 2:
        result = subtract.b - eval(txt1.get())
        cls()
        txt1.insert(0, result)
    elif x == 3:
        result = multiply.b * eval(txt1.get())
        cls()
        txt1.insert(0, result)
    elif x == 4:
        result = divide.b / eval(txt1.get())
        cls()
        txt1.insert(0, result)


def cls():
    """Clear the entry box and reset the pending operation."""
    global x
    x = 0
    txt1.delete(0, END)


def multiply():
    """Remember the first operand and arm multiplication."""
    global x
    multiply.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 3


def divide():
    """Remember the first operand and arm division."""
    global x
    divide.b = eval(txt1.get())
    txt1.delete(0, END)
    x = 4
# --- Widgets --------------------------------------------------------------
# Title banner.
lbl1 = Label(w, text="Calculatorax", font=("Times New Roman", 35), fg="#232226", bg="#fc9d03")

# Single entry box shared by all handlers.
txt1 = Entry(w, width=80, font=30)

# Digit keys.
btn1 = Button(w, text="1", font=("Unispace", 25), command=calc1, bg="#c3c6d9")
btn2 = Button(w, text="2", font=("Unispace", 25), command=calc2, bg="#c3c6d9")
btn3 = Button(w, text="3", font=("Unispace", 25), command=calc3, bg="#c3c6d9")
btn4 = Button(w, text="4", font=("Unispace", 25), command=calc4, bg="#c3c6d9")
btn5 = Button(w, text="5", font=("Unispace", 25), command=calc5, bg="#c3c6d9")
btn6 = Button(w, text="6", font=("Unispace", 25), command=calc6, bg="#c3c6d9")
btn7 = Button(w, text="7", font=("Unispace", 25), command=calc7, bg="#c3c6d9")
btn8 = Button(w, text="8", font=("Unispace", 25), command=calc8, bg="#c3c6d9")
btn9 = Button(w, text="9", font=("Unispace", 25), command=calc9, bg="#c3c6d9")
btn0 = Button(w, text="0", font=("Unispace", 25), command=calc0, bg="#c3c6d9")

# Operator / action keys.
btn_addition = Button(w, text="+", font=("Unispace", 26), command=add, bg="#3954ed")
btn_equals = Button(w, text="Calculate", font=("Unispace", 24), command=equals, bg="#e876e6")
btn_clear = Button(w, text="Clear", font=("Unispace", 24), command=cls, bg="#e876e6")
btn_subtract = Button(w, text="-", font=("Unispace", 26), command=subtract, bg="#3954ed")
btn_multiplication = Button(w, text="x", font=("Unispace", 26), command=multiply, bg="#3954ed")
btn_division = Button(w, text="÷", font=("Unispace", 26), command=divide, bg="#3954ed")

# --- Layout (absolute placement) ------------------------------------------
lbl1.place(x=120, y=0)
txt1.place(x=7, y=50, height=35)
btn1.place(x=50, y=100)
btn2.place(x=120, y=100)
btn3.place(x=190, y=100)
btn4.place(x=50, y=200)
btn5.place(x=120, y=200)
btn6.place(x=190, y=200)
btn7.place(x=50, y=300)
btn8.place(x=120, y=300)
btn9.place(x=190, y=300)
btn0.place(x=120, y=400)
btn_addition.place(x=290, y=100)
btn_equals.place(x=260, y=420)
btn_clear.place(x=290, y=350)
btn_subtract.place(x=360, y=100)
btn_multiplication.place(x=290, y=200)
btn_division.place(x=360, y=200)
w.mainloop() | 0.237753 | 0.111072 |
import unittest
import os
import re
from time import sleep, time
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import apiritif
class TestRequests(unittest.TestCase):
    """Generated Taurus/apiritif scenario driven through a remote Appium browser."""

    def setUp(self):
        self.vars = {}
        self.template = Template(self.vars)
        self.driver = webdriver.Remote(
            command_executor='http://localhost:4723/wd/hub',
            desired_capabilities={"browserName": "Chrome", "deviceName": "", "platformName": "Android"})
        self.driver.implicitly_wait(3.5)
        self.wnd_mng = WindowManager(self.driver)
        self.frm_mng = FrameManager(self.driver)

    def tearDown(self):
        self.driver.quit()

    def test_requests(self):
        self.driver.implicitly_wait(3.5)
        with apiritif.transaction_logged(self.template('/')):
            self.driver.get(self.template('http://blazedemo.com/'))
            WebDriverWait(self.driver, 3.5).until(
                econd.presence_of_element_located((By.XPATH, self.template("//input[@type='submit']"))),
                'Element "//input[@type=\'submit\']" failed to appear within 3.5s')
            self.assertEqual(self.driver.title, self.template('BlazeDemo'))
            page_body = self.driver.page_source
            pattern = re.compile(r'contained_text')
            self.assertEqual(0, len(re.findall(pattern, page_body)),
                             "Assertion: 'contained_text' found in BODY")
        with apiritif.transaction_logged(self.template('empty')):
            pass
# Utility functions and classes for Taurus Selenium tests
from string import Template as StrTemplate
from selenium.common.exceptions import NoSuchWindowException, NoSuchFrameException
class Template:
    """Callable wrapper around string.Template substitution with stored vars."""

    def __init__(self, variables):
        self.variables = variables

    def apply(self, template):
        # '' + template mirrors the original b''.decode() + template trick:
        # it requires/coerces a text template before substitution.
        return StrTemplate('' + template).safe_substitute(self.variables)

    __call__ = apply

    @staticmethod
    def str_repr(text):
        # Strip the "u" prefix Python 2's repr() would add to unicode strings.
        rendered = repr(text)
        return rendered[1:] if rendered[0] == "u" else rendered
class FrameManager:
    """Wraps frame navigation on a WebDriver with friendlier error messages."""

    def __init__(self, driver):
        self.driver = driver

    def switch(self, frame_name=None):
        """Switch frames by name, "index=N", "relative=top" or "relative=parent".

        :raises NoSuchFrameException: with the offending id in the message.
        """
        try:
            if not frame_name or frame_name == "relative=top":
                # switch_to.default_content() replaces the deprecated
                # switch_to_default_content(), consistent with the other
                # switch_to.* calls below.
                self.driver.switch_to.default_content()
            elif frame_name.startswith("index="):  # frame by relative position
                self.driver.switch_to.frame(int(frame_name.split("=")[1]))
            elif frame_name == "relative=parent":  # parent of the current frame
                self.driver.switch_to.parent_frame()
            else:  # plain selenium frame name / locator
                self.driver.switch_to.frame(frame_name)
        except NoSuchFrameException:
            raise NoSuchFrameException("Invalid Frame ID: %s" % frame_name)
class WindowManager:
    """Window-switching helpers supporting indices and win_ser_* aliases."""

    def __init__(self, driver):
        self.driver = driver
        self.windows = {}  # win_ser_* alias -> pinned window handle

    def switch(self, window_name=None):
        """Switch to a window by handle index, win_ser_* alias, or name."""
        try:
            if not window_name:  # no name: most recently opened window
                self.driver.switch_to.window(self.driver.window_handles[-1])
            elif window_name.isdigit():  # numeric: positional handle index
                self._switch_by_idx(int(window_name))
            elif window_name.startswith("win_ser_"):  # sequential alias mode
                self._switch_by_win_ser(window_name)
            else:  # plain selenium window name
                self.driver.switch_to.window(window_name)
        except NoSuchWindowException:
            raise NoSuchWindowException("Invalid Window ID: %s" % window_name)

    def _switch_by_idx(self, win_index):
        wnd_handlers = self.driver.window_handles
        # Bug fix: the original tested ``len(wnd_handlers) <= win_index``,
        # i.e. it attempted the switch only when the index was OUT of range
        # (raising IndexError) and rejected every valid index.
        if 0 <= win_index < len(wnd_handlers):
            self.driver.switch_to.window(wnd_handlers[win_index])
        else:
            raise NoSuchWindowException("Invalid Window ID: %s" % str(win_index))

    def _switch_by_win_ser(self, window_name):
        if window_name == "win_ser_local":
            wnd_handlers = self.driver.window_handles
            if len(wnd_handlers) > 0:
                self.driver.switch_to.window(wnd_handlers[0])
            else:
                raise NoSuchWindowException("Invalid Window ID: %s" % window_name)
        else:
            # First use of an alias pins it to the newest handle; later
            # uses reuse that pinned handle.
            if window_name not in self.windows:
                self.windows[window_name] = self.driver.window_handles[-1]
            self.driver.switch_to.window(self.windows[window_name])

    def close(self, window_name=None):
        """Close *window_name* (switching to it first) or the current window."""
        if window_name:
            self.switch(window_name)
        self.driver.close()
import os
import re
from time import sleep, time
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import apiritif
class TestRequests(unittest.TestCase):
    """Generated Taurus/apiritif scenario driven through a remote Appium browser."""

    def setUp(self):
        self.vars = {}
        self.template = Template(self.vars)
        self.driver = webdriver.Remote(
            command_executor='http://localhost:4723/wd/hub',
            desired_capabilities={"browserName": "Chrome", "deviceName": "", "platformName": "Android"})
        self.driver.implicitly_wait(3.5)
        self.wnd_mng = WindowManager(self.driver)
        self.frm_mng = FrameManager(self.driver)

    def tearDown(self):
        self.driver.quit()

    def test_requests(self):
        self.driver.implicitly_wait(3.5)
        with apiritif.transaction_logged(self.template('/')):
            self.driver.get(self.template('http://blazedemo.com/'))
            WebDriverWait(self.driver, 3.5).until(
                econd.presence_of_element_located((By.XPATH, self.template("//input[@type='submit']"))),
                'Element "//input[@type=\'submit\']" failed to appear within 3.5s')
            self.assertEqual(self.driver.title, self.template('BlazeDemo'))
            page_body = self.driver.page_source
            pattern = re.compile(r'contained_text')
            self.assertEqual(0, len(re.findall(pattern, page_body)),
                             "Assertion: 'contained_text' found in BODY")
        with apiritif.transaction_logged(self.template('empty')):
            pass
# Utility functions and classes for Taurus Selenium tests
from string import Template as StrTemplate
from selenium.common.exceptions import NoSuchWindowException, NoSuchFrameException
class Template:
    """Callable wrapper around string.Template substitution with stored vars."""

    def __init__(self, variables):
        self.variables = variables

    def apply(self, template):
        # '' + template mirrors the original b''.decode() + template trick:
        # it requires/coerces a text template before substitution.
        return StrTemplate('' + template).safe_substitute(self.variables)

    __call__ = apply

    @staticmethod
    def str_repr(text):
        # Strip the "u" prefix Python 2's repr() would add to unicode strings.
        rendered = repr(text)
        return rendered[1:] if rendered[0] == "u" else rendered
class FrameManager:
    """Wraps frame navigation on a WebDriver with friendlier error messages."""

    def __init__(self, driver):
        self.driver = driver

    def switch(self, frame_name=None):
        """Switch frames by name, "index=N", "relative=top" or "relative=parent".

        :raises NoSuchFrameException: with the offending id in the message.
        """
        try:
            if not frame_name or frame_name == "relative=top":
                # switch_to.default_content() replaces the deprecated
                # switch_to_default_content(), consistent with the other
                # switch_to.* calls below.
                self.driver.switch_to.default_content()
            elif frame_name.startswith("index="):  # frame by relative position
                self.driver.switch_to.frame(int(frame_name.split("=")[1]))
            elif frame_name == "relative=parent":  # parent of the current frame
                self.driver.switch_to.parent_frame()
            else:  # plain selenium frame name / locator
                self.driver.switch_to.frame(frame_name)
        except NoSuchFrameException:
            raise NoSuchFrameException("Invalid Frame ID: %s" % frame_name)
class WindowManager:
    """Window-switching helpers supporting indices and win_ser_* aliases."""

    def __init__(self, driver):
        self.driver = driver
        self.windows = {}  # win_ser_* alias -> pinned window handle

    def switch(self, window_name=None):
        """Switch to a window by handle index, win_ser_* alias, or name."""
        try:
            if not window_name:  # no name: most recently opened window
                self.driver.switch_to.window(self.driver.window_handles[-1])
            elif window_name.isdigit():  # numeric: positional handle index
                self._switch_by_idx(int(window_name))
            elif window_name.startswith("win_ser_"):  # sequential alias mode
                self._switch_by_win_ser(window_name)
            else:  # plain selenium window name
                self.driver.switch_to.window(window_name)
        except NoSuchWindowException:
            raise NoSuchWindowException("Invalid Window ID: %s" % window_name)

    def _switch_by_idx(self, win_index):
        wnd_handlers = self.driver.window_handles
        # Bug fix: the original tested ``len(wnd_handlers) <= win_index``,
        # i.e. it attempted the switch only when the index was OUT of range
        # (raising IndexError) and rejected every valid index.
        if 0 <= win_index < len(wnd_handlers):
            self.driver.switch_to.window(wnd_handlers[win_index])
        else:
            raise NoSuchWindowException("Invalid Window ID: %s" % str(win_index))

    def _switch_by_win_ser(self, window_name):
        if window_name == "win_ser_local":
            wnd_handlers = self.driver.window_handles
            if len(wnd_handlers) > 0:
                self.driver.switch_to.window(wnd_handlers[0])
            else:
                raise NoSuchWindowException("Invalid Window ID: %s" % window_name)
        else:
            # First use of an alias pins it to the newest handle; later
            # uses reuse that pinned handle.
            if window_name not in self.windows:
                self.windows[window_name] = self.driver.window_handles[-1]
            self.driver.switch_to.window(self.windows[window_name])

    def close(self, window_name=None):
        """Close *window_name* (switching to it first) or the current window."""
        if window_name:
            self.switch(window_name)
        self.driver.close()
from .StarList import StarList
from .file_helpers import *
from .daofiles import parse_dao_hdr, write_dao_header, DAO_file_firstline, DAO
from .file_helpers import as_starlist
import pandas as pd
import re
# --- ds9 "circle" region-line parsers --------------------------------------
# Each pattern matches an optional leading +/- include/exclude flag, then
# "circle(x, y ...": pixel coordinates are decimal numbers, WCS coordinates
# are sexagesimal (d:m:s).  The first two variants additionally capture an
# explicit id from a trailing "# ... id=N" comment.
# Pixel coordinates with an explicit id comment.
_ds9_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*).+#.*id *= *(\d+)')
# Sexagesimal WCS coordinates with an explicit id comment.
_ds9_wcs_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*).+#.*id *= *(\d+)')
# Pixel coordinates, no id comment.
_ds9_no_id_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*)')
# Sexagesimal WCS coordinates, no id comment.
_ds9_no_id_wcs_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*)')
# Coordinate-system declaration lines emitted by ds9.
_ds9_system_wcs = re.compile('fk4|fk5|J2000|B1950|ICRS', re.IGNORECASE)
_ds9_system_xy = re.compile('PHYSICAL|IMAGE', re.IGNORECASE)
def read_ds9_regions(file):
    # type: (object) -> StarList
    """
    Reads ds9 region
    :param file: filename or open input stream
    :return: StarList object

    Returned object has columns id, x, y, auto_id.
    Boolean column auto_id indicates whether the id for an item was read from
    file (#id=xxx comment) or generated by this function.
    """
    f, to_close = get_stream(file, 'rt')
    # s = StarList.new()
    data = []        # rows with explicit ids: [id, x/ra, y/dec]
    data_noid = []   # rows without ids: [x/ra, y/dec]
    dao_hdr1 = None  # first DAO header line, held until its second line arrives
    hdr = None
    sys_wcs, sys_xy = (1,2)
    system = None    # detected coordinate system: sys_wcs, sys_xy or None
    for line in f:
        if line[0] == '#':
            if line[1:11] == DAO_file_firstline[:10]:  # dao header found in comment
                dao_hdr1 = line
                continue
            if dao_hdr1 is not None:  # second line of dao header
                hdr = parse_dao_hdr(dao_hdr1, line, '#')
        else:
            # Remember the first coordinate-system declaration encountered.
            if system is None:
                if _ds9_system_wcs.search(line):
                    system = sys_wcs
                elif _ds9_system_xy.search(line):
                    system = sys_xy
                pass
            # Try the four circle patterns from most to least specific.
            m = _ds9_regexp.search(line)
            if m is not None:  # s[id] = (id, x, y)
                data.append([int(m.group(3)), float(m.group(1)), float(m.group(2))])
            else:
                m = _ds9_wcs_regexp.search(line)
                if m is not None:  # s[id] = (id, ra, dec)
                    data.append([int(m.group(3)), str(m.group(1)), str(m.group(2))])
                else:
                    m = _ds9_no_id_regexp.search(line)  # s[?] = (x, y)
                    if m is not None:
                        data_noid.append([float(m.group(1)), float(m.group(2))])
                    else:
                        m = _ds9_no_id_wcs_regexp.search(line)  # s[?] = (ra, dec)
                        if m is not None:
                            data_noid.append([str(m.group(1)), str(m.group(2))])
        # Any line that is not the first DAO header line resets the pairing.
        dao_hdr1 = None
    close_files(to_close)
    # Column names depend on the detected coordinate system.
    if system == sys_wcs:
        s = StarList(data, columns = ['id', 'ra', 'dec'])
        s_noid = StarList(data_noid, columns=['ra', 'dec'])
    else:
        s = StarList(data, columns = ['id', 'x', 'y'])
        s_noid = StarList(data_noid, columns = ['x', 'y'])
    s.index = s['id']
    s['auto_id'] = False
    # Generate sequential ids for circles that carried no id comment,
    # continuing after the largest explicit id.
    if not s_noid.empty:
        id_starts_from = 1 if s.empty else s.id.max() + 1
        ids = range(id_starts_from, id_starts_from + s_noid.stars_number())
        s_noid['id'] = ids
        s_noid.index = ids
        s_noid['auto_id'] = True
        if s.empty:
            s = s_noid
        else:
            s = s.append(s_noid)
    s.DAO_hdr = hdr
    s.DAO_type = DAO.RADEC_FILE if system == sys_wcs else DAO.XY_FILE
    return s
def write_ds9_regions(starlist, filename,
                      color='green', width=1, size=None, font=None, label='{id:.0f}',
                      exclude=None, indexes=None, colors=None, sizes=None, labels=None,
                      color_column=None, size_column=None,
                      comment=None, add_global=None, WCS=False):
    """
    Writes ds9 region file.

    Regions can be visually distinguished by providing additional indexes that
    select rows to receive specific attributes.
    :param StarList starlist: StarList object to dump
    :param str filename: output filename or stream open for writing
    :param str color: default color
    :param int width: default line width
    :param int size: default radius (default 8px or 2")
    :param str font: ds9 font specification e.g. "times 12 bold italic"
    :param str label: format expression for label, use col names
    :param pd.Index exclude: index of disabled regions, if None all are enabled
    :param [pd.Index] indexes: additional indexes to include specific color and size attributes
    :param [str] colors: specific colors for indexes
    :param [int] sizes: specific sizes for indexes
    :param [str] labels: specific labels for indexes
    :param str color_column: column of starlist with color values
    :param str size_column: column of starlist with size values
    :param str add_global: content of additional 'global' if not None
    :param str comment: content of additional comment line if not None
    :param bool or str WCS: If true, columns `ra` and `dec` will be used and coord system set to ICRS
                            If a nonempty string, the string is used as the system description
                            If None, False or '', columns 'x','y' will be used and system set to IMAGE

    Example:
        write_ds9_regions(sl, 'i.reg', color='blue',
                          indexes=[saturated, psf],
                          colors=['yellow', 'red'],
                          sizes=[12, None],
                          labels=[None, 'PDF:{id}'],
                          exclude=faint)
    Generates regions file i.reg of blue circles, radius 8;
    objects present in index saturated will have larger yellow circles,
    objects present in index psf will be red and labeled with prefix PSF:,
    objects present in index faint will be disabled by '-' sign and not displayed by ds9, but can be parsed back.
    """
    # Pick coordinate columns according to the requested system.
    if WCS:
        xcol = 'ra'
        ycol = 'dec'
        starlist = as_starlist(starlist)
    else:
        xcol = 'x'
        ycol = 'y'
        starlist = as_starlist(starlist, updateskycoord=False)
    try:
        (starlist[xcol], starlist[ycol])
    except KeyError as e:
        raise KeyError('No coordinate columns ({},{}) in starlist. Check WCS parameter also'.format(xcol, ycol))
    out, to_close = get_stream(filename, 'w')
    out.write('# Region file format: DS9 version 4.0\n')
    if starlist.DAO_hdr is not None:
        write_dao_header(starlist.DAO_hdr, out, '#')
    if comment is not None:
        out.write('#{}\n'.format(comment))
    # Global defaults, one directive per provided value.
    if color is not None:
        out.write('global color={}\n'.format(color))
    if width is not None:
        out.write('global width={}\n'.format(width))
    if font is not None:
        out.write('global font={}\n'.format(font))
    if add_global is not None:
        out.write('global {}\n'.format(add_global))
    # Coordinate-system declaration line.
    if not WCS:
        out.write('image\n')
    else:
        system = WCS if isinstance(WCS, str) else 'icrs'
        out.write(system + '\n')
    for idx, row in starlist.iterrows():
        if exclude is not None and idx in exclude:
            out.write('-')  # leading '-' marks the region as excluded in ds9
        radius = size if size is not None else ('2"' if WCS else 8)
        region_label = label.format(**row)
        color_attr = ''
        if size_column is not None:
            radius = row[size_column]
        if color_column is not None:
            color_attr = ' color=' + row[color_column]
        # Per-index overrides; later indexes win over earlier ones.
        if indexes is not None:
            for k, index in enumerate(indexes):
                if idx in index:
                    if sizes and sizes[k] is not None:
                        radius = sizes[k]
                    if colors and colors[k] is not None:
                        color_attr = ' color=' + colors[k]
                    if labels and labels[k] is not None:
                        region_label = labels[k].format(**row)
        out.write('circle({},{},{}) #{} text="{}" id={:d}\n'.format(
            row[xcol], row[ycol], radius, color_attr, region_label, idx))
    close_files(to_close)
from .file_helpers import *
from .daofiles import parse_dao_hdr, write_dao_header, DAO_file_firstline, DAO
from .file_helpers import as_starlist
import pandas as pd
import re
# --- ds9 "circle" region-line parsers --------------------------------------
# Each pattern matches an optional leading +/- include/exclude flag, then
# "circle(x, y ...": pixel coordinates are decimal numbers, WCS coordinates
# are sexagesimal (d:m:s).  The first two variants additionally capture an
# explicit id from a trailing "# ... id=N" comment.
# Pixel coordinates with an explicit id comment.
_ds9_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*).+#.*id *= *(\d+)')
# Sexagesimal WCS coordinates with an explicit id comment.
_ds9_wcs_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*).+#.*id *= *(\d+)')
# Pixel coordinates, no id comment.
_ds9_no_id_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+[.]?\d*) *[, ] *([+-]?\d+[.]?\d*)')
# Sexagesimal WCS coordinates, no id comment.
_ds9_no_id_wcs_regexp = re.compile(
    r'[+-]? *circle[( ] *([+-]?\d+:\d+:\d+[.]?\d*) *[, ] *([+-]?\d+:\d+:\d+[.]?\d*)')
# Coordinate-system declaration lines emitted by ds9.
_ds9_system_wcs = re.compile('fk4|fk5|J2000|B1950|ICRS', re.IGNORECASE)
_ds9_system_xy = re.compile('PHYSICAL|IMAGE', re.IGNORECASE)
def read_ds9_regions(file):
    # type: (object) -> StarList
    """
    Reads ds9 region
    :param file: filename or open input stream
    :return: StarList object

    Returned object has columns id, x, y, auto_id.
    Boolean column auto_id indicates whether the id for an item was read from
    file (#id=xxx comment) or generated by this function.
    """
    f, to_close = get_stream(file, 'rt')
    # s = StarList.new()
    data = []        # rows with explicit ids: [id, x/ra, y/dec]
    data_noid = []   # rows without ids: [x/ra, y/dec]
    dao_hdr1 = None  # first DAO header line, held until its second line arrives
    hdr = None
    sys_wcs, sys_xy = (1,2)
    system = None    # detected coordinate system: sys_wcs, sys_xy or None
    for line in f:
        if line[0] == '#':
            if line[1:11] == DAO_file_firstline[:10]:  # dao header found in comment
                dao_hdr1 = line
                continue
            if dao_hdr1 is not None:  # second line of dao header
                hdr = parse_dao_hdr(dao_hdr1, line, '#')
        else:
            # Remember the first coordinate-system declaration encountered.
            if system is None:
                if _ds9_system_wcs.search(line):
                    system = sys_wcs
                elif _ds9_system_xy.search(line):
                    system = sys_xy
                pass
            # Try the four circle patterns from most to least specific.
            m = _ds9_regexp.search(line)
            if m is not None:  # s[id] = (id, x, y)
                data.append([int(m.group(3)), float(m.group(1)), float(m.group(2))])
            else:
                m = _ds9_wcs_regexp.search(line)
                if m is not None:  # s[id] = (id, ra, dec)
                    data.append([int(m.group(3)), str(m.group(1)), str(m.group(2))])
                else:
                    m = _ds9_no_id_regexp.search(line)  # s[?] = (x, y)
                    if m is not None:
                        data_noid.append([float(m.group(1)), float(m.group(2))])
                    else:
                        m = _ds9_no_id_wcs_regexp.search(line)  # s[?] = (ra, dec)
                        if m is not None:
                            data_noid.append([str(m.group(1)), str(m.group(2))])
        # Any line that is not the first DAO header line resets the pairing.
        dao_hdr1 = None
    close_files(to_close)
    # Column names depend on the detected coordinate system.
    if system == sys_wcs:
        s = StarList(data, columns = ['id', 'ra', 'dec'])
        s_noid = StarList(data_noid, columns=['ra', 'dec'])
    else:
        s = StarList(data, columns = ['id', 'x', 'y'])
        s_noid = StarList(data_noid, columns = ['x', 'y'])
    s.index = s['id']
    s['auto_id'] = False
    # Generate sequential ids for circles that carried no id comment,
    # continuing after the largest explicit id.
    if not s_noid.empty:
        id_starts_from = 1 if s.empty else s.id.max() + 1
        ids = range(id_starts_from, id_starts_from + s_noid.stars_number())
        s_noid['id'] = ids
        s_noid.index = ids
        s_noid['auto_id'] = True
        if s.empty:
            s = s_noid
        else:
            s = s.append(s_noid)
    s.DAO_hdr = hdr
    s.DAO_type = DAO.RADEC_FILE if system == sys_wcs else DAO.XY_FILE
    return s
def write_ds9_regions(starlist, filename,
                      color='green', width=1, size=None, font=None, label='{id:.0f}',
                      exclude=None, indexes=None, colors=None, sizes=None, labels=None,
                      color_column=None, size_column=None,
                      comment=None, add_global=None, WCS=False):
    """
    Writes ds9 region file.

    Regions can be visually distinguished by providing additional indexes that
    select rows to receive specific attributes.
    :param StarList starlist: StarList object to dump
    :param str filename: output filename or stream open for writing
    :param str color: default color
    :param int width: default line width
    :param int size: default radius (default 8px or 2")
    :param str font: ds9 font specification e.g. "times 12 bold italic"
    :param str label: format expression for label, use col names
    :param pd.Index exclude: index of disabled regions, if None all are enabled
    :param [pd.Index] indexes: additional indexes to include specific color and size attributes
    :param [str] colors: specific colors for indexes
    :param [int] sizes: specific sizes for indexes
    :param [str] labels: specific labels for indexes
    :param str color_column: column of starlist with color values
    :param str size_column: column of starlist with size values
    :param str add_global: content of additional 'global' if not None
    :param str comment: content of additional comment line if not None
    :param bool or str WCS: If true, columns `ra` and `dec` will be used and coord system set to ICRS
                            If a nonempty string, the string is used as the system description
                            If None, False or '', columns 'x','y' will be used and system set to IMAGE

    Example:
        write_ds9_regions(sl, 'i.reg', color='blue',
                          indexes=[saturated, psf],
                          colors=['yellow', 'red'],
                          sizes=[12, None],
                          labels=[None, 'PDF:{id}'],
                          exclude=faint)
    Generates regions file i.reg of blue circles, radius 8;
    objects present in index saturated will have larger yellow circles,
    objects present in index psf will be red and labeled with prefix PSF:,
    objects present in index faint will be disabled by '-' sign and not displayed by ds9, but can be parsed back.
    """
    # Pick coordinate columns according to the requested system.
    if WCS:
        xcol = 'ra'
        ycol = 'dec'
        starlist = as_starlist(starlist)
    else:
        xcol = 'x'
        ycol = 'y'
        starlist = as_starlist(starlist, updateskycoord=False)
    try:
        (starlist[xcol], starlist[ycol])
    except KeyError as e:
        raise KeyError('No coordinate columns ({},{}) in starlist. Check WCS parameter also'.format(xcol, ycol))
    out, to_close = get_stream(filename, 'w')
    out.write('# Region file format: DS9 version 4.0\n')
    if starlist.DAO_hdr is not None:
        write_dao_header(starlist.DAO_hdr, out, '#')
    if comment is not None:
        out.write('#{}\n'.format(comment))
    # Global defaults, one directive per provided value.
    if color is not None:
        out.write('global color={}\n'.format(color))
    if width is not None:
        out.write('global width={}\n'.format(width))
    if font is not None:
        out.write('global font={}\n'.format(font))
    if add_global is not None:
        out.write('global {}\n'.format(add_global))
    # Coordinate-system declaration line.
    if not WCS:
        out.write('image\n')
    else:
        system = WCS if isinstance(WCS, str) else 'icrs'
        out.write(system + '\n')
    for idx, row in starlist.iterrows():
        if exclude is not None and idx in exclude:
            out.write('-')  # leading '-' marks the region as excluded in ds9
        radius = size if size is not None else ('2"' if WCS else 8)
        region_label = label.format(**row)
        color_attr = ''
        if size_column is not None:
            radius = row[size_column]
        if color_column is not None:
            color_attr = ' color=' + row[color_column]
        # Per-index overrides; later indexes win over earlier ones.
        if indexes is not None:
            for k, index in enumerate(indexes):
                if idx in index:
                    if sizes and sizes[k] is not None:
                        radius = sizes[k]
                    if colors and colors[k] is not None:
                        color_attr = ' color=' + colors[k]
                    if labels and labels[k] is not None:
                        region_label = labels[k].format(**row)
        out.write('circle({},{},{}) #{} text="{}" id={:d}\n'.format(
            row[xcol], row[ycol], radius, color_attr, region_label, idx))
    close_files(to_close)